From c26f215a55718d31d8d0778f750325c0dcdb915d Mon Sep 17 00:00:00 2001
From: Dimitri Staessens
Date: Wed, 6 Aug 2025 12:29:03 +0200
Subject: ipcpd: Add protocol debug option for link-state

The link-state routing component now has a protocol debugging option,
enabled at build time by setting the DEBUG_PROTO_LS flag in a debug
build.

Example output (sender and receiver):

LSU [81:8a:1f:9b -- 50:c6:2d:03 seq: 000000000] --> 50:c6:2d:03
LSU [81:8a:1f:9b -- 50:c6:2d:03 seq: 000000000] <-- 50:c6:2d:03

In larger networks, forwarded LSUs are marked as such:

LSU [ee:53:ae:f8 -- e5:33:e4:8d seq: 000000006] --> e5:33:e4:8d [forwarded]

This also aligns the address printing with the Ethernet-like
formatting already used by the DHT component.

Small code cleanup in the graph component.

Note: eventually the link-state dissemination should move to a
broadcast layer instead of the link-state component doing the
forwarding itself.

Signed-off-by: Dimitri Staessens
Signed-off-by: Sander Vrijders
---
 src/ipcpd/unicast/CMakeLists.txt           |   4 +
 src/ipcpd/unicast/dir/dht.c                |   2 +-
 src/ipcpd/unicast/dir/tests/CMakeLists.txt |   3 -
 src/ipcpd/unicast/dt.c                     |   4 +-
 src/ipcpd/unicast/routing/graph.c          | 156 ++++++++++++++---------------
 src/ipcpd/unicast/routing/link-state.c     | 141 +++++++++++++++++----------
 6 files changed, 172 insertions(+), 138 deletions(-)
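A minimal standalone sketch (not part of the patch) of how the trace
lines quoted above are put together. ADDR_FMT32/ADDR_VAL32 here are
illustrative stand-ins for the project's own address-formatting macros;
the real definitions live in the common headers.

/* Reproduces the sender-side LSU trace line from the commit message. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in: print the low 32 bits as 4 hex octets. */
#define ADDR_FMT32 "%02x:%02x:%02x:%02x"
#define ADDR_VAL32(a)                                   \
        (uint8_t) (*(a) >> 24), (uint8_t) (*(a) >> 16), \
        (uint8_t) (*(a) >> 8), (uint8_t) (*(a) & 0xFF)

#define LSU_FMT "LSU [" ADDR_FMT32 " -- " ADDR_FMT32 " seq: %09" PRIu64 "]"
#define LSU_VAL(src, dst, seqno) ADDR_VAL32(&src), ADDR_VAL32(&dst), seqno

int main(void)
{
        uint64_t src   = 0x818a1f9b;
        uint64_t dst   = 0x50c62d03;
        uint64_t seqno = 0;

        /* The component logs this via log_proto(); plain printf here. */
        printf(LSU_FMT " --> " ADDR_FMT32 "\n",
               LSU_VAL(src, dst, seqno), ADDR_VAL32(&dst));

        return 0;
}

String-literal concatenation folds the macros into a single format
string at compile time, so the debug path costs one log call per LSU.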
diff --git a/src/ipcpd/unicast/CMakeLists.txt b/src/ipcpd/unicast/CMakeLists.txt
index 7d2a765b..2df0bae0 100644
--- a/src/ipcpd/unicast/CMakeLists.txt
+++ b/src/ipcpd/unicast/CMakeLists.txt
@@ -15,6 +15,10 @@ include_directories(${CMAKE_BINARY_DIR}/include)
 
 set(IPCP_UNICAST_TARGET ipcpd-unicast CACHE INTERNAL "")
 set(IPCP_UNICAST_MPL 10000 CACHE STRING
     "Default maximum packet lifetime for the unicast IPCP, in ms")
+set(DEBUG_PROTO_DHT FALSE CACHE BOOL
+    "Add DHT protocol message output to debug logging")
+set(DEBUG_PROTO_LS FALSE CACHE BOOL
+    "Add link state protocol message output to debug logging")
 
 protobuf_generate_c(DHT_PROTO_SRCS DHT_PROTO_HDRS dir/dht.proto)
diff --git a/src/ipcpd/unicast/dir/dht.c b/src/ipcpd/unicast/dir/dht.c
index c7205505..4a243059 100644
--- a/src/ipcpd/unicast/dir/dht.c
+++ b/src/ipcpd/unicast/dir/dht.c
@@ -2863,7 +2863,7 @@ static void do_dht_kv_store(const dht_store_msg_t * store)
         tm = gmtime(&exp);
         strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
 
-        log_info(KV_FMT " Stored value until %s.", KV_VAL(key, val), tmstr);
+        log_dbg(KV_FMT " Stored value until %s.", KV_VAL(key, val), tmstr);
 }
 
 static dht_msg_t * do_dht_kv_find_node_req(const dht_find_req_msg_t * req)
diff --git a/src/ipcpd/unicast/dir/tests/CMakeLists.txt b/src/ipcpd/unicast/dir/tests/CMakeLists.txt
index 41a18c27..f62ed993 100644
--- a/src/ipcpd/unicast/dir/tests/CMakeLists.txt
+++ b/src/ipcpd/unicast/dir/tests/CMakeLists.txt
@@ -15,9 +15,6 @@ include_directories(${CMAKE_BINARY_DIR}/include)
 get_filename_component(PARENT_PATH ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY)
 get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)
 
-set(DEBUG_PROTO_DHT FALSE CACHE BOOL
-    "Add DHT protocol message output to debug logging")
-
 create_test_sourcelist(${PARENT_DIR}_tests test_suite.c
   # Add new tests here
   dht_test.c
diff --git a/src/ipcpd/unicast/dt.c b/src/ipcpd/unicast/dt.c
index a4e3be36..39b0025f 100644
--- a/src/ipcpd/unicast/dt.c
+++ b/src/ipcpd/unicast/dt.c
@@ -313,7 +313,6 @@ static int dt_rib_readdir(char *** buf)
                 strcpy((*buf)[idx++], entry);
         }
 
-        assert((size_t) idx == dt.n_flows);
 
 no_flows:
         pthread_rwlock_unlock(&dt.lock);
@@ -323,10 +322,9 @@ static int dt_rib_readdir(char *** buf)
         while (idx-- > 0)
                 free((*buf)[idx]);
         free(*buf);
-fail_entries:
+ fail_entries:
         pthread_rwlock_unlock(&dt.lock);
         return -ENOMEM;
-
 #else
         (void) buf;
         return 0;
diff --git a/src/ipcpd/unicast/routing/graph.c b/src/ipcpd/unicast/routing/graph.c
index 32f3e6fb..32442dad 100644
--- a/src/ipcpd/unicast/routing/graph.c
+++ b/src/ipcpd/unicast/routing/graph.c
@@ -57,8 +57,11 @@ struct edge {
 };
 
 struct graph {
-        size_t           nr_vertices;
-        struct list_head vertices;
+        struct {
+                struct list_head list;
+                size_t           len;
+        } vertices;
+
         pthread_mutex_t  lock;
 };
@@ -67,7 +70,7 @@ static struct edge * find_edge_by_addr(struct vertex * vertex,
 {
         struct list_head * p;
 
-        assert(vertex);
+        assert(vertex != NULL);
 
         list_for_each(p, &vertex->edges) {
                 struct edge * e = list_entry(p, struct edge, next);
@@ -85,7 +88,7 @@ static struct vertex * find_vertex_by_addr(struct graph * graph,
 
         assert(graph);
 
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 struct vertex * e = list_entry(p, struct vertex, next);
                 if (e->addr == addr)
                         return e;
@@ -99,8 +102,8 @@ static struct edge * add_edge(struct vertex * vertex,
 {
         struct edge * edge;
 
-        assert(vertex);
-        assert(nb);
+        assert(vertex != NULL);
+        assert(nb != NULL);
 
         edge = malloc(sizeof(*edge));
         if (edge == NULL)
@@ -139,7 +142,7 @@ static struct vertex * add_vertex(struct graph * graph,
         vertex->addr = addr;
 
         /* Keep them ordered on address. */
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 struct vertex * v = list_entry(p, struct vertex, next);
                 if (v->addr > addr)
                         break;
@@ -151,13 +154,13 @@ static struct vertex * add_vertex(struct graph * graph,
         list_add_tail(&vertex->next, p);
 
         /* Increase the index of the vertices to the right. */
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &vertex->next) {
                 struct vertex * v = list_entry(p, struct vertex, next);
                 if (v->addr > addr)
                         v->index++;
         }
 
-        graph->nr_vertices++;
+        ++graph->vertices.len;
 
         return vertex;
 }
@@ -168,13 +171,13 @@ static void del_vertex(struct graph * graph,
         struct list_head * p;
         struct list_head * h;
 
-        assert(graph);
-        assert(vertex);
+        assert(graph != NULL);
+        assert(vertex != NULL);
 
         list_del(&vertex->next);
 
         /* Decrease the index of the vertices to the right. */
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 struct vertex * v = list_entry(p, struct vertex, next);
                 if (v->addr > vertex->addr)
                         v->index--;
@@ -187,7 +190,7 @@ static void del_vertex(struct graph * graph,
 
         free(vertex);
 
-        graph->nr_vertices--;
+        --graph->vertices.len;
 }
 
 struct graph * graph_create(void)
@@ -203,8 +206,8 @@ struct graph * graph_create(void)
                 return NULL;
         }
 
-        graph->nr_vertices = 0;
-        list_head_init(&graph->vertices);
+        graph->vertices.len = 0;
+        list_head_init(&graph->vertices.list);
 
         return graph;
 }
@@ -218,7 +221,7 @@ void graph_destroy(struct graph * graph)
 
         pthread_mutex_lock(&graph->lock);
 
-        list_for_each_safe(p, n, &graph->vertices) {
+        list_for_each_safe(p, n, &graph->vertices.list) {
                 struct vertex * e = list_entry(p, struct vertex, next);
                 del_vertex(graph, e);
         }
@@ -227,6 +230,8 @@ void graph_destroy(struct graph * graph)
 
         pthread_mutex_destroy(&graph->lock);
 
+        assert(graph->vertices.len == 0);
+
         free(graph);
 }
@@ -240,63 +245,35 @@ int graph_update_edge(struct graph * graph,
         struct vertex * nb;
         struct edge *   nb_e;
 
-        assert(graph);
+        assert(graph != NULL);
 
         pthread_mutex_lock(&graph->lock);
 
         v = find_vertex_by_addr(graph, s_addr);
-        if (v == NULL) {
-                v = add_vertex(graph, s_addr);
-                if (v == NULL) {
-                        pthread_mutex_unlock(&graph->lock);
-                        log_err("Failed to add vertex.");
-                        return -ENOMEM;
-                }
+        if (v == NULL && ((v = add_vertex(graph, s_addr)) == NULL)) {
+                log_err("Failed to add src vertex.");
+                goto fail_add_s;
         }
 
         nb = find_vertex_by_addr(graph, d_addr);
-        if (nb == NULL) {
-                nb = add_vertex(graph, d_addr);
-                if (nb == NULL) {
-                        if (list_is_empty(&v->edges))
-                                del_vertex(graph, v);
-                        pthread_mutex_unlock(&graph->lock);
-                        log_err("Failed to add vertex.");
-                        return -ENOMEM;
-                }
+        if (nb == NULL && ((nb = add_vertex(graph, d_addr)) == NULL)) {
+                log_err("Failed to add dst vertex.");
+                goto fail_add_d;
         }
 
         e = find_edge_by_addr(v, d_addr);
-        if (e == NULL) {
-                e = add_edge(v, nb);
-                if (e == NULL) {
-                        if (list_is_empty(&v->edges))
-                                del_vertex(graph, v);
-                        if (list_is_empty(&nb->edges))
-                                del_vertex(graph, nb);
-                        pthread_mutex_unlock(&graph->lock);
-                        log_err("Failed to add edge.");
-                        return -ENOMEM;
-                }
+        if (e == NULL && ((e = add_edge(v, nb)) == NULL)) {
+                log_err("Failed to add edge to dst.");
+                goto fail_add_edge_d;
         }
 
         e->announced++;
         e->qs = qs;
 
         nb_e = find_edge_by_addr(nb, s_addr);
-        if (nb_e == NULL) {
-                nb_e = add_edge(nb, v);
-                if (nb_e == NULL) {
-                        if (--e->announced == 0)
-                                del_edge(e);
-                        if (list_is_empty(&v->edges))
-                                del_vertex(graph, v);
-                        if (list_is_empty(&nb->edges))
-                                del_vertex(graph, nb);
-                        pthread_mutex_unlock(&graph->lock);
-                        log_err("Failed to add edge.");
-                        return -ENOMEM;
-                }
+        if (nb_e == NULL && ((nb_e = add_edge(nb, v)) == NULL)) {
+                log_err("Failed to add edge to src.");
+                goto fail_add_edge_s;
         }
 
         nb_e->announced++;
@@ -305,6 +282,19 @@ int graph_update_edge(struct graph * graph,
         pthread_mutex_unlock(&graph->lock);
 
         return 0;
+
+ fail_add_edge_s:
+        if (--e->announced == 0)
+                del_edge(e);
+ fail_add_edge_d:
+        if (list_is_empty(&nb->edges))
+                del_vertex(graph, nb);
+ fail_add_d:
+        if (list_is_empty(&v->edges))
+                del_vertex(graph, v);
+ fail_add_s:
+        pthread_mutex_unlock(&graph->lock);
+        return -ENOMEM;
 }
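The rewrite above collapses four copies of the unwind logic into a
single goto ladder: each label undoes exactly the steps that succeeded
before the failure, in reverse order. In isolation the pattern looks
like this (a generic sketch with hypothetical acquire()/release()
helpers, not code from this tree):

#include <stdlib.h>

struct res { int dummy; };

struct res * acquire(void) { return malloc(sizeof(struct res)); }
void release(struct res * r) { free(r); }

int transaction(void)
{
        struct res * a;
        struct res * b;
        struct res * c;

        a = acquire();
        if (a == NULL)
                goto fail_a;

        b = acquire();
        if (b == NULL)
                goto fail_b;

        c = acquire();
        if (c == NULL)
                goto fail_c;

        /* ... use a, b and c ... */

        release(c);
        release(b);
        release(a);
        return 0;

 fail_c:                 /* c failed: b and a are still held */
        release(b);
 fail_b:                 /* falls through: only a is still held */
        release(a);
 fail_a:
        return -1;
}

Every error path is a single goto, and the unwind code exists exactly
once, which is what removes the repeated del_vertex()/del_edge()
blocks in graph_update_edge().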
@@ -322,30 +312,26 @@ int graph_del_edge(struct graph * graph,
         v = find_vertex_by_addr(graph, s_addr);
         if (v == NULL) {
-                pthread_mutex_unlock(&graph->lock);
-                log_err("No such source vertex.");
-                return -1;
+                log_err("Failed to find src vertex.");
+                goto fail;
         }
 
         nb = find_vertex_by_addr(graph, d_addr);
         if (nb == NULL) {
-                pthread_mutex_unlock(&graph->lock);
                 log_err("No such destination vertex.");
-                return -1;
+                goto fail;
         }
 
         e = find_edge_by_addr(v, d_addr);
         if (e == NULL) {
-                pthread_mutex_unlock(&graph->lock);
                 log_err("No such source edge.");
-                return -1;
+                goto fail;
         }
 
         nb_e = find_edge_by_addr(nb, s_addr);
         if (nb_e == NULL) {
-                pthread_mutex_unlock(&graph->lock);
                 log_err("No such destination edge.");
-                return -1;
+                goto fail;
         }
 
         if (--e->announced == 0)
@@ -362,6 +348,10 @@ int graph_del_edge(struct graph * graph,
         pthread_mutex_unlock(&graph->lock);
 
         return 0;
+
+ fail:
+        pthread_mutex_unlock(&graph->lock);
+        return -1;
 }
@@ -381,7 +371,7 @@ static int get_min_vertex(struct graph * graph,
 
         *v = NULL;
 
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 if (!used[i] && dist[i] < min) {
                         min = dist[i];
                         index = i;
@@ -413,24 +403,24 @@ static int dijkstra(struct graph * graph,
         assert(nhops);
         assert(dist);
 
-        *nhops = malloc(sizeof(**nhops) * graph->nr_vertices);
+        *nhops = malloc(sizeof(**nhops) * graph->vertices.len);
         if (*nhops == NULL)
                 goto fail_pnhops;
 
-        *dist = malloc(sizeof(**dist) * graph->nr_vertices);
+        *dist = malloc(sizeof(**dist) * graph->vertices.len);
         if (*dist == NULL)
                 goto fail_pdist;
 
-        used = malloc(sizeof(*used) * graph->nr_vertices);
+        used = malloc(sizeof(*used) * graph->vertices.len);
         if (used == NULL)
                 goto fail_used;
 
         /* Init the data structures */
-        memset(used, 0, sizeof(*used) * graph->nr_vertices);
-        memset(*nhops, 0, sizeof(**nhops) * graph->nr_vertices);
-        memset(*dist, 0, sizeof(**dist) * graph->nr_vertices);
+        memset(used, 0, sizeof(*used) * graph->vertices.len);
+        memset(*nhops, 0, sizeof(**nhops) * graph->vertices.len);
+        memset(*dist, 0, sizeof(**dist) * graph->vertices.len);
 
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 v = list_entry(p, struct vertex, next);
                 (*dist)[i++] = (v->addr == src) ? 0 : INT_MAX;
         }
@@ -527,7 +517,7 @@ static int graph_routing_table_simple(struct graph * graph,
         assert(dist);
 
         /* We need at least 2 vertices for a table */
-        if (graph->nr_vertices < 2)
+        if (graph->vertices.len < 2)
                 goto fail_vertices;
 
         if (dijkstra(graph, s_addr, &nhops, dist))
@@ -536,7 +526,7 @@ static int graph_routing_table_simple(struct graph * graph,
         list_head_init(table);
 
         /* Now construct the routing table from the nhops. */
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 v = list_entry(p, struct vertex, next);
 
                 /* This is the src */
@@ -634,7 +624,7 @@ static int graph_routing_table_lfa(struct graph * graph,
                 addrs[j] = -1;
         }
 
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 v = list_entry(p, struct vertex, next);
 
                 if (v->addr != s_addr)
@@ -660,7 +650,7 @@ static int graph_routing_table_lfa(struct graph * graph,
         }
 
         /* Loop though all nodes to see if we have a LFA for them. */
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 v = list_entry(p, struct vertex, next);
 
                 if (v->addr == s_addr)
@@ -717,14 +707,14 @@ static int graph_routing_table_ecmp(struct graph * graph,
         assert(graph);
         assert(dist);
 
-        if (graph-> nr_vertices < 2)
+        if (graph->vertices.len < 2)
                 goto fail_vertices;
 
-        forwarding = malloc(sizeof(*forwarding) * graph->nr_vertices);
+        forwarding = malloc(sizeof(*forwarding) * graph->vertices.len);
         if (forwarding == NULL)
                 goto fail_vertices;
 
-        for (i = 0; i < graph->nr_vertices; ++i)
+        for (i = 0; i < graph->vertices.len; ++i)
                 list_head_init(&forwarding[i]);
 
         if (dijkstra(graph, s_addr, &nhops, dist))
@@ -745,7 +735,7 @@ static int graph_routing_table_ecmp(struct graph * graph,
 
                 free(nhops);
 
-                list_for_each(h, &graph->vertices) {
+                list_for_each(h, &graph->vertices.list) {
                         v = list_entry(h, struct vertex, next);
                         if (tmp_dist[v->index] + 1 == (*dist)[v->index]) {
                                 n = malloc(sizeof(*n));
@@ -763,7 +753,7 @@ static int graph_routing_table_ecmp(struct graph * graph,
         list_head_init(table);
 
         i = 0;
-        list_for_each(p, &graph->vertices) {
+        list_for_each(p, &graph->vertices.list) {
                 v = list_entry(p, struct vertex, next);
                 if (v->addr == s_addr) {
                         ++i;
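The vertices.{list,len} grouping above keeps the counter next to the
list it describes, so every use site reads vertices.list or
vertices.len, and graph_destroy() can assert that teardown really
emptied the graph. A minimal sketch of the same idiom; list_head here
is a hypothetical stand-in for the project's intrusive list type:

#include <assert.h>
#include <stddef.h>

struct list_head {
        struct list_head * nxt;
        struct list_head * prv;
};

struct graph {
        struct {
                struct list_head list;  /* head of the vertex list  */
                size_t           len;   /* number of linked entries */
        } vertices;
};

void graph_init(struct graph * g)
{
        /* An empty circular list points back at its own head. */
        g->vertices.list.nxt = &g->vertices.list;
        g->vertices.list.prv = &g->vertices.list;
        g->vertices.len      = 0;
}

void graph_fini(struct graph * g)
{
        /* Mirrors the new assert in graph_destroy(). */
        assert(g->vertices.len == 0);
        (void) g;
}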
diff --git a/src/ipcpd/unicast/routing/link-state.c b/src/ipcpd/unicast/routing/link-state.c
index fa6b9f4a..7ad40df7 100644
--- a/src/ipcpd/unicast/routing/link-state.c
+++ b/src/ipcpd/unicast/routing/link-state.c
@@ -65,6 +65,12 @@
 #define CLOCK_REALTIME_COARSE CLOCK_REALTIME
 #endif
 
+#define LINK_FMT ADDR_FMT32 " -- " ADDR_FMT32
+#define LINK_VAL(src, dst) ADDR_VAL32(&src), ADDR_VAL32(&dst)
+
+#define LSU_FMT "LSU [" ADDR_FMT32 " -- " ADDR_FMT32 " seq: %09" PRIu64 "]"
+#define LSU_VAL(src, dst, seqno) ADDR_VAL32(&src), ADDR_VAL32(&dst), seqno
+
 struct lsa {
         uint64_t d_addr;
         uint64_t s_addr;
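struct lsa is also the on-wire LSU layout: three packed 64-bit fields,
converted with the project's hton64()/ntoh64() helpers so they travel
in network byte order. A send-side sketch; hton64() below is a
portable stand-in for the helper in the common headers:

#include <stdint.h>
#include <string.h>

/* Stand-in: encode a host uint64_t as big-endian, any host order. */
uint64_t hton64(uint64_t h)
{
        uint8_t  b[8];
        uint64_t n;
        int      i;

        for (i = 0; i < 8; i++)         /* most significant byte first */
                b[i] = (uint8_t) (h >> (56 - 8 * i));

        memcpy(&n, b, sizeof(n));
        return n;
}

struct lsa {
        uint64_t d_addr;
        uint64_t s_addr;
        uint64_t seqno;
} __attribute__((packed));

/* Fill an LSU ready for flow_write(fd, &lsm, sizeof(lsm)). */
void lsa_encode(struct lsa * lsm,
                uint64_t     src,
                uint64_t     dst,
                uint64_t     seqno)
{
        lsm->s_addr = hton64(src);
        lsm->d_addr = hton64(dst);
        lsm->seqno  = hton64(seqno);
}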
"dt" : "mgmt", addr); - if (el->fd != fd) { - log_warn("Existing neighbor assigned new fd."); - el->fd = fd; - } - pthread_rwlock_unlock(&ls.db_lock); - return -EPERM; - } - if (addr > el->addr) break; + if (el->addr != addr || el->type != type) + continue; + + log_dbg("Already know %s neighbor " ADDR_FMT32 ".", + type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr)); + if (el->fd != fd) { + log_warn("Existing neighbor assigned new fd."); + el->fd = fd; + } + pthread_rwlock_unlock(&ls.db_lock); + return -EPERM; } nb = malloc(sizeof(*nb)); @@ -345,8 +351,8 @@ static int lsdb_add_nb(uint64_t addr, ++ls.nbs_len; - log_dbg("Type %s neighbor %" PRIu64 " added.", - nb->type == NB_DT ? "dt" : "mgmt", addr); + log_dbg("Type %s neighbor " ADDR_FMT32 " added.", + nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr)); pthread_rwlock_unlock(&ls.db_lock); @@ -363,15 +369,16 @@ static int lsdb_del_nb(uint64_t addr, list_for_each_safe(p, h, &ls.nbs) { struct nb * nb = list_entry(p, struct nb, next); - if (nb->addr == addr && nb->fd == fd) { - list_del(&nb->next); - --ls.nbs_len; - pthread_rwlock_unlock(&ls.db_lock); - log_dbg("Type %s neighbor %" PRIu64 " deleted.", - nb->type == NB_DT ? "dt" : "mgmt", addr); - free(nb); - return 0; - } + if (nb->addr != addr || nb->fd != fd) + continue; + + list_del(&nb->next); + --ls.nbs_len; + pthread_rwlock_unlock(&ls.db_lock); + log_dbg("Type %s neighbor " ADDR_FMT32 " deleted.", + nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr)); + free(nb); + return 0; } pthread_rwlock_unlock(&ls.db_lock); @@ -582,8 +589,18 @@ static void send_lsm(uint64_t src, list_for_each(p, &ls.nbs) { struct nb * nb = list_entry(p, struct nb, next); - if (nb->type == NB_MGMT) - flow_write(nb->fd, &lsm, sizeof(lsm)); + if (nb->type != NB_MGMT) + continue; + + if (flow_write(nb->fd, &lsm, sizeof(lsm)) < 0) + log_err("Failed to send LSM to " ADDR_FMT32, + ADDR_VAL32(&nb->addr)); +#ifdef DEBUG_PROTO_LS + else + log_proto(LSU_FMT " --> " ADDR_FMT32, + LSU_VAL(src, dst, seqno), + ADDR_VAL32(&nb->addr)); +#endif } } @@ -649,10 +666,10 @@ static void * lsupdate(void * o) list_for_each_safe(p, h, &ls.db) { struct adjacency * adj; adj = list_entry(p, struct adjacency, next); - if (now.tv_sec - adj->stamp > LS_TIMEO) { + if (now.tv_sec > adj->stamp + LS_TIMEO) { list_del(&adj->next); - log_dbg("%" PRIu64 " - %" PRIu64" timed out.", - adj->src, adj->dst); + log_dbg(LINK_FMT " timed out.", + LINK_VAL(adj->src, adj->dst)); if (graph_del_edge(ls.graph, adj->src, adj->dst)) log_err("Failed to del edge."); @@ -701,15 +718,36 @@ static void forward_lsm(uint8_t * buf, int in_fd) { struct list_head * p; +#ifdef DEBUG_PROTO_LS + struct lsa lsm; + + assert(buf); + assert(len >= sizeof(struct lsa)); + memcpy(&lsm, buf, sizeof(lsm)); + + lsm.s_addr = ntoh64(lsm.s_addr); + lsm.d_addr = ntoh64(lsm.d_addr); + lsm.seqno = ntoh64(lsm.seqno); +#endif pthread_rwlock_rdlock(&ls.db_lock); pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); list_for_each(p, &ls.nbs) { struct nb * nb = list_entry(p, struct nb, next); - if (nb->type == NB_MGMT && nb->fd != in_fd) - flow_write(nb->fd, buf, len); + if (nb->type != NB_MGMT || nb->fd == in_fd) + continue; + + if (flow_write(nb->fd, buf, len) < 0) + log_err("Failed to forward LSM to " ADDR_FMT32, + ADDR_VAL32(&nb->addr)); +#ifdef DEBUG_PROTO_LS + else + log_proto(LSU_FMT " --> " ADDR_FMT32 " [forwarded]", + LSU_VAL(lsm.s_addr, lsm.d_addr, lsm.seqno), + ADDR_VAL32(&nb->addr)); +#endif } pthread_cleanup_pop(true); @@ -722,13 +760,13 @@ static void cleanup_fqueue(void * fq) 
@@ -722,13 +760,13 @@ static void cleanup_fqueue(void * fq)
 
 static void * lsreader(void * o)
 {
-        fqueue_t *   fq;
-        int          ret;
-        uint8_t      buf[sizeof(struct lsa)];
-        int          fd;
-        qosspec_t    qs;
-        struct lsa * msg;
-        size_t       len;
+        fqueue_t *  fq;
+        int         ret;
+        uint8_t     buf[sizeof(struct lsa)];
+        int         fd;
+        qosspec_t   qs;
+        struct lsa  msg;
+        size_t      len;
 
         (void) o;
@@ -751,15 +789,22 @@ static void * lsreader(void * o)
                         if (fqueue_type(fq) != FLOW_PKT)
                                 continue;
 
-                        len = flow_read(fd, buf, sizeof(*msg));
-                        if (len <= 0 || len != sizeof(*msg))
+                        len = flow_read(fd, buf, sizeof(msg));
+                        if (len <= 0 || len != sizeof(msg))
                                 continue;
 
-                        msg = (struct lsa *) buf;
-
-                        if (lsdb_add_link(ntoh64(msg->s_addr),
-                                          ntoh64(msg->d_addr),
-                                          ntoh64(msg->seqno),
+                        memcpy(&msg, buf, sizeof(msg));
+
+                        msg.s_addr = ntoh64(msg.s_addr);
+                        msg.d_addr = ntoh64(msg.d_addr);
+                        msg.seqno  = ntoh64(msg.seqno);
+#ifdef DEBUG_PROTO_LS
+                        log_proto(LSU_FMT " <-- " ADDR_FMT32,
+                                  LSU_VAL(msg.s_addr, msg.d_addr, msg.seqno),
+                                  ADDR_VAL32(&ls.addr));
+#endif
+                        if (lsdb_add_link(msg.s_addr,
+                                          msg.d_addr,
+                                          msg.seqno,
                                           &qs))
                                 continue;
--
cgit v1.2.3