From e3dba5812b1422a79e6e77ce9f923bade5a480e4 Mon Sep 17 00:00:00 2001
From: Sander Vrijders
Date: Wed, 27 Sep 2017 15:33:39 +0200
Subject: ipcpd: normal: Add Loop-Free Alternates routing

This adds the Loop-Free Alternates (LFA) routing policy. If a link
goes down, an LFA may be selected as next hop instead of the main hop
that just failed, so that SDUs keep being forwarded without causing
routing loops.
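The loop-free condition this policy implements is the one from RFC 5286:
a neighbour N of source S is a valid alternate towards destination D when
dist(N, D) < dist(N, S) + dist(S, D), i.e. when N's own shortest path to
D cannot pass back through S. A minimal sketch of that check, assuming
symmetric unit-weight links (as in the graph code below) and hypothetical
plain distance arrays rather than the structures from graph.c:

    #include <stdbool.h>

    /*
     * RFC 5286 loop-free condition. n_dist[x] holds dist(N, x) and
     * s_dist[x] holds dist(S, x) for every vertex index x, as produced
     * by a shortest-path run from N and from S respectively. With
     * symmetric links, dist(N, S) == dist(S, N) == s_dist[n].
     */
    static bool is_loop_free_alternate(const int * n_dist,
                                       const int * s_dist,
                                       int         n,  /* neighbour N   */
                                       int         d)  /* destination D */
    {
            /* dist(N, D) < dist(N, S) + dist(S, D) */
            return n_dist[d] < s_dist[n] + s_dist[d];
    }

This is exactly the inequality checked per neighbour in
graph_routing_table_lfa() below.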
---
 src/ipcpd/normal/pol-routing-ops.h |   2 +-
 src/ipcpd/normal/pol/graph.c       | 199 ++++++++++++++++++++++++++++++++-----
 src/ipcpd/normal/pol/graph.h       |   4 +
 src/ipcpd/normal/pol/link_state.c  |  21 +++-
 src/ipcpd/normal/pol/link_state.h  |   2 +-
 src/ipcpd/normal/routing.c         |   3 +-
 src/tools/irm/irm_ipcp_bootstrap.c |  51 +++++-----
 7 files changed, 227 insertions(+), 55 deletions(-)

diff --git a/src/ipcpd/normal/pol-routing-ops.h b/src/ipcpd/normal/pol-routing-ops.h
index 9804d5ad..aa968e9f 100644
--- a/src/ipcpd/normal/pol-routing-ops.h
+++ b/src/ipcpd/normal/pol-routing-ops.h
@@ -26,7 +26,7 @@
 #include "pff.h"
 
 struct pol_routing_ops {
-        int  (* init)(void);
+        int  (* init)(enum pol_routing pr);
 
         void (* fini)(void);
 
diff --git a/src/ipcpd/normal/pol/graph.c b/src/ipcpd/normal/pol/graph.c
index 9e723737..eade23e0 100644
--- a/src/ipcpd/normal/pol/graph.c
+++ b/src/ipcpd/normal/pol/graph.c
@@ -379,37 +379,43 @@ static int get_min_vertex(struct graph * graph,
         return index;
 }
 
-static struct vertex ** dijkstra(struct graph * graph,
-                                 uint64_t       src)
+static int dijkstra(struct graph *    graph,
+                    uint64_t          src,
+                    struct vertex *** nhops,
+                    int **            dist)
 {
-        int                dist[graph->nr_vertices];
         bool               used[graph->nr_vertices];
         struct list_head * p = NULL;
         int                i = 0;
         struct vertex *    v = NULL;
         struct edge *      e = NULL;
         int                alt;
-        struct vertex **   nhop;
 
-        nhop = malloc(sizeof(*nhop) * graph->nr_vertices);
-        if (nhop == NULL)
-                return NULL;
+        *nhops = malloc(sizeof(**nhops) * graph->nr_vertices);
+        if (*nhops == NULL)
+                return -1;
+
+        *dist = malloc(sizeof(**dist) * graph->nr_vertices);
+        if (*dist == NULL) {
+                free(*nhops);
+                return -1;
+        }
 
         /* Init the data structures */
         list_for_each(p, &graph->vertices) {
                 v = list_entry(p, struct vertex, next);
                 if (v->addr == src)
-                        dist[i] = 0;
+                        (*dist)[i] = 0;
                 else
-                        dist[i] = INT_MAX;
+                        (*dist)[i] = INT_MAX;
 
-                nhop[i] = NULL;
+                (*nhops)[i] = NULL;
                 used[i] = false;
                 i++;
         }
 
         /* Perform actual Dijkstra */
-        i = get_min_vertex(graph, dist, used, &v);
+        i = get_min_vertex(graph, *dist, used, &v);
         while (v != NULL) {
                 list_for_each(p, &v->edges) {
                         e = list_entry(p, struct edge, next);
@@ -423,19 +429,19 @@ static struct vertex ** dijkstra(struct graph * graph,
                          * Method could be extended to use a different
                          * weight for a different QoS cube.
                          */
-                        alt = dist[i] + 1;
-                        if (alt < dist[e->nb->index]) {
-                                dist[e->nb->index] = alt;
+                        alt = (*dist)[i] + 1;
+                        if (alt < (*dist)[e->nb->index]) {
+                                (*dist)[e->nb->index] = alt;
                                 if (v->addr == src)
-                                        nhop[e->nb->index] = e->nb;
+                                        (*nhops)[e->nb->index] = e->nb;
                                 else
-                                        nhop[e->nb->index] = nhop[i];
+                                        (*nhops)[e->nb->index] = (*nhops)[i];
                         }
                 }
-                i = get_min_vertex(graph, dist, used, &v);
+                i = get_min_vertex(graph, *dist, used, &v);
         }
 
-        return nhop;
+        return 0;
 }
 
 static void free_routing_table(struct list_head * table)
@@ -471,9 +477,10 @@ void graph_free_routing_table(struct graph * graph,
         pthread_mutex_unlock(&graph->lock);
 }
 
-int graph_routing_table(struct graph *     graph,
-                        uint64_t           s_addr,
-                        struct list_head * table)
+static int graph_routing_table_simple(struct graph *     graph,
+                                      uint64_t           s_addr,
+                                      struct list_head * table,
+                                      int **             dist)
 {
         struct vertex ** nhops;
         struct list_head * p;
@@ -482,14 +489,11 @@ int graph_routing_table(struct graph * graph,
         struct routing_table * t;
         struct nhop * n;
 
-        pthread_mutex_lock(&graph->lock);
-
         /* We need at least 2 vertices for a table */
         if (graph->nr_vertices < 2)
                 goto fail_vertices;
 
-        nhops = dijkstra(graph, s_addr);
-        if (nhops == NULL)
+        if (dijkstra(graph, s_addr, &nhops, dist))
                 goto fail_vertices;
 
         list_head_init(table);
@@ -523,8 +527,6 @@ int graph_routing_table(struct graph * graph,
                 i++;
         }
 
-        pthread_mutex_unlock(&graph->lock);
-
         free(nhops);
 
         return 0;
@@ -535,6 +537,149 @@ int graph_routing_table(struct graph * graph,
         free_routing_table(table);
         free(nhops);
  fail_vertices:
-        pthread_mutex_unlock(&graph->lock);
+        return -1;
+}
+
+int graph_routing_table(struct graph *     graph,
+                        uint64_t           s_addr,
+                        struct list_head * table)
+{
+        int   ret = 0;
+        int * dist = NULL;
+
+        assert(graph);
+        assert(table);
+
+        pthread_mutex_lock(&graph->lock);
+
+        ret = graph_routing_table_simple(graph, s_addr, table, &dist);
+
+        free(dist);
+
+        pthread_mutex_unlock(&graph->lock);
+
+        return ret;
+}
+
+static int add_lfa_to_table(struct list_head * table,
+                            uint64_t           addr,
+                            uint64_t           lfa)
+{
+        struct list_head * p = NULL;
+        struct nhop *      n;
+
+        n = malloc(sizeof(*n));
+        if (n == NULL)
+                return -1;
+
+        n->nhop = lfa;
+
+        list_for_each(p, table) {
+                struct routing_table * t =
+                        list_entry(p, struct routing_table, next);
+                if (t->dst == addr) {
+                        list_add_tail(&n->next, &t->nhops);
+                        return 0;
+                }
+        }
+
+        free(n);
+
+        return -1;
+}
+
+int graph_routing_table_lfa(struct graph *     graph,
+                            uint64_t           s_addr,
+                            struct list_head * table)
+{
+        int *              s_dist;
+        int *              n_dist[AP_MAX_FLOWS];
+        uint64_t           addrs[AP_MAX_FLOWS];
+        int                n_index[AP_MAX_FLOWS];
+        struct list_head * p;
+        struct list_head * q;
+        struct vertex *    v;
+        struct edge *      e;
+        struct vertex **   nhops;
+        int                i = 0;
+        int                j = 0;
+        int                k;
+
+        assert(graph);
+        assert(table);
+
+        pthread_mutex_lock(&graph->lock);
+
+        for (j = 0; j < AP_MAX_FLOWS; j++) {
+                n_dist[j] = NULL;
+                n_index[j] = -1;
+                addrs[j] = -1;
+        }
+
+        /* Get the normal next hops routing table. */
+        if (graph_routing_table_simple(graph, s_addr, table, &s_dist))
+                goto fail_table_simple;
+
+        list_for_each(p, &graph->vertices) {
+                v = list_entry(p, struct vertex, next);
+
+                if (v->addr != s_addr)
+                        continue;
+
+                /* Get the distances for every neighbor of the source. */
+                list_for_each(q, &v->edges) {
+                        e = list_entry(q, struct edge, next);
+
+                        addrs[i] = e->nb->addr;
+                        n_index[i] = e->nb->index;
+                        if (dijkstra(graph, e->nb->addr,
+                                     &nhops, &(n_dist[i++])))
+                                goto fail_dijkstra;
+
+                        free(nhops);
+                }
+
+                break;
+        }
+
+        /* Loop through all nodes to see if we have an LFA for them. */
+        list_for_each(p, &graph->vertices) {
+                v = list_entry(p, struct vertex, next);
+
+                if (v->addr == s_addr)
+                        continue;
+
+                /*
+                 * Check for every neighbor if dist(neighbor, destination) <
+                 * dist(neighbor, source) + dist(source, destination).
+                 */
+                for (j = 0; j < i; j++) {
+                        /* Exclude ourselves. */
+                        if (addrs[j] == v->addr)
+                                continue;
+
+                        if (n_dist[j][v->index] <
+                            s_dist[n_index[j]] + s_dist[v->index]) {
+                                if (add_lfa_to_table(table, v->addr, addrs[j]))
+                                        goto fail_add_lfa;
+                        }
+                }
+        }
+
+        for (j = 0; j < i; j++)
+                free(n_dist[j]);
+
+        pthread_mutex_unlock(&graph->lock);
+
+        free(s_dist);
+
+        return 0;
+
+ fail_add_lfa:
+ fail_dijkstra:
+        for (k = 0; k < i; k++)
+                free(n_dist[k]);
+        free_routing_table(table);
+        free(s_dist);
+ fail_table_simple:
+        pthread_mutex_unlock(&graph->lock);
+
         return -1;
 }
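As a quick sanity check of that inequality on a concrete topology,
consider a unit-weight square (a hypothetical example, not taken from
the test suite):

    #include <assert.h>
    #include <stdio.h>

    /*
     * Hypothetical 4-node topology with unit-weight, symmetric links:
     *
     *         S --- A
     *         |     |
     *         B --- D
     *
     * Vertex indices: S = 0, A = 1, B = 2, D = 3.
     */
    int main(void)
    {
            int s_dist[] = { 0, 1, 1, 2 }; /* dist(S, x) */
            int a_dist[] = { 1, 0, 2, 1 }; /* dist(A, x) */
            int b_dist[] = { 1, 2, 0, 1 }; /* dist(B, x) */

            /* S reaches D via A; is neighbour B a loop-free alternate? */
            assert(b_dist[3] < s_dist[2] + s_dist[3]); /* 1 < 1 + 2: yes */

            /* A, the primary next hop, trivially passes the test too. */
            assert(a_dist[3] < s_dist[1] + s_dist[3]); /* 1 < 1 + 2 */

            /*
             * If B instead hung off S as a leaf (its only link B - S),
             * b_dist would be { 1, 2, 0, 3 } and the check would fail,
             * since 3 < 1 + 2 does not hold: B would hand SDUs for D
             * straight back to S.
             */

            printf("B protects the link S - A for destination D\n");

            return 0;
    }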
diff --git a/src/ipcpd/normal/pol/graph.h b/src/ipcpd/normal/pol/graph.h
index 7340ecb9..66c8f780 100644
--- a/src/ipcpd/normal/pol/graph.h
+++ b/src/ipcpd/normal/pol/graph.h
@@ -56,6 +56,10 @@ int graph_routing_table(struct graph * graph,
                         uint64_t           s_addr,
                         struct list_head * table);
 
+int graph_routing_table_lfa(struct graph *     graph,
+                            uint64_t           s_addr,
+                            struct list_head * table);
+
 void graph_free_routing_table(struct graph *     graph,
                               struct list_head * table);
 
diff --git a/src/ipcpd/normal/pol/link_state.c b/src/ipcpd/normal/pol/link_state.c
index 51d317bc..6330cf81 100644
--- a/src/ipcpd/normal/pol/link_state.c
+++ b/src/ipcpd/normal/pol/link_state.c
@@ -89,6 +89,10 @@ struct nb {
         enum nb_type     type;
 };
 
+typedef int (* rtable_fn_t)(struct graph *     graph,
+                            uint64_t           s_addr,
+                            struct list_head * table);
+
 struct {
         struct list_head nbs;
         fset_t *         mgmt_set;
@@ -103,6 +107,8 @@ struct {
         pthread_t        lsupdate;
         pthread_t        lsreader;
         pthread_t        listener;
+
+        rtable_fn_t      rtable;
 } ls;
 
 struct pol_routing_ops link_state_ops = {
@@ -388,7 +394,7 @@ static void * calculate_pff(void * o)
         instance = (struct routing_i *) o;
 
         while (true) {
-                if (graph_routing_table(ls.graph, ipcpi.dt_addr, &table)) {
+                if (ls.rtable(ls.graph, ipcpi.dt_addr, &table)) {
                         sleep(RECALC_TIME);
                         continue;
                 }
@@ -664,7 +670,7 @@ void link_state_routing_i_destroy(struct routing_i * instance)
         free(instance);
 }
 
-int link_state_init(void)
+int link_state_init(enum pol_routing pr)
 {
         struct conn_info info;
 
@@ -676,6 +682,17 @@ int link_state_init(void)
         info.pref_syntax = PROTO_GPB;
         info.addr        = ipcpi.dt_addr;
 
+        switch (pr) {
+        case ROUTING_LINK_STATE:
+                ls.rtable = graph_routing_table;
+                break;
+        case ROUTING_LINK_STATE_LFA:
+                ls.rtable = graph_routing_table_lfa;
+                break;
+        default:
+                goto fail_graph;
+        }
+
         ls.graph = graph_create();
         if (ls.graph == NULL)
                 goto fail_graph;
 
diff --git a/src/ipcpd/normal/pol/link_state.h b/src/ipcpd/normal/pol/link_state.h
index 58f90d91..cfdeb09d 100644
--- a/src/ipcpd/normal/pol/link_state.h
+++ b/src/ipcpd/normal/pol/link_state.h
@@ -28,7 +28,7 @@
 
 #include "pol-routing-ops.h"
 
-int  link_state_init(void);
+int  link_state_init(enum pol_routing pr);
 
 void link_state_fini(void);
 
diff --git a/src/ipcpd/normal/routing.c b/src/ipcpd/normal/routing.c
index afef23a2..909f74cc 100644
--- a/src/ipcpd/normal/routing.c
+++ b/src/ipcpd/normal/routing.c
@@ -33,13 +33,14 @@ int routing_init(enum pol_routing pr)
 {
         switch (pr) {
         case ROUTING_LINK_STATE:
+        case ROUTING_LINK_STATE_LFA:
                 r_ops = &link_state_ops;
                 break;
         default:
                 return -ENOTSUP;
         }
 
-        return r_ops->init();
+        return r_ops->init(pr);
 }
 
 struct routing_i * routing_i_create(struct pff * pff)
diff --git a/src/tools/irm/irm_ipcp_bootstrap.c b/src/tools/irm/irm_ipcp_bootstrap.c
index 9812f860..26b86a16 100644
--- a/src/tools/irm/irm_ipcp_bootstrap.c
+++ b/src/tools/irm/irm_ipcp_bootstrap.c
@@ -33,28 +33,29 @@
 #include "irm_ops.h"
 #include "irm_utils.h"
 
-#define NORMAL                "normal"
-#define SHIM_UDP              "shim-udp"
-#define SHIM_ETH_LLC          "shim-eth-llc"
-#define LOCAL                 "local"
-
-#define MD5                   "MD5"
-#define SHA3_224              "SHA3_224"
-#define SHA3_256              "SHA3_256"
-#define SHA3_384              "SHA3_384"
-#define SHA3_512              "SHA3_512"
-
-#define DEFAULT_ADDR_SIZE     4
-#define DEFAULT_FD_SIZE       2
-#define DEFAULT_DDNS          0
-#define DEFAULT_ADDR_AUTH     ADDR_AUTH_FLAT_RANDOM
-#define DEFAULT_ROUTING       ROUTING_LINK_STATE
-#define DEFAULT_PFF           PFF_SIMPLE
-#define DEFAULT_HASH_ALGO     DIR_HASH_SHA3_256
-#define FLAT_RANDOM_ADDR_AUTH "flat"
-#define LINK_STATE_ROUTING    "link_state"
-#define SIMPLE_PFF            "simple"
-#define ALTERNATE_PFF         "alternate"
+#define NORMAL                 "normal"
+#define SHIM_UDP               "shim-udp"
+#define SHIM_ETH_LLC           "shim-eth-llc"
+#define LOCAL                  "local"
+
+#define MD5                    "MD5"
+#define SHA3_224               "SHA3_224"
+#define SHA3_256               "SHA3_256"
+#define SHA3_384               "SHA3_384"
+#define SHA3_512               "SHA3_512"
+
+#define DEFAULT_ADDR_SIZE      4
+#define DEFAULT_FD_SIZE        2
+#define DEFAULT_DDNS           0
+#define DEFAULT_ADDR_AUTH      ADDR_AUTH_FLAT_RANDOM
+#define DEFAULT_ROUTING        ROUTING_LINK_STATE
+#define DEFAULT_PFF            PFF_SIMPLE
+#define DEFAULT_HASH_ALGO      DIR_HASH_SHA3_256
+#define FLAT_RANDOM_ADDR_AUTH  "flat"
+#define LINK_STATE_ROUTING     "link_state"
+#define LINK_STATE_LFA_ROUTING "lfa"
+#define SIMPLE_PFF             "simple"
+#define ALTERNATE_PFF          "alternate"
 
 static void usage(void)
 {
@@ -74,7 +75,8 @@ static void usage(void)
                "                [pff [PFF_POLICY] (default: %s)]\n"
                "                [hash [ALGORITHM] (default: %s)]\n"
                "where ADDRESS_POLICY = {"FLAT_RANDOM_ADDR_AUTH"}\n"
-               "      ROUTING_POLICY = {"LINK_STATE_ROUTING"}\n"
+               "      ROUTING_POLICY = {"LINK_STATE_ROUTING " "
+               LINK_STATE_LFA_ROUTING "}\n"
                "      PFF_POLICY = {" SIMPLE_PFF " " ALTERNATE_PFF "}\n"
                "      ALGORITHM = {" SHA3_224 " " SHA3_256 " "
                SHA3_384 " " SHA3_512 "}\n\n"
@@ -151,6 +153,9 @@ int do_bootstrap_ipcp(int argc, char ** argv)
                 } else if (matches(*argv, "routing") == 0) {
                         if (strcmp(LINK_STATE_ROUTING, *(argv + 1)) == 0)
                                 routing_type = ROUTING_LINK_STATE;
+                        else if (strcmp(LINK_STATE_LFA_ROUTING,
+                                        *(argv + 1)) == 0)
+                                routing_type = ROUTING_LINK_STATE_LFA;
                         else
                                 goto unknown_param;
                 } else if (matches(*argv, "pff") == 0) {

From 39c7f82f4714f8515860d1c0e2726bff29e22944 Mon Sep 17 00:00:00 2001
From: Sander Vrijders
Date: Wed, 27 Sep 2017 17:24:59 +0200
Subject: ipcpd: normal: React to flow events in link state policy

This lets the link state policy react to flow up and down events by
notifying the PFFs of its routing instances, so that they can take
appropriate action.
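The diff below only dispatches the events to each routing instance's
PFF via pff_flow_state_change(); what a PFF does with them is
policy-specific and not part of this patch. As a rough sketch of the
consumer side (a hypothetical, simplified stand-in for an
alternate-PFF entry, not the actual Ouroboros code), a forwarding
entry holding the LFAs computed by the first patch could fail over
like this:

    #include <stdbool.h>

    /*
     * Hypothetical forwarding entry: fds[0] is the fd of the main
     * next hop, the remaining entries are LFAs. Purely illustrative;
     * the real PFF policies keep their own structures and locking.
     */
    struct pft_entry {
            int fds[4];
            int nfds;
            int active; /* index of the fd currently forwarded on */
    };

    static void entry_flow_state_change(struct pft_entry * e,
                                        int                fd,
                                        bool               up)
    {
            if (up) {
                    /* The main hop came back up: switch back to it. */
                    if (e->fds[0] == fd)
                            e->active = 0;
                    return;
            }

            /* The active hop went down: fail over to the next LFA. */
            if (e->fds[e->active] == fd && e->active + 1 < e->nfds)
                    e->active++;
    }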
---
 src/ipcpd/normal/connmgr.h        |  4 +--
 src/ipcpd/normal/enroll.c         |  1 +
 src/ipcpd/normal/pol/link_state.c | 55 +++++++++++++++++++++++++++++++++++++--
 3 files changed, 56 insertions(+), 4 deletions(-)

diff --git a/src/ipcpd/normal/connmgr.h b/src/ipcpd/normal/connmgr.h
index a8edee7d..2ad5316d 100644
--- a/src/ipcpd/normal/connmgr.h
+++ b/src/ipcpd/normal/connmgr.h
@@ -31,11 +31,11 @@
 #define NOTIFY_DT_CONN_ADD   0x00D0
 #define NOTIFY_DT_CONN_DEL   0x00D1
 #define NOTIFY_DT_CONN_QOS   0x00D2
-#define NOTIFY_DT_CONN_DOWN  0x00D3
+#define NOTIFY_DT_CONN_UP    0x00D3
+#define NOTIFY_DT_CONN_DOWN  0x00D4
 
 #define NOTIFY_MGMT_CONN_ADD  0x00F0
 #define NOTIFY_MGMT_CONN_DEL  0x00F1
-#define NOTIFY_MGMT_CONN_DOWN 0x00F2
 
 int connmgr_init(void);
 
diff --git a/src/ipcpd/normal/enroll.c b/src/ipcpd/normal/enroll.c
index d245d0bd..ee87aa23 100644
--- a/src/ipcpd/normal/enroll.c
+++ b/src/ipcpd/normal/enroll.c
@@ -131,6 +131,7 @@ static int send_rcv_enroll_msg(int fd)
         enroll.conf.has_ttl        = reply->conf->has_ttl;
         enroll.conf.addr_auth_type = reply->conf->addr_auth_type;
         enroll.conf.routing_type   = reply->conf->routing_type;
+        enroll.conf.pff_type       = reply->conf->pff_type;
         enroll.conf.dif_info.dir_hash_algo =
                 reply->conf->dif_info->dir_hash_algo;
 
diff --git a/src/ipcpd/normal/pol/link_state.c b/src/ipcpd/normal/pol/link_state.c
index 6330cf81..f3af2771 100644
--- a/src/ipcpd/normal/pol/link_state.c
+++ b/src/ipcpd/normal/pol/link_state.c
@@ -62,8 +62,10 @@ typedef LinkStateMsg link_state_msg_t;
 #endif
 
 struct routing_i {
-        struct pff * pff;
-        pthread_t    calculator;
+        struct list_head next;
+
+        struct pff *     pff;
+        pthread_t        calculator;
 };
 
 /* TODO: link weight support. */
@@ -108,6 +110,9 @@ struct {
         pthread_t        lsreader;
         pthread_t        listener;
 
+        struct list_head routing_instances;
+        pthread_mutex_t  routing_i_lock;
+
         rtable_fn_t      rtable;
 } ls;
 
@@ -590,6 +595,24 @@ static void * lsreader(void * o)
         return (void *) 0;
 }
 
+static void flow_event(int  fd,
+                       bool up)
+{
+        struct list_head * p;
+
+        log_dbg("Notifying routing instances of flow event.");
+
+        pthread_mutex_lock(&ls.routing_i_lock);
+
+        list_for_each(p, &ls.routing_instances) {
+                struct routing_i * ri =
+                        list_entry(p, struct routing_i, next);
+                pff_flow_state_change(ri->pff, fd, up);
+        }
+
+        pthread_mutex_unlock(&ls.routing_i_lock);
+}
+
 static void handle_event(void *       self,
                          int          event,
                          const void * o)
@@ -614,6 +637,8 @@ static void handle_event(void *       self,
                 send_lsm(ipcpi.dt_addr, c->conn_info.addr);
                 break;
         case NOTIFY_DT_CONN_DEL:
+                flow_event(c->flow_info.fd, false);
+
                 if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))
                         log_dbg("Failed to delete neighbor from LSDB.");
 
@@ -623,6 +648,12 @@ static void handle_event(void *       self,
         case NOTIFY_DT_CONN_QOS:
                 log_dbg("QoS changes currently unsupported.");
                 break;
+        case NOTIFY_DT_CONN_UP:
+                flow_event(c->flow_info.fd, true);
+                break;
+        case NOTIFY_DT_CONN_DOWN:
+                flow_event(c->flow_info.fd, false);
+                break;
         case NOTIFY_MGMT_CONN_ADD:
                 fset_add(ls.mgmt_set, c->flow_info.fd);
                 if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_MGMT))
@@ -656,6 +687,12 @@ struct routing_i * link_state_routing_i_create(struct pff * pff)
                 return NULL;
         }
 
+        pthread_mutex_lock(&ls.routing_i_lock);
+
+        list_add(&tmp->next, &ls.routing_instances);
+
+        pthread_mutex_unlock(&ls.routing_i_lock);
+
         return tmp;
 }
 
@@ -663,6 +700,12 @@ void link_state_routing_i_destroy(struct routing_i * instance)
 {
         assert(instance);
 
+        pthread_mutex_lock(&ls.routing_i_lock);
+
+        list_del(&instance->next);
+
+        pthread_mutex_unlock(&ls.routing_i_lock);
+
         pthread_cancel(instance->calculator);
 
         pthread_join(instance->calculator, NULL);
 
@@ -703,6 +746,9 @@ int link_state_init(enum pol_routing pr)
         if (pthread_rwlock_init(&ls.db_lock, NULL))
                 goto fail_db_lock_init;
 
+        if (pthread_mutex_init(&ls.routing_i_lock, NULL))
+                goto fail_routing_i_lock_init;
+
         if (connmgr_ae_init(AEID_MGMT, &info))
                 goto fail_connmgr_ae_init;
 
@@ -712,6 +758,7 @@ int link_state_init(enum pol_routing pr)
 
         list_head_init(&ls.db);
         list_head_init(&ls.nbs);
+        list_head_init(&ls.routing_instances);
 
         if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL))
                 goto fail_pthread_create_lsupdate;
@@ -739,6 +786,8 @@ int link_state_init(enum pol_routing pr)
  fail_fset_create:
         connmgr_ae_fini(AEID_MGMT);
  fail_connmgr_ae_init:
+        pthread_mutex_destroy(&ls.routing_i_lock);
+ fail_routing_i_lock_init:
         pthread_rwlock_destroy(&ls.db_lock);
  fail_db_lock_init:
         notifier_unreg(handle_event);
@@ -782,5 +831,7 @@ void link_state_fini(void)
 
         pthread_rwlock_destroy(&ls.db_lock);
 
+        pthread_mutex_destroy(&ls.routing_i_lock);
+
         notifier_unreg(handle_event);
 }
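With both patches applied, the LFA policy is selected at bootstrap time
through the new "lfa" keyword parsed in irm_ipcp_bootstrap.c above. An
invocation would look something like this (hypothetical IPCP and DIF
names; the exact argument order may differ between irm versions):

    $ irm ipcp bootstrap name n.1 dif home.DIF type normal routing lfa pff alternate

Pairing "routing lfa" with the alternate PFF is presumably the intended
combination, since a PFF that keeps a single next hop per destination
has no alternate to fail over to. Members that enroll later inherit
routing_type, and after the second patch also pff_type, from the
enrolment reply, so only the bootstrap command needs these arguments.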