Diffstat (limited to 'src/ipcpd')
-rw-r--r--   src/ipcpd/normal/connmgr.h           4
-rw-r--r--   src/ipcpd/normal/enroll.c            1
-rw-r--r--   src/ipcpd/normal/pol-routing-ops.h   2
-rw-r--r--   src/ipcpd/normal/pol/graph.c       199
-rw-r--r--   src/ipcpd/normal/pol/graph.h         4
-rw-r--r--   src/ipcpd/normal/pol/link_state.c   76
-rw-r--r--   src/ipcpd/normal/pol/link_state.h    2
-rw-r--r--   src/ipcpd/normal/routing.c           3
8 files changed, 255 insertions(+), 36 deletions(-)
diff --git a/src/ipcpd/normal/connmgr.h b/src/ipcpd/normal/connmgr.h
index a8edee7d..2ad5316d 100644
--- a/src/ipcpd/normal/connmgr.h
+++ b/src/ipcpd/normal/connmgr.h
@@ -31,11 +31,11 @@
#define NOTIFY_DT_CONN_ADD 0x00D0
#define NOTIFY_DT_CONN_DEL 0x00D1
#define NOTIFY_DT_CONN_QOS 0x00D2
-#define NOTIFY_DT_CONN_DOWN 0x00D3
+#define NOTIFY_DT_CONN_UP 0x00D3
+#define NOTIFY_DT_CONN_DOWN 0x00D4
#define NOTIFY_MGMT_CONN_ADD 0x00F0
#define NOTIFY_MGMT_CONN_DEL 0x00F1
-#define NOTIFY_MGMT_CONN_DOWN 0x00F2
int connmgr_init(void);
diff --git a/src/ipcpd/normal/enroll.c b/src/ipcpd/normal/enroll.c
index d245d0bd..ee87aa23 100644
--- a/src/ipcpd/normal/enroll.c
+++ b/src/ipcpd/normal/enroll.c
@@ -131,6 +131,7 @@ static int send_rcv_enroll_msg(int fd)
enroll.conf.has_ttl = reply->conf->has_ttl;
enroll.conf.addr_auth_type = reply->conf->addr_auth_type;
enroll.conf.routing_type = reply->conf->routing_type;
+ enroll.conf.pff_type = reply->conf->pff_type;
enroll.conf.dif_info.dir_hash_algo
= reply->conf->dif_info->dir_hash_algo;
diff --git a/src/ipcpd/normal/pol-routing-ops.h b/src/ipcpd/normal/pol-routing-ops.h
index 9804d5ad..aa968e9f 100644
--- a/src/ipcpd/normal/pol-routing-ops.h
+++ b/src/ipcpd/normal/pol-routing-ops.h
@@ -26,7 +26,7 @@
#include "pff.h"
struct pol_routing_ops {
- int (* init)(void);
+ int (* init)(enum pol_routing pr);
void (* fini)(void);
diff --git a/src/ipcpd/normal/pol/graph.c b/src/ipcpd/normal/pol/graph.c
index 9e723737..eade23e0 100644
--- a/src/ipcpd/normal/pol/graph.c
+++ b/src/ipcpd/normal/pol/graph.c
@@ -379,37 +379,43 @@ static int get_min_vertex(struct graph * graph,
return index;
}
-static struct vertex ** dijkstra(struct graph * graph,
- uint64_t src)
+static int dijkstra(struct graph * graph,
+ uint64_t src,
+ struct vertex *** nhops,
+ int ** dist)
{
- int dist[graph->nr_vertices];
bool used[graph->nr_vertices];
struct list_head * p = NULL;
int i = 0;
struct vertex * v = NULL;
struct edge * e = NULL;
int alt;
- struct vertex ** nhop;
- nhop = malloc(sizeof(*nhop) * graph->nr_vertices);
- if (nhop == NULL)
- return NULL;
+ *nhops = malloc(sizeof(**nhops) * graph->nr_vertices);
+ if (*nhops == NULL)
+ return -1;
+
+ *dist = malloc(sizeof(**dist) * graph->nr_vertices);
+ if (*dist == NULL) {
+ free(*nhops);
+ return -1;
+ }
/* Init the data structures */
list_for_each(p, &graph->vertices) {
v = list_entry(p, struct vertex, next);
if (v->addr == src)
- dist[i] = 0;
+ (*dist)[i] = 0;
else
- dist[i] = INT_MAX;
+ (*dist)[i] = INT_MAX;
- nhop[i] = NULL;
+ (*nhops)[i] = NULL;
used[i] = false;
i++;
}
/* Perform actual Dijkstra */
- i = get_min_vertex(graph, dist, used, &v);
+ i = get_min_vertex(graph, *dist, used, &v);
while (v != NULL) {
list_for_each(p, &v->edges) {
e = list_entry(p, struct edge, next);
@@ -423,19 +429,19 @@ static struct vertex ** dijkstra(struct graph * graph,
* Method could be extended to use a different
* weight for a different QoS cube.
*/
- alt = dist[i] + 1;
- if (alt < dist[e->nb->index]) {
- dist[e->nb->index] = alt;
+ alt = (*dist)[i] + 1;
+ if (alt < (*dist)[e->nb->index]) {
+ (*dist)[e->nb->index] = alt;
if (v->addr == src)
- nhop[e->nb->index] = e->nb;
+ (*nhops)[e->nb->index] = e->nb;
else
- nhop[e->nb->index] = nhop[i];
+ (*nhops)[e->nb->index] = (*nhops)[i];
}
}
- i = get_min_vertex(graph, dist, used, &v);
+ i = get_min_vertex(graph, *dist, used, &v);
}
- return nhop;
+ return 0;
}
static void free_routing_table(struct list_head * table)
@@ -471,9 +477,10 @@ void graph_free_routing_table(struct graph * graph,
pthread_mutex_unlock(&graph->lock);
}
-int graph_routing_table(struct graph * graph,
- uint64_t s_addr,
- struct list_head * table)
+static int graph_routing_table_simple(struct graph * graph,
+ uint64_t s_addr,
+ struct list_head * table,
+ int ** dist)
{
struct vertex ** nhops;
struct list_head * p;
@@ -482,14 +489,11 @@ int graph_routing_table(struct graph * graph,
struct routing_table * t;
struct nhop * n;
- pthread_mutex_lock(&graph->lock);
-
/* We need at least 2 vertices for a table */
if (graph->nr_vertices < 2)
goto fail_vertices;
- nhops = dijkstra(graph, s_addr);
- if (nhops == NULL)
+ if (dijkstra(graph, s_addr, &nhops, dist))
goto fail_vertices;
list_head_init(table);
@@ -523,8 +527,6 @@ int graph_routing_table(struct graph * graph,
i++;
}
- pthread_mutex_unlock(&graph->lock);
-
free(nhops);
return 0;
@@ -535,6 +537,149 @@ int graph_routing_table(struct graph * graph,
free_routing_table(table);
free(nhops);
fail_vertices:
+ return -1;
+}
+
+int graph_routing_table(struct graph * graph,
+ uint64_t s_addr,
+ struct list_head * table)
+{
+ int ret = 0;
+ int * dist = NULL; /* may stay unset if the table build fails early */
+
+ assert(graph);
+ assert(table);
+
+ pthread_mutex_lock(&graph->lock);
+
+ ret = graph_routing_table_simple(graph, s_addr, table, &dist);
+
+ free(dist);
+
pthread_mutex_unlock(&graph->lock);
+
+ return ret;
+}
+
+static int add_lfa_to_table(struct list_head * table,
+ uint64_t addr,
+ uint64_t lfa)
+{
+ struct list_head * p = NULL;
+ struct nhop * n;
+
+ n = malloc(sizeof(*n));
+ if (n == NULL)
+ return -1;
+
+ n->nhop = lfa;
+
+ list_for_each(p, table) {
+ struct routing_table * t =
+ list_entry(p, struct routing_table, next);
+ if (t->dst == addr) {
+ list_add_tail(&n->next, &t->nhops);
+ return 0;
+ }
+ }
+
+ free(n);
+
+ return -1;
+}
+
+int graph_routing_table_lfa(struct graph * graph,
+ uint64_t s_addr,
+ struct list_head * table)
+{
+ int * s_dist = NULL;
+ int * n_dist[AP_MAX_FLOWS];
+ uint64_t addrs[AP_MAX_FLOWS];
+ int n_index[AP_MAX_FLOWS];
+ struct list_head * p;
+ struct list_head * q;
+ struct vertex * v;
+ struct edge * e;
+ struct vertex ** nhops;
+ int i = 0;
+ int j = 0;
+ int k;
+
+ assert(graph);
+ assert(table);
+
+ pthread_mutex_lock(&graph->lock);
+
+ for (j = 0; j < AP_MAX_FLOWS; j++) {
+ n_dist[j] = NULL;
+ n_index[j] = -1;
+ addrs[j] = -1;
+ }
+
+ /* Get the normal next hops routing table. */
+ if (graph_routing_table_simple(graph, s_addr, table, &s_dist))
+ goto fail_table_simple;
+
+ list_for_each(p, &graph->vertices) {
+ v = list_entry(p, struct vertex, next);
+
+ if (v->addr != s_addr)
+ continue;
+
+ /* Get the distances for every neighbor of the source. */
+ list_for_each(q, &v->edges) {
+ e = list_entry(q, struct edge, next);
+
+ addrs[i] = e->nb->addr;
+ n_index[i] = e->nb->index;
+ if (dijkstra(graph, e->nb->addr,
+ &nhops, &(n_dist[i++])))
+ goto fail_dijkstra;
+
+ free(nhops);
+ }
+
+ break;
+ }
+
+ /* Loop through all nodes to see if we have an LFA for them. */
+ list_for_each(p, &graph->vertices) {
+ v = list_entry(p, struct vertex, next);
+
+ if (v->addr == s_addr)
+ continue;
+
+ /*
+ * Check for every neighbor if dist(neighbor, destination) <
+ * dist(neighbor, source) + dist(source, destination).
+ */
+ for (j = 0; j < i; j++) {
+ /* Skip the neighbor if it is the destination itself. */
+ if (addrs[j] == v->addr)
+ continue;
+
+ if (n_dist[j][v->index] <
+ s_dist[n_index[j]] + s_dist[v->index]) {
+ if (add_lfa_to_table(table, v->addr, addrs[j]))
+ goto fail_add_lfa;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&graph->lock);
+
+ /* The neighbor distance vectors are reused for every
+ * destination above, so only free them when done. */
+ for (j = 0; j < i; j++)
+ free(n_dist[j]);
+
+ free(s_dist);
+
+ return 0;
+
+ fail_add_lfa:
+ fail_dijkstra:
+ /* Unallocated entries are NULL; free(NULL) is a no-op. */
+ for (k = 0; k < i; k++)
+ free(n_dist[k]);
+ free_routing_table(table);
+ fail_table_simple:
+ free(s_dist);
+ pthread_mutex_unlock(&graph->lock);
+
return -1;
}
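
The condition implemented in graph_routing_table_lfa() is the standard
loop-free alternate test (cf. RFC 5286): a neighbor n of source s is a
safe alternate next hop towards destination d iff
dist(n, d) < dist(n, s) + dist(s, d), which guarantees that n will not
loop packets back through s. A minimal self-contained sketch of that
test, with hypothetical unit-weight distances (not values taken from
this code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Loop-free condition: n is an LFA for destination d iff
     * dist(n, d) < dist(n, s) + dist(s, d). */
    static bool is_lfa(int dist_n_d, int dist_n_s, int dist_s_d)
    {
            return dist_n_d < dist_n_s + dist_s_d;
    }

    int main(void)
    {
            /* Hypothetical triangle s-n-d, all links of weight 1:
             * dist(n, d) = 1, dist(n, s) = 1, dist(s, d) = 1. */
            printf("n is %san LFA\n", is_lfa(1, 1, 1) ? "" : "not ");
            return 0;
    }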
diff --git a/src/ipcpd/normal/pol/graph.h b/src/ipcpd/normal/pol/graph.h
index 7340ecb9..66c8f780 100644
--- a/src/ipcpd/normal/pol/graph.h
+++ b/src/ipcpd/normal/pol/graph.h
@@ -56,6 +56,10 @@ int graph_routing_table(struct graph * graph,
uint64_t s_addr,
struct list_head * table);
+int graph_routing_table_lfa(struct graph * graph,
+ uint64_t s_addr,
+ struct list_head * table);
+
void graph_free_routing_table(struct graph * graph,
struct list_head * table);
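
For callers, the LFA variant is a drop-in replacement for
graph_routing_table(): each struct routing_table entry simply carries
extra struct nhop elements after the primary next hop. A hedged caller
sketch (g and own_addr are placeholder names; list_for_each/list_entry
are ouroboros' internal list primitives, and the printf needs
<stdio.h> and <inttypes.h>):

    struct list_head table;
    struct list_head * p;

    if (graph_routing_table_lfa(g, own_addr, &table))
            return -1;

    list_for_each(p, &table) {
            struct routing_table * t =
                    list_entry(p, struct routing_table, next);
            struct list_head * q;

            /* The first nhop is the Dijkstra next hop; any
             * following entries are loop-free alternates. */
            list_for_each(q, &t->nhops) {
                    struct nhop * n = list_entry(q, struct nhop, next);
                    printf("dst %" PRIu64 " -> nhop %" PRIu64 "\n",
                           t->dst, n->nhop);
            }
    }

    graph_free_routing_table(g, &table);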
diff --git a/src/ipcpd/normal/pol/link_state.c b/src/ipcpd/normal/pol/link_state.c
index 51d317bc..f3af2771 100644
--- a/src/ipcpd/normal/pol/link_state.c
+++ b/src/ipcpd/normal/pol/link_state.c
@@ -62,8 +62,10 @@ typedef LinkStateMsg link_state_msg_t;
#endif
struct routing_i {
- struct pff * pff;
- pthread_t calculator;
+ struct list_head next;
+
+ struct pff * pff;
+ pthread_t calculator;
};
/* TODO: link weight support. */
@@ -89,6 +91,10 @@ struct nb {
enum nb_type type;
};
+typedef int (* rtable_fn_t)(struct graph * graph,
+ uint64_t s_addr,
+ struct list_head * table);
+
struct {
struct list_head nbs;
fset_t * mgmt_set;
@@ -103,6 +109,11 @@ struct {
pthread_t lsupdate;
pthread_t lsreader;
pthread_t listener;
+
+ struct list_head routing_instances;
+ pthread_mutex_t routing_i_lock;
+
+ rtable_fn_t rtable;
} ls;
struct pol_routing_ops link_state_ops = {
@@ -388,7 +399,7 @@ static void * calculate_pff(void * o)
instance = (struct routing_i *) o;
while (true) {
- if (graph_routing_table(ls.graph, ipcpi.dt_addr, &table)) {
+ if (ls.rtable(ls.graph, ipcpi.dt_addr, &table)) {
sleep(RECALC_TIME);
continue;
}
@@ -584,6 +595,24 @@ static void * lsreader(void * o)
return (void *) 0;
}
+static void flow_event(int fd,
+ bool up)
+{
+ struct list_head * p;
+
+ log_dbg("Notifying routing instances of flow event.");
+
+ pthread_mutex_lock(&ls.routing_i_lock);
+
+ list_for_each(p, &ls.routing_instances) {
+ struct routing_i * ri = list_entry(p, struct routing_i, next);
+ pff_flow_state_change(ri->pff, fd, up);
+ }
+
+ pthread_mutex_unlock(&ls.routing_i_lock);
+}
+
static void handle_event(void * self,
int event,
const void * o)
@@ -608,6 +637,8 @@ static void handle_event(void * self,
send_lsm(ipcpi.dt_addr, c->conn_info.addr);
break;
case NOTIFY_DT_CONN_DEL:
+ flow_event(c->flow_info.fd, false);
+
if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))
log_dbg("Failed to delete neighbor from LSDB.");
@@ -617,6 +648,12 @@ static void handle_event(void * self,
case NOTIFY_DT_CONN_QOS:
log_dbg("QoS changes currently unsupported.");
break;
+ case NOTIFY_DT_CONN_UP:
+ flow_event(c->flow_info.fd, true);
+ break;
+ case NOTIFY_DT_CONN_DOWN:
+ flow_event(c->flow_info.fd, false);
+ break;
case NOTIFY_MGMT_CONN_ADD:
fset_add(ls.mgmt_set, c->flow_info.fd);
if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_MGMT))
@@ -650,6 +687,12 @@ struct routing_i * link_state_routing_i_create(struct pff * pff)
return NULL;
}
+ pthread_mutex_lock(&ls.routing_i_lock);
+
+ list_add(&tmp->next, &ls.routing_instances);
+
+ pthread_mutex_unlock(&ls.routing_i_lock);
+
return tmp;
}
@@ -657,6 +700,12 @@ void link_state_routing_i_destroy(struct routing_i * instance)
{
assert(instance);
+ pthread_mutex_lock(&ls.routing_i_lock);
+
+ list_del(&instance->next);
+
+ pthread_mutex_unlock(&ls.routing_i_lock);
+
pthread_cancel(instance->calculator);
pthread_join(instance->calculator, NULL);
@@ -664,7 +713,7 @@ void link_state_routing_i_destroy(struct routing_i * instance)
free(instance);
}
-int link_state_init(void)
+int link_state_init(enum pol_routing pr)
{
struct conn_info info;
@@ -676,6 +725,17 @@ int link_state_init(void)
info.pref_syntax = PROTO_GPB;
info.addr = ipcpi.dt_addr;
+ switch (pr) {
+ case ROUTING_LINK_STATE:
+ ls.rtable = graph_routing_table;
+ break;
+ case ROUTING_LINK_STATE_LFA:
+ ls.rtable = graph_routing_table_lfa;
+ break;
+ default:
+ goto fail_graph;
+ }
+
ls.graph = graph_create();
if (ls.graph == NULL)
goto fail_graph;
@@ -686,6 +746,9 @@ int link_state_init(void)
if (pthread_rwlock_init(&ls.db_lock, NULL))
goto fail_db_lock_init;
+ if (pthread_mutex_init(&ls.routing_i_lock, NULL))
+ goto fail_routing_i_lock_init;
+
if (connmgr_ae_init(AEID_MGMT, &info))
goto fail_connmgr_ae_init;
@@ -695,6 +758,7 @@ int link_state_init(void)
list_head_init(&ls.db);
list_head_init(&ls.nbs);
+ list_head_init(&ls.routing_instances);
if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL))
goto fail_pthread_create_lsupdate;
@@ -722,6 +786,8 @@ int link_state_init(void)
fail_fset_create:
connmgr_ae_fini(AEID_MGMT);
fail_connmgr_ae_init:
+ pthread_mutex_destroy(&ls.routing_i_lock);
+ fail_routing_i_lock_init:
pthread_rwlock_destroy(&ls.db_lock);
fail_db_lock_init:
notifier_unreg(handle_event);
@@ -765,5 +831,7 @@ void link_state_fini(void)
pthread_rwlock_destroy(&ls.db_lock);
+ pthread_mutex_destroy(&ls.routing_i_lock);
+
notifier_unreg(handle_event);
}
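
The routing policy is thus resolved exactly once, in link_state_init(),
into the ls.rtable function pointer; the calculator threads then call
whichever table builder was selected without re-checking the policy on
every recalculation. Condensed from the hunks above (error handling
elided; the real init code unwinds via goto labels):

    typedef int (* rtable_fn_t)(struct graph * graph,
                                uint64_t s_addr,
                                struct list_head * table);

    /* In link_state_init(): bind the policy once. */
    switch (pr) {
    case ROUTING_LINK_STATE:
            ls.rtable = graph_routing_table;
            break;
    case ROUTING_LINK_STATE_LFA:
            ls.rtable = graph_routing_table_lfa;
            break;
    default:
            return -1; /* unsupported routing policy */
    }

    /* In calculate_pff(): invoke it uniformly. */
    if (ls.rtable(ls.graph, ipcpi.dt_addr, &table))
            sleep(RECALC_TIME);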
diff --git a/src/ipcpd/normal/pol/link_state.h b/src/ipcpd/normal/pol/link_state.h
index 58f90d91..cfdeb09d 100644
--- a/src/ipcpd/normal/pol/link_state.h
+++ b/src/ipcpd/normal/pol/link_state.h
@@ -28,7 +28,7 @@
#include "pol-routing-ops.h"
-int link_state_init(void);
+int link_state_init(enum pol_routing pr);
void link_state_fini(void);
diff --git a/src/ipcpd/normal/routing.c b/src/ipcpd/normal/routing.c
index afef23a2..909f74cc 100644
--- a/src/ipcpd/normal/routing.c
+++ b/src/ipcpd/normal/routing.c
@@ -33,13 +33,14 @@ int routing_init(enum pol_routing pr)
{
switch (pr) {
case ROUTING_LINK_STATE:
+ case ROUTING_LINK_STATE_LFA:
r_ops = &link_state_ops;
break;
default:
return -ENOTSUP;
}
- return r_ops->init();
+ return r_ops->init(pr);
}
struct routing_i * routing_i_create(struct pff * pff)
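
With this change, the policy value chosen at bootstrap (or received
during enrollment, see the enroll.c hunk) flows through unchanged:
routing_init() accepts both link-state variants and hands the enum to
link_state_init(), which picks the table builder. A hedged caller
sketch, assuming a dif_config-style conf carrying the routing_type
field seen in enroll.c:

    if (routing_init(conf->routing_type) < 0) {
            log_err("Failed to initialize routing.");
            return -1;
    }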