Diffstat (limited to 'src/ipcpd/unicast/routing')
-rw-r--r--  src/ipcpd/unicast/routing/graph.c                  849
-rw-r--r--  src/ipcpd/unicast/routing/graph.h                   69
-rw-r--r--  src/ipcpd/unicast/routing/link-state.c            1055
-rw-r--r--  src/ipcpd/unicast/routing/link-state.h              41
-rw-r--r--  src/ipcpd/unicast/routing/ops.h                     38
-rw-r--r--  src/ipcpd/unicast/routing/tests/CMakeLists.txt      34
-rw-r--r--  src/ipcpd/unicast/routing/tests/graph_test.c       351
7 files changed, 2437 insertions, 0 deletions
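
The change set splits routing into a reusable graph component (graph.c, graph.h) and a link-state policy (link-state.c) that feeds adjacencies into that graph and turns the resulting routing table into forwarding (PFF) entries. As a quick orientation before the full diffs, below is a minimal caller-side sketch of the graph API declared in graph.h. The three-node topology, the addresses and the zeroed qosspec_t are illustrative only; note that the routing computation only uses a link once both endpoints have announced it, i.e. one graph_update_edge() call per direction.

#include <ouroboros/list.h>
#include <ouroboros/qos.h>

#include "graph.h"

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int example(void)
{
        struct graph *     gr;
        struct list_head   table;
        struct list_head * p;
        struct list_head * q;
        qosspec_t          qs;

        memset(&qs, 0, sizeof(qs));

        gr = graph_create();
        if (gr == NULL)
                return -1;

        /* Line topology 1 - 2 - 3; announce each link from both ends. */
        graph_update_edge(gr, 1, 2, qs);
        graph_update_edge(gr, 2, 1, qs);
        graph_update_edge(gr, 2, 3, qs);
        graph_update_edge(gr, 3, 2, qs);

        /* Hop-count shortest paths from node 1. */
        if (graph_routing_table(gr, ROUTING_SIMPLE, 1, &table)) {
                graph_destroy(gr);
                return -1;
        }

        list_for_each(p, &table) {
                struct routing_table * t =
                        list_entry(p, struct routing_table, next);
                list_for_each(q, &t->nhops) {
                        struct nhop * n = list_entry(q, struct nhop, next);
                        printf("dst %" PRIu64 " via %" PRIu64 "\n",
                               t->dst, n->nhop);
                }
        }

        graph_free_routing_table(gr, &table);
        graph_destroy(gr);

        return 0;
}

With ROUTING_LFA or ROUTING_ECMP the nhops list of a routing_table entry can hold more than one next hop, which is why it is a list rather than a single address.
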
| diff --git a/src/ipcpd/unicast/routing/graph.c b/src/ipcpd/unicast/routing/graph.c new file mode 100644 index 00000000..6ea5c507 --- /dev/null +++ b/src/ipcpd/unicast/routing/graph.c @@ -0,0 +1,849 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2021 + * + * Undirected graph structure + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + *    Nick Aerts        <nick.aerts@ugent.be> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#endif + +#define OUROBOROS_PREFIX "graph" + +#include <ouroboros/logs.h> +#include <ouroboros/errno.h> +#include <ouroboros/list.h> + +#include "graph.h" +#include "ipcp.h" + +#include <assert.h> +#include <pthread.h> +#include <stdlib.h> +#include <limits.h> +#include <string.h> + +struct vertex { +        struct list_head next; +        uint64_t         addr; +        struct list_head edges; +        int              index; +}; + +struct edge { +        struct list_head next; +        struct vertex *  nb; +        qosspec_t        qs; +        int              announced; +}; + +struct graph { +        size_t           nr_vertices; +        struct list_head vertices; +        pthread_mutex_t  lock; +}; + +static struct edge * find_edge_by_addr(struct vertex * vertex, +                                       uint64_t        dst_addr) +{ +        struct list_head * p; + +        assert(vertex); + +        list_for_each(p, &vertex->edges) { +                struct edge * e = list_entry(p, struct edge, next); +                if (e->nb->addr == dst_addr) +                        return e; +        } + +        return NULL; +} + +static struct vertex * find_vertex_by_addr(struct graph * graph, +                                           uint64_t       addr) +{ +        struct list_head * p; + +        assert(graph); + +        list_for_each(p, &graph->vertices) { +                struct vertex * e = list_entry(p, struct vertex, next); +                if (e->addr == addr) +                        return e; +        } + +        return NULL; +} + +static struct edge * add_edge(struct vertex * vertex, +                              struct vertex * nb) +{ +        struct edge * edge; + +        assert(vertex); +        assert(nb); + +        edge = malloc(sizeof(*edge)); +        if (edge == NULL) +                return NULL; + +        edge->nb = nb; +        edge->announced = 0; + +        list_add(&edge->next, &vertex->edges); + +        return edge; +} + +static void del_edge(struct edge * edge) +{ +        assert(edge); + +        list_del(&edge->next); +        free(edge); +} + +static struct vertex * add_vertex(struct graph * graph, +                                  uint64_t       addr) +{ +        struct vertex *    vertex; +        struct list_head * p; +        int                i = 0; + +        
assert(graph); + +        vertex = malloc(sizeof(*vertex)); +        if (vertex == NULL) +                return NULL; + +        list_head_init(&vertex->edges); +        vertex->addr = addr; + +        /* Keep them ordered on address. */ +        list_for_each(p, &graph->vertices) { +                struct vertex * v = list_entry(p, struct vertex, next); +                if (v->addr > addr) +                        break; +                i++; +        } + +        vertex->index = i; + +        list_add_tail(&vertex->next, p); + +        /* Increase the index of the vertices to the right. */ +        list_for_each(p, &graph->vertices) { +                struct vertex * v = list_entry(p, struct vertex, next); +                if (v->addr > addr) +                        v->index++; +        } + +        graph->nr_vertices++; + +        return vertex; +} + +static void del_vertex(struct graph *  graph, +                       struct vertex * vertex) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(graph); +        assert(vertex); + +        list_del(&vertex->next); + +        /* Decrease the index of the vertices to the right. */ +        list_for_each(p, &graph->vertices) { +                struct vertex * v = list_entry(p, struct vertex, next); +                if (v->addr > vertex->addr) +                        v->index--; +        } + +        list_for_each_safe(p, h, &vertex->edges) { +                struct edge * e = list_entry(p, struct edge, next); +                del_edge(e); +        } + +        free(vertex); + +        graph->nr_vertices--; +} + +struct graph * graph_create(void) +{ +        struct graph * graph; + +        graph = malloc(sizeof(*graph)); +        if (graph == NULL) +                return NULL; + +        if (pthread_mutex_init(&graph->lock, NULL)) { +                free(graph); +                return NULL; +        } + +        graph->nr_vertices = 0; +        list_head_init(&graph->vertices); + +        return graph; +} + +void graph_destroy(struct graph * graph) +{ +        struct list_head * p = NULL; +        struct list_head * n = NULL; + +        assert(graph); + +        pthread_mutex_lock(&graph->lock); + +        list_for_each_safe(p, n, &graph->vertices) { +                struct vertex * e = list_entry(p, struct vertex, next); +                del_vertex(graph, e); +        } + +        pthread_mutex_unlock(&graph->lock); + +        pthread_mutex_destroy(&graph->lock); + +        free(graph); +} + +int graph_update_edge(struct graph * graph, +                      uint64_t       s_addr, +                      uint64_t       d_addr, +                      qosspec_t      qs) +{ +        struct vertex * v; +        struct edge *   e; +        struct vertex * nb; +        struct edge *   nb_e; + +        assert(graph); + +        pthread_mutex_lock(&graph->lock); + +        v = find_vertex_by_addr(graph, s_addr); +        if (v == NULL) { +                v = add_vertex(graph, s_addr); +                if (v == NULL) { +                        pthread_mutex_unlock(&graph->lock); +                        log_err("Failed to add vertex."); +                        return -ENOMEM; +                } +        } + +        nb = find_vertex_by_addr(graph, d_addr); +        if (nb == NULL) { +                nb = add_vertex(graph, d_addr); +                if (nb == NULL) { +                        if (list_is_empty(&v->edges)) +                                del_vertex(graph, v); +                        
pthread_mutex_unlock(&graph->lock); +                        log_err("Failed to add vertex."); +                        return -ENOMEM; +                } +        } + +        e = find_edge_by_addr(v, d_addr); +        if (e == NULL) { +                e = add_edge(v, nb); +                if (e == NULL) { +                        if (list_is_empty(&v->edges)) +                                del_vertex(graph, v); +                        if (list_is_empty(&nb->edges)) +                                del_vertex(graph, nb); +                        pthread_mutex_unlock(&graph->lock); +                        log_err("Failed to add edge."); +                        return -ENOMEM; +                } +        } + +        e->announced++; +        e->qs = qs; + +        nb_e = find_edge_by_addr(nb, s_addr); +        if (nb_e == NULL) { +                nb_e = add_edge(nb, v); +                if (nb_e == NULL) { +                        if (--e->announced == 0) +                                del_edge(e); +                        if (list_is_empty(&v->edges)) +                                del_vertex(graph, v); +                        if (list_is_empty(&nb->edges)) +                                del_vertex(graph, nb); +                        pthread_mutex_unlock(&graph->lock); +                        log_err("Failed to add edge."); +                        return -ENOMEM; +                } +        } + +        nb_e->announced++; +        nb_e->qs = qs; + +        pthread_mutex_unlock(&graph->lock); + +        return 0; +} + +int graph_del_edge(struct graph * graph, +                   uint64_t       s_addr, +                   uint64_t       d_addr) +{ +        struct vertex * v; +        struct edge *   e; +        struct vertex * nb; +        struct edge *   nb_e; + +        assert(graph); + +        pthread_mutex_lock(&graph->lock); + +        v = find_vertex_by_addr(graph, s_addr); +        if (v == NULL) { +                pthread_mutex_unlock(&graph->lock); +                log_err("No such source vertex."); +                return -1; +        } + +        nb = find_vertex_by_addr(graph, d_addr); +        if (nb == NULL) { +                pthread_mutex_unlock(&graph->lock); +                log_err("No such destination vertex."); +                return -1; +        } + +        e = find_edge_by_addr(v, d_addr); +        if (e == NULL) { +                pthread_mutex_unlock(&graph->lock); +                log_err("No such source edge."); +                return -1; +        } + +        nb_e = find_edge_by_addr(nb, s_addr); +        if (nb_e == NULL) { +                pthread_mutex_unlock(&graph->lock); +                log_err("No such destination edge."); +                return -1; +        } + +        if (--e->announced == 0) +                del_edge(e); +        if (--nb_e->announced == 0) +                del_edge(nb_e); + +        /* Removing vertex if it was the last edge */ +        if (list_is_empty(&v->edges)) +                del_vertex(graph, v); +        if (list_is_empty(&nb->edges)) +                del_vertex(graph, nb); + +        pthread_mutex_unlock(&graph->lock); + +        return 0; +} + +static int get_min_vertex(struct graph *   graph, +                          int *            dist, +                          bool *           used, +                          struct vertex ** v) +{ +        int                min = INT_MAX; +        int                index = -1; +        int                i = 0; +        struct list_head * p; + +        
assert(v); +        assert(graph); +        assert(dist); +        assert(used); + +        *v = NULL; + +        list_for_each(p, &graph->vertices) { +                if (!used[i] && dist[i] < min) { +                        min = dist[i]; +                        index = i; +                        *v = list_entry(p, struct vertex, next); +                } + +                i++; +        } + +        if (index != -1) +                used[index] = true; + +        return index; +} + +static int dijkstra(struct graph *    graph, +                    uint64_t          src, +                    struct vertex *** nhops, +                    int **            dist) +{ +        bool *             used; +        struct list_head * p = NULL; +        int                i = 0; +        struct vertex *    v = NULL; +        struct edge *      e = NULL; +        int                alt; + +        assert(graph); +        assert(nhops); +        assert(dist); + +        *nhops = malloc(sizeof(**nhops) * graph->nr_vertices); +        if (*nhops == NULL) +                goto fail_pnhops; + +        *dist = malloc(sizeof(**dist) * graph->nr_vertices); +        if (*dist == NULL) +                goto fail_pdist; + +        used = malloc(sizeof(*used) * graph->nr_vertices); +        if (used == NULL) +                goto fail_used; + +        /* Init the data structures */ +        memset(used, 0, sizeof(*used) * graph->nr_vertices); +        memset(*nhops, 0, sizeof(**nhops) * graph->nr_vertices); +        memset(*dist, 0, sizeof(**dist) * graph->nr_vertices); + +        list_for_each(p, &graph->vertices) { +                v = list_entry(p, struct vertex, next); +                (*dist)[i++]  = (v->addr == src) ? 0 : INT_MAX; +        } + +        /* Perform actual Dijkstra */ +        i = get_min_vertex(graph, *dist, used, &v); +        while (v != NULL) { +                list_for_each(p, &v->edges) { +                        e = list_entry(p, struct edge, next); + +                        /* Only include it if both sides announced it. */ +                        if (e->announced != 2) +                                continue; + +                        /* +                         * NOTE: Current weight is just hop count. +                         * Method could be extended to use a different +                         * weight for a different QoS cube. 
+                         */ +                        alt = (*dist)[i] + 1; +                        if (alt < (*dist)[e->nb->index]) { +                                (*dist)[e->nb->index] = alt; +                                if (v->addr == src) +                                        (*nhops)[e->nb->index] = e->nb; +                                else +                                        (*nhops)[e->nb->index] = (*nhops)[i]; +                        } +                } +                i = get_min_vertex(graph, *dist, used, &v); +        } + +        free(used); + +        return 0; + + fail_used: +        free(*dist); + fail_pdist: +        free(*nhops); + fail_pnhops: +        return -1; + +} + +static void free_routing_table(struct list_head * table) +{ +        struct list_head * h; +        struct list_head * p; +        struct list_head * q; +        struct list_head * i; + +        assert(table); + +        list_for_each_safe(p, h, table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                list_for_each_safe(q, i, &t->nhops) { +                        struct nhop * n = +                                list_entry(q, struct nhop, next); +                        list_del(&n->next); +                        free(n); +                } +                list_del(&t->next); +                free(t); +        } +} + +void graph_free_routing_table(struct graph *     graph, +                              struct list_head * table) +{ +        assert(table); + +        pthread_mutex_lock(&graph->lock); + +        free_routing_table(table); + +        pthread_mutex_unlock(&graph->lock); +} + +static int graph_routing_table_simple(struct graph *     graph, +                                      uint64_t           s_addr, +                                      struct list_head * table, +                                      int **             dist) +{ +        struct vertex **       nhops; +        struct list_head *     p; +        int                    i = 0; +        struct vertex *        v; +        struct routing_table * t; +        struct nhop *          n; + +        assert(graph); +        assert(table); +        assert(dist); + +        /* We need at least 2 vertices for a table */ +        if (graph->nr_vertices < 2) +                goto fail_vertices; + +        if (dijkstra(graph, s_addr, &nhops, dist)) +                goto fail_vertices; + +        list_head_init(table); + +        /* Now construct the routing table from the nhops. 
*/ +        list_for_each(p, &graph->vertices) { +                v = list_entry(p, struct vertex, next); + +                /* This is the src */ +                if (nhops[i] == NULL) { +                        i++; +                        continue; +                } + +                t = malloc(sizeof(*t)); +                if (t == NULL) +                        goto fail_t; + +                list_head_init(&t->nhops); + +                n = malloc(sizeof(*n)); +                if (n == NULL) +                        goto fail_n; + +                t->dst = v->addr; +                n->nhop = nhops[i]->addr; + +                list_add(&n->next, &t->nhops); +                list_add(&t->next, table); + +                i++; +        } + +        free(nhops); + +        return 0; + + fail_n: +        free(t); + fail_t: +        free_routing_table(table); +        free(nhops); +        free(*dist); + fail_vertices: +        *dist = NULL; +        return -1; +} + +static int add_lfa_to_table(struct list_head * table, +                            uint64_t           addr, +                            uint64_t           lfa) +{ +        struct list_head * p; +        struct nhop *      n; + +        assert(table); + +        n = malloc(sizeof(*n)); +        if (n == NULL) +                return -1; + +        n->nhop = lfa; + +        list_for_each(p, table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                if (t->dst == addr) { +                        list_add_tail(&n->next, &t->nhops); +                        return 0; +                } +        } + +        free(n); + +        return -1; +} + +static int graph_routing_table_lfa(struct graph *     graph, +                                   uint64_t           s_addr, +                                   struct list_head * table, +                                   int **             dist) +{ +        int *              n_dist[PROG_MAX_FLOWS]; +        uint64_t           addrs[PROG_MAX_FLOWS]; +        int                n_index[PROG_MAX_FLOWS]; +        struct list_head * p; +        struct list_head * q; +        struct vertex *    v; +        struct edge *      e; +        struct vertex **   nhops; +        int                i = 0; +        int                j; +        int                k; + +        if (graph_routing_table_simple(graph, s_addr, table, dist)) +                goto fail_table; + +        for (j = 0; j < PROG_MAX_FLOWS; j++) { +                n_dist[j] = NULL; +                n_index[j] = -1; +                addrs[j] = -1; +        } + +        list_for_each(p, &graph->vertices) { +                v = list_entry(p, struct vertex, next); + +                if (v->addr != s_addr) +                        continue; + +                /* +                 * Get the distances for every neighbor +                 * of the source. +                 */ +                list_for_each(q, &v->edges) { +                        e = list_entry(q, struct edge, next); + +                        addrs[i] = e->nb->addr; +                        n_index[i] = e->nb->index; +                        if (dijkstra(graph, e->nb->addr, +                                     &nhops, &(n_dist[i++]))) +                                goto fail_dijkstra; + +                        free(nhops); +                } + +                break; +        } + +        /* Loop though all nodes to see if we have a LFA for them. 
*/ +        list_for_each(p, &graph->vertices) { +                v = list_entry(p, struct vertex, next); + +                if (v->addr == s_addr) +                        continue; + +                /* +                 * Check for every neighbor if +                 * dist(neighbor, destination) < +                 * dist(neighbor, source) + dist(source, destination). +                 */ +                for (j = 0; j < i; j++) { +                        /* Exclude ourselves. */ +                        if (addrs[j] == v->addr) +                                continue; + +                        if (n_dist[j][v->index] < +                            (*dist)[n_index[j]] + (*dist)[v->index]) +                                if (add_lfa_to_table(table, v->addr, +                                                     addrs[j])) +                                        goto fail_add_lfa; +                } +        } + +        for (j = 0; j < i; j++) +                free(n_dist[j]); + +        return 0; + + fail_add_lfa: +        for (k = j; k < i; k++) +                free(n_dist[k]); + fail_dijkstra: +        free_routing_table(table); + fail_table: +        return -1; +} + +static int graph_routing_table_ecmp(struct graph *     graph, +                                    uint64_t           s_addr, +                                    struct list_head * table, +                                    int **             dist) +{ +        struct vertex **       nhops; +        struct list_head *     p; +        struct list_head *     h; +        size_t                 i; +        struct vertex *        v; +        struct vertex *        src_v; +        struct edge *          e; +        struct routing_table * t; +        struct nhop *          n; +        struct list_head *     forwarding; + +        assert(graph); +        assert(dist); + +        if (graph-> nr_vertices < 2) +                goto fail_vertices; + +        forwarding = malloc(sizeof(*forwarding) * graph->nr_vertices); +        if (forwarding == NULL) +                goto fail_vertices; + +        for (i = 0; i < graph->nr_vertices; ++i) +                list_head_init(&forwarding[i]); + +        if (dijkstra(graph, s_addr, &nhops, dist)) +                goto fail_dijkstra; + +        free(nhops); + +        src_v = find_vertex_by_addr(graph, s_addr); +        if (src_v == NULL) +                goto fail_src_v; + +        list_for_each(p, &src_v->edges) { +                int * tmp_dist; + +                e = list_entry(p, struct edge, next); +                if (dijkstra(graph, e->nb->addr, &nhops, &tmp_dist)) +                        goto fail_src_v; + +                free(nhops); + +                list_for_each(h, &graph->vertices) { +                        v = list_entry(h, struct vertex, next); +                        if (tmp_dist[v->index] + 1 == (*dist)[v->index]) { +                                n = malloc(sizeof(*n)); +                                if (n == NULL) { +                                        free(tmp_dist); +                                        goto fail_src_v; +                                } +                                n->nhop = e->nb->addr; +                                list_add_tail(&n->next, &forwarding[v->index]); +                        } +                } + +                free(tmp_dist); +        } + +        list_head_init(table); +        i = 0; +        list_for_each(p, &graph->vertices) { +                v = list_entry(p, struct vertex, next); +                
if (v->addr == s_addr) { +                        ++i; +                        continue; +                } + +                t = malloc(sizeof(*t)); +                if (t == NULL) +                        goto fail_t; + +                t->dst = v->addr; + +                list_head_init(&t->nhops); +                if (&forwarding[i] != forwarding[i].nxt) { +                        t->nhops.nxt = forwarding[i].nxt; +                        t->nhops.prv = forwarding[i].prv; +                        forwarding[i].prv->nxt = &t->nhops; +                        forwarding[i].nxt->prv = &t->nhops; +                } + +                list_add(&t->next, table); +                ++i; +        } + +        free(*dist); +        *dist = NULL; +        free(forwarding); + +        return 0; + + fail_t: +        free_routing_table(table); + fail_src_v: +        free(*dist); + fail_dijkstra: +        free(forwarding); + fail_vertices: +        *dist = NULL; +        return -1; +} + +int graph_routing_table(struct graph *     graph, +                        enum routing_algo  algo, +                        uint64_t           s_addr, +                        struct list_head * table) +{ +        int * s_dist; + +        assert(graph); +        assert(table); + +        pthread_mutex_lock(&graph->lock); + +        switch (algo) { +        case ROUTING_SIMPLE: +                /* LFA uses the s_dist this returns. */ +                if (graph_routing_table_simple(graph, s_addr, table, &s_dist)) +                        goto fail_table; +                break; +        case ROUTING_LFA: +                if (graph_routing_table_lfa(graph, s_addr, table, &s_dist)) +                        goto fail_table; +                break; + +        case ROUTING_ECMP: +                if (graph_routing_table_ecmp(graph, s_addr, table, &s_dist)) +                        goto fail_table; +                break; +        default: +                log_err("Unsupported algorithm."); +                goto fail_table; +        } + +        pthread_mutex_unlock(&graph->lock); + +        free(s_dist); + +        return 0; + + fail_table: +        pthread_mutex_unlock(&graph->lock); +        return -1; +} diff --git a/src/ipcpd/unicast/routing/graph.h b/src/ipcpd/unicast/routing/graph.h new file mode 100644 index 00000000..632cc5a0 --- /dev/null +++ b/src/ipcpd/unicast/routing/graph.h @@ -0,0 +1,69 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2021 + * + * Undirected graph structure + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#ifndef OUROBOROS_IPCPD_UNICAST_GRAPH_H +#define OUROBOROS_IPCPD_UNICAST_GRAPH_H + +#include <ouroboros/list.h> +#include <ouroboros/qos.h> + +#include <inttypes.h> + +enum routing_algo { +         ROUTING_SIMPLE = 0, +         ROUTING_LFA, +         ROUTING_ECMP +}; + +struct nhop { +        struct list_head next; +        uint64_t         nhop; +}; + +struct routing_table { +        struct list_head next; +        uint64_t         dst; +        struct list_head nhops; +}; + +struct graph * graph_create(void); + +void           graph_destroy(struct graph * graph); + +int            graph_update_edge(struct graph * graph, +                                 uint64_t       s_addr, +                                 uint64_t       d_addr, +                                 qosspec_t      qs); + +int            graph_del_edge(struct graph * graph, +                              uint64_t       s_addr, +                              uint64_t       d_addr); + +int            graph_routing_table(struct graph *     graph, +                                   enum routing_algo  algo, +                                   uint64_t           s_addr, +                                   struct list_head * table); + +void           graph_free_routing_table(struct graph *     graph, +                                        struct list_head * table); + +#endif /* OUROBOROS_IPCPD_UNICAST_GRAPH_H */ diff --git a/src/ipcpd/unicast/routing/link-state.c b/src/ipcpd/unicast/routing/link-state.c new file mode 100644 index 00000000..7ceb86a1 --- /dev/null +++ b/src/ipcpd/unicast/routing/link-state.c @@ -0,0 +1,1055 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2021 + * + * Link state routing policy + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#endif + +#include "config.h" + +#define OUROBOROS_PREFIX "link-state-routing" + +#include <ouroboros/endian.h> +#include <ouroboros/dev.h> +#include <ouroboros/errno.h> +#include <ouroboros/fccntl.h> +#include <ouroboros/fqueue.h> +#include <ouroboros/list.h> +#include <ouroboros/logs.h> +#include <ouroboros/notifier.h> +#include <ouroboros/pthread.h> +#include <ouroboros/rib.h> +#include <ouroboros/utils.h> + +#include "common/comp.h" +#include "common/connmgr.h" +#include "graph.h" +#include "ipcp.h" +#include "link-state.h" +#include "pff.h" + +#include <assert.h> +#include <stdlib.h> +#include <inttypes.h> +#include <string.h> + +#define RECALC_TIME    4 +#define LS_UPDATE_TIME 15 +#define LS_TIMEO       60 +#define LS_ENTRY_SIZE  104 +#define LSDB           "lsdb" + +#ifndef CLOCK_REALTIME_COARSE +#define CLOCK_REALTIME_COARSE CLOCK_REALTIME +#endif + +struct lsa { +        uint64_t d_addr; +        uint64_t s_addr; +        uint64_t seqno; +} __attribute__((packed)); + +struct routing_i { +        struct list_head next; + +        struct pff *     pff; +        pthread_t        calculator; + +        bool             modified; +        pthread_mutex_t  lock; +}; + +/* TODO: link weight support. */ +struct adjacency { +        struct list_head next; + +        uint64_t         dst; +        uint64_t         src; + +        uint64_t         seqno; + +        time_t           stamp; +}; + +enum nb_type { +        NB_DT = 0, +        NB_MGMT +}; + +struct nb { +        struct list_head next; + +        uint64_t         addr; +        int              fd; +        enum nb_type     type; +}; + +struct { +        struct list_head  nbs; +        size_t            nbs_len; +        fset_t *          mgmt_set; + +        struct list_head  db; +        size_t            db_len; + +        pthread_rwlock_t  db_lock; + +        struct graph *    graph; + +        pthread_t         lsupdate; +        pthread_t         lsreader; +        pthread_t         listener; + +        struct list_head  routing_instances; +        pthread_mutex_t   routing_i_lock; + +        enum routing_algo routing_algo; +} ls; + +struct routing_ops link_state_ops = { +        .init              = link_state_init, +        .fini              = link_state_fini, +        .routing_i_create  = link_state_routing_i_create, +        .routing_i_destroy = link_state_routing_i_destroy +}; + +static int str_adj(struct adjacency * adj, +                   char *             buf, +                   size_t             len) +{ +        char        tmbuf[64]; +        char        srcbuf[64]; +        char        dstbuf[64]; +        char        seqnobuf[64]; +        struct tm * tm; + +        assert(adj); + +        if (len < LS_ENTRY_SIZE) +                return -1; + +        tm = localtime(&adj->stamp); +        strftime(tmbuf, sizeof(tmbuf), "%F %T", tm); /* 19 chars */ + +        sprintf(srcbuf, "%" PRIu64, adj->src); +        sprintf(dstbuf, "%" PRIu64, adj->dst); +        sprintf(seqnobuf, "%" PRIu64, adj->seqno); + +        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\nupd: %20s\n", +                srcbuf, dstbuf, seqnobuf, tmbuf); + +        return LS_ENTRY_SIZE; +} + +static struct adjacency * get_adj(const char * path) +{ +        struct list_head * p; +        char               entry[RIB_PATH_LEN + 1]; + +        assert(path); + +        list_for_each(p, &ls.db) { +                struct adjacency * 
a = list_entry(p, struct adjacency, next); +                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst); +                if (strcmp(entry, path) == 0) +                        return a; +        } + +        return NULL; +} + +static int lsdb_rib_getattr(const char *      path, +                            struct rib_attr * attr) +{ +        struct adjacency * adj; +        struct timespec    now; +        char *             entry; + +        assert(path); +        assert(attr); + +        entry = strstr(path, RIB_SEPARATOR) + 1; +        assert(entry); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_rdlock(&ls.db_lock); + +        adj = get_adj(entry); +        if (adj != NULL) { +                attr->mtime = adj->stamp; +                attr->size  = LS_ENTRY_SIZE; +        } else { +                attr->mtime = now.tv_sec; +                attr->size  = 0; +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        return 0; +} + +static int lsdb_rib_read(const char * path, +                         char *       buf, +                         size_t       len) +{ +        struct adjacency * a; +        char *             entry; +        int                size; + +        assert(path); + +        entry = strstr(path, RIB_SEPARATOR) + 1; +        assert(entry); + +        pthread_rwlock_rdlock(&ls.db_lock); + +        if (ls.db_len + ls.nbs_len == 0) +                goto fail; + +        a = get_adj(entry); +        if (a == NULL) +                goto fail; + +        size = str_adj(a, buf, len); +        if (size < 0) +                goto fail; + +        pthread_rwlock_unlock(&ls.db_lock); +        return size; + + fail: +        pthread_rwlock_unlock(&ls.db_lock); +        return -1; +} + +static int lsdb_rib_readdir(char *** buf) +{ +        struct list_head * p; +        char               entry[RIB_PATH_LEN + 1]; +        ssize_t            idx = 0; + +        assert(buf); + +        pthread_rwlock_rdlock(&ls.db_lock); + +        if (ls.db_len + ls.nbs_len == 0) { +                pthread_rwlock_unlock(&ls.db_lock); +                return 0; +        } + +        *buf = malloc(sizeof(**buf) * (ls.db_len + ls.nbs_len)); +        if (*buf == NULL) { +                pthread_rwlock_unlock(&ls.db_lock); +                return -ENOMEM; +        } + +        list_for_each(p, &ls.nbs) { +                struct nb * nb = list_entry(p, struct nb, next); +                char * str = (nb->type == NB_DT ? "dt." 
: "mgmt."); +                sprintf(entry, "%s%" PRIu64, str, nb->addr); +                (*buf)[idx] = malloc(strlen(entry) + 1); +                if ((*buf)[idx] == NULL) { +                        while (idx-- > 0) +                                free((*buf)[idx]); +                        free(buf); +                        pthread_rwlock_unlock(&ls.db_lock); +                        return -ENOMEM; +                } + +                strcpy((*buf)[idx], entry); + +                idx++; +        } + +        list_for_each(p, &ls.db) { +                struct adjacency * a = list_entry(p, struct adjacency, next); +                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst); +                (*buf)[idx] = malloc(strlen(entry) + 1); +                if ((*buf)[idx] == NULL) { +                        ssize_t j; +                        for (j = 0; j < idx; ++j) +                                free(*buf[j]); +                        free(buf); +                        pthread_rwlock_unlock(&ls.db_lock); +                        return -ENOMEM; +                } + +                strcpy((*buf)[idx], entry); + +                idx++; +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        return idx; +} + +static struct rib_ops r_ops = { +        .read    = lsdb_rib_read, +        .readdir = lsdb_rib_readdir, +        .getattr = lsdb_rib_getattr +}; + +static int lsdb_add_nb(uint64_t     addr, +                       int          fd, +                       enum nb_type type) +{ +        struct list_head * p; +        struct nb *        nb; + +        pthread_rwlock_wrlock(&ls.db_lock); + +        list_for_each(p, &ls.nbs) { +                struct nb * el = list_entry(p, struct nb, next); +                if (el->addr == addr && el->type == type) { +                        log_dbg("Already know %s neighbor %" PRIu64 ".", +                                type == NB_DT ? "dt" : "mgmt", addr); +                        if (el->fd != fd) { +                                log_warn("Existing neighbor assigned new fd."); +                                el->fd = fd; +                        } +                        pthread_rwlock_unlock(&ls.db_lock); +                        return -EPERM; +                } + +                if (addr > el->addr) +                        break; +        } + +        nb = malloc(sizeof(*nb)); +        if (nb == NULL) { +                pthread_rwlock_unlock(&ls.db_lock); +                return -ENOMEM; +        } + +        nb->addr  = addr; +        nb->fd    = fd; +        nb->type  = type; + +        list_add_tail(&nb->next, p); + +        ++ls.nbs_len; + +        log_dbg("Type %s neighbor %" PRIu64 " added.", +                nb->type == NB_DT ? "dt" : "mgmt", addr); + +        pthread_rwlock_unlock(&ls.db_lock); + +        return 0; +} + +static int lsdb_del_nb(uint64_t addr, +                       int      fd) +{ +        struct list_head * p; +        struct list_head * h; + +        pthread_rwlock_wrlock(&ls.db_lock); + +        list_for_each_safe(p, h, &ls.nbs) { +                struct nb * nb = list_entry(p, struct nb, next); +                if (nb->addr == addr && nb->fd == fd) { +                        list_del(&nb->next); +                        --ls.nbs_len; +                        pthread_rwlock_unlock(&ls.db_lock); +                        log_dbg("Type %s neighbor %" PRIu64 " deleted.", +                                nb->type == NB_DT ? 
"dt" : "mgmt", addr); +                        free(nb); +                        return 0; +                } +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        return -EPERM; +} + +static int nbr_to_fd(uint64_t addr) +{ +        struct list_head * p; +        int                fd; + +        pthread_rwlock_rdlock(&ls.db_lock); + +        list_for_each(p, &ls.nbs) { +                struct nb * nb = list_entry(p, struct nb, next); +                if (nb->addr == addr && nb->type == NB_DT) { +                        fd = nb->fd; +                        pthread_rwlock_unlock(&ls.db_lock); +                        return fd; +                } +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        return -1; +} + +static void calculate_pff(struct routing_i * instance) +{ +        int                fd; +        struct list_head   table; +        struct list_head * p; +        struct list_head * q; +        int                fds[PROG_MAX_FLOWS]; + +        assert(instance); + +        if (graph_routing_table(ls.graph, ls.routing_algo, +                                ipcpi.dt_addr, &table)) +                return; + +        pff_lock(instance->pff); + +        pff_flush(instance->pff); + +        /* Calculate forwarding table from routing table. */ +        list_for_each(p, &table) { +                int                    i = 0; +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); + +                list_for_each(q, &t->nhops) { +                        struct nhop * n = list_entry(q, struct nhop, next); + +                        fd = nbr_to_fd(n->nhop); +                        if (fd == -1) +                                continue; + +                        fds[i++] = fd; +                } +                if (i > 0) +                        pff_add(instance->pff, t->dst, fds, i); +        } + +        pff_unlock(instance->pff); + +        graph_free_routing_table(ls.graph, &table); +} + +static void set_pff_modified(bool calc) +{ +        struct list_head * p; + +        pthread_mutex_lock(&ls.routing_i_lock); +        list_for_each(p, &ls.routing_instances) { +                struct routing_i * inst = +                        list_entry(p, struct routing_i, next); +                pthread_mutex_lock(&inst->lock); +                inst->modified = true; +                pthread_mutex_unlock(&inst->lock); +                if (calc) +                        calculate_pff(inst); +        } +        pthread_mutex_unlock(&ls.routing_i_lock); +} + +static int lsdb_add_link(uint64_t    src, +                         uint64_t    dst, +                         uint64_t    seqno, +                         qosspec_t * qs) +{ +        struct list_head * p; +        struct adjacency * adj; +        struct timespec    now; +        int                ret = -1; + +        assert(qs); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_wrlock(&ls.db_lock); + +        list_for_each(p, &ls.db) { +                struct adjacency * a = list_entry(p, struct adjacency, next); +                if (a->dst == dst && a->src == src) { +                        if (a->seqno < seqno) { +                                a->stamp = now.tv_sec; +                                a->seqno = seqno; +                                ret = 0; +                        } +                        pthread_rwlock_unlock(&ls.db_lock); +                        return ret; +                } + +               
 if (a->dst > dst || (a->dst == dst && a->src > src)) +                        break; +        } + +        adj = malloc(sizeof(*adj)); +        if (adj == NULL) { +                pthread_rwlock_unlock(&ls.db_lock); +                return -ENOMEM; +        } + +        adj->dst   = dst; +        adj->src   = src; +        adj->seqno = seqno; +        adj->stamp = now.tv_sec; + +        list_add_tail(&adj->next, p); + +        ls.db_len++; + +        if (graph_update_edge(ls.graph, src, dst, *qs)) +                log_warn("Failed to add edge to graph."); + +        pthread_rwlock_unlock(&ls.db_lock); + +        set_pff_modified(true); + +        return 0; +} + +static int lsdb_del_link(uint64_t src, +                         uint64_t dst) +{ +        struct list_head * p; +        struct list_head * h; + +        pthread_rwlock_wrlock(&ls.db_lock); + +        list_for_each_safe(p, h, &ls.db) { +                struct adjacency * a = list_entry(p, struct adjacency, next); +                if (a->dst == dst && a->src == src) { +                        list_del(&a->next); +                        if (graph_del_edge(ls.graph, src, dst)) +                                log_warn("Failed to delete edge from graph."); + +                        ls.db_len--; + +                        pthread_rwlock_unlock(&ls.db_lock); +                        set_pff_modified(false); +                        free(a); +                        return 0; +                } +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        return -EPERM; +} + +static void * periodic_recalc_pff(void * o) +{ +        bool               modified; +        struct routing_i * inst; + +        assert(o); + +        inst = (struct routing_i *) o; + +        while (true) { +                pthread_mutex_lock(&inst->lock); +                modified = inst->modified; +                inst->modified = false; +                pthread_mutex_unlock(&inst->lock); + +                if (modified) +                        calculate_pff(inst); + +                sleep(RECALC_TIME); +        } + +        return (void *) 0; +} + +static void send_lsm(uint64_t src, +                     uint64_t dst, +                     uint64_t seqno) +{ +        struct lsa         lsm; +        struct list_head * p; + +        lsm.d_addr = hton64(dst); +        lsm.s_addr = hton64(src); +        lsm.seqno  = hton64(seqno); + +        list_for_each(p, &ls.nbs) { +                struct nb * nb = list_entry(p, struct nb, next); +                if (nb->type == NB_MGMT) +                        flow_write(nb->fd, &lsm, sizeof(lsm)); +        } +} + +/* replicate the lsdb to a mgmt neighbor */ +static void lsdb_replicate(int fd) +{ +        struct list_head * p; +        struct list_head * h; +        struct list_head   copy; + +        list_head_init(©); + +        /* Lock the lsdb, copy the lsms and send outside of lock. 
*/ +        pthread_rwlock_rdlock(&ls.db_lock); + +        list_for_each(p, &ls.db) { +                struct adjacency * adj; +                struct adjacency * cpy; +                adj = list_entry(p, struct adjacency, next); +                cpy = malloc(sizeof(*cpy)); +                if (cpy == NULL) { +                        log_warn("Failed to replicate full lsdb."); +                        break; +                } + +                cpy->dst   = adj->dst; +                cpy->src   = adj->src; +                cpy->seqno = adj->seqno; + +                list_add_tail(&cpy->next, ©); +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        list_for_each_safe(p, h, ©) { +                struct lsa         lsm; +                struct adjacency * adj; +                adj = list_entry(p, struct adjacency, next); +                lsm.d_addr = hton64(adj->dst); +                lsm.s_addr = hton64(adj->src); +                lsm.seqno  = hton64(adj->seqno); +                list_del(&adj->next); +                free(adj); +                flow_write(fd, &lsm, sizeof(lsm)); +        } +} + +static void * lsupdate(void * o) +{ +        struct list_head * p; +        struct list_head * h; +        struct timespec    now; + +        (void) o; + +        while (true) { +                clock_gettime(CLOCK_REALTIME_COARSE, &now); + +                pthread_rwlock_wrlock(&ls.db_lock); + +                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); + +                list_for_each_safe(p, h, &ls.db) { +                        struct adjacency * adj; +                        adj = list_entry(p, struct adjacency, next); +                        if (now.tv_sec - adj->stamp > LS_TIMEO) { +                                list_del(&adj->next); +                                log_dbg("%" PRIu64 " - %" PRIu64" timed out.", +                                        adj->src, adj->dst); +                                if (graph_del_edge(ls.graph, adj->src, +                                                   adj->dst)) +                                        log_err("Failed to del edge."); +                                free(adj); +                                continue; +                        } + +                        if (adj->src == ipcpi.dt_addr) { +                                adj->seqno++; +                                send_lsm(adj->src, adj->dst, adj->seqno); +                                adj->stamp = now.tv_sec; +                        } +                } + +                pthread_cleanup_pop(true); + +                sleep(LS_UPDATE_TIME); +        } + +        return (void *) 0; +} + +static void * ls_conn_handle(void * o) +{ +        struct conn conn; + +        (void) o; + +        while (true) { +                if (connmgr_wait(COMPID_MGMT, &conn)) { +                        log_err("Failed to get next MGMT connection."); +                        continue; +                } + +                /* NOTE: connection acceptance policy could be here. 
*/ + +                notifier_event(NOTIFY_MGMT_CONN_ADD, &conn); +        } + +        return 0; +} + + +static void forward_lsm(uint8_t * buf, +                        size_t    len, +                        int       in_fd) +{ +        struct list_head * p; + +        pthread_rwlock_rdlock(&ls.db_lock); + +        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); + +        list_for_each(p, &ls.nbs) { +                struct nb * nb = list_entry(p, struct nb, next); +                if (nb->type == NB_MGMT && nb->fd != in_fd) +                        flow_write(nb->fd, buf, len); +        } + +        pthread_cleanup_pop(true); +} + +static void cleanup_fqueue(void * fq) +{ +        fqueue_destroy((fqueue_t *) fq); +} + +static void * lsreader(void * o) +{ +        fqueue_t *   fq; +        int          ret; +        uint8_t      buf[sizeof(struct lsa)]; +        int          fd; +        qosspec_t    qs; +        struct lsa * msg; +        size_t       len; + +        (void) o; + +        memset(&qs, 0, sizeof(qs)); + +        fq = fqueue_create(); +        if (fq == NULL) +                return (void *) -1; + +        pthread_cleanup_push(cleanup_fqueue, fq); + +        while (true) { +                ret = fevent(ls.mgmt_set, fq, NULL); +                if (ret < 0) { +                        log_warn("Event error: %d.", ret); +                        continue; +                } + +                while ((fd = fqueue_next(fq)) >= 0) { +                        if (fqueue_type(fq) != FLOW_PKT) +                                continue; + +                        len = flow_read(fd, buf, sizeof(*msg)); +                        if (len <= 0 || len != sizeof(*msg)) +                                continue; + +                        msg = (struct lsa *) buf; + +                        if (lsdb_add_link(ntoh64(msg->s_addr), +                                          ntoh64(msg->d_addr), +                                          ntoh64(msg->seqno), +                                          &qs)) +                                continue; + +                        forward_lsm(buf, len, fd); +                } +        } + +        pthread_cleanup_pop(true); + +        return (void *) 0; +} + +static void flow_event(int  fd, +                       bool up) +{ + +        struct list_head * p; + +        log_dbg("Notifying routing instances of flow event."); + +        pthread_mutex_lock(&ls.routing_i_lock); + +        list_for_each(p, &ls.routing_instances) { +                struct routing_i * ri = list_entry(p, struct routing_i, next); +                pff_flow_state_change(ri->pff, fd, up); +        } + +        pthread_mutex_unlock(&ls.routing_i_lock); +} + +static void handle_event(void *       self, +                         int          event, +                         const void * o) +{ +        /* FIXME: Apply correct QoS on graph */ +        struct conn *      c; +        qosspec_t          qs; +        int                flags; + +        (void) self; + +        assert(o); + +        c = (struct conn *) o; + +        memset(&qs, 0, sizeof(qs)); + +        switch (event) { +        case NOTIFY_DT_CONN_ADD: +                pthread_rwlock_rdlock(&ls.db_lock); + +                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); + +                send_lsm(ipcpi.dt_addr, c->conn_info.addr, 0); +                pthread_cleanup_pop(true); + +                if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT)) +                        log_dbg("Failed to 
add neighbor to LSDB."); + +                if (lsdb_add_link(ipcpi.dt_addr, c->conn_info.addr, 0, &qs)) +                        log_dbg("Failed to add new adjacency to LSDB."); +                break; +        case NOTIFY_DT_CONN_DEL: +                flow_event(c->flow_info.fd, false); + +                if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd)) +                        log_dbg("Failed to delete neighbor from LSDB."); + +                if (lsdb_del_link(ipcpi.dt_addr, c->conn_info.addr)) +                        log_dbg("Local link was not in LSDB."); +                break; +        case NOTIFY_DT_CONN_QOS: +                log_dbg("QoS changes currently unsupported."); +                break; +        case NOTIFY_DT_CONN_UP: +                flow_event(c->flow_info.fd, true); +                break; +        case NOTIFY_DT_CONN_DOWN: +                flow_event(c->flow_info.fd, false); +                break; +        case NOTIFY_MGMT_CONN_ADD: +                fccntl(c->flow_info.fd, FLOWGFLAGS, &flags); +                fccntl(c->flow_info.fd, FLOWSFLAGS, flags | FLOWFRNOPART); +                fset_add(ls.mgmt_set, c->flow_info.fd); +                if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_MGMT)) +                        log_warn("Failed to add mgmt neighbor to LSDB."); +                /* replicate the entire lsdb */ +                lsdb_replicate(c->flow_info.fd); +                break; +        case NOTIFY_MGMT_CONN_DEL: +                fset_del(ls.mgmt_set, c->flow_info.fd); +                if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd)) +                        log_warn("Failed to delete mgmt neighbor from LSDB."); +                break; +        default: +                break; +        } +} + +struct routing_i * link_state_routing_i_create(struct pff * pff) +{ +        struct routing_i * tmp; + +        assert(pff); + +        tmp = malloc(sizeof(*tmp)); +        if (tmp == NULL) +                goto fail_tmp; + +        tmp->pff      = pff; +        tmp->modified = false; + +        if (pthread_mutex_init(&tmp->lock, NULL)) +                goto fail_instance_lock_init; + +        if (pthread_create(&tmp->calculator, NULL, +                           periodic_recalc_pff, tmp)) +                goto fail_pthread_create_lsupdate; + +        pthread_mutex_lock(&ls.routing_i_lock); + +        list_add(&tmp->next, &ls.routing_instances); + +        pthread_mutex_unlock(&ls.routing_i_lock); + +        return tmp; + + fail_pthread_create_lsupdate: +        pthread_mutex_destroy(&tmp->lock); + fail_instance_lock_init: +        free(tmp); + fail_tmp: +        return NULL; +} + +void link_state_routing_i_destroy(struct routing_i * instance) +{ +        assert(instance); + +        pthread_mutex_lock(&ls.routing_i_lock); + +        list_del(&instance->next); + +        pthread_mutex_unlock(&ls.routing_i_lock); + +        pthread_cancel(instance->calculator); + +        pthread_join(instance->calculator, NULL); + +        pthread_mutex_destroy(&instance->lock); + +        free(instance); +} + +int link_state_init(enum pol_routing pr) +{ +        struct conn_info info; + +        memset(&info, 0, sizeof(info)); + +        strcpy(info.comp_name, LS_COMP); +        strcpy(info.protocol, LS_PROTO); +        info.pref_version = 1; +        info.pref_syntax  = PROTO_GPB; +        info.addr         = ipcpi.dt_addr; + +        switch (pr) { +        case ROUTING_LINK_STATE: +                log_dbg("Using link state routing policy."); +                
ls.routing_algo = ROUTING_SIMPLE; +                break; +        case ROUTING_LINK_STATE_LFA: +                log_dbg("Using Loop-Free Alternates policy."); +                ls.routing_algo = ROUTING_LFA; +                break; +        case ROUTING_LINK_STATE_ECMP: +                log_dbg("Using Equal-Cost Multipath policy."); +                ls.routing_algo = ROUTING_ECMP; +                break; +        default: +                goto fail_graph; +        } + +        ls.graph = graph_create(); +        if (ls.graph == NULL) +                goto fail_graph; + +        if (notifier_reg(handle_event, NULL)) +                goto fail_notifier_reg; + +        if (pthread_rwlock_init(&ls.db_lock, NULL)) +                goto fail_db_lock_init; + +        if (pthread_mutex_init(&ls.routing_i_lock, NULL)) +                goto fail_routing_i_lock_init; + +        if (connmgr_comp_init(COMPID_MGMT, &info)) +                goto fail_connmgr_comp_init; + +        ls.mgmt_set = fset_create(); +        if (ls.mgmt_set == NULL) +                goto fail_fset_create; + +        list_head_init(&ls.db); +        list_head_init(&ls.nbs); +        list_head_init(&ls.routing_instances); + +        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) +                goto fail_pthread_create_lsupdate; + +        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) +                goto fail_pthread_create_lsreader; + +        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) +                goto fail_pthread_create_listener; + +        if (rib_reg(LSDB, &r_ops)) +                goto fail_rib_reg; + +        ls.db_len  = 0; +        ls.nbs_len = 0; + +        return 0; + + fail_rib_reg: +        pthread_cancel(ls.listener); +        pthread_join(ls.listener, NULL); + fail_pthread_create_listener: +        pthread_cancel(ls.lsreader); +        pthread_join(ls.lsreader, NULL); + fail_pthread_create_lsreader: +        pthread_cancel(ls.lsupdate); +        pthread_join(ls.lsupdate, NULL); + fail_pthread_create_lsupdate: +        fset_destroy(ls.mgmt_set); + fail_fset_create: +        connmgr_comp_fini(COMPID_MGMT); + fail_connmgr_comp_init: +        pthread_mutex_destroy(&ls.routing_i_lock); + fail_routing_i_lock_init: +        pthread_rwlock_destroy(&ls.db_lock); + fail_db_lock_init: +        notifier_unreg(handle_event); + fail_notifier_reg: +        graph_destroy(ls.graph); + fail_graph: +        return -1; +} + +void link_state_fini(void) +{ +        struct list_head * p; +        struct list_head * h; + +        rib_unreg(LSDB); + +        notifier_unreg(handle_event); + +        pthread_cancel(ls.listener); +        pthread_cancel(ls.lsreader); +        pthread_cancel(ls.lsupdate); + +        pthread_join(ls.listener, NULL); +        pthread_join(ls.lsreader, NULL); +        pthread_join(ls.lsupdate, NULL); + +        fset_destroy(ls.mgmt_set); + +        connmgr_comp_fini(COMPID_MGMT); + +        graph_destroy(ls.graph); + +        pthread_rwlock_wrlock(&ls.db_lock); + +        list_for_each_safe(p, h, &ls.db) { +                struct adjacency * a = list_entry(p, struct adjacency, next); +                list_del(&a->next); +                free(a); +        } + +        pthread_rwlock_unlock(&ls.db_lock); + +        pthread_rwlock_destroy(&ls.db_lock); + +        pthread_mutex_destroy(&ls.routing_i_lock); +} diff --git a/src/ipcpd/unicast/routing/link-state.h b/src/ipcpd/unicast/routing/link-state.h new file mode 100644 index 00000000..c6e573ff --- /dev/null +++ 
b/src/ipcpd/unicast/routing/link-state.h @@ -0,0 +1,41 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2021 + * + * Link state routing policy + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#ifndef OUROBOROS_IPCPD_UNICAST_POL_LINK_STATE_H +#define OUROBOROS_IPCPD_UNICAST_POL_LINK_STATE_H + +#define LS_COMP  "Management" +#define LS_PROTO "LSP" + +#include "ops.h" + +int                link_state_init(enum pol_routing pr); + +void               link_state_fini(void); + +struct routing_i * link_state_routing_i_create(struct pff * pff); + +void               link_state_routing_i_destroy(struct routing_i * instance); + +extern struct routing_ops link_state_ops; + +#endif /* OUROBOROS_IPCPD_UNICAST_POL_LINK_STATE_H */ diff --git a/src/ipcpd/unicast/routing/ops.h b/src/ipcpd/unicast/routing/ops.h new file mode 100644 index 00000000..1522ccd9 --- /dev/null +++ b/src/ipcpd/unicast/routing/ops.h @@ -0,0 +1,38 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2021 + * + * Routing policy ops + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#ifndef OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H +#define OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H + +#include "pff.h" + +struct routing_ops { +        int                (* init)(enum pol_routing pr); + +        void               (* fini)(void); + +        struct routing_i * (* routing_i_create)(struct pff * pff); + +        void               (* routing_i_destroy)(struct routing_i * instance); +}; + +#endif /* OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H */ diff --git a/src/ipcpd/unicast/routing/tests/CMakeLists.txt b/src/ipcpd/unicast/routing/tests/CMakeLists.txt new file mode 100644 index 00000000..d0652533 --- /dev/null +++ b/src/ipcpd/unicast/routing/tests/CMakeLists.txt @@ -0,0 +1,34 @@ +get_filename_component(CURRENT_SOURCE_PARENT_DIR +  ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(CURRENT_BINARY_PARENT_DIR +  ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +include_directories(${CURRENT_SOURCE_PARENT_DIR}) +include_directories(${CURRENT_BINARY_PARENT_DIR}) + +include_directories(${CMAKE_SOURCE_DIR}/include) +include_directories(${CMAKE_BINARY_DIR}/include) + +get_filename_component(PARENT_PATH ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(PARENT_DIR ${PARENT_PATH} NAME) + +create_test_sourcelist(${PARENT_DIR}_tests test_suite.c +  # Add new tests here +  graph_test.c +  ) + +add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests}) +target_link_libraries(${PARENT_DIR}_test ouroboros-common) + +add_dependencies(check ${PARENT_DIR}_test) + +set(tests_to_run ${${PARENT_DIR}_tests}) +remove(tests_to_run test_suite.c) + +foreach (test ${tests_to_run}) +  get_filename_component(test_name ${test} NAME_WE) +  add_test(${test_name} ${C_TEST_PATH}/${PARENT_DIR}_test ${test_name}) +endforeach (test) diff --git a/src/ipcpd/unicast/routing/tests/graph_test.c b/src/ipcpd/unicast/routing/tests/graph_test.c new file mode 100644 index 00000000..217c7eab --- /dev/null +++ b/src/ipcpd/unicast/routing/tests/graph_test.c @@ -0,0 +1,351 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2021 + * + * Test of the graph structure + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#define _POSIX_C_SOURCE 200112L + +#include <ouroboros/utils.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "graph.c" + +struct graph *   graph; +struct list_head table; +qosspec_t        qs; + +int graph_test_entries(int entries) +{ +        struct list_head * p; +        int                i = 0; + +        if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { +                printf("Failed to get routing table.\n"); +                return -1; +        } + +        list_for_each(p, &table) +                i++; + +        if (i != entries) { +                printf("Wrong number of entries.\n"); +                graph_free_routing_table(graph, &table); +                return -1; +        } + +        graph_free_routing_table(graph, &table); + +        return 0; +} + +int graph_test_double_link(void) +{ +        struct list_head * p; +        int                i = 0; + +        if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { +                printf("Failed to get routing table.\n"); +                return -1; +        } + +        list_for_each(p, &table) +                i++; + +        if (i != 2) { +                printf("Wrong number of entries.\n"); +                graph_free_routing_table(graph, &table); +                return -1; +        } + +        list_for_each(p, &table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                struct nhop *          n = +                        list_first_entry(&t->nhops, struct nhop, next); + +                if ((t->dst != 2 && n->nhop != 2) || +                    (t->dst != 3 && n->nhop != 2)) { +                        printf("Wrong routing entry.\n"); +                        graph_free_routing_table(graph, &table); +                        return -1; +                } +        } + +        graph_free_routing_table(graph, &table); + +        return 0; +} + +int graph_test_single_link(void) +{ +        struct list_head * p; +        int                i = 0; + +        if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { +                printf("Failed to get routing table.\n"); +                return -1; +        } + +        list_for_each(p, &table) +                i++; + +        if (i != 1) { +                printf("Wrong number of entries.\n"); +                graph_free_routing_table(graph, &table); +                return -1; +        } + +        list_for_each(p, &table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                struct nhop *          n = +                        list_first_entry(&t->nhops, struct nhop, next); + +                if (t->dst != 2 && n->nhop != 2) { +                        printf("Wrong routing entry.\n"); +                        graph_free_routing_table(graph, &table); +                        return -1; +                } +        } + +        graph_free_routing_table(graph, &table); + +        return 0; +} + +static int distance_check(int               dst, +                          int               nhop, +                          enum routing_algo algo) +{ +        (void) algo; + +        /* Same distances for all algorithms at the moment. 
*/ +        if (dst == 2 && nhop != 2) { +                printf("Wrong entry: 2.\n"); +                return -1; +        } + +        if (dst == 3 && nhop != 3) { +                printf("Wrong entry: 3.\n"); +                return -1; +        } + +        if (dst == 4 && nhop != 3) { +                printf("Wrong entry: 4.\n"); +                return -1; +        } + +        if (dst == 5 && nhop != 3) { +                printf("Wrong entry: 5.\n"); +                return -1; +        } + +        if (dst == 6 && nhop != 2) { +                printf("Wrong entry: 6.\n"); +                return -1; +        } + +        if (dst == 7 && nhop != 3) { +                printf("Wrong entry: 7.\n"); +                return -1; +        } + +        return 0; +} + +int graph_test(int     argc, +               char ** argv) +{ +        int                nhop; +        int                dst; +        struct list_head * p; + +        (void) argc; +        (void) argv; + +        memset(&qs, 0, sizeof(qs)); + +        graph = graph_create(); +        if (graph == NULL) { +                printf("Failed to create graph.\n"); +                return -1; +        } + +        graph_destroy(graph); + +        graph = graph_create(); +        if (graph == NULL) { +                printf("Failed to create graph.\n"); +                return -1; +        } + +        if (graph_update_edge(graph, 1, 2, qs)) { +                printf("Failed to add edge.\n"); +                graph_destroy(graph); +                return -1; +        } + +        if (graph_update_edge(graph, 2, 1, qs)) { +                printf("Failed to add edge.\n"); +                graph_destroy(graph); +                return -1; +        } + +        if (graph_test_single_link()) { +                graph_destroy(graph); +                return -1; +        } + +        if (graph_update_edge(graph, 2, 3, qs)) { +                printf("Failed to add edge.\n"); +                graph_destroy(graph); +                return -1; +        } + +        if (graph_update_edge(graph, 3, 2, qs)) { +                printf("Failed to add edge.\n"); +                graph_destroy(graph); +                return -1; +        } + +        if (graph_test_double_link()) { +                graph_destroy(graph); +                return -1; +        } + +        if (graph_del_edge(graph, 2, 3)) { +                printf("Failed to delete edge.\n"); +                goto fail_graph; +        } + +        if (graph_del_edge(graph, 3, 2)) { +                printf("Failed to delete edge.\n"); +                goto fail_graph; +        } + +        if (graph_test_single_link()) +                goto fail_graph; + +        graph_update_edge(graph, 2, 3, qs); +        graph_update_edge(graph, 3, 2, qs); +        graph_update_edge(graph, 1, 3, qs); +        graph_update_edge(graph, 3, 1, qs); + +        if (graph_test_entries(2)) +                goto fail_graph; + +        graph_update_edge(graph, 3, 4, qs); +        graph_update_edge(graph, 4, 3, qs); +        graph_update_edge(graph, 4, 5, qs); +        graph_update_edge(graph, 5, 4, qs); + +        if (graph_test_entries(4)) +                goto fail_graph; + +        graph_update_edge(graph, 2, 6, qs); +        graph_update_edge(graph, 6, 2, qs); +        graph_update_edge(graph, 6, 7, qs); +        graph_update_edge(graph, 7, 6, qs); +        graph_update_edge(graph, 3, 7, qs); +        graph_update_edge(graph, 7, 3, qs); + +        if (graph_test_entries(6)) +                goto fail_graph; + 
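+        /* +         * At this point the graph holds the bidirectional links 1-2, 1-3, +         * 2-3, 2-6, 3-4, 3-7, 4-5 and 6-7, all added with the same (default) +         * cost, so the shortest paths from node 1 use next hop 2 for +         * destinations 2 and 6 and next hop 3 for destinations 3, 4, 5 +         * and 7, as encoded in distance_check() above. +         */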
+ +        if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { +                printf("Failed to get routing table.\n"); +                goto fail_graph; +        } + +        list_for_each(p, &table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                struct nhop *          n = +                        list_first_entry(&t->nhops, struct nhop, next); + +                dst = t->dst; +                nhop = n->nhop; + +                if (distance_check(dst, nhop, ROUTING_SIMPLE)) { +                        printf("Simple distance check failed.\n"); +                        goto fail_routing; +                } +        } + +        graph_free_routing_table(graph, &table); + +        if (graph_routing_table(graph, ROUTING_LFA, 1, &table)) { +                printf("Failed to get routing table for LFA.\n"); +                goto fail_graph; +        } + +        list_for_each(p, &table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                struct nhop *          n = +                        list_first_entry(&t->nhops, struct nhop, next); + +                dst = t->dst; +                nhop = n->nhop; + +                if (distance_check(dst, nhop, ROUTING_LFA)) { +                        printf("LFA distance check failed.\n"); +                        goto fail_routing; +                } +        } + +        graph_free_routing_table(graph, &table); + +        if (graph_routing_table(graph, ROUTING_ECMP, 1, &table)) { +                printf("Failed to get routing table for ECMP.\n"); +                goto fail_graph; +        } + +        list_for_each(p, &table) { +                struct routing_table * t = +                        list_entry(p, struct routing_table, next); +                struct nhop *          n = +                        list_first_entry(&t->nhops, struct nhop, next); + +                dst = t->dst; +                nhop = n->nhop; + +                if (distance_check(dst, nhop, ROUTING_ECMP)) { +                        printf("ECMP distance check failed.\n"); +                        goto fail_routing; +                } +        } + +        graph_free_routing_table(graph, &table); + +        graph_destroy(graph); + +        return 0; + + fail_routing: +        graph_free_routing_table(graph, &table); + fail_graph: +        graph_destroy(graph); +        return -1; +}
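For context on how these pieces fit together: ops.h defines the generic routing policy interface, and link-state.h exports the four matching entry points plus an extern link_state_ops table. Below is a minimal sketch of how a caller might dispatch through that table; the definition of link_state_ops itself lives in link-state.c, and the routing_init / routing_i_create wrapper names used here are illustrative assumptions, not part of this commit.

    #include "ops.h"
    #include "link-state.h"

    /* Illustrative dispatch through the policy ops table. */
    static struct routing_ops * r_ops;

    int routing_init(enum pol_routing pr)
    {
            /* Select the link state policy; pr chooses SIMPLE, LFA or ECMP. */
            r_ops = &link_state_ops;

            return r_ops->init(pr);
    }

    struct routing_i * routing_i_create(struct pff * pff)
    {
            /* One routing instance per packet forwarding function. */
            return r_ops->routing_i_create(pff);
    }

    void routing_i_destroy(struct routing_i * instance)
    {
            r_ops->routing_i_destroy(instance);
    }

    void routing_fini(void)
    {
            r_ops->fini();
    }

Keeping the policy behind struct routing_ops means the rest of the unicast IPCP only sees init/fini and instance create/destroy, so alternative routing policies can be added without touching the callers.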
