Diffstat (limited to 'src/ipcpd/unicast/routing/link-state.c')
-rw-r--r--  src/ipcpd/unicast/routing/link-state.c  540
1 file changed, 315 insertions(+), 225 deletions(-)
diff --git a/src/ipcpd/unicast/routing/link-state.c b/src/ipcpd/unicast/routing/link-state.c
index 57c0c7cb..e5edf539 100644
--- a/src/ipcpd/unicast/routing/link-state.c
+++ b/src/ipcpd/unicast/routing/link-state.c
@@ -42,6 +42,7 @@
 #include <ouroboros/rib.h>
 #include <ouroboros/utils.h>
 
+#include "addr-auth.h"
 #include "common/comp.h"
 #include "common/connmgr.h"
 #include "graph.h"
@@ -54,9 +55,6 @@
 #include <inttypes.h>
 #include <string.h>
 
-#define RECALC_TIME    4
-#define LS_UPDATE_TIME 15
-#define LS_TIMEO       60
 #define LS_ENTRY_SIZE  104
 #define LSDB           "lsdb"
 
@@ -64,6 +62,12 @@
 #define CLOCK_REALTIME_COARSE CLOCK_REALTIME
 #endif
 
+#define LINK_FMT ADDR_FMT32 "--" ADDR_FMT32
+#define LINK_VAL(src, dst) ADDR_VAL32(&src), ADDR_VAL32(&dst)
+
+#define LSU_FMT "LSU ["ADDR_FMT32 " -- " ADDR_FMT32 " seq: %09" PRIu64 "]"
+#define LSU_VAL(src, dst, seqno) ADDR_VAL32(&src), ADDR_VAL32(&dst), seqno
+
 struct lsa {
         uint64_t d_addr;
         uint64_t s_addr;
@@ -106,30 +110,45 @@ struct nb {
 };
 
 struct {
-        struct list_head  nbs;
-        size_t            nbs_len;
+        uint64_t          addr;
+
+        enum routing_algo routing_algo;
+
+        struct ls_config  conf;
+
         fset_t *          mgmt_set;
 
-        struct list_head  db;
-        size_t            db_len;
+        struct graph *    graph;
+
+        struct {
+                struct {
+                        struct list_head list;
+                        size_t           len;
+                } nbs;
+
+                struct {
+                        struct list_head list;
+                        size_t           len;
+                } db;
 
-        pthread_rwlock_t  db_lock;
+                pthread_rwlock_t lock;
+        };
 
-        struct graph *    graph;
+        struct {
+                struct list_head list;
+                pthread_mutex_t  mtx;
+        } instances;
 
         pthread_t         lsupdate;
         pthread_t         lsreader;
         pthread_t         listener;
-
-        struct list_head  routing_instances;
-        pthread_mutex_t   routing_i_lock;
-
-        enum routing_algo routing_algo;
 } ls;
 
 struct routing_ops link_state_ops = {
-        .init              = link_state_init,
+        .init              = (int (*)(void *, enum pol_pff *)) link_state_init,
         .fini              = link_state_fini,
+        .start             = link_state_start,
+        .stop              = link_state_stop,
         .routing_i_create  = link_state_routing_i_create,
         .routing_i_destroy = link_state_routing_i_destroy
 };
@@ -138,7 +157,7 @@ static int str_adj(struct adjacency * adj,
                    char *             buf,
                    size_t             len)
 {
-        char tmbuf[64];
+        char tmstr[RIB_TM_STRLEN];
         char srcbuf[64];
         char dstbuf[64];
         char seqnobuf[64];
@@ -149,15 +168,16 @@ static int str_adj(struct adjacency * adj,
         if (len < LS_ENTRY_SIZE)
                 return -1;
 
-        tm = localtime(&adj->stamp);
-        strftime(tmbuf, sizeof(tmbuf), "%F %T", tm); /* 19 chars */
+        tm = gmtime(&adj->stamp);
+        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
 
-        sprintf(srcbuf, "%" PRIu64, adj->src);
-        sprintf(dstbuf, "%" PRIu64, adj->dst);
+        sprintf(srcbuf, ADDR_FMT32, ADDR_VAL32(&adj->src));
+        sprintf(dstbuf, ADDR_FMT32, ADDR_VAL32(&adj->dst));
         sprintf(seqnobuf, "%" PRIu64, adj->seqno);
 
-        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\nupd: %20s\n",
-                srcbuf, dstbuf, seqnobuf, tmbuf);
+        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\n"
+                "upd: %s\n",
+                srcbuf, dstbuf, seqnobuf, tmstr);
 
         return LS_ENTRY_SIZE;
 }
@@ -169,9 +189,9 @@ static struct adjacency * get_adj(const char * path)
 
         assert(path);
 
-        list_for_each(p, &ls.db) {
+        list_for_each(p, &ls.db.list) {
                 struct adjacency * a = list_entry(p, struct adjacency, next);
-                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst);
+                sprintf(entry, LINK_FMT, LINK_VAL(a->src, a->dst));
                 if (strcmp(entry, path) == 0)
                         return a;
         }
@@ -194,7 +214,7 @@ static int lsdb_rib_getattr(const char * path,
 
         clock_gettime(CLOCK_REALTIME_COARSE, &now);
 
-        pthread_rwlock_rdlock(&ls.db_lock);
+        pthread_rwlock_rdlock(&ls.lock);
 
         adj = get_adj(entry);
         if (adj != NULL) {
@@ -205,7 +225,7 @@ static int lsdb_rib_getattr(const char * path,
                 attr->size  = 0;
         }
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         return 0;
 }
@@ -223,9 +243,9 @@ static int lsdb_rib_read(const char * path,
         entry = strstr(path, RIB_SEPARATOR) + 1;
         assert(entry);
 
-        pthread_rwlock_rdlock(&ls.db_lock);
+        pthread_rwlock_rdlock(&ls.lock);
 
-        if (ls.db_len + ls.nbs_len == 0)
+        if (ls.db.len + ls.nbs.len == 0)
                 goto fail;
 
         a = get_adj(entry);
@@ -236,11 +256,11 @@ static int lsdb_rib_read(const char * path,
         if (size < 0)
                 goto fail;
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         return size;
 
 fail:
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
         return -1;
 }
@@ -250,60 +270,52 @@ static int lsdb_rib_readdir(char *** buf)
         char               entry[RIB_PATH_LEN + 1];
         ssize_t            idx = 0;
 
-        assert(buf);
+        assert(buf != NULL);
 
-        pthread_rwlock_rdlock(&ls.db_lock);
+        pthread_rwlock_rdlock(&ls.lock);
 
-        if (ls.db_len + ls.nbs_len == 0) {
-                pthread_rwlock_unlock(&ls.db_lock);
-                return 0;
+        if (ls.db.len + ls.nbs.len == 0) {
+                *buf = NULL;
+                goto no_entries;
         }
 
-        *buf = malloc(sizeof(**buf) * (ls.db_len + ls.nbs_len));
-        if (*buf == NULL) {
-                pthread_rwlock_unlock(&ls.db_lock);
-                return -ENOMEM;
-        }
-        list_for_each(p, &ls.nbs) {
+        *buf = malloc(sizeof(**buf) * (ls.db.len + ls.nbs.len));
+        if (*buf == NULL)
+                goto fail_entries;
+
+        list_for_each(p, &ls.nbs.list) {
                 struct nb * nb = list_entry(p, struct nb, next);
-                char * str = (nb->type == NB_DT ? "dt." : "mgmt.");
-                sprintf(entry, "%s%" PRIu64, str, nb->addr);
+                char * str = (nb->type == NB_DT ? ".dt " : ".mgmt ");
+                sprintf(entry, "%s" ADDR_FMT32 , str, ADDR_VAL32(&nb->addr));
                 (*buf)[idx] = malloc(strlen(entry) + 1);
-                if ((*buf)[idx] == NULL) {
-                        while (idx-- > 0)
-                                free((*buf)[idx]);
-                        free(*buf);
-                        pthread_rwlock_unlock(&ls.db_lock);
-                        return -ENOMEM;
-                }
-
-                strcpy((*buf)[idx], entry);
+                if ((*buf)[idx] == NULL)
+                        goto fail_entry;
 
-                idx++;
+                strcpy((*buf)[idx++], entry);
         }
 
-        list_for_each(p, &ls.db) {
+        list_for_each(p, &ls.db.list) {
                 struct adjacency * a = list_entry(p, struct adjacency, next);
-                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst);
+                sprintf(entry, LINK_FMT, LINK_VAL(a->src, a->dst));
                 (*buf)[idx] = malloc(strlen(entry) + 1);
-                if ((*buf)[idx] == NULL) {
-                        ssize_t j;
-                        for (j = 0; j < idx; ++j)
-                                free(*buf[j]);
-                        free(buf);
-                        pthread_rwlock_unlock(&ls.db_lock);
-                        return -ENOMEM;
-                }
-
-                strcpy((*buf)[idx], entry);
+                if ((*buf)[idx] == NULL)
+                        goto fail_entry;
 
-                idx++;
+                strcpy((*buf)[idx++], entry);
         }
-
-        pthread_rwlock_unlock(&ls.db_lock);
+ no_entries:
+        pthread_rwlock_unlock(&ls.lock);
 
         return idx;
+
+ fail_entry:
+        while (idx-- > 0)
+                free((*buf)[idx]);
+        free(*buf);
+ fail_entries:
+        pthread_rwlock_unlock(&ls.lock);
+        return -ENOMEM;
 }
 
 static struct rib_ops r_ops = {
@@ -319,28 +331,28 @@ static int lsdb_add_nb(uint64_t addr,
         struct list_head * p;
         struct nb *        nb;
 
-        pthread_rwlock_wrlock(&ls.db_lock);
+        pthread_rwlock_wrlock(&ls.lock);
 
-        list_for_each(p, &ls.nbs) {
+        list_for_each(p, &ls.nbs.list) {
                 struct nb * el = list_entry(p, struct nb, next);
-                if (el->addr == addr && el->type == type) {
-                        log_dbg("Already know %s neighbor %" PRIu64 ".",
-                                type == NB_DT ? "dt" : "mgmt", addr);
-                        if (el->fd != fd) {
-                                log_warn("Existing neighbor assigned new fd.");
-                                el->fd = fd;
-                        }
-                        pthread_rwlock_unlock(&ls.db_lock);
-                        return -EPERM;
-                }
-
                 if (addr > el->addr)
                         break;
+                if (el->addr != addr || el->type != type)
+                        continue;
+
+                log_dbg("Already know %s neighbor " ADDR_FMT32 ".",
+                        type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr));
+                if (el->fd != fd) {
+                        log_warn("Existing neighbor assigned new fd.");
+                        el->fd = fd;
+                }
+                pthread_rwlock_unlock(&ls.lock);
+                return -EPERM;
         }
 
         nb = malloc(sizeof(*nb));
         if (nb == NULL) {
-                pthread_rwlock_unlock(&ls.db_lock);
+                pthread_rwlock_unlock(&ls.lock);
                 return -ENOMEM;
         }
 
@@ -350,12 +362,12 @@ static int lsdb_add_nb(uint64_t addr,
 
         list_add_tail(&nb->next, p);
 
-        ++ls.nbs_len;
+        ++ls.nbs.len;
 
-        log_dbg("Type %s neighbor %" PRIu64 " added.",
-                nb->type == NB_DT ? "dt" : "mgmt", addr);
+        log_dbg("Type %s neighbor " ADDR_FMT32 " added.",
+                nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr));
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         return 0;
 }
@@ -366,22 +378,23 @@ static int lsdb_del_nb(uint64_t addr,
         struct list_head * p;
         struct list_head * h;
 
-        pthread_rwlock_wrlock(&ls.db_lock);
+        pthread_rwlock_wrlock(&ls.lock);
 
-        list_for_each_safe(p, h, &ls.nbs) {
+        list_for_each_safe(p, h, &ls.nbs.list) {
                 struct nb * nb = list_entry(p, struct nb, next);
-                if (nb->addr == addr && nb->fd == fd) {
-                        list_del(&nb->next);
-                        --ls.nbs_len;
-                        pthread_rwlock_unlock(&ls.db_lock);
-                        log_dbg("Type %s neighbor %" PRIu64 " deleted.",
-                                nb->type == NB_DT ? "dt" : "mgmt", addr);
-                        free(nb);
-                        return 0;
-                }
+                if (nb->addr != addr || nb->fd != fd)
+                        continue;
+
+                list_del(&nb->next);
+                --ls.nbs.len;
+                pthread_rwlock_unlock(&ls.lock);
+                log_dbg("Type %s neighbor " ADDR_FMT32 " deleted.",
+                        nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr));
+                free(nb);
+                return 0;
         }
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         return -EPERM;
 }
@@ -391,18 +404,18 @@ static int nbr_to_fd(uint64_t addr)
         struct list_head * p;
         int                fd;
 
-        pthread_rwlock_rdlock(&ls.db_lock);
+        pthread_rwlock_rdlock(&ls.lock);
 
-        list_for_each(p, &ls.nbs) {
+        list_for_each(p, &ls.nbs.list) {
                 struct nb * nb = list_entry(p, struct nb, next);
                 if (nb->addr == addr && nb->type == NB_DT) {
                         fd = nb->fd;
-                        pthread_rwlock_unlock(&ls.db_lock);
+                        pthread_rwlock_unlock(&ls.lock);
                         return fd;
                 }
         }
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         return -1;
 }
@@ -417,8 +430,7 @@ static void calculate_pff(struct routing_i * instance)
 
         assert(instance);
 
-        if (graph_routing_table(ls.graph, ls.routing_algo,
-                                ipcpi.dt_addr, &table))
+        if (graph_routing_table(ls.graph, ls.routing_algo, ls.addr, &table))
                 return;
 
         pff_lock(instance->pff);
@@ -453,8 +465,8 @@ static void set_pff_modified(bool calc)
 {
         struct list_head * p;
 
-        pthread_mutex_lock(&ls.routing_i_lock);
-        list_for_each(p, &ls.routing_instances) {
+        pthread_mutex_lock(&ls.instances.mtx);
+        list_for_each(p, &ls.instances.list) {
                 struct routing_i * inst =
                         list_entry(p, struct routing_i, next);
                 pthread_mutex_lock(&inst->lock);
@@ -463,7 +475,7 @@ static void set_pff_modified(bool calc)
                 if (calc)
                         calculate_pff(inst);
         }
-        pthread_mutex_unlock(&ls.routing_i_lock);
+        pthread_mutex_unlock(&ls.instances.mtx);
 }
 
 static int lsdb_add_link(uint64_t src,
@@ -480,9 +492,9 @@ static int lsdb_add_link(uint64_t src,
 
         clock_gettime(CLOCK_REALTIME_COARSE, &now);
 
-        pthread_rwlock_wrlock(&ls.db_lock);
+        pthread_rwlock_wrlock(&ls.lock);
 
-        list_for_each(p, &ls.db) {
+        list_for_each(p, &ls.db.list) {
                 struct adjacency * a = list_entry(p, struct adjacency, next);
                 if (a->dst == dst && a->src == src) {
                         if (a->seqno < seqno) {
@@ -490,7 +502,7 @@ static int lsdb_add_link(uint64_t src,
                                 a->seqno = seqno;
                                 ret = 0;
                         }
-                        pthread_rwlock_unlock(&ls.db_lock);
+                        pthread_rwlock_unlock(&ls.lock);
                         return ret;
                 }
 
@@ -500,7 +512,7 @@ static int lsdb_add_link(uint64_t src,
 
         adj = malloc(sizeof(*adj));
         if (adj == NULL) {
-                pthread_rwlock_unlock(&ls.db_lock);
+                pthread_rwlock_unlock(&ls.lock);
                 return -ENOMEM;
         }
 
@@ -511,12 +523,12 @@ static int lsdb_add_link(uint64_t src,
 
         list_add_tail(&adj->next, p);
 
-        ls.db_len++;
+        ls.db.len++;
 
         if (graph_update_edge(ls.graph, src, dst, *qs))
                 log_warn("Failed to add edge to graph.");
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         set_pff_modified(true);
 
@@ -529,25 +541,25 @@ static int lsdb_del_link(uint64_t src,
         struct list_head * p;
         struct list_head * h;
 
-        pthread_rwlock_wrlock(&ls.db_lock);
+        pthread_rwlock_wrlock(&ls.lock);
 
-        list_for_each_safe(p, h, &ls.db) {
+        list_for_each_safe(p, h, &ls.db.list) {
                 struct adjacency * a = list_entry(p, struct adjacency, next);
                 if (a->dst == dst && a->src == src) {
                         list_del(&a->next);
                         if (graph_del_edge(ls.graph, src, dst))
                                 log_warn("Failed to delete edge from graph.");
-                        ls.db_len--;
+                        ls.db.len--;
 
-                        pthread_rwlock_unlock(&ls.db_lock);
+                        pthread_rwlock_unlock(&ls.lock);
 
                         set_pff_modified(false);
                         free(a);
                         return 0;
                 }
         }
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         return -EPERM;
 }
@@ -570,7 +582,7 @@ static void * periodic_recalc_pff(void * o)
                 if (modified)
                         calculate_pff(inst);
 
-                sleep(RECALC_TIME);
+                sleep(ls.conf.t_recalc);
         }
 
         return (void *) 0;
@@ -587,10 +599,20 @@ static void send_lsm(uint64_t src,
         lsm.s_addr = hton64(src);
         lsm.seqno  = hton64(seqno);
 
-        list_for_each(p, &ls.nbs) {
+        list_for_each(p, &ls.nbs.list) {
                 struct nb * nb = list_entry(p, struct nb, next);
-                if (nb->type == NB_MGMT)
-                        flow_write(nb->fd, &lsm, sizeof(lsm));
+                if (nb->type != NB_MGMT)
+                        continue;
+
+                if (flow_write(nb->fd, &lsm, sizeof(lsm)) < 0)
+                        log_err("Failed to send LSM to " ADDR_FMT32,
+                                ADDR_VAL32(&nb->addr));
+#ifdef DEBUG_PROTO_LS
+                else
+                        log_proto(LSU_FMT " --> " ADDR_FMT32,
+                                  LSU_VAL(src, dst, seqno),
+                                  ADDR_VAL32(&nb->addr));
+#endif
         }
 }
 
@@ -604,9 +626,9 @@ static void lsdb_replicate(int fd)
         list_head_init(&copy);
 
         /* Lock the lsdb, copy the lsms and send outside of lock. */
-        pthread_rwlock_rdlock(&ls.db_lock);
+        pthread_rwlock_rdlock(&ls.lock);
 
-        list_for_each(p, &ls.db) {
+        list_for_each(p, &ls.db.list) {
                 struct adjacency * adj;
                 struct adjacency * cpy;
                 adj = list_entry(p, struct adjacency, next);
@@ -623,7 +645,7 @@ static void lsdb_replicate(int fd)
                 list_add_tail(&cpy->next, &copy);
         }
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
         list_for_each_safe(p, h, &copy) {
                 struct lsa lsm;
@@ -649,17 +671,17 @@ static void * lsupdate(void * o)
         while (true) {
                 clock_gettime(CLOCK_REALTIME_COARSE, &now);
 
-                pthread_rwlock_wrlock(&ls.db_lock);
+                pthread_rwlock_wrlock(&ls.lock);
 
-                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock);
+                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock);
 
-                list_for_each_safe(p, h, &ls.db) {
+                list_for_each_safe(p, h, &ls.db.list) {
                         struct adjacency * adj;
                         adj = list_entry(p, struct adjacency, next);
-                        if (now.tv_sec - adj->stamp > LS_TIMEO) {
+                        if (now.tv_sec > adj->stamp + ls.conf.t_timeo) {
                                 list_del(&adj->next);
-                                log_dbg("%" PRIu64 " - %" PRIu64" timed out.",
-                                        adj->src, adj->dst);
+                                log_dbg(LINK_FMT " timed out.",
+                                        LINK_VAL(adj->src, adj->dst));
                                 if (graph_del_edge(ls.graph, adj->src,
                                                    adj->dst))
                                         log_err("Failed to del edge.");
@@ -667,7 +689,7 @@ static void * lsupdate(void * o)
                                 continue;
                         }
 
-                        if (adj->src == ipcpi.dt_addr) {
+                        if (adj->src == ls.addr) {
                                 adj->seqno++;
                                 send_lsm(adj->src, adj->dst, adj->seqno);
                                 adj->stamp = now.tv_sec;
@@ -676,7 +698,7 @@ static void * lsupdate(void * o)
 
                 pthread_cleanup_pop(true);
 
-                sleep(LS_UPDATE_TIME);
+                sleep(ls.conf.t_update);
         }
 
         return (void *) 0;
@@ -708,15 +730,36 @@ static void forward_lsm(uint8_t * buf,
                         int       in_fd)
 {
         struct list_head * p;
+#ifdef DEBUG_PROTO_LS
+        struct lsa lsm;
 
-        pthread_rwlock_rdlock(&ls.db_lock);
+        assert(buf);
+        assert(len >= sizeof(struct lsa));
+
+        memcpy(&lsm, buf, sizeof(lsm));
+
+        lsm.s_addr = ntoh64(lsm.s_addr);
+        lsm.d_addr = ntoh64(lsm.d_addr);
+        lsm.seqno  = ntoh64(lsm.seqno);
+#endif
+        pthread_rwlock_rdlock(&ls.lock);
 
-        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock);
+        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock);
 
-        list_for_each(p, &ls.nbs) {
+        list_for_each(p, &ls.nbs.list) {
                 struct nb * nb = list_entry(p, struct nb, next);
-                if (nb->type == NB_MGMT && nb->fd != in_fd)
-                        flow_write(nb->fd, buf, len);
+                if (nb->type != NB_MGMT || nb->fd == in_fd)
+                        continue;
+
+                if (flow_write(nb->fd, buf, len) < 0)
+                        log_err("Failed to forward LSM to " ADDR_FMT32,
+                                ADDR_VAL32(&nb->addr));
+#ifdef DEBUG_PROTO_LS
+                else
+                        log_proto(LSU_FMT " --> " ADDR_FMT32 " [forwarded]",
+                                  LSU_VAL(lsm.s_addr, lsm.d_addr, lsm.seqno),
+                                  ADDR_VAL32(&nb->addr));
+#endif
         }
 
         pthread_cleanup_pop(true);
@@ -729,13 +772,13 @@ static void cleanup_fqueue(void * fq)
 
 static void * lsreader(void * o)
 {
-        fqueue_t *   fq;
-        int          ret;
-        uint8_t      buf[sizeof(struct lsa)];
-        int          fd;
-        qosspec_t    qs;
-        struct lsa * msg;
-        size_t       len;
+        fqueue_t *  fq;
+        int         ret;
+        uint8_t     buf[sizeof(struct lsa)];
+        int         fd;
+        qosspec_t   qs;
+        struct lsa  msg;
+        size_t      len;
 
         (void) o;
 
@@ -758,15 +801,22 @@ static void * lsreader(void * o)
                         if (fqueue_type(fq) != FLOW_PKT)
                                 continue;
 
-                        len = flow_read(fd, buf, sizeof(*msg));
-                        if (len <= 0 || len != sizeof(*msg))
+                        len = flow_read(fd, buf, sizeof(msg));
+                        if (len <= 0 || len != sizeof(msg))
                                 continue;
 
-                        msg = (struct lsa *) buf;
-
-                        if (lsdb_add_link(ntoh64(msg->s_addr),
-                                          ntoh64(msg->d_addr),
-                                          ntoh64(msg->seqno),
+                        memcpy(&msg, buf, sizeof(msg));
+                        msg.s_addr = ntoh64(msg.s_addr);
+                        msg.d_addr = ntoh64(msg.d_addr);
+                        msg.seqno  = ntoh64(msg.seqno);
+#ifdef DEBUG_PROTO_LS
+                        log_proto(LSU_FMT " <-- " ADDR_FMT32,
+                                  LSU_VAL(msg.s_addr, msg.d_addr, msg.seqno),
+                                  ADDR_VAL32(&ls.addr));
+#endif
+                        if (lsdb_add_link(msg.s_addr,
+                                          msg.d_addr,
+                                          msg.seqno,
                                           &qs))
                                 continue;
 
@@ -787,14 +837,14 @@ static void flow_event(int fd,
 
         log_dbg("Notifying routing instances of flow event.");
 
-        pthread_mutex_lock(&ls.routing_i_lock);
+        pthread_mutex_lock(&ls.instances.mtx);
 
-        list_for_each(p, &ls.routing_instances) {
+        list_for_each(p, &ls.instances.list) {
                 struct routing_i * ri = list_entry(p, struct routing_i, next);
                 pff_flow_state_change(ri->pff, fd, up);
         }
 
-        pthread_mutex_unlock(&ls.routing_i_lock);
+        pthread_mutex_unlock(&ls.instances.mtx);
 }
 
 static void handle_event(void * self,
@@ -816,17 +866,17 @@ static void handle_event(void * self,
 
         switch (event) {
         case NOTIFY_DT_CONN_ADD:
-                pthread_rwlock_rdlock(&ls.db_lock);
+                pthread_rwlock_rdlock(&ls.lock);
 
-                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock);
+                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock);
 
-                send_lsm(ipcpi.dt_addr, c->conn_info.addr, 0);
+                send_lsm(ls.addr, c->conn_info.addr, 0);
 
                 pthread_cleanup_pop(true);
                 if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT))
                         log_dbg("Failed to add neighbor to LSDB.");
 
-                if (lsdb_add_link(ipcpi.dt_addr, c->conn_info.addr, 0, &qs))
+                if (lsdb_add_link(ls.addr, c->conn_info.addr, 0, &qs))
                         log_dbg("Failed to add new adjacency to LSDB.");
                 break;
         case NOTIFY_DT_CONN_DEL:
@@ -835,7 +885,7 @@ static void handle_event(void * self,
                 if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))
                         log_dbg("Failed to delete neighbor from LSDB.");
 
-                if (lsdb_del_link(ipcpi.dt_addr, c->conn_info.addr))
+                if (lsdb_del_link(ls.addr, c->conn_info.addr))
                         log_dbg("Local link was not in LSDB.");
                 break;
         case NOTIFY_DT_CONN_QOS:
@@ -886,11 +936,11 @@ struct routing_i * link_state_routing_i_create(struct pff * pff)
                            periodic_recalc_pff, tmp))
                 goto fail_pthread_create_lsupdate;
 
-        pthread_mutex_lock(&ls.routing_i_lock);
+        pthread_mutex_lock(&ls.instances.mtx);
 
-        list_add(&tmp->next, &ls.routing_instances);
+        list_add(&tmp->next, &ls.instances.list);
 
-        pthread_mutex_unlock(&ls.routing_i_lock);
+        pthread_mutex_unlock(&ls.instances.mtx);
 
         return tmp;
 
@@ -906,11 +956,11 @@ void link_state_routing_i_destroy(struct routing_i * instance)
 {
         assert(instance);
 
-        pthread_mutex_lock(&ls.routing_i_lock);
+        pthread_mutex_lock(&ls.instances.mtx);
 
         list_del(&instance->next);
 
-        pthread_mutex_unlock(&ls.routing_i_lock);
+        pthread_mutex_unlock(&ls.instances.mtx);
 
         pthread_cancel(instance->calculator);
 
@@ -921,96 +971,146 @@ void link_state_routing_i_destroy(struct routing_i * instance)
         free(instance);
 }
 
-int link_state_init(enum pol_routing pr)
+int link_state_start(void)
+{
+        if (notifier_reg(handle_event, NULL)) {
+                log_err("Failed to register link-state with notifier.");
+                goto fail_notifier_reg;
+        }
+
+        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) {
+                log_err("Failed to create lsupdate thread.");
+                goto fail_pthread_create_lsupdate;
+        }
+
+        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) {
+                log_err("Failed to create lsreader thread.");
+                goto fail_pthread_create_lsreader;
+        }
+
+        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) {
+                log_err("Failed to create listener thread.");
+                goto fail_pthread_create_listener;
+        }
+
+        return 0;
+
+ fail_pthread_create_listener:
+        pthread_cancel(ls.lsreader);
+        pthread_join(ls.lsreader, NULL);
+ fail_pthread_create_lsreader:
+        pthread_cancel(ls.lsupdate);
+        pthread_join(ls.lsupdate, NULL);
+ fail_pthread_create_lsupdate:
+        notifier_unreg(handle_event);
+ fail_notifier_reg:
+        return -1;
+}
+
+void link_state_stop(void)
+{
+        pthread_cancel(ls.listener);
+        pthread_cancel(ls.lsreader);
+        pthread_cancel(ls.lsupdate);
+
+        pthread_join(ls.listener, NULL);
+        pthread_join(ls.lsreader, NULL);
+        pthread_join(ls.lsupdate, NULL);
+
+        notifier_unreg(handle_event);
+}
+
+
+int link_state_init(struct ls_config * conf,
+                    enum pol_pff *     pff_type)
 {
         struct conn_info info;
 
+        assert(conf != NULL);
+        assert(pff_type != NULL);
+
         memset(&info, 0, sizeof(info));
 
+        ls.addr = addr_auth_address();
+
         strcpy(info.comp_name, LS_COMP);
         strcpy(info.protocol, LS_PROTO);
 
         info.pref_version = 1;
         info.pref_syntax  = PROTO_GPB;
-        info.addr         = ipcpi.dt_addr;
+        info.addr         = ls.addr;
 
-        switch (pr) {
-        case ROUTING_LINK_STATE:
-                log_dbg("Using link state routing policy.");
+        ls.conf = *conf;
+
+        switch (conf->pol) {
+        case LS_SIMPLE:
+                *pff_type       = PFF_SIMPLE;
                 ls.routing_algo = ROUTING_SIMPLE;
+                log_dbg("Using Link State Routing policy.");
                 break;
-        case ROUTING_LINK_STATE_LFA:
-                log_dbg("Using Loop-Free Alternates policy.");
+        case LS_LFA:
                 ls.routing_algo = ROUTING_LFA;
+                *pff_type       = PFF_ALTERNATE;
+                log_dbg("Using Loop-Free Alternates policy.");
                 break;
-        case ROUTING_LINK_STATE_ECMP:
-                log_dbg("Using Equal-Cost Multipath policy.");
+        case LS_ECMP:
                 ls.routing_algo = ROUTING_ECMP;
+                *pff_type       = PFF_MULTIPATH;
+                log_dbg("Using Equal-Cost Multipath policy.");
                 break;
         default:
                 goto fail_graph;
         }
 
+        log_dbg("LS update interval: %ld seconds.", ls.conf.t_update);
+        log_dbg("LS link timeout   : %ld seconds.", ls.conf.t_timeo);
+        log_dbg("LS recalc interval: %ld seconds.", ls.conf.t_recalc);
+
         ls.graph = graph_create();
         if (ls.graph == NULL)
                 goto fail_graph;
 
-        if (notifier_reg(handle_event, NULL))
-                goto fail_notifier_reg;
-
-        if (pthread_rwlock_init(&ls.db_lock, NULL))
-                goto fail_db_lock_init;
+        if (pthread_rwlock_init(&ls.lock, NULL)) {
+                log_err("Failed to init lock.");
+                goto fail_lock_init;
+        }
 
-        if (pthread_mutex_init(&ls.routing_i_lock, NULL))
+        if (pthread_mutex_init(&ls.instances.mtx, NULL)) {
+                log_err("Failed to init instances mutex.");
                 goto fail_routing_i_lock_init;
+        }
 
-        if (connmgr_comp_init(COMPID_MGMT, &info))
+        if (connmgr_comp_init(COMPID_MGMT, &info)) {
+                log_err("Failed to init connmgr.");
                 goto fail_connmgr_comp_init;
+        }
 
         ls.mgmt_set = fset_create();
-        if (ls.mgmt_set == NULL)
+        if (ls.mgmt_set == NULL) {
+                log_err("Failed to create fset.");
                 goto fail_fset_create;
+        }
 
-        list_head_init(&ls.db);
-        list_head_init(&ls.nbs);
-        list_head_init(&ls.routing_instances);
-
-        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL))
-                goto fail_pthread_create_lsupdate;
-
-        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL))
-                goto fail_pthread_create_lsreader;
-
-        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL))
-                goto fail_pthread_create_listener;
+        list_head_init(&ls.db.list);
+        list_head_init(&ls.nbs.list);
+        list_head_init(&ls.instances.list);
 
         if (rib_reg(LSDB, &r_ops))
                 goto fail_rib_reg;
 
-        ls.db_len  = 0;
-        ls.nbs_len = 0;
+        ls.db.len  = 0;
+        ls.nbs.len = 0;
 
         return 0;
 
  fail_rib_reg:
-        pthread_cancel(ls.listener);
-        pthread_join(ls.listener, NULL);
- fail_pthread_create_listener:
-        pthread_cancel(ls.lsreader);
-        pthread_join(ls.lsreader, NULL);
- fail_pthread_create_lsreader:
-        pthread_cancel(ls.lsupdate);
-        pthread_join(ls.lsupdate, NULL);
- fail_pthread_create_lsupdate:
         fset_destroy(ls.mgmt_set);
 fail_fset_create:
         connmgr_comp_fini(COMPID_MGMT);
 fail_connmgr_comp_init:
-        pthread_mutex_destroy(&ls.routing_i_lock);
+        pthread_mutex_destroy(&ls.instances.mtx);
 fail_routing_i_lock_init:
-        pthread_rwlock_destroy(&ls.db_lock);
- fail_db_lock_init:
-        notifier_unreg(handle_event);
- fail_notifier_reg:
+        pthread_rwlock_destroy(&ls.lock);
 fail_lock_init:
         graph_destroy(ls.graph);
 fail_graph:
         return -1;
@@ -1023,33 +1123,23 @@ void link_state_fini(void)
 
         rib_unreg(LSDB);
 
-        notifier_unreg(handle_event);
-
-        pthread_cancel(ls.listener);
-        pthread_cancel(ls.lsreader);
-        pthread_cancel(ls.lsupdate);
-
-        pthread_join(ls.listener, NULL);
-        pthread_join(ls.lsreader, NULL);
-        pthread_join(ls.lsupdate, NULL);
-
         fset_destroy(ls.mgmt_set);
 
         connmgr_comp_fini(COMPID_MGMT);
 
         graph_destroy(ls.graph);
 
-        pthread_rwlock_wrlock(&ls.db_lock);
+        pthread_rwlock_wrlock(&ls.lock);
 
-        list_for_each_safe(p, h, &ls.db) {
+        list_for_each_safe(p, h, &ls.db.list) {
                 struct adjacency * a = list_entry(p, struct adjacency, next);
                 list_del(&a->next);
                 free(a);
         }
 
-        pthread_rwlock_unlock(&ls.db_lock);
+        pthread_rwlock_unlock(&ls.lock);
 
-        pthread_rwlock_destroy(&ls.db_lock);
+        pthread_rwlock_destroy(&ls.lock);
 
-        pthread_mutex_destroy(&ls.routing_i_lock);
+        pthread_mutex_destroy(&ls.instances.mtx);
 }