Diffstat (limited to 'src/ipcpd')
-rw-r--r--  src/ipcpd/CMakeLists.txt | 17
-rw-r--r--  src/ipcpd/broadcast/CMakeLists.txt | 5
-rw-r--r--  src/ipcpd/broadcast/comp.h | 45
-rw-r--r--  src/ipcpd/broadcast/connmgr.c | 496
-rw-r--r--  src/ipcpd/broadcast/connmgr.h | 71
-rw-r--r--  src/ipcpd/broadcast/dt.c | 46
-rw-r--r--  src/ipcpd/broadcast/dt.h | 6
-rw-r--r--  src/ipcpd/broadcast/enroll.c | 363
-rw-r--r--  src/ipcpd/broadcast/enroll.h | 47
-rw-r--r--  src/ipcpd/broadcast/main.c | 153
-rw-r--r--  src/ipcpd/common/comp.h (renamed from src/ipcpd/unicast/comp.h) | 18
-rw-r--r--  src/ipcpd/common/connmgr.c | 563
-rw-r--r--  src/ipcpd/common/connmgr.h (renamed from src/ipcpd/unicast/connmgr.h) | 14
-rw-r--r--  src/ipcpd/common/enroll.c | 346
-rw-r--r--  src/ipcpd/common/enroll.h (renamed from src/ipcpd/unicast/enroll.h) | 20
-rw-r--r--  src/ipcpd/config.h.in | 28
-rw-r--r--  src/ipcpd/eth/CMakeLists.txt | 2
-rw-r--r--  src/ipcpd/eth/dix.c | 6
-rw-r--r--  src/ipcpd/eth/eth.c | 767
-rw-r--r--  src/ipcpd/eth/llc.c | 6
-rw-r--r--  src/ipcpd/ipcp.c | 1282
-rw-r--r--  src/ipcpd/ipcp.h | 100
-rw-r--r--  src/ipcpd/local/CMakeLists.txt | 2
-rw-r--r--  src/ipcpd/local/main.c | 215
-rw-r--r--  src/ipcpd/raptor/CMakeLists.txt | 50
-rw-r--r--  src/ipcpd/raptor/main.c | 1114
-rw-r--r--  src/ipcpd/shim-data.c | 51
-rw-r--r--  src/ipcpd/shim-data.h | 6
-rw-r--r--  src/ipcpd/udp/CMakeLists.txt | 2
-rw-r--r--  src/ipcpd/udp/main.c | 617
-rw-r--r--  src/ipcpd/unicast/CMakeLists.txt | 42
-rw-r--r--  src/ipcpd/unicast/addr-auth.c (renamed from src/ipcpd/unicast/addr_auth.c) | 13
-rw-r--r--  src/ipcpd/unicast/addr-auth.h (renamed from src/ipcpd/unicast/addr_auth.h) | 14
-rw-r--r--  src/ipcpd/unicast/addr-auth/flat.c (renamed from src/ipcpd/unicast/pol/flat.c) | 46
-rw-r--r--  src/ipcpd/unicast/addr-auth/flat.h (renamed from src/ipcpd/unicast/pol/flat.h) | 10
-rw-r--r--  src/ipcpd/unicast/addr-auth/ops.h (renamed from src/ipcpd/unicast/pol-addr-auth-ops.h) | 14
-rw-r--r--  src/ipcpd/unicast/addr-auth/pol.h | 23
-rw-r--r--  src/ipcpd/unicast/ca.c | 108
-rw-r--r--  src/ipcpd/unicast/ca.h | 68
-rw-r--r--  src/ipcpd/unicast/ca/mb-ecn.c | 296
-rw-r--r--  src/ipcpd/unicast/ca/mb-ecn.h | 56
-rw-r--r--  src/ipcpd/unicast/ca/nop.c | 98
-rw-r--r--  src/ipcpd/unicast/ca/nop.h | 52
-rw-r--r--  src/ipcpd/unicast/ca/ops.h | 58
-rw-r--r--  src/ipcpd/unicast/ca/pol.h | 24
-rw-r--r--  src/ipcpd/unicast/connmgr.c | 506
-rw-r--r--  src/ipcpd/unicast/dht.c | 2840
-rw-r--r--  src/ipcpd/unicast/dht.h | 52
-rw-r--r--  src/ipcpd/unicast/dir.c | 77
-rw-r--r--  src/ipcpd/unicast/dir.h | 17
-rw-r--r--  src/ipcpd/unicast/dir/dht.c | 4035
-rw-r--r--  src/ipcpd/unicast/dir/dht.h | 49
-rw-r--r--  src/ipcpd/unicast/dir/dht.proto | 58
-rw-r--r--  src/ipcpd/unicast/dir/ops.h | 42
-rw-r--r--  src/ipcpd/unicast/dir/pol.h | 23
-rw-r--r--  src/ipcpd/unicast/dir/tests/CMakeLists.txt | 40
-rw-r--r--  src/ipcpd/unicast/dir/tests/dht_test.c | 1925
-rw-r--r--  src/ipcpd/unicast/dt.c | 468
-rw-r--r--  src/ipcpd/unicast/dt.h | 20
-rw-r--r--  src/ipcpd/unicast/enroll.c | 379
-rw-r--r--  src/ipcpd/unicast/fa.c | 839
-rw-r--r--  src/ipcpd/unicast/fa.h | 26
-rw-r--r--  src/ipcpd/unicast/kademlia.proto | 45
-rw-r--r--  src/ipcpd/unicast/main.c | 237
-rw-r--r--  src/ipcpd/unicast/pff.c | 19
-rw-r--r--  src/ipcpd/unicast/pff.h | 6
-rw-r--r--  src/ipcpd/unicast/pff/alternate.c (renamed from src/ipcpd/unicast/pol/alternate_pff.c) | 10
-rw-r--r--  src/ipcpd/unicast/pff/alternate.h (renamed from src/ipcpd/unicast/pol/alternate_pff.h) | 10
-rw-r--r--  src/ipcpd/unicast/pff/multipath.c (renamed from src/ipcpd/unicast/pol/multipath_pff.c) | 38
-rw-r--r--  src/ipcpd/unicast/pff/multipath.h (renamed from src/ipcpd/unicast/pol/multipath_pff.h) | 10
-rw-r--r--  src/ipcpd/unicast/pff/ops.h (renamed from src/ipcpd/unicast/pol-pff-ops.h) | 14
-rw-r--r--  src/ipcpd/unicast/pff/pft.c (renamed from src/ipcpd/unicast/pol/pft.c) | 20
-rw-r--r--  src/ipcpd/unicast/pff/pft.h (renamed from src/ipcpd/unicast/pol/pft.h) | 6
-rw-r--r--  src/ipcpd/unicast/pff/pol.h | 25
-rw-r--r--  src/ipcpd/unicast/pff/simple.c (renamed from src/ipcpd/unicast/pol/simple_pff.c) | 10
-rw-r--r--  src/ipcpd/unicast/pff/simple.h (renamed from src/ipcpd/unicast/pol/simple_pff.h) | 10
-rw-r--r--  src/ipcpd/unicast/pff/tests/CMakeLists.txt (renamed from src/ipcpd/unicast/tests/CMakeLists.txt) | 13
-rw-r--r--  src/ipcpd/unicast/pff/tests/pft_test.c (renamed from src/ipcpd/unicast/pol/tests/pft_test.c) | 6
-rw-r--r--  src/ipcpd/unicast/psched.c | 17
-rw-r--r--  src/ipcpd/unicast/psched.h | 12
-rw-r--r--  src/ipcpd/unicast/routing.c | 40
-rw-r--r--  src/ipcpd/unicast/routing.h | 13
-rw-r--r--  src/ipcpd/unicast/routing/graph.c (renamed from src/ipcpd/unicast/pol/graph.c) | 166
-rw-r--r--  src/ipcpd/unicast/routing/graph.h (renamed from src/ipcpd/unicast/pol/graph.h) | 6
-rw-r--r--  src/ipcpd/unicast/routing/link-state.c (renamed from src/ipcpd/unicast/pol/link_state.c) | 614
-rw-r--r--  src/ipcpd/unicast/routing/link-state.h (renamed from src/ipcpd/unicast/pol/link_state.h) | 17
-rw-r--r--  src/ipcpd/unicast/routing/ops.h (renamed from src/ipcpd/unicast/pol-routing-ops.h) | 21
-rw-r--r--  src/ipcpd/unicast/routing/pol.h | 23
-rw-r--r--  src/ipcpd/unicast/routing/tests/CMakeLists.txt (renamed from src/ipcpd/unicast/pol/tests/CMakeLists.txt) | 7
-rw-r--r--  src/ipcpd/unicast/routing/tests/graph_test.c (renamed from src/ipcpd/unicast/pol/tests/graph_test.c) | 6
-rw-r--r--  src/ipcpd/unicast/tests/dht_test.c | 99
91 files changed, 11471 insertions, 8825 deletions
diff --git a/src/ipcpd/CMakeLists.txt b/src/ipcpd/CMakeLists.txt
index d0b368a3..b3b049e3 100644
--- a/src/ipcpd/CMakeLists.txt
+++ b/src/ipcpd/CMakeLists.txt
@@ -1,3 +1,7 @@
+set(CONNMGR_RCV_TIMEOUT 1000 CACHE STRING
+ "Timeout for the connection manager to wait for OCEP info (ms).")
+set(IPCP_DEBUG_LOCAL FALSE CACHE BOOL
+ "Use PID as address for local debugging")
set(IPCP_QOS_CUBE_BE_PRIO 50 CACHE STRING
"Priority for best effort QoS cube (0-99)")
set(IPCP_QOS_CUBE_VIDEO_PRIO 90 CACHE STRING
@@ -10,12 +14,14 @@ set(IPCP_ADD_THREADS 4 CACHE STRING
"Number of extra threads to start when an IPCP faces thread starvation")
set(IPCP_SCHED_THR_MUL 2 CACHE STRING
"Number of scheduler threads per QoS cube")
-set(DISABLE_CORE_LOCK FALSE CACHE BOOL
+set(DISABLE_CORE_LOCK TRUE CACHE BOOL
"Disable locking performance threads to a core")
-set(IPCP_CONN_WAIT_DIR TRUE CACHE BOOL
- "Check the running state of the directory when adding a dt connection")
set(DHT_ENROLL_SLACK 50 CACHE STRING
"DHT enrollment waiting time (0-999, ms)")
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ set(IPCP_LINUX_TIMERSLACK_NS 1000 CACHE STRING
+ "Slack value for high resolution timers on Linux systems.")
+endif ()
if ((IPCP_QOS_CUBE_BE_PRIO LESS 0) OR (IPCP_QOS_CUBE_BE_PRIO GREATER 99))
message(FATAL_ERROR "Invalid priority for best effort QoS cube")
@@ -40,10 +46,13 @@ set(IPCP_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/shim-data.c
)
+set (COMMON_SOURCES
+ ${CMAKE_CURRENT_SOURCE_DIR}/common/enroll.c
+ )
+
add_subdirectory(local)
add_subdirectory(eth)
add_subdirectory(udp)
-add_subdirectory(raptor)
add_subdirectory(unicast)
add_subdirectory(broadcast)
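The new CONNMGR_RCV_TIMEOUT cache entry above flows through config.h.in into the C code, where common/connmgr.c turns it into a timespec via TIMESPEC_INIT_MS(CONNMGR_RCV_TIMEOUT). A minimal sketch of how such a millisecond option becomes a struct timespec; the macro body here is an assumption for illustration, the real definition lives in <ouroboros/time.h>:

#include <stdio.h>
#include <time.h>

/* Assumed shape of the millisecond-to-timespec initializer;
 * illustrative, not copied from the tree. */
#define TIMESPEC_INIT_MS(ms) \
        { .tv_sec = (ms) / 1000, .tv_nsec = ((ms) % 1000) * 1000000L }

/* Normally injected through config.h.in from the CMake cache. */
#define CONNMGR_RCV_TIMEOUT 1000

int main(void)
{
        struct timespec timeo = TIMESPEC_INIT_MS(CONNMGR_RCV_TIMEOUT);

        printf("receive timeout: %lld s, %ld ns\n",
               (long long) timeo.tv_sec, timeo.tv_nsec);

        return 0;
}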
diff --git a/src/ipcpd/broadcast/CMakeLists.txt b/src/ipcpd/broadcast/CMakeLists.txt
index afcc8696..d85f335e 100644
--- a/src/ipcpd/broadcast/CMakeLists.txt
+++ b/src/ipcpd/broadcast/CMakeLists.txt
@@ -13,16 +13,17 @@ include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_BINARY_DIR}/include)
set(IPCP_BROADCAST_TARGET ipcpd-broadcast CACHE INTERNAL "")
+set(IPCP_BROADCAST_MPL 60 CACHE STRING
+ "Default maximum packet lifetime for the broadcast IPCP, in seconds")
set(SOURCE_FILES
# Add source files here
connmgr.c
dt.c
- enroll.c
main.c
)
-add_executable(ipcpd-broadcast ${SOURCE_FILES} ${IPCP_SOURCES}
+add_executable(ipcpd-broadcast ${SOURCE_FILES} ${IPCP_SOURCES} ${COMMON_SOURCES}
${LAYER_CONFIG_PROTO_SRCS})
target_link_libraries(ipcpd-broadcast LINK_PUBLIC ouroboros-dev)
diff --git a/src/ipcpd/broadcast/comp.h b/src/ipcpd/broadcast/comp.h
deleted file mode 100644
index 07983978..00000000
--- a/src/ipcpd/broadcast/comp.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Components for the broadcast IPC process
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#ifndef OUROBOROS_IPCPD_BROADCAST_COMP_H
-#define OUROBOROS_IPCPD_BROADCAST_COMP_H
-
-#include <ouroboros/cacep.h>
-
-#define DST_MAX_STRLEN 64
-
-enum comp_id {
- COMPID_DT = 0,
- COMPID_ENROLL,
- COMPID_MAX
-};
-
-struct conn {
- struct conn_info conn_info;
- struct {
- char dst[DST_MAX_STRLEN + 1];
- int fd;
- qosspec_t qs;
- } flow_info;
-};
-
-#endif /* OUROBOROS_IPCPD_BROADCAST_COMP_H */
diff --git a/src/ipcpd/broadcast/connmgr.c b/src/ipcpd/broadcast/connmgr.c
index b023584a..f297175d 100644
--- a/src/ipcpd/broadcast/connmgr.c
+++ b/src/ipcpd/broadcast/connmgr.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Handles connections between components
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,496 +20,16 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
+#include "config.h"
+
#if defined(__linux__) || defined(__CYGWIN__)
#define _DEFAULT_SOURCE
#else
#define _POSIX_C_SOURCE 200112L
#endif
-#define OUROBOROS_PREFIX "connection-manager"
-
-#include <ouroboros/dev.h>
-#include <ouroboros/cacep.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/list.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/notifier.h>
-
-#include "comp.h"
-#include "connmgr.h"
-#include "enroll.h"
-#include "ipcp.h"
-
-#include <pthread.h>
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-
-enum connmgr_state {
- CONNMGR_NULL = 0,
- CONNMGR_INIT,
- CONNMGR_RUNNING
-};
-
-struct conn_el {
- struct list_head next;
- struct conn conn;
-};
-
-struct comp {
- struct conn_info info;
-
- struct list_head conns;
- struct list_head pending;
-
- pthread_cond_t cond;
- pthread_mutex_t lock;
-};
-
-struct {
- struct comp comps[COMPID_MAX];
- enum connmgr_state state;
-
- pthread_t acceptor;
-} connmgr;
-
-static int get_id_by_name(const char * name)
-{
- enum comp_id i;
-
- for (i = 0; i < COMPID_MAX; ++i)
- if (strcmp(name, connmgr.comps[i].info.comp_name) == 0)
- return i;
-
- return -1;
-}
-
-static int get_conn_by_fd(int fd,
- enum comp_id id,
- struct conn * conn)
-{
- struct list_head * p;
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_for_each(p, &connmgr.comps[id].conns) {
- struct conn_el * c =
- list_entry(p, struct conn_el, next);
- if (c->conn.flow_info.fd == fd) {
- *conn = c->conn;
- pthread_mutex_unlock(&connmgr.comps[id].lock);
- return 0;
- }
- }
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return -1;
-}
-
-static int add_comp_conn(enum comp_id id,
- int fd,
- qosspec_t qs,
- struct conn_info * rcv_info)
-{
- struct conn_el * el;
-
- el = malloc(sizeof(*el));
- if (el == NULL) {
- log_err("Not enough memory.");
- return -1;
- }
-
- el->conn.conn_info = *rcv_info;
- el->conn.flow_info.fd = fd;
- el->conn.flow_info.qs = qs;
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_add(&el->next, &connmgr.comps[id].pending);
- pthread_cond_signal(&connmgr.comps[id].cond);
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return 0;
-}
-
-static void * flow_acceptor(void * o)
-{
- int fd;
- qosspec_t qs;
- struct conn_info rcv_info;
- struct conn_info fail_info;
-
- (void) o;
-
- memset(&fail_info, 0, sizeof(fail_info));
-
- while (true) {
- int id;
-
- fd = flow_accept(&qs, NULL);
- if (fd < 0) {
- if (fd != -EIRMD)
- log_warn("Flow accept failed: %d", fd);
- continue;
- }
-
- if (cacep_rcv(fd, &rcv_info)) {
- log_dbg("Error establishing application connection.");
- flow_dealloc(fd);
- continue;
- }
-
- id = get_id_by_name(rcv_info.comp_name);
- if (id < 0) {
- log_dbg("Connection request for unknown component %s.",
- rcv_info.comp_name);
- cacep_snd(fd, &fail_info);
- flow_dealloc(fd);
- continue;
- }
-
- assert(id < COMPID_MAX);
-
- if (cacep_snd(fd, &connmgr.comps[id].info)) {
- log_dbg("Failed to respond to request.");
- flow_dealloc(fd);
- continue;
- }
-
- if (add_comp_conn(id, fd, qs, &rcv_info)) {
- log_dbg("Failed to add new connection.");
- flow_dealloc(fd);
- continue;
- }
- }
-
- return (void *) 0;
-}
-
-static void handle_event(void * self,
- int event,
- const void * o)
-{
- struct conn conn;
-
- (void) self;
-
- if (!(event == NOTIFY_DT_FLOW_UP ||
- event == NOTIFY_DT_FLOW_DOWN ||
- event == NOTIFY_DT_FLOW_DEALLOC))
- return;
-
- if (get_conn_by_fd(*((int *) o), COMPID_DT, &conn))
- return;
-
- switch (event) {
- case NOTIFY_DT_FLOW_UP:
- notifier_event(NOTIFY_DT_CONN_UP, &conn);
- break;
- case NOTIFY_DT_FLOW_DOWN:
- notifier_event(NOTIFY_DT_CONN_DOWN, &conn);
- break;
- case NOTIFY_DT_FLOW_DEALLOC:
- notifier_event(NOTIFY_DT_CONN_DEL, &conn);
- break;
- default:
- break;
- }
-}
-
-int connmgr_init(void)
-{
- connmgr.state = CONNMGR_INIT;
-
- if (notifier_reg(handle_event, NULL))
- return -1;
-
- return 0;
-}
-
-void connmgr_fini(void)
-{
- int i;
-
- notifier_unreg(handle_event);
-
- if (connmgr.state == CONNMGR_RUNNING)
- pthread_join(connmgr.acceptor, NULL);
-
- for (i = 0; i < COMPID_MAX; ++i)
- connmgr_comp_fini(i);
-}
-
-int connmgr_start(void)
-{
- if (pthread_create(&connmgr.acceptor, NULL, flow_acceptor, NULL))
- return -1;
-
- connmgr.state = CONNMGR_RUNNING;
-
- return 0;
-}
-
-void connmgr_stop(void)
-{
- if (connmgr.state == CONNMGR_RUNNING)
- pthread_cancel(connmgr.acceptor);
-}
-
-int connmgr_comp_init(enum comp_id id,
- const struct conn_info * info)
-{
- struct comp * comp;
-
- assert(id >= 0 && id < COMPID_MAX);
-
- comp = connmgr.comps + id;
-
- if (pthread_mutex_init(&comp->lock, NULL))
- return -1;
-
- if (pthread_cond_init(&comp->cond, NULL)) {
- pthread_mutex_destroy(&comp->lock);
- return -1;
- }
-
- list_head_init(&comp->conns);
- list_head_init(&comp->pending);
-
- memcpy(&connmgr.comps[id].info, info, sizeof(connmgr.comps[id].info));
-
- return 0;
-}
-
-void connmgr_comp_fini(enum comp_id id)
-{
- struct list_head * p;
- struct list_head * h;
- struct comp * comp;
-
- assert(id >= 0 && id < COMPID_MAX);
-
- if (strlen(connmgr.comps[id].info.comp_name) == 0)
- return;
-
- comp = connmgr.comps + id;
-
- pthread_mutex_lock(&comp->lock);
-
- list_for_each_safe(p, h, &comp->conns) {
- struct conn_el * e = list_entry(p, struct conn_el, next);
- list_del(&e->next);
- free(e);
- }
-
- list_for_each_safe(p, h, &comp->pending) {
- struct conn_el * e = list_entry(p, struct conn_el, next);
- list_del(&e->next);
- free(e);
- }
-
- pthread_mutex_unlock(&comp->lock);
-
- pthread_cond_destroy(&comp->cond);
- pthread_mutex_destroy(&comp->lock);
-
- memset(&connmgr.comps[id].info, 0, sizeof(connmgr.comps[id].info));
-}
-
-int connmgr_ipcp_connect(const char * dst,
- const char * component,
- qosspec_t qs)
-{
- struct conn_el * ce;
- int id;
-
- assert(dst);
- assert(component);
-
- ce = malloc(sizeof(*ce));
- if (ce == NULL) {
- log_dbg("Out of memory.");
- return -1;
- }
-
- id = get_id_by_name(component);
- if (id < 0) {
- log_dbg("No such component: %s", component);
- free(ce);
- return -1;
- }
-
- if (connmgr_alloc(id, dst, &qs, &ce->conn)) {
- free(ce);
- return -1;
- }
-
- if (strlen(dst) > DST_MAX_STRLEN) {
- log_warn("Truncating dst length for connection.");
- memcpy(ce->conn.flow_info.dst, dst, DST_MAX_STRLEN);
- ce->conn.flow_info.dst[DST_MAX_STRLEN] = '\0';
- } else {
- strcpy(ce->conn.flow_info.dst, dst);
- }
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_add(&ce->next, &connmgr.comps[id].conns);
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return 0;
-}
-
-int connmgr_ipcp_disconnect(const char * dst,
- const char * component)
-{
- struct list_head * p;
- struct list_head * h;
- int id;
-
- assert(dst);
- assert(component);
-
- id = get_id_by_name(component);
- if (id < 0)
- return -1;
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_for_each_safe(p,h, &connmgr.comps[id].conns) {
- struct conn_el * el = list_entry(p, struct conn_el, next);
- if (strcmp(el->conn.flow_info.dst, dst) == 0) {
- int ret;
- pthread_mutex_unlock(&connmgr.comps[id].lock);
- list_del(&el->next);
- ret = connmgr_dealloc(id, &el->conn);
- free(el);
- return ret;
- }
- }
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return 0;
-}
-
-int connmgr_alloc(enum comp_id id,
- const char * dst,
- qosspec_t * qs,
- struct conn * conn)
-{
- assert(id >= 0 && id < COMPID_MAX);
- assert(dst);
-
- conn->flow_info.fd = flow_alloc(dst, qs, NULL);
- if (conn->flow_info.fd < 0) {
- log_dbg("Failed to allocate flow to %s.", dst);
- return -1;
- }
-
- if (qs != NULL)
- conn->flow_info.qs = *qs;
- else
- memset(&conn->flow_info.qs, 0, sizeof(conn->flow_info.qs));
-
- log_dbg("Sending cacep info for protocol %s to fd %d.",
- connmgr.comps[id].info.protocol, conn->flow_info.fd);
-
- if (cacep_snd(conn->flow_info.fd, &connmgr.comps[id].info)) {
- log_dbg("Failed to create application connection.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (cacep_rcv(conn->flow_info.fd, &conn->conn_info)) {
- log_dbg("Failed to connect to application.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (strcmp(connmgr.comps[id].info.protocol, conn->conn_info.protocol)) {
- log_dbg("Unknown protocol (requested %s, got %s).",
- connmgr.comps[id].info.protocol,
- conn->conn_info.protocol);
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (connmgr.comps[id].info.pref_version !=
- conn->conn_info.pref_version) {
- log_dbg("Unknown protocol version.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (connmgr.comps[id].info.pref_syntax != conn->conn_info.pref_syntax) {
- log_dbg("Unknown protocol syntax.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- switch (id) {
- case COMPID_DT:
- notifier_event(NOTIFY_DT_CONN_ADD, conn);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-int connmgr_dealloc(enum comp_id id,
- struct conn * conn)
-{
- switch (id) {
- case COMPID_DT:
- notifier_event(NOTIFY_DT_CONN_DEL, conn);
- break;
- default:
- break;
- }
-
- return flow_dealloc(conn->flow_info.fd);
-}
-
-
-int connmgr_wait(enum comp_id id,
- struct conn * conn)
-{
- struct conn_el * el;
- struct comp * comp;
-
- assert(id >= 0 && id < COMPID_MAX);
- assert(conn);
-
- comp = connmgr.comps + id;
-
- pthread_mutex_lock(&comp->lock);
-
- pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
- (void *) &comp->lock);
-
- while (list_is_empty(&comp->pending))
- pthread_cond_wait(&comp->cond, &comp->lock);
-
- pthread_cleanup_pop(false);
-
- el = list_first_entry((&comp->pending), struct conn_el, next);
- if (el == NULL) {
- pthread_mutex_unlock(&comp->lock);
- return -1;
- }
-
- *conn = el->conn;
-
- list_del(&el->next);
- list_add(&el->next, &connmgr.comps[id].conns);
-
- pthread_mutex_unlock(&comp->lock);
-
- return 0;
-}
+#include <ouroboros/ipcp.h>
+
+#define BUILD_IPCP_BROADCAST
+
+#include "common/connmgr.c"
diff --git a/src/ipcpd/broadcast/connmgr.h b/src/ipcpd/broadcast/connmgr.h
deleted file mode 100644
index 019056f2..00000000
--- a/src/ipcpd/broadcast/connmgr.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Handles the different AP connections
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#ifndef OUROBOROS_IPCPD_BROADCAST_CONNMGR_H
-#define OUROBOROS_IPCPD_BROADCAST_CONNMGR_H
-
-#include <ouroboros/cacep.h>
-#include <ouroboros/qos.h>
-
-#include "comp.h"
-
-#define NOTIFY_DT_CONN_ADD 0x00D0
-#define NOTIFY_DT_CONN_DEL 0x00D1
-#define NOTIFY_DT_CONN_QOS 0x00D2
-#define NOTIFY_DT_CONN_UP 0x00D3
-#define NOTIFY_DT_CONN_DOWN 0x00D4
-#define NOTIFY_DT_FLOW_UP 0x00D5
-#define NOTIFY_DT_FLOW_DOWN 0x00D6
-#define NOTIFY_DT_FLOW_DEALLOC 0x00D7
-
-int connmgr_init(void);
-
-void connmgr_fini(void);
-
-int connmgr_start(void);
-
-void connmgr_stop(void);
-
-int connmgr_comp_init(enum comp_id id,
- const struct conn_info * info);
-
-void connmgr_comp_fini(enum comp_id id);
-
-int connmgr_ipcp_connect(const char * dst,
- const char * component,
- qosspec_t qs);
-
-int connmgr_ipcp_disconnect(const char * dst,
- const char * component);
-
-int connmgr_alloc(enum comp_id id,
- const char * dst,
- qosspec_t * qs,
- struct conn * conn);
-
-int connmgr_dealloc(enum comp_id id,
- struct conn * conn);
-
-int connmgr_wait(enum comp_id id,
- struct conn * conn);
-
-#endif /* OUROBOROS_IPCPD_BROADCAST_CONNMGR_H */
diff --git a/src/ipcpd/broadcast/dt.c b/src/ipcpd/broadcast/dt.c
index 6e8bacf1..938c9085 100644
--- a/src/ipcpd/broadcast/dt.c
+++ b/src/ipcpd/broadcast/dt.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Forward loop for broadcast
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -33,7 +33,6 @@
#define DT "dt"
#define OUROBOROS_PREFIX DT
-#include <ouroboros/endian.h>
#include <ouroboros/dev.h>
#include <ouroboros/errno.h>
#include <ouroboros/fqueue.h>
@@ -41,17 +40,16 @@
#include <ouroboros/logs.h>
#include <ouroboros/notifier.h>
#include <ouroboros/utils.h>
+#include <ouroboros/pthread.h>
-#include "comp.h"
-#include "connmgr.h"
+#include "common/comp.h"
+#include "common/connmgr.h"
#include "dt.h"
-#include "ipcp.h"
#include <assert.h>
#include <stdlib.h>
#include <inttypes.h>
#include <string.h>
-#include <pthread.h>
struct nb {
struct list_head next;
@@ -80,15 +78,16 @@ static int dt_add_nb(int fd)
list_for_each(p, &fwd.nbs) {
struct nb * el = list_entry(p, struct nb, next);
if (el->fd == fd) {
- log_dbg("Already know neighbor.");
pthread_rwlock_unlock(&fwd.nbs_lock);
- return -EPERM;
+ log_warn("Already know neighbor on fd %d.", fd);
+ return 0;
}
}
nb = malloc(sizeof(*nb));
if (nb == NULL) {
pthread_rwlock_unlock(&fwd.nbs_lock);
+ log_err("Failed to malloc neighbor struct.");
return -ENOMEM;
}
@@ -98,10 +97,10 @@ static int dt_add_nb(int fd)
++fwd.nbs_len;
- log_dbg("Neighbor %d added.", fd);
-
pthread_rwlock_unlock(&fwd.nbs_lock);
+ log_dbg("Neighbor %d added.", fd);
+
return 0;
}
@@ -126,6 +125,8 @@ static int dt_del_nb(int fd)
pthread_rwlock_unlock(&fwd.nbs_lock);
+ log_err("Neighbor not found on fd %d.", fd);
+
return -EPERM;
}
@@ -158,8 +159,7 @@ static void dt_packet(uint8_t * buf,
pthread_rwlock_rdlock(&fwd.nbs_lock);
- pthread_cleanup_push((void (*)(void *)) pthread_rwlock_unlock,
- &fwd.nbs_lock);
+ pthread_cleanup_push(__cleanup_rwlock_unlock, &fwd.nbs_lock);
list_for_each(p, &fwd.nbs) {
struct nb * nb = list_entry(p, struct nb, next);
@@ -170,6 +170,11 @@ static void dt_packet(uint8_t * buf,
pthread_cleanup_pop(true);
}
+static void __cleanup_fqueue_destroy(void * fq)
+{
+ fqueue_destroy((fqueue_t *) fq);
+}
+
static void * dt_reader(void * o)
{
fqueue_t * fq;
@@ -184,13 +189,12 @@ static void * dt_reader(void * o)
if (fq == NULL)
return (void *) -1;
- pthread_cleanup_push((void (*) (void *)) fqueue_destroy,
- (void *) fq);
+ pthread_cleanup_push(__cleanup_fqueue_destroy, (void *) fq);
while (true) {
ret = fevent(fwd.set, fq, NULL);
if (ret < 0) {
- log_warn("Event error: %d.", ret);
+ log_warn("Event warning: %d.", ret);
continue;
}
@@ -225,13 +229,13 @@ static void handle_event(void * self,
switch (event) {
case NOTIFY_DT_CONN_ADD:
- if (dt_add_nb(c->flow_info.fd))
- log_dbg("Failed to add neighbor.");
+ if (dt_add_nb(c->flow_info.fd) < 0)
+ log_err("Failed to add neighbor.");
fset_add(fwd.set, c->flow_info.fd);
break;
case NOTIFY_DT_CONN_DEL:
- if (dt_del_nb(c->flow_info.fd))
- log_dbg("Failed to delete neighbor.");
+ if (dt_del_nb(c->flow_info.fd) < 0)
+ log_err("Failed to delete neighbor.");
fset_del(fwd.set, c->flow_info.fd);
break;
default:
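The casts this commit removes, e.g. (void (*)(void *)) fqueue_destroy, call a function through an incompatible pointer type, which is undefined behaviour; the replacements, like __cleanup_fqueue_destroy above and __cleanup_rwlock_unlock (apparently provided by <ouroboros/pthread.h>), have exactly the type pthread_cleanup_push() expects. A self-contained sketch of the wrapper pattern (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Exactly the type pthread_cleanup_push() expects. Calling
 * pthread_mutex_unlock through a pointer cast to a different
 * function type would be undefined behaviour; the wrapper is not. */
static void __cleanup_mutex_unlock(void * mutex)
{
        pthread_mutex_unlock((pthread_mutex_t *) mutex);
}

static void * worker(void * o)
{
        (void) o;

        pthread_mutex_lock(&lock);

        pthread_cleanup_push(__cleanup_mutex_unlock, &lock);

        printf("in the critical section\n"); /* may hit a cancellation point */

        pthread_cleanup_pop(true); /* true: run the handler, unlocking */

        return NULL;
}

int main(void)
{
        pthread_t t;

        if (pthread_create(&t, NULL, worker, NULL))
                return 1;

        return pthread_join(t, NULL);
}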
diff --git a/src/ipcpd/broadcast/dt.h b/src/ipcpd/broadcast/dt.h
index e573511c..8d3b83f8 100644
--- a/src/ipcpd/broadcast/dt.h
+++ b/src/ipcpd/broadcast/dt.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Forward loop for broadcast
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/broadcast/enroll.c b/src/ipcpd/broadcast/enroll.c
deleted file mode 100644
index 511892b7..00000000
--- a/src/ipcpd/broadcast/enroll.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Enrollment Task
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#if defined(__linux__) || defined(__CYGWIN__)
-#define _DEFAULT_SOURCE
-#else
-#define _POSIX_C_SOURCE 199309L
-#endif
-
-#define OUROBOROS_PREFIX "enrollment"
-
-#include <ouroboros/endian.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/dev.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/sockets.h>
-
-#include "connmgr.h"
-#include "enroll.h"
-#include "ipcp.h"
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <pthread.h>
-
-#include "ipcp_config.pb-c.h"
-typedef EnrollMsg enroll_msg_t;
-
-#define ENROLL_COMP "Enrollment"
-#define ENROLL_PROTO "OEP" /* Ouroboros enrollment protocol */
-#define ENROLL_WARN_TIME_OFFSET 20
-#define ENROLL_BUF_LEN 1024
-
-enum enroll_state {
- ENROLL_NULL = 0,
- ENROLL_INIT,
- ENROLL_RUNNING
-};
-
-struct {
- struct ipcp_config conf;
- enum enroll_state state;
- pthread_t listener;
-} enroll;
-
-static int send_rcv_enroll_msg(int fd)
-{
- enroll_msg_t req = ENROLL_MSG__INIT;
- enroll_msg_t * reply;
- uint8_t buf[ENROLL_BUF_LEN];
- ssize_t len;
- ssize_t delta_t;
- struct timespec t0;
- struct timespec rtt;
-
- req.code = ENROLL_CODE__ENROLL_REQ;
-
- len = enroll_msg__get_packed_size(&req);
- if (len < 0) {
- log_dbg("Failed pack request message.");
- return -1;
- }
-
- enroll_msg__pack(&req, buf);
-
- clock_gettime(CLOCK_REALTIME, &t0);
-
- if (flow_write(fd, buf, len) < 0) {
- log_dbg("Failed to send request message.");
- return -1;
- }
-
- len = flow_read(fd, buf, ENROLL_BUF_LEN);
- if (len < 0) {
- log_dbg("No enrollment reply received.");
- return -1;
- }
-
- log_dbg("Received enrollment info (%zd bytes).", len);
-
- reply = enroll_msg__unpack(NULL, len, buf);
- if (reply == NULL) {
- log_dbg("No enrollment response.");
- return -1;
- }
-
- if (reply->code != ENROLL_CODE__ENROLL_BOOT) {
- log_dbg("Failed to unpack enrollment response.");
- enroll_msg__free_unpacked(reply, NULL);
- return -1;
- }
-
- if (!(reply->has_t_sec && reply->has_t_nsec)) {
- log_dbg("No time in response message.");
- enroll_msg__free_unpacked(reply, NULL);
- return -1;
- }
-
- clock_gettime(CLOCK_REALTIME, &rtt);
-
- delta_t = ts_diff_ms(&t0, &rtt);
-
- rtt.tv_sec = reply->t_sec;
- rtt.tv_nsec = reply->t_nsec;
-
- if (labs(ts_diff_ms(&t0, &rtt)) - delta_t > ENROLL_WARN_TIME_OFFSET)
- log_warn("Clock offset above threshold.");
-
- strcpy(enroll.conf.layer_info.layer_name,
- reply->conf->layer_info->layer_name);
- enroll.conf.layer_info.dir_hash_algo
- = reply->conf->layer_info->dir_hash_algo;
-
- enroll_msg__free_unpacked(reply, NULL);
-
- return 0;
-}
-
-static ssize_t enroll_pack(uint8_t ** buf)
-{
- enroll_msg_t msg = ENROLL_MSG__INIT;
- ipcp_config_msg_t config = IPCP_CONFIG_MSG__INIT;
- layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT;
- struct timespec now;
- ssize_t len;
-
- clock_gettime(CLOCK_REALTIME, &now);
-
- msg.code = ENROLL_CODE__ENROLL_BOOT;
- msg.has_t_sec = true;
- msg.t_sec = now.tv_sec;
- msg.has_t_nsec = true;
- msg.t_nsec = now.tv_nsec;
- msg.conf = &config;
-
- config.ipcp_type = enroll.conf.type;
- config.layer_info = &layer_info;
-
- layer_info.layer_name = (char *) enroll.conf.layer_info.layer_name;
- layer_info.dir_hash_algo = enroll.conf.layer_info.dir_hash_algo;
-
- len = enroll_msg__get_packed_size(&msg);
-
- *buf = malloc(len);
- if (*buf == NULL)
- return -1;
-
- enroll_msg__pack(&msg, *buf);
-
- return len;
-}
-
-static void * enroll_handle(void * o)
-{
- struct conn conn;
- uint8_t buf[ENROLL_BUF_LEN];
- uint8_t * reply;
- ssize_t len;
- enroll_msg_t * msg;
-
- (void) o;
-
- while (true) {
- if (connmgr_wait(COMPID_ENROLL, &conn)) {
- log_err("Failed to get next connection.");
- continue;
- }
-
- len = flow_read(conn.flow_info.fd, buf, ENROLL_BUF_LEN);
- if (len < 0) {
- log_err("Failed to read from flow.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- msg = enroll_msg__unpack(NULL, len, buf);
- if (msg == NULL) {
- log_err("Failed to unpack message.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- if (msg->code != ENROLL_CODE__ENROLL_REQ) {
- log_err("Wrong message type.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- enroll_msg__free_unpacked(msg, NULL);
- continue;
- }
-
- log_dbg("Enrolling a new neighbor.");
-
- enroll_msg__free_unpacked(msg, NULL);
-
- len = enroll_pack(&reply);
- if (reply == NULL) {
- log_err("Failed to pack enrollment message.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- log_dbg("Sending enrollment info (%zd bytes).", len);
-
- if (flow_write(conn.flow_info.fd, reply, len) < 0) {
- log_err("Failed respond to enrollment request.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- free(reply);
- continue;
- }
-
- free(reply);
-
- len = flow_read(conn.flow_info.fd, buf, ENROLL_BUF_LEN);
- if (len < 0) {
- log_err("Failed to read from flow.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- msg = enroll_msg__unpack(NULL, len, buf);
- if (msg == NULL) {
- log_err("Failed to unpack message.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- if (msg->code != ENROLL_CODE__ENROLL_DONE || !msg->has_result) {
- log_err("Wrong message type.");
- enroll_msg__free_unpacked(msg, NULL);
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- if (msg->result == 0)
- log_dbg("Neighbor enrollment successful.");
- else
- log_dbg("Neigbor reported failed enrollment.");
-
- enroll_msg__free_unpacked(msg, NULL);
-
- connmgr_dealloc(COMPID_ENROLL, &conn);
- }
-
- return 0;
-}
-
-int enroll_boot(struct conn * conn)
-{
- log_dbg("Getting boot information.");
-
- if (send_rcv_enroll_msg(conn->flow_info.fd)) {
- log_err("Failed to enroll.");
- return -1;
- }
-
- return 0;
-}
-
-int enroll_done(struct conn * conn,
- int result)
-{
- enroll_msg_t msg = ENROLL_MSG__INIT;
- uint8_t buf[ENROLL_BUF_LEN];
- ssize_t len;
-
- msg.code = ENROLL_CODE__ENROLL_DONE;
- msg.has_result = true;
- msg.result = result;
-
- len = enroll_msg__get_packed_size(&msg);
- if (len < 0) {
- log_dbg("Failed pack request message.");
- return -1;
- }
-
- enroll_msg__pack(&msg, buf);
-
- if (flow_write(conn->flow_info.fd, buf, len) < 0) {
- log_dbg("Failed to send acknowledgment.");
- return -1;
- }
-
- return 0;
-}
-
-void enroll_bootstrap(const struct ipcp_config * conf)
-{
- assert(conf);
-
- memcpy(&enroll.conf, conf, sizeof(enroll.conf));
-}
-
-struct ipcp_config * enroll_get_conf(void)
-{
- return &enroll.conf;
-}
-
-int enroll_init(void)
-{
- struct conn_info info;
-
- memset(&info, 0, sizeof(info));
-
- strcpy(info.comp_name, ENROLL_COMP);
- strcpy(info.protocol, ENROLL_PROTO);
- info.pref_version = 1;
- info.pref_syntax = PROTO_GPB;
- info.addr = 0;
-
- if (connmgr_comp_init(COMPID_ENROLL, &info)) {
- log_err("Failed to register with connmgr.");
- return -1;
- }
-
- enroll.state = ENROLL_INIT;
-
- return 0;
-}
-
-void enroll_fini(void)
-{
- if (enroll.state == ENROLL_RUNNING)
- pthread_join(enroll.listener, NULL);
-
- connmgr_comp_fini(COMPID_ENROLL);
-}
-
-int enroll_start(void)
-{
- if (pthread_create(&enroll.listener, NULL, enroll_handle, NULL))
- return -1;
-
- enroll.state = ENROLL_RUNNING;
-
- return 0;
-}
-
-void enroll_stop(void)
-{
- if (enroll.state == ENROLL_RUNNING)
- pthread_cancel(enroll.listener);
-}
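The enrollment code deleted above (now shared in common/enroll.c) timed the boot exchange and warned when the peer's timestamp deviated from the local clock by more than ENROLL_WARN_TIME_OFFSET (20) ms beyond the measured round trip. A standalone sketch of that heuristic; ts_diff_ms() is reimplemented here for illustration and may differ from the Ouroboros time utilities:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Millisecond difference (b - a); illustrative stand-in. */
static long ts_diff_ms(const struct timespec * a,
                       const struct timespec * b)
{
        return (b->tv_sec - a->tv_sec) * 1000L +
               (b->tv_nsec - a->tv_nsec) / 1000000L;
}

int main(void)
{
        struct timespec t0;
        struct timespec t1;
        struct timespec peer;
        long rtt_ms;

        clock_gettime(CLOCK_REALTIME, &t0);
        /* ... request/response exchange with the peer goes here ... */
        clock_gettime(CLOCK_REALTIME, &t1);

        rtt_ms = ts_diff_ms(&t0, &t1);

        /* Simulate a peer timestamp 30 ms after our send time. */
        peer = t0;
        peer.tv_nsec += 30000000L;
        if (peer.tv_nsec >= 1000000000L) {
                peer.tv_nsec -= 1000000000L;
                ++peer.tv_sec;
        }

        /* Same check as the enrollment code: an offset beyond the
         * measured round trip by more than 20 ms draws a warning. */
        if (labs(ts_diff_ms(&t0, &peer)) - rtt_ms > 20)
                printf("Clock offset above threshold.\n");
        else
                printf("Clocks agree within the round trip (%ld ms).\n",
                       rtt_ms);

        return 0;
}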
diff --git a/src/ipcpd/broadcast/enroll.h b/src/ipcpd/broadcast/enroll.h
deleted file mode 100644
index 837e0a28..00000000
--- a/src/ipcpd/broadcast/enroll.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Enrollment Task
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#ifndef OUROBOROS_IPCPD_BROADCAST_ENROLL_H
-#define OUROBOROS_IPCPD_BROADCAST_ENROLL_H
-
-#include <ouroboros/ipcp.h>
-
-#include "comp.h"
-
-int enroll_init(void);
-
-void enroll_fini(void);
-
-int enroll_start(void);
-
-void enroll_stop(void);
-
-void enroll_bootstrap(const struct ipcp_config * conf);
-
-int enroll_boot(struct conn * conn);
-
-int enroll_done(struct conn * conn,
- int result);
-
-struct ipcp_config * enroll_get_conf(void);
-
-#endif /* OUROBOROS_IPCPD_BROADCAST_ENROLL_H */
diff --git a/src/ipcpd/broadcast/main.c b/src/ipcpd/broadcast/main.c
index 120b2bf1..ebdb182c 100644
--- a/src/ipcpd/broadcast/main.c
+++ b/src/ipcpd/broadcast/main.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Broadcast IPC Process
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -29,19 +29,20 @@
#include "config.h"
#define OUROBOROS_PREFIX "broadcast-ipcp"
+#define THIS_TYPE IPCP_BROADCAST
-#include <ouroboros/errno.h>
-#include <ouroboros/hash.h>
#include <ouroboros/dev.h>
+#include <ouroboros/errno.h>
#include <ouroboros/ipcp-dev.h>
#include <ouroboros/logs.h>
#include <ouroboros/notifier.h>
+#include <ouroboros/random.h>
#include <ouroboros/rib.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>
-#include "connmgr.h"
+#include "common/connmgr.h"
+#include "common/enroll.h"
#include "dt.h"
-#include "enroll.h"
#include "ipcp.h"
#include <stdbool.h>
@@ -51,54 +52,33 @@
#include <assert.h>
#include <inttypes.h>
-#define THIS_TYPE IPCP_BROADCAST
-
-static int initialize_components(const struct ipcp_config * conf)
+static int initialize_components(void)
{
- ipcpi.layer_name = strdup(conf->layer_info.layer_name);
- if (ipcpi.layer_name == NULL) {
- log_err("Failed to set layer name.");
- goto fail_layer_name;
- }
-
- ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo;
-
assert(ipcp_dir_hash_len() != 0);
- if (dt_init()) {
+ if (dt_init() < 0) {
log_err("Failed to initialize forwarding component.");
- goto fail_dt;
+ return -1;
}
ipcp_set_state(IPCP_INIT);
return 0;
-
- fail_dt:
- free(ipcpi.layer_name);
- fail_layer_name:
- return -1;
}
static void finalize_components(void)
{
dt_fini();
-
- free(ipcpi.layer_name);
}
static int start_components(void)
{
- assert(ipcp_get_state() == IPCP_INIT);
-
- ipcp_set_state(IPCP_OPERATIONAL);
-
- if (enroll_start()) {
+ if (enroll_start() < 0) {
log_err("Failed to start enrollment.");
goto fail_enroll_start;
}
- if (connmgr_start()) {
+ if (connmgr_start() < 0) {
log_err("Failed to start AP connection manager.");
goto fail_connmgr_start;
}
@@ -114,52 +94,56 @@ static int start_components(void)
static void stop_components(void)
{
- assert(ipcp_get_state() == IPCP_OPERATIONAL ||
- ipcp_get_state() == IPCP_SHUTDOWN);
-
connmgr_stop();
enroll_stop();
-
- ipcp_set_state(IPCP_INIT);
}
static int broadcast_ipcp_enroll(const char * dst,
struct layer_info * info)
{
+ struct ipcp_config * conf;
struct conn conn;
+ uint8_t id[ENROLL_ID_LEN];
- if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn)) {
- log_err("Failed to get connection.");
- goto fail_er_flow;
+ if (random_buffer(id, ENROLL_ID_LEN) < 0) {
+ log_err("Failed to generate enrollment ID.");
+ goto fail_id;
+ }
+
+ log_info_id(id, "Requesting enrollment.");
+
+ if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn) < 0) {
+ log_err_id(id, "Failed to get connection.");
+ goto fail_id;
}
/* Get boot state from peer. */
- if (enroll_boot(&conn)) {
- log_err("Failed to get boot information.");
+ if (enroll_boot(&conn, id) < 0) {
+ log_err_id(id, "Failed to get boot information.");
goto fail_enroll_boot;
}
- if (initialize_components(enroll_get_conf())) {
- log_err("Failed to initialize IPCP components.");
+ conf = enroll_get_conf();
+ *info = conf->layer_info;
+
+ if (initialize_components() < 0) {
+ log_err_id(id, "Failed to initialize components.");
goto fail_enroll_boot;
}
- if (start_components()) {
- log_err("Failed to start components.");
+ if (start_components() < 0) {
+ log_err_id(id, "Failed to start components.");
goto fail_start_comp;
}
- if (enroll_done(&conn, 0))
- log_warn("Failed to confirm enrollment with peer.");
+ if (enroll_ack(&conn, id, 0) < 0)
+ log_err_id(id, "Failed to confirm enrollment.");
- if (connmgr_dealloc(COMPID_ENROLL, &conn))
- log_warn("Failed to deallocate enrollment flow.");
+ if (connmgr_dealloc(COMPID_ENROLL, &conn) < 0)
+ log_warn_id(id, "Failed to dealloc enrollment flow.");
- log_info("Enrolled with %s.", dst);
-
- info->dir_hash_algo = ipcpi.dir_hash_algo;
- strcpy(info->layer_name, ipcpi.layer_name);
+ log_info_id(id, "Enrolled with %s.", dst);
return 0;
@@ -167,18 +151,19 @@ static int broadcast_ipcp_enroll(const char * dst,
finalize_components();
fail_enroll_boot:
connmgr_dealloc(COMPID_ENROLL, &conn);
- fail_er_flow:
+ fail_id:
return -1;
}
-static int broadcast_ipcp_bootstrap(const struct ipcp_config * conf)
+static int broadcast_ipcp_bootstrap(struct ipcp_config * conf)
{
assert(conf);
assert(conf->type == THIS_TYPE);
+ assert(conf->layer_info.dir_hash_algo == DIR_HASH_SHA3_256);
enroll_bootstrap(conf);
- if (initialize_components(conf)) {
+ if (initialize_components()) {
log_err("Failed to init IPCP components.");
goto fail_init;
}
@@ -188,8 +173,6 @@ static int broadcast_ipcp_bootstrap(const struct ipcp_config * conf)
goto fail_start;
}
- log_dbg("Bootstrapped in layer %s.", conf->layer_info.layer_name);
-
return 0;
fail_start:
@@ -204,12 +187,12 @@ static int name_check(const uint8_t * dst)
size_t len;
int ret;
- len = hash_len(ipcpi.dir_hash_algo);
+ len = ipcp_dir_hash_len();
buf = malloc(len);
if (buf == NULL)
return -ENOMEM;
- str_hash(ipcpi.dir_hash_algo, buf, ipcpi.layer_name);
+ str_hash(HASH_SHA3_256, buf, ipcp_get_name());
ret = memcmp(buf, dst, len);
@@ -223,6 +206,8 @@ static int broadcast_ipcp_join(int fd,
qosspec_t qs)
{
struct conn conn;
+ time_t mpl = IPCP_BROADCAST_MPL;
+ buffer_t data = BUF_INIT;
(void) qs;
@@ -230,12 +215,14 @@ static int broadcast_ipcp_join(int fd,
conn.flow_info.fd = fd;
- if (name_check(dst) != 0)
+ if (name_check(dst) != 0) {
+ log_err("Failed to check name.");
return -1;
+ }
notifier_event(NOTIFY_DT_CONN_ADD, &conn);
- ipcp_flow_alloc_reply(fd, 0, NULL, 0);
+ ipcp_flow_alloc_reply(fd, 0, mpl, &data);
return 0;
}
@@ -250,12 +237,11 @@ int broadcast_ipcp_dealloc(int fd)
notifier_event(NOTIFY_DT_CONN_DEL, &conn);
- flow_dealloc(fd);
+ ipcp_flow_dealloc(fd);
return 0;
}
-
static struct ipcp_ops broadcast_ops = {
.ipcp_bootstrap = broadcast_ipcp_bootstrap,
.ipcp_enroll = broadcast_ipcp_enroll,
@@ -273,17 +259,11 @@ static struct ipcp_ops broadcast_ops = {
int main(int argc,
char * argv[])
{
- if (ipcp_init(argc, argv, &broadcast_ops) < 0) {
- log_err("Failed to init IPCP.");
+ if (ipcp_init(argc, argv, &broadcast_ops, THIS_TYPE) < 0) {
+ log_err("Failed to initialize IPCP.");
goto fail_init;
}
- /* These components must be init at creation. */
- if (rib_init(ipcpi.name)) {
- log_err("Failed to initialize RIB.");
- goto fail_rib_init;
- }
-
if (notifier_init()) {
log_err("Failed to initialize notifier component.");
goto fail_notifier_init;
@@ -299,49 +279,38 @@ int main(int argc,
goto fail_enroll_init;
}
- if (ipcp_boot() < 0) {
+ if (ipcp_start() < 0) {
log_err("Failed to boot IPCP.");
- goto fail_boot;
- }
-
- if (ipcp_create_r(0)) {
- log_err("Failed to notify IRMd we are initialized.");
- ipcp_set_state(IPCP_NULL);
- goto fail_create_r;
+ goto fail_start;
}
- ipcp_shutdown();
+ ipcp_sigwait();
if (ipcp_get_state() == IPCP_SHUTDOWN) {
stop_components();
finalize_components();
}
+ ipcp_stop();
+
enroll_fini();
connmgr_fini();
notifier_fini();
- rib_fini();
-
ipcp_fini();
exit(EXIT_SUCCESS);
- fail_create_r:
- ipcp_shutdown();
- fail_boot:
+ fail_start:
enroll_fini();
fail_enroll_init:
connmgr_fini();
fail_connmgr_init:
notifier_fini();
fail_notifier_init:
- rib_fini();
- fail_rib_init:
ipcp_fini();
fail_init:
- ipcp_create_r(-1);
exit(EXIT_FAILURE);
}
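main() keeps the tree's usual unwind convention: each successfully initialized component gets a fail_ label, and error paths fall through the labels so components are finalized in reverse order of initialization. A minimal sketch of the convention, with made-up component names:

#include <stdio.h>

/* Illustrative init/fini pairs; the names are not from the tree. */
static int comp_a_init(void) { return 0; }
static void comp_a_fini(void) { }
static int comp_b_init(void) { return 0; }
static void comp_b_fini(void) { }

int main(void)
{
        if (comp_a_init() < 0) {
                fprintf(stderr, "Failed to init A.\n");
                goto fail_a;
        }

        if (comp_b_init() < 0) {
                fprintf(stderr, "Failed to init B.\n");
                goto fail_b;
        }

        /* ... run until shutdown ... */

        comp_b_fini();
        comp_a_fini();

        return 0;

 fail_b:
        comp_a_fini();
 fail_a:
        return 1;
}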
diff --git a/src/ipcpd/unicast/comp.h b/src/ipcpd/common/comp.h
index 42367833..f3790d9c 100644
--- a/src/ipcpd/unicast/comp.h
+++ b/src/ipcpd/common/comp.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
- * Components for the unicast IPC process
+ * Components for the unicast/broadcast IPC process
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,12 +20,10 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#ifndef OUROBOROS_IPCPD_UNICAST_COMP_H
-#define OUROBOROS_IPCPD_UNICAST_COMP_H
+#ifndef OUROBOROS_IPCPD_COMMON_COMP_H
+#define OUROBOROS_IPCPD_COMMON_COMP_H
-#include <ouroboros/cacep.h>
-
-#include "dt.h"
+#include <ouroboros/cep.h>
#define DST_MAX_STRLEN 64
@@ -45,4 +43,4 @@ struct conn {
} flow_info;
};
-#endif /* OUROBOROS_IPCPD_UNICAST_COMP_H */
+#endif /* OUROBOROS_IPCPD_COMMON_COMP_H */
diff --git a/src/ipcpd/common/connmgr.c b/src/ipcpd/common/connmgr.c
new file mode 100644
index 00000000..6dd5fed0
--- /dev/null
+++ b/src/ipcpd/common/connmgr.c
@@ -0,0 +1,563 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Handles connections between components
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#define OUROBOROS_PREFIX "connection-manager"
+
+#include <ouroboros/cep.h>
+#include <ouroboros/dev.h>
+#include <ouroboros/errno.h>
+#include <ouroboros/fccntl.h>
+#include <ouroboros/list.h>
+#include <ouroboros/logs.h>
+#include <ouroboros/notifier.h>
+#include <ouroboros/pthread.h>
+
+#include "connmgr.h"
+#include "ipcp.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+enum connmgr_state {
+ CONNMGR_NULL = 0,
+ CONNMGR_INIT,
+ CONNMGR_RUNNING
+};
+
+struct conn_el {
+ struct list_head next;
+ struct conn conn;
+};
+
+struct comp {
+ struct conn_info info;
+
+ struct list_head conns;
+ struct list_head pending;
+
+ pthread_cond_t cond;
+ pthread_mutex_t lock;
+};
+
+struct {
+ struct comp comps[COMPID_MAX];
+ enum connmgr_state state;
+
+ pthread_t acceptor;
+} connmgr;
+
+static int get_id_by_name(const char * name)
+{
+ enum comp_id i;
+
+ for (i = 0; i < COMPID_MAX; ++i)
+ if (strcmp(name, connmgr.comps[i].info.comp_name) == 0)
+ return i;
+
+ return -1;
+}
+
+static int get_conn_by_fd(int fd,
+ enum comp_id id,
+ struct conn * conn)
+{
+ struct list_head * p;
+
+ pthread_mutex_lock(&connmgr.comps[id].lock);
+
+ list_for_each(p, &connmgr.comps[id].conns) {
+ struct conn_el * c =
+ list_entry(p, struct conn_el, next);
+ if (c->conn.flow_info.fd == fd) {
+ *conn = c->conn;
+ pthread_mutex_unlock(&connmgr.comps[id].lock);
+ return 0;
+ }
+ }
+
+ pthread_mutex_unlock(&connmgr.comps[id].lock);
+
+ return -1;
+}
+
+static int add_comp_conn(enum comp_id id,
+ int fd,
+ qosspec_t qs,
+ struct conn_info * rcv_info)
+{
+ struct conn_el * el;
+
+ el = malloc(sizeof(*el));
+ if (el == NULL) {
+ log_err("Not enough memory.");
+ return -1;
+ }
+
+ el->conn.conn_info = *rcv_info;
+ el->conn.flow_info.fd = fd;
+ el->conn.flow_info.qs = qs;
+
+ pthread_mutex_lock(&connmgr.comps[id].lock);
+
+ list_add(&el->next, &connmgr.comps[id].pending);
+ pthread_cond_signal(&connmgr.comps[id].cond);
+
+ pthread_mutex_unlock(&connmgr.comps[id].lock);
+
+ return 0;
+}
+
+static void * flow_acceptor(void * o)
+{
+ int fd;
+ qosspec_t qs;
+ struct conn_info rcv_info;
+ struct conn_info fail_info;
+ struct timespec timeo = TIMESPEC_INIT_MS(CONNMGR_RCV_TIMEOUT);
+ int err;
+
+ (void) o;
+
+ memset(&fail_info, 0, sizeof(fail_info));
+
+ while (true) {
+ int id;
+
+ fd = flow_accept(&qs, NULL);
+ if (fd < 0) {
+ if (fd != -EIRMD)
+ log_err("Flow accept failed: %d", fd);
+ continue;
+ }
+
+ log_info("Handling incoming flow %d.",fd);
+
+ fccntl(fd, FLOWSRCVTIMEO, &timeo);
+
+ err = cep_rcv(fd, &rcv_info);
+ if (err < 0) {
+ log_err("Error receiving OCEP info: %d.", err);
+ flow_dealloc(fd);
+ continue;
+ }
+
+ log_info("Request to connect to %s.", rcv_info.comp_name);
+
+ id = get_id_by_name(rcv_info.comp_name);
+ if (id < 0) {
+ log_err("Connection request for unknown component %s.",
+ rcv_info.comp_name);
+ cep_snd(fd, &fail_info);
+ flow_dealloc(fd);
+ continue;
+ }
+
+ err = cep_snd(fd, &connmgr.comps[id].info);
+ if (err < 0) {
+ log_err("Failed responding to OCEP request: %d.", err);
+ flow_dealloc(fd);
+ continue;
+ }
+
+ fccntl(fd, FLOWSRCVTIMEO, NULL);
+
+ err = add_comp_conn(id, fd, qs, &rcv_info);
+ if (err < 0) {
+ log_err("Failed to add new connection: %d.", err);
+ flow_dealloc(fd);
+ continue;
+ }
+
+ log_info("Finished handling incoming flow %d for %s.",
+ fd, rcv_info.comp_name);
+ }
+
+ return (void *) 0;
+}
+
+static void handle_event(void * self,
+ int event,
+ const void * o)
+{
+ struct conn conn;
+
+ (void) self;
+
+ if (!(event == NOTIFY_DT_FLOW_UP ||
+ event == NOTIFY_DT_FLOW_DOWN ||
+ event == NOTIFY_DT_FLOW_DEALLOC))
+ return;
+
+ if (get_conn_by_fd(*((int *) o), COMPID_DT, &conn))
+ return;
+
+ switch (event) {
+ case NOTIFY_DT_FLOW_UP:
+ notifier_event(NOTIFY_DT_CONN_UP, &conn);
+ break;
+ case NOTIFY_DT_FLOW_DOWN:
+ notifier_event(NOTIFY_DT_CONN_DOWN, &conn);
+ break;
+ case NOTIFY_DT_FLOW_DEALLOC:
+ notifier_event(NOTIFY_DT_CONN_DEL, &conn);
+ break;
+ default:
+ break;
+ }
+}
+
+int connmgr_init(void)
+{
+ connmgr.state = CONNMGR_INIT;
+
+ if (notifier_reg(handle_event, NULL)) {
+ log_err("Failed to register notifier.");
+ return -1;
+ }
+
+ return 0;
+}
+
+void connmgr_fini(void)
+{
+ int i;
+
+ notifier_unreg(handle_event);
+
+ if (connmgr.state == CONNMGR_RUNNING)
+ pthread_join(connmgr.acceptor, NULL);
+
+ for (i = 0; i < COMPID_MAX; ++i)
+ connmgr_comp_fini(i);
+}
+
+int connmgr_start(void)
+{
+ if (pthread_create(&connmgr.acceptor, NULL, flow_acceptor, NULL)) {
+ log_err("Failed to create pthread: %s.", strerror(errno));
+ return -1;
+ }
+
+ connmgr.state = CONNMGR_RUNNING;
+
+ return 0;
+}
+
+void connmgr_stop(void)
+{
+ if (connmgr.state == CONNMGR_RUNNING)
+ pthread_cancel(connmgr.acceptor);
+}
+
+int connmgr_comp_init(enum comp_id id,
+ const struct conn_info * info)
+{
+ struct comp * comp;
+
+ assert(id >= 0 && id < COMPID_MAX);
+
+ comp = connmgr.comps + id;
+
+ if (pthread_mutex_init(&comp->lock, NULL)) {
+ log_err("Failed to initialize mutex: %s.", strerror(errno));
+ goto fail_mutex;
+ }
+
+ if (pthread_cond_init(&comp->cond, NULL)) {
+ log_err("Failed to initialize condvar: %s.", strerror(errno));
+ goto fail_cond;
+ }
+
+ list_head_init(&comp->conns);
+ list_head_init(&comp->pending);
+
+ memcpy(&connmgr.comps[id].info, info, sizeof(connmgr.comps[id].info));
+
+ return 0;
+
+ fail_cond:
+ pthread_mutex_destroy(&comp->lock);
+ fail_mutex:
+ return -1;
+}
+
+void connmgr_comp_fini(enum comp_id id)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct comp * comp;
+
+ assert(id >= 0 && id < COMPID_MAX);
+
+ if (strlen(connmgr.comps[id].info.comp_name) == 0)
+ return;
+
+ comp = connmgr.comps + id;
+
+ pthread_mutex_lock(&comp->lock);
+
+ list_for_each_safe(p, h, &comp->conns) {
+ struct conn_el * e = list_entry(p, struct conn_el, next);
+ list_del(&e->next);
+ free(e);
+ }
+
+ list_for_each_safe(p, h, &comp->pending) {
+ struct conn_el * e = list_entry(p, struct conn_el, next);
+ list_del(&e->next);
+ free(e);
+ }
+
+ pthread_mutex_unlock(&comp->lock);
+
+ pthread_cond_destroy(&comp->cond);
+ pthread_mutex_destroy(&comp->lock);
+
+ memset(&connmgr.comps[id].info, 0, sizeof(connmgr.comps[id].info));
+}
+
+int connmgr_ipcp_connect(const char * dst,
+ const char * component,
+ qosspec_t qs)
+{
+ struct conn_el * ce;
+ int id;
+ int ret;
+
+ assert(dst);
+ assert(component);
+
+ ce = malloc(sizeof(*ce));
+ if (ce == NULL) {
+ log_err("Out of memory.");
+ goto fail_malloc;
+ }
+
+ id = get_id_by_name(component);
+ if (id < 0) {
+ log_err("No such component: %s", component);
+ goto fail_id;
+ }
+
+ pthread_cleanup_push(free, ce);
+
+ ret = connmgr_alloc(id, dst, &qs, &ce->conn);
+
+ pthread_cleanup_pop(false);
+
+ if (ret < 0) {
+ log_err("Failed to allocate flow.");
+ goto fail_id;
+ }
+
+ if (strlen(dst) > DST_MAX_STRLEN) {
+ log_warn("Truncating dst length for connection.");
+ memcpy(ce->conn.flow_info.dst, dst, DST_MAX_STRLEN);
+ ce->conn.flow_info.dst[DST_MAX_STRLEN] = '\0';
+ } else {
+ strcpy(ce->conn.flow_info.dst, dst);
+ }
+
+ pthread_mutex_lock(&connmgr.comps[id].lock);
+
+ list_add(&ce->next, &connmgr.comps[id].conns);
+
+ pthread_mutex_unlock(&connmgr.comps[id].lock);
+
+ return 0;
+
+ fail_id:
+ free(ce);
+ fail_malloc:
+ return -1;
+}
+
+int connmgr_ipcp_disconnect(const char * dst,
+ const char * component)
+{
+ struct list_head * p;
+ struct list_head * h;
+ int id;
+
+ assert(dst);
+ assert(component);
+
+ id = get_id_by_name(component);
+ if (id < 0) {
+ log_err("No such component: %s.", component);
+ return -1;
+ }
+
+ pthread_mutex_lock(&connmgr.comps[id].lock);
+
+ list_for_each_safe(p,h, &connmgr.comps[id].conns) {
+ struct conn_el * el = list_entry(p, struct conn_el, next);
+ if (strcmp(el->conn.flow_info.dst, dst) == 0) {
+ int ret;
+ pthread_mutex_unlock(&connmgr.comps[id].lock);
+ list_del(&el->next);
+ ret = connmgr_dealloc(id, &el->conn);
+ free(el);
+ return ret;
+ }
+ }
+
+ pthread_mutex_unlock(&connmgr.comps[id].lock);
+
+ return 0;
+}
+
+int connmgr_alloc(enum comp_id id,
+ const char * dst,
+ qosspec_t * qs,
+ struct conn * conn)
+{
+ struct comp * comp;
+ int fd;
+ struct timespec timeo = TIMESPEC_INIT_MS(CONNMGR_RCV_TIMEOUT);
+
+ assert(id >= 0 && id < COMPID_MAX);
+ assert(dst);
+
+ comp = connmgr.comps + id;
+
+ fd = flow_alloc(dst, qs, NULL);
+ if (fd < 0) {
+ log_err("Failed to allocate flow to %s.", dst);
+ goto fail_alloc;
+ }
+
+ conn->flow_info.fd = fd;
+
+ if (qs != NULL)
+ conn->flow_info.qs = *qs;
+ else
+ memset(&conn->flow_info.qs, 0, sizeof(conn->flow_info.qs));
+
+ log_dbg("Sending OCEP info for protocol %s to fd %d.",
+ comp->info.protocol, conn->flow_info.fd);
+
+ fccntl(fd, FLOWSRCVTIMEO, &timeo);
+
+ if (cep_snd(fd, &comp->info)) {
+ log_err("Failed to send OCEP info.");
+ goto fail_cep;
+ }
+
+ if (cep_rcv(fd, &conn->conn_info)) {
+ log_err("Failed to receive OCEP info.");
+ goto fail_cep;
+ }
+
+ if (strcmp(comp->info.protocol, conn->conn_info.protocol)) {
+ log_err("Unknown protocol (requested %s, got %s).",
+ comp->info.protocol, conn->conn_info.protocol);
+ goto fail_cep;
+ }
+
+ if (comp->info.pref_version != conn->conn_info.pref_version) {
+ log_err("Unknown protocol version %d.",
+ conn->conn_info.pref_version);
+ goto fail_cep;
+ }
+
+ if (comp->info.pref_syntax != conn->conn_info.pref_syntax) {
+ log_err("Unknown protocol syntax.");
+ goto fail_cep;
+ }
+
+ switch (id) {
+ case COMPID_DT:
+ notifier_event(NOTIFY_DT_CONN_ADD, conn);
+ break;
+ case COMPID_MGMT:
+ notifier_event(NOTIFY_MGMT_CONN_ADD, conn);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+
+ fail_cep:
+ flow_dealloc(conn->flow_info.fd);
+ fail_alloc:
+ return -1;
+}
+
+int connmgr_dealloc(enum comp_id id,
+ struct conn * conn)
+{
+ switch (id) {
+ case COMPID_DT:
+ notifier_event(NOTIFY_DT_CONN_DEL, conn);
+ break;
+#if defined(BUILD_IPCP_UNICAST)
+ case COMPID_MGMT:
+ notifier_event(NOTIFY_MGMT_CONN_DEL, conn);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ return flow_dealloc(conn->flow_info.fd);
+}
+
+
+int connmgr_wait(enum comp_id id,
+ struct conn * conn)
+{
+ struct conn_el * el;
+ struct comp * comp;
+
+ assert(id >= 0 && id < COMPID_MAX);
+ assert(conn);
+
+ comp = connmgr.comps + id;
+
+ pthread_mutex_lock(&comp->lock);
+
+ pthread_cleanup_push(__cleanup_mutex_unlock, &comp->lock);
+
+ while (list_is_empty(&comp->pending))
+ pthread_cond_wait(&comp->cond, &comp->lock);
+
+ pthread_cleanup_pop(false);
+
+ el = list_first_entry((&comp->pending), struct conn_el, next);
+ if (el == NULL) {
+ pthread_mutex_unlock(&comp->lock);
+ log_err("Failed to get connection element.");
+ return -1;
+ }
+
+ *conn = el->conn;
+
+ list_del(&el->next);
+ list_add(&el->next, &connmgr.comps[id].conns);
+
+ pthread_mutex_unlock(&comp->lock);
+
+ return 0;
+}
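connmgr_wait() pairs with add_comp_conn(): the acceptor appends to a per-component pending list and signals the condition variable, while a component thread blocks on it with the mutex unlock registered as a cancellation cleanup handler, so connmgr_stop()'s pthread_cancel() cannot leak a held lock. A reduced single-file sketch of that handshake; names are illustrative, and the real __cleanup_mutex_unlock appears to come from <ouroboros/pthread.h>:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pending; /* stand-in for the per-component pending list */

static void __cleanup_mutex_unlock(void * mutex)
{
        pthread_mutex_unlock((pthread_mutex_t *) mutex);
}

/* Producer side, as in add_comp_conn(). */
static void add_pending(void)
{
        pthread_mutex_lock(&lock);

        ++pending;
        pthread_cond_signal(&cond);

        pthread_mutex_unlock(&lock);
}

/* Consumer side, as in connmgr_wait(): the unlock is registered as
 * a cleanup handler so cancellation in the wait releases the mutex. */
static int wait_pending(void)
{
        int item;

        pthread_mutex_lock(&lock);

        pthread_cleanup_push(__cleanup_mutex_unlock, &lock);

        while (pending == 0)
                pthread_cond_wait(&cond, &lock);

        pthread_cleanup_pop(false);

        item = pending--;

        pthread_mutex_unlock(&lock);

        return item;
}

int main(void)
{
        add_pending();

        printf("consumed pending item, %d left.\n", wait_pending() - 1);

        return 0;
}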
diff --git a/src/ipcpd/unicast/connmgr.h b/src/ipcpd/common/connmgr.h
index 17f78245..0710dbbf 100644
--- a/src/ipcpd/unicast/connmgr.h
+++ b/src/ipcpd/common/connmgr.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Handles the different AP connections
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,10 +20,10 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#ifndef OUROBOROS_IPCPD_UNICAST_CONNMGR_H
-#define OUROBOROS_IPCPD_UNICAST_CONNMGR_H
+#ifndef OUROBOROS_IPCPD_COMMON_CONNMGR_H
+#define OUROBOROS_IPCPD_COMMON_CONNMGR_H
-#include <ouroboros/cacep.h>
+#include <ouroboros/cep.h>
#include <ouroboros/qos.h>
#include "comp.h"
@@ -71,4 +71,4 @@ int connmgr_dealloc(enum comp_id id,
int connmgr_wait(enum comp_id id,
struct conn * conn);
-#endif /* OUROBOROS_IPCPD_UNICAST_CONNMGR_H */
+#endif /* OUROBOROS_IPCPD_COMMON_CONNMGR_H */
diff --git a/src/ipcpd/common/enroll.c b/src/ipcpd/common/enroll.c
new file mode 100644
index 00000000..4b437b27
--- /dev/null
+++ b/src/ipcpd/common/enroll.c
@@ -0,0 +1,346 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Enrollment Task
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#if defined(__linux__) || defined(__CYGWIN__)
+#define _DEFAULT_SOURCE
+#else
+#define _POSIX_C_SOURCE 199309L
+#endif
+
+#define OUROBOROS_PREFIX "enrollment"
+
+#include <ouroboros/dev.h>
+#include <ouroboros/errno.h>
+#include <ouroboros/logs.h>
+#include <ouroboros/serdes-oep.h>
+#include <ouroboros/time.h>
+
+#include "common/connmgr.h"
+#include "common/enroll.h"
+#include "ipcp.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#ifdef __APPLE__
+#define llabs labs
+#endif
+
+#define ENROLL_COMP "Enrollment"
+#define ENROLL_PROTO "OEP" /* Ouroboros enrollment protocol */
+#define ENROLL_WARN_TIME_OFFSET 20
+#define ENROLL_BUF_LEN 1024
+
+enum enroll_state {
+ ENROLL_NULL = 0,
+ ENROLL_INIT,
+ ENROLL_RUNNING
+};
+
+struct {
+ struct ipcp_config conf;
+ enum enroll_state state;
+ pthread_t listener;
+} enroll;
+
+#ifdef DEBUG_PROTO_OEP
+#endif
+
+static void * enroll_handle(void * o)
+{
+ struct enroll_req req;
+ struct enroll_resp resp;
+ struct enroll_ack ack;
+ struct conn conn;
+ uint8_t __buf[ENROLL_BUF_LEN];
+ buffer_t buf;
+ ssize_t len;
+
+ (void) o;
+
+ buf.data = __buf;
+ buf.len = sizeof(__buf);
+
+ resp.response = 0;
+ resp.conf = enroll.conf;
+
+ while (true) {
+ buffer_t msg;
+ int fd;
+
+ if (connmgr_wait(COMPID_ENROLL, &conn)) {
+ log_err("Failed to get next connection.");
+ continue;
+ }
+
+ fd = conn.flow_info.fd;
+
+ log_info("Incoming enrollment connection on flow %d.", fd);
+
+ len = flow_read(fd, buf.data, buf.len);
+ if (len < 0) {
+ log_err("Failed to read from flow %d.", fd);
+ goto finish_flow;
+ }
+
+ msg.data = buf.data;
+ msg.len = (size_t) len;
+
+ if (enroll_req_des(&req, msg) < 0) {
+ log_err("Failed to unpack request message.");
+ goto finish_flow;
+ }
+
+ log_info_id(req.id, "Handling incoming enrollment.");
+
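+		/* Sentinel: stays -100 if no ack arrives from the peer. */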
+ ack.result = -100;
+
+ clock_gettime(CLOCK_REALTIME, &resp.t);
+
+ memcpy(resp.id, req.id, ENROLL_ID_LEN);
+
+ len = enroll_resp_ser(&resp, buf);
+ if (len < 0) {
+ log_err_id(req.id, "Failed to pack reply.");
+ goto finish_enroll;
+ }
+
+ log_dbg_id(req.id, "Sending enrollment info (%zd bytes).", len);
+
+ if (flow_write(conn.flow_info.fd, buf.data, len) < 0) {
+ log_err_id(req.id, "Failed te send response.");
+ goto finish_enroll;
+ }
+
+ len = flow_read(conn.flow_info.fd, buf.data, buf.len);
+ if (len < 0) {
+ log_err_id(req.id, "Failed to read from flow.");
+ goto finish_enroll;
+ }
+
+ msg.data = buf.data;
+ msg.len = (size_t) len;
+
+ if (enroll_ack_des(&ack, msg) < 0) {
+ log_err_id(req.id, "Failed to unpack ack.");
+ goto finish_enroll;
+ }
+
+ if (memcmp(req.id, ack.id, ENROLL_ID_LEN) != 0)
+ log_warn_id(req.id, "Enrollment ID mismatch.");
+
+ finish_enroll:
+ switch(ack.result) {
+ case 0:
+ log_info_id(req.id, "Enrollment completed.");
+ break;
+ case -100:
+ log_warn_id(req.id, "Enrollment failed.");
+ break;
+ default:
+ log_warn_id(req.id, "Enrollment failed at remote.");
+ }
+ finish_flow:
+ connmgr_dealloc(COMPID_ENROLL, &conn);
+
+ log_info("Enrollment flow %d closed.", fd);
+ }
+
+ return 0;
+}
+
+int enroll_boot(struct conn * conn,
+ const uint8_t * id)
+{
+ uint8_t __buf[ENROLL_BUF_LEN];
+ buffer_t buf;
+ buffer_t msg;
+ ssize_t len;
+ ssize_t delta_t;
+ struct timespec t0;
+ struct timespec rtt;
+ int fd;
+ int ret;
+ struct enroll_req req;
+ struct enroll_resp resp;
+
+ fd = conn->flow_info.fd;
+
+ buf.data = __buf;
+ buf.len = sizeof(__buf);
+
+ memcpy(req.id, id, ENROLL_ID_LEN);
+
+ len = enroll_req_ser(&req, buf);
+ if (len < 0) {
+ log_err_id(id, "Failed to pack request message.");
+ return -1;
+ }
+
+ clock_gettime(CLOCK_REALTIME, &t0);
+
+ if (flow_write(fd, buf.data, len) < 0) {
+ log_err_id(id, "Failed to send request message.");
+ return -1;
+ }
+
+ len = flow_read(fd, buf.data, buf.len);
+ if (len < 0) {
+ log_err_id(id, "No reply received.");
+ return -1;
+ }
+
+ log_dbg_id(id, "Received configuration info (%zd bytes).", len);
+
+ msg.data = buf.data;
+ msg.len = len;
+
+ ret = enroll_resp_des(&resp, msg);
+ if (ret < 0) {
+ log_err_id(id, "Failed to unpack response message.");
+ return -1;
+ }
+
+ if (memcmp(resp.id, id, ENROLL_ID_LEN) != 0) {
+ log_err_id(id, "Enrollment ID mismatch.");
+ return -1;
+ }
+
+ if (resp.response < 0) {
+ log_warn_id(id, "Remote denied request: %d.", resp.response);
+ return -1;
+ }
+
+ if (resp.conf.type != ipcp_get_type()) {
+ log_err_id(id, "Wrong type in enrollment response %d (%d).",
+ resp.conf.type, ipcp_get_type());
+ return -1;
+ }
+
+ enroll.conf = resp.conf;
+
+ clock_gettime(CLOCK_REALTIME, &rtt);
+
+ delta_t = ts_diff_ms(&t0, &rtt);
+
+ rtt.tv_sec = resp.t.tv_sec;
+ rtt.tv_nsec = resp.t.tv_nsec;
+
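+	/* Offset estimate: |remote stamp - t0| minus the measured RTT (ms). */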
+ if (llabs(ts_diff_ms(&t0, &rtt)) - delta_t > ENROLL_WARN_TIME_OFFSET)
+ log_warn_id(id, "Clock offset above threshold.");
+
+ return 0;
+}
+
+int enroll_ack(struct conn * conn,
+ const uint8_t * id,
+ const int result)
+{
+ struct enroll_ack ack;
+ uint8_t __buf[ENROLL_BUF_LEN];
+ buffer_t buf;
+ ssize_t len;
+
+ buf.data = __buf;
+ buf.len = sizeof(__buf);
+
+ ack.result = result;
+
+ memcpy(ack.id, id, ENROLL_ID_LEN);
+
+ len = enroll_ack_ser(&ack, buf);
+ if (len < 0) {
+ log_err_id(id, "Failed to pack acknowledgement.");
+ return -1;
+ }
+
+ if (flow_write(conn->flow_info.fd, buf.data, len) < 0) {
+ log_err_id(id, "Failed to send acknowledgment.");
+ return -1;
+ }
+
+ return 0;
+}
+
+void enroll_bootstrap(const struct ipcp_config * conf)
+{
+ assert(conf);
+
+ memcpy(&enroll.conf, conf, sizeof(enroll.conf));
+}
+
+struct ipcp_config * enroll_get_conf(void)
+{
+ return &enroll.conf;
+}
+
+int enroll_init(void)
+{
+ struct conn_info info;
+
+ memset(&info, 0, sizeof(info));
+
+ strcpy(info.comp_name, ENROLL_COMP);
+ strcpy(info.protocol, ENROLL_PROTO);
+ info.pref_version = 1;
+ info.pref_syntax = PROTO_GPB;
+ info.addr = 0;
+
+ if (connmgr_comp_init(COMPID_ENROLL, &info)) {
+ log_err("Failed to register with connmgr.");
+ return -1;
+ }
+
+ enroll.state = ENROLL_INIT;
+
+ return 0;
+}
+
+void enroll_fini(void)
+{
+ if (enroll.state == ENROLL_RUNNING)
+ pthread_join(enroll.listener, NULL);
+
+ connmgr_comp_fini(COMPID_ENROLL);
+}
+
+int enroll_start(void)
+{
+ if (pthread_create(&enroll.listener, NULL, enroll_handle, NULL))
+ return -1;
+
+ enroll.state = ENROLL_RUNNING;
+
+ return 0;
+}
+
+void enroll_stop(void)
+{
+ if (enroll.state == ENROLL_RUNNING)
+ pthread_cancel(enroll.listener);
+}
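
Together with the header change below, the enrollee side of OEP is a three-message exchange: request, response, ack. A hedged sketch of a caller (connection setup and id generation are assumed; the calls match the new signatures):

    /* Sketch: enroll with a parent over an enrollment connection. */
    uint8_t     id[ENROLL_ID_LEN]; /* caller-chosen, e.g. random bytes */
    struct conn conn;

    if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn) < 0)
            return -1;

    if (enroll_boot(&conn, id) < 0) {      /* sends req, reads resp */
            enroll_ack(&conn, id, -1);
            connmgr_dealloc(COMPID_ENROLL, &conn);
            return -1;
    }

    enroll_ack(&conn, id, 0);              /* confirm to the remote */
    connmgr_dealloc(COMPID_ENROLL, &conn);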
diff --git a/src/ipcpd/unicast/enroll.h b/src/ipcpd/common/enroll.h
index 804f5d5b..f26c31a3 100644
--- a/src/ipcpd/unicast/enroll.h
+++ b/src/ipcpd/common/enroll.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Enrollment Task
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,8 +20,8 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#ifndef OUROBOROS_IPCPD_UNICAST_ENROLL_H
-#define OUROBOROS_IPCPD_UNICAST_ENROLL_H
+#ifndef OUROBOROS_IPCPD_COMMON_ENROLL_H
+#define OUROBOROS_IPCPD_COMMON_ENROLL_H
#include <ouroboros/ipcp.h>
@@ -37,11 +37,13 @@ void enroll_stop(void);
void enroll_bootstrap(const struct ipcp_config * conf);
-int enroll_boot(struct conn * conn);
+int enroll_boot(struct conn * conn,
+ const uint8_t * id);
-int enroll_done(struct conn * conn,
- int result);
+int enroll_ack(struct conn * conn,
+ const uint8_t * id,
+ const int result);
struct ipcp_config * enroll_get_conf(void);
-#endif /* OUROBOROS_IPCPD_UNICAST_ENROLL_H */
+#endif /* OUROBOROS_IPCPD_COMMON_ENROLL_H */
diff --git a/src/ipcpd/config.h.in b/src/ipcpd/config.h.in
index 3f69d327..d2af6440 100644
--- a/src/ipcpd/config.h.in
+++ b/src/ipcpd/config.h.in
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC process configuration
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -39,6 +39,8 @@
#define IPCP_ADD_THREADS @IPCP_ADD_THREADS@
#cmakedefine HAVE_LIBGCRYPT
+#define IPCP_LINUX_SLACK_NS @IPCP_LINUX_TIMERSLACK_NS@
+
/* unicast IPCP */
#define QOS_PRIO_BE @IPCP_QOS_CUBE_BE_PRIO@
#define QOS_PRIO_VIDEO @IPCP_QOS_CUBE_VIDEO_PRIO@
@@ -46,10 +48,17 @@
#define IPCP_SCHED_THR_MUL @IPCP_SCHED_THR_MUL@
#define PFT_SIZE @PFT_SIZE@
#define DHT_ENROLL_SLACK @DHT_ENROLL_SLACK@
+#define IPCP_UNICAST_MPL @IPCP_UNICAST_MPL@
+#define CONNMGR_RCV_TIMEOUT @CONNMGR_RCV_TIMEOUT@
-#cmakedefine IPCP_CONN_WAIT_DIR
#cmakedefine DISABLE_CORE_LOCK
#cmakedefine IPCP_FLOW_STATS
+#cmakedefine IPCP_DEBUG_LOCAL
+#ifdef CONFIG_OUROBOROS_DEBUG
+#cmakedefine DEBUG_PROTO_DHT
+#cmakedefine DEBUG_PROTO_OEP
+#cmakedefine DEBUG_PROTO_LS
+#endif
/* udp */
#cmakedefine HAVE_DDNS
@@ -57,8 +66,9 @@
#define NSLOOKUP_EXEC "@NSLOOKUP_EXECUTABLE@"
#define IPCP_UDP_RD_THR @IPCP_UDP_RD_THR@
#define IPCP_UDP_WR_THR @IPCP_UDP_WR_THR@
+#define IPCP_UDP_MPL @IPCP_UDP_MPL@
-/* eth-llc */
+/* eth */
#cmakedefine HAVE_NETMAP
#cmakedefine HAVE_BPF
#cmakedefine HAVE_RAW_SOCKETS
@@ -66,3 +76,11 @@
#define IPCP_ETH_RD_THR @IPCP_ETH_RD_THR@
#define IPCP_ETH_WR_THR @IPCP_ETH_WR_THR@
#define IPCP_ETH_LO_MTU @IPCP_ETH_LO_MTU@
+#define IPCP_ETH_MPL @IPCP_ETH_MPL@
+
+/* local */
+#define IPCP_LOCAL_MPL @IPCP_LOCAL_MPL@
+
+/* broadcast */
+#define IPCP_BROADCAST_MPL @IPCP_BROADCAST_MPL@
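
The new per-IPCP MPL defines are consumed on the flow-allocation paths; the eth diff further down shows the pattern, which the other IPCPs presumably mirror with their own define:

    time_t mpl = IPCP_ETH_MPL; /* maximum packet lifetime, ms */

    if (ipcp_flow_alloc_reply(fd, response, mpl, data) < 0)
            return -1;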
diff --git a/src/ipcpd/eth/CMakeLists.txt b/src/ipcpd/eth/CMakeLists.txt
index d7105b4f..17ae74fc 100644
--- a/src/ipcpd/eth/CMakeLists.txt
+++ b/src/ipcpd/eth/CMakeLists.txt
@@ -85,6 +85,8 @@ if (HAVE_ETH)
"Bypass the Qdisc in the kernel when using raw sockets")
set(IPCP_ETH_LO_MTU 1500 CACHE STRING
"Restrict Ethernet MTU over loopback interfaces")
+ set(IPCP_ETH_MPL 100 CACHE STRING
+ "Default maximum packet lifetime for the Ethernet IPCPs, in ms")
set(ETH_LLC_SOURCES
# Add source files here
diff --git a/src/ipcpd/eth/dix.c b/src/ipcpd/eth/dix.c
index dd007709..37b9896d 100644
--- a/src/ipcpd/eth/dix.c
+++ b/src/ipcpd/eth/dix.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC processes over Ethernet - DIX
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c
index 6b17912b..1028ee03 100644
--- a/src/ipcpd/eth/eth.c
+++ b/src/ipcpd/eth/eth.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC processes over Ethernet
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -37,6 +37,7 @@
#include "config.h"
+#include <ouroboros/endian.h>
#include <ouroboros/hash.h>
#include <ouroboros/errno.h>
#include <ouroboros/list.h>
@@ -46,15 +47,15 @@
#include <ouroboros/ipcp-dev.h>
#include <ouroboros/fqueue.h>
#include <ouroboros/logs.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>
#include <ouroboros/fccntl.h>
+#include <ouroboros/pthread.h>
#include "ipcp.h"
#include "shim-data.h"
#include <signal.h>
#include <stdlib.h>
-#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
@@ -87,29 +88,30 @@
#include <sys/mman.h>
#if defined(HAVE_NETMAP)
-#define NETMAP_WITH_LIBS
-#include <net/netmap_user.h>
+ #define NETMAP_WITH_LIBS
+ #include <net/netmap_user.h>
#elif defined(HAVE_BPF)
-#define BPF_DEV_MAX 256
-#define BPF_BLEN sysconf(_SC_PAGESIZE)
-#include <net/bpf.h>
+ #define BPF_DEV_MAX 256
+ #define BPF_BLEN sysconf(_SC_PAGESIZE)
+ #include <net/bpf.h>
#endif
-#ifdef __linux__
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_VAL(a) \
+ (uint8_t)(a)[0], (uint8_t)(a)[1], (uint8_t)(a)[2], \
+ (uint8_t)(a)[3], (uint8_t)(a)[4], (uint8_t)(a)[5]
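
The pair expands to a printf-style format plus six byte arguments; it is used later in this diff as:

    log_dbg("Interface %s hwaddr " MAC_FMT ".", ifr->ifr_name,
            MAC_VAL(ifr->ifr_hwaddr.sa_data));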
+
#ifndef ETH_MAX_MTU /* In if_ether.h as of Linux 4.10. */
-#define ETH_MAX_MTU 0xFFFFU
+ #define ETH_MAX_MTU 0xFFFFU
#endif /* ETH_MAX_MTU */
#ifdef BUILD_ETH_DIX
-#define ETH_MTU eth_data.mtu
-#define ETH_MTU_MAX ETH_MAX_MTU
+ #define ETH_MTU eth_data.mtu
+ #define ETH_MTU_MAX ETH_MAX_MTU
#else
-#define ETH_MTU eth_data.mtu
-#define ETH_MTU_MAX 1500
+ #define ETH_MTU eth_data.mtu
+ #define ETH_MTU_MAX 1500
#endif /* BUILD_ETH_DIX */
-#else /* __linux__ */
-#define ETH_MTU 1500
-#define ETH_MTU_MAX ETH_MTU
-#endif /* __linux__ */
#define MAC_SIZE 6
#define ETH_TYPE_LENGTH_SIZE sizeof(uint16_t)
@@ -135,7 +137,6 @@
#define ETH_FRAME_SIZE (ETH_HEADER_SIZE + ETH_MTU_MAX)
#endif
-#define ALLOC_TIMEO 10 /* ms */
#define NAME_QUERY_TIMEO 2000 /* ms */
#define MGMT_TIMEO 100 /* ms */
#define MGMT_FRAME_SIZE 2048
@@ -162,6 +163,7 @@ struct mgmt_msg {
uint32_t ber;
uint32_t max_gap;
uint32_t delay;
+ uint32_t timeout;
uint16_t cypher_s;
uint8_t in_order;
#if defined (BUILD_ETH_DIX)
@@ -206,8 +208,9 @@ struct mgmt_frame {
struct {
struct shim_data * shim_data;
-#ifdef __linux__
+
int mtu;
+#ifdef __linux__
int if_idx;
#endif
#if defined(HAVE_NETMAP)
@@ -452,16 +455,15 @@ static int eth_ipcp_send_frame(const uint8_t * dst_addr,
return 0;
}
-static int eth_ipcp_alloc(const uint8_t * dst_addr,
+static int eth_ipcp_alloc(const uint8_t * dst_addr,
#if defined(BUILD_ETH_DIX)
- uint16_t eid,
+ uint16_t eid,
#elif defined(BUILD_ETH_LLC)
- uint8_t ssap,
+ uint8_t ssap,
#endif
- const uint8_t * hash,
- qosspec_t qs,
- const void * data,
- size_t dlen)
+ const uint8_t * hash,
+ qosspec_t qs,
+ const buffer_t * data)
{
uint8_t * buf;
struct mgmt_msg * msg;
@@ -470,7 +472,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
len = sizeof(*msg) + ipcp_dir_hash_len();
- buf = malloc(len + ETH_HEADER_TOT_SIZE + dlen);
+ buf = malloc(len + ETH_HEADER_TOT_SIZE + data->len);
if (buf == NULL)
return -1;
@@ -490,9 +492,11 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
msg->in_order = qs.in_order;
msg->max_gap = hton32(qs.max_gap);
msg->cypher_s = hton16(qs.cypher_s);
+ msg->timeout = hton32(qs.timeout);
memcpy(msg + 1, hash, ipcp_dir_hash_len());
- memcpy(buf + len + ETH_HEADER_TOT_SIZE, data, dlen);
+ if (data->len > 0)
+ memcpy(buf + len + ETH_HEADER_TOT_SIZE, data->data, data->len);
ret = eth_ipcp_send_frame(dst_addr,
#if defined(BUILD_ETH_DIX)
@@ -501,28 +505,27 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
reverse_bits(MGMT_SAP),
reverse_bits(MGMT_SAP),
#endif
- buf, len + dlen);
+ buf, len + data->len);
free(buf);
return ret;
}
-static int eth_ipcp_alloc_resp(uint8_t * dst_addr,
+static int eth_ipcp_alloc_resp(uint8_t * dst_addr,
#if defined(BUILD_ETH_DIX)
- uint16_t seid,
- uint16_t deid,
+ uint16_t seid,
+ uint16_t deid,
#elif defined(BUILD_ETH_LLC)
- uint8_t ssap,
- uint8_t dsap,
+ uint8_t ssap,
+ uint8_t dsap,
#endif
- int response,
- const void * data,
- size_t len)
+ int response,
+ const buffer_t * data)
{
struct mgmt_msg * msg;
uint8_t * buf;
- buf = malloc(sizeof(*msg) + ETH_HEADER_TOT_SIZE + len);
+ buf = malloc(sizeof(*msg) + ETH_HEADER_TOT_SIZE + data->len);
if (buf == NULL)
return -1;
@@ -538,7 +541,8 @@ static int eth_ipcp_alloc_resp(uint8_t * dst_addr,
#endif
msg->response = response;
- memcpy(msg + 1, data, len);
+ if (data->len > 0)
+ memcpy(msg + 1, data->data, data->len);
if (eth_ipcp_send_frame(dst_addr,
#if defined(BUILD_ETH_DIX)
@@ -547,7 +551,7 @@ static int eth_ipcp_alloc_resp(uint8_t * dst_addr,
reverse_bits(MGMT_SAP),
reverse_bits(MGMT_SAP),
#endif
- buf, sizeof(*msg) + len)) {
+ buf, sizeof(*msg) + data->len)) {
free(buf);
return -1;
}
@@ -557,42 +561,20 @@ static int eth_ipcp_alloc_resp(uint8_t * dst_addr,
return 0;
}
-static int eth_ipcp_req(uint8_t * r_addr,
+static int eth_ipcp_req(uint8_t * r_addr,
#if defined(BUILD_ETH_DIX)
- uint16_t r_eid,
+ uint16_t r_eid,
#elif defined(BUILD_ETH_LLC)
- uint8_t r_sap,
+ uint8_t r_sap,
#endif
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t len)
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data)
{
- struct timespec ts = {0, ALLOC_TIMEO * MILLION};
- struct timespec abstime;
- int fd;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
+ int fd;
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_dbg("Won't allocate over non-operational IPCP.");
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- return -1;
- }
-
- /* reply to IRM, called under lock to prevent race */
- fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len);
+ fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_ETH_MPL, data);
if (fd < 0) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Could not get new flow from IRMd.");
return -1;
}
@@ -607,11 +589,6 @@ static int eth_ipcp_req(uint8_t * r_addr,
pthread_rwlock_unlock(&eth_data.flows_lock);
- ipcpi.alloc_id = fd;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
#if defined(BUILD_ETH_DIX)
log_dbg("New flow request, fd %d, remote endpoint %d.", fd, r_eid);
#elif defined(BUILD_ETH_LLC)
@@ -620,20 +597,20 @@ static int eth_ipcp_req(uint8_t * r_addr,
return 0;
}
-static int eth_ipcp_alloc_reply(uint8_t * r_addr,
+static int eth_ipcp_alloc_reply(uint8_t * r_addr,
#if defined(BUILD_ETH_DIX)
- uint16_t seid,
- uint16_t deid,
+ uint16_t seid,
+ uint16_t deid,
#elif defined(BUILD_ETH_LLC)
- uint8_t ssap,
- int dsap,
+ uint8_t ssap,
+ int dsap,
#endif
- int response,
- const void * data,
- size_t len)
+ int response,
+ const buffer_t * data)
{
- int ret = 0;
- int fd = -1;
+ int ret = 0;
+ int fd = -1;
+ time_t mpl = IPCP_ETH_MPL;
pthread_rwlock_wrlock(&eth_data.flows_lock);
@@ -668,11 +645,12 @@ static int eth_ipcp_alloc_reply(uint8_t * r_addr,
#elif defined(BUILD_ETH_LLC)
log_dbg("Flow reply, fd %d, SSAP %d, DSAP %d.", fd, ssap, dsap);
#endif
- if ((ret = ipcp_flow_alloc_reply(fd, response, data, len)) < 0)
+ if ((ret = ipcp_flow_alloc_reply(fd, response, mpl, data)) < 0) {
+ log_err("Failed to reply to flow allocation.");
return -1;
+ }
return ret;
-
}
static int eth_ipcp_name_query_req(const uint8_t * hash,
@@ -734,6 +712,7 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
struct mgmt_msg * msg;
size_t msg_len;
qosspec_t qs;
+ buffer_t data;
msg = (struct mgmt_msg *) buf;
@@ -751,6 +730,10 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
qs.in_order = msg->in_order;
qs.max_gap = ntoh32(msg->max_gap);
qs.cypher_s = ntoh16(msg->cypher_s);
+ qs.timeout = ntoh32(msg->timeout);
+
+ data.data = (uint8_t *) buf + msg_len;
+ data.len = len - msg_len;
if (shim_data_reg_has(eth_data.shim_data,
buf + sizeof(*msg))) {
@@ -762,13 +745,15 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
#endif
buf + sizeof(*msg),
qs,
- buf + msg_len,
- len - msg_len);
+ &data);
}
break;
case FLOW_REPLY:
assert(len >= sizeof(*msg));
+ data.data = (uint8_t *) buf + sizeof(*msg);
+ data.len = len - sizeof(*msg);
+
eth_ipcp_alloc_reply(r_addr,
#if defined(BUILD_ETH_DIX)
ntohs(msg->seid),
@@ -778,8 +763,7 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
msg->dsap,
#endif
msg->response,
- buf + sizeof(*msg),
- len - sizeof(*msg));
+ &data);
break;
case NAME_QUERY_REQ:
eth_ipcp_name_query_req(buf + sizeof(*msg), r_addr);
@@ -797,19 +781,15 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
static void * eth_ipcp_mgmt_handler(void * o)
{
- int ret;
- struct timespec timeout = {(MGMT_TIMEO / 1000),
- (MGMT_TIMEO % 1000) * MILLION};
- struct timespec abstime;
- struct mgmt_frame * frame;
-
(void) o;
- pthread_cleanup_push((void (*)(void *)) pthread_mutex_unlock,
- (void *) &eth_data.mgmt_lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, &eth_data.mgmt_lock);
while (true) {
- ret = 0;
+ int ret = 0;
+ struct timespec timeout = TIMESPEC_INIT_MS(MGMT_TIMEO);
+ struct timespec abstime;
+ struct mgmt_frame * frame = NULL;
clock_gettime(PTHREAD_COND_CLOCK, &abstime);
ts_add(&abstime, &timeout, &abstime);
@@ -821,23 +801,19 @@ static void * eth_ipcp_mgmt_handler(void * o)
ret = -pthread_cond_timedwait(&eth_data.mgmt_cond,
&eth_data.mgmt_lock,
&abstime);
+ if (ret != -ETIMEDOUT)
+ frame = list_first_entry((&eth_data.mgmt_frames),
+ struct mgmt_frame, next);
+ if (frame != NULL)
+ list_del(&frame->next);
- if (ret == -ETIMEDOUT) {
- pthread_mutex_unlock(&eth_data.mgmt_lock);
- continue;
- }
+ pthread_mutex_unlock(&eth_data.mgmt_lock);
- frame = list_first_entry((&eth_data.mgmt_frames),
- struct mgmt_frame, next);
- if (frame == NULL) {
- pthread_mutex_unlock(&eth_data.mgmt_lock);
+ if (frame == NULL)
continue;
- }
-
- list_del(&frame->next);
- pthread_mutex_unlock(&eth_data.mgmt_lock);
eth_ipcp_mgmt_frame(frame->buf, frame->len, frame->r_addr);
+
free(frame);
}
@@ -883,7 +859,7 @@ static void * eth_ipcp_packet_reader(void * o)
buf = nm_nextpkt(eth_data.nmd, &hdr);
if (buf == NULL) {
- log_err("Bad read from netmap device.");
+ log_dbg("Bad read from netmap device.");
continue;
}
#else
@@ -914,6 +890,7 @@ static void * eth_ipcp_packet_reader(void * o)
ETH_MTU + ETH_HEADER_TOT_SIZE, 0);
#endif
if (frame_len <= 0) {
+ log_dbg("Failed to receive frame.");
ipcp_sdb_release(sdb);
continue;
}
@@ -940,22 +917,14 @@ static void * eth_ipcp_packet_reader(void * o)
#endif
length = ntohs(e_frame->length);
#if defined(BUILD_ETH_DIX)
- if (e_frame->ethertype != eth_data.ethertype) {
-#ifndef HAVE_NETMAP
- ipcp_sdb_release(sdb);
-#endif
- continue;
- }
+ if (e_frame->ethertype != eth_data.ethertype)
+ goto fail_frame;
deid = ntohs(e_frame->eid);
if (deid == MGMT_EID) {
#elif defined (BUILD_ETH_LLC)
- if (length > 0x05FF) {/* DIX */
-#ifndef HAVE_NETMAP
- ipcp_sdb_release(sdb);
-#endif
- continue;
- }
+ if (length > 0x05FF) /* DIX */
+ goto fail_frame;
length -= LLC_HEADER_SIZE;
@@ -964,12 +933,12 @@ static void * eth_ipcp_packet_reader(void * o)
if (ssap == MGMT_SAP && dsap == MGMT_SAP) {
#endif
+ ipcp_sdb_release(sdb); /* No need for the N+1 buffer. */
+
frame = malloc(sizeof(*frame));
if (frame == NULL) {
-#ifndef HAVE_NETMAP
- ipcp_sdb_release(sdb);
-#endif
- continue;
+ log_err("Failed to allocate frame.");
+ goto fail_frame;
}
memcpy(frame->buf, &e_frame->payload, length);
@@ -980,10 +949,6 @@ static void * eth_ipcp_packet_reader(void * o)
list_add(&frame->next, &eth_data.mgmt_frames);
pthread_cond_signal(&eth_data.mgmt_cond);
pthread_mutex_unlock(&eth_data.mgmt_lock);
-
-#ifndef HAVE_NETMAP
- ipcp_sdb_release(sdb);
-#endif
} else {
pthread_rwlock_rdlock(&eth_data.flows_lock);
@@ -994,10 +959,7 @@ static void * eth_ipcp_packet_reader(void * o)
#endif
if (fd < 0) {
pthread_rwlock_unlock(&eth_data.flows_lock);
-#ifndef HAVE_NETMAP
- ipcp_sdb_release(sdb);
-#endif
- continue;
+ goto fail_frame;
}
#ifdef BUILD_ETH_LLC
@@ -1005,10 +967,7 @@ static void * eth_ipcp_packet_reader(void * o)
|| memcmp(eth_data.fd_to_ef[fd].r_addr,
e_frame->src_hwaddr, MAC_SIZE)) {
pthread_rwlock_unlock(&eth_data.flows_lock);
-#ifndef HAVE_NETMAP
- ipcp_sdb_release(sdb);
-#endif
- continue;
+ goto fail_frame;
}
#endif
pthread_rwlock_unlock(&eth_data.flows_lock);
@@ -1016,9 +975,20 @@ static void * eth_ipcp_packet_reader(void * o)
#ifndef HAVE_NETMAP
shm_du_buff_head_release(sdb, ETH_HEADER_TOT_SIZE);
shm_du_buff_truncate(sdb, length);
- ipcp_flow_write(fd, sdb);
#else
- flow_write(fd, &e_frame->payload, length);
+ if (ipcp_sdb_reserve(&sdb, length))
+ continue;
+
+ buf = shm_du_buff_head(sdb);
+ memcpy(buf, &e_frame->payload, length);
+#endif
+ if (np1_flow_write(fd, sdb) < 0)
+ ipcp_sdb_release(sdb);
+
+ continue;
+ fail_frame:
+#ifndef HAVE_NETMAP
+ ipcp_sdb_release(sdb);
#endif
}
}
@@ -1052,27 +1022,28 @@ static void * eth_ipcp_packet_writer(void * o)
(void) o;
- pthread_cleanup_push(cleanup_writer, fq);
-
ipcp_lock_to_core();
+ pthread_cleanup_push(cleanup_writer, fq);
+
while (true) {
fevent(eth_data.np1_flows, fq, NULL);
while ((fd = fqueue_next(fq)) >= 0) {
if (fqueue_type(fq) != FLOW_PKT)
continue;
- if (ipcp_flow_read(fd, &sdb)) {
+ if (np1_flow_read(fd, &sdb)) {
log_dbg("Bad read from fd %d.", fd);
continue;
}
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+ len = shm_du_buff_len(sdb);
if (shm_du_buff_head_alloc(sdb, ETH_HEADER_TOT_SIZE)
== NULL) {
log_dbg("Failed to allocate header.");
ipcp_sdb_release(sdb);
+ continue;
}
pthread_rwlock_rdlock(&eth_data.flows_lock);
@@ -1088,14 +1059,15 @@ static void * eth_ipcp_packet_writer(void * o)
pthread_rwlock_unlock(&eth_data.flows_lock);
- eth_ipcp_send_frame(r_addr,
+ if (eth_ipcp_send_frame(r_addr,
#if defined(BUILD_ETH_DIX)
deid,
#elif defined(BUILD_ETH_LLC)
dsap, ssap,
#endif
shm_du_buff_head(sdb),
- len);
+ len))
+ log_dbg("Failed to send frame.");
ipcp_sdb_release(sdb);
}
}
@@ -1160,12 +1132,6 @@ static void change_flows_state(bool up)
pthread_rwlock_unlock(&eth_data.flows_lock);
}
-static void close_ptr(void * o)
-{
- close(*((int *) o));
-}
-
-
static void * eth_ipcp_if_monitor(void * o)
{
int fd;
@@ -1186,7 +1152,7 @@ static void * eth_ipcp_if_monitor(void * o)
return (void *) -1;
}
- pthread_cleanup_push(close_ptr, &fd);
+ pthread_cleanup_push(__cleanup_close_ptr, &fd);
while (true) {
status = recvmsg(fd, &msg, 0);
@@ -1251,107 +1217,123 @@ static int open_bpf_device(void)
}
#endif
-static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
-{
- int idx;
- struct ifreq ifr;
-#if defined(HAVE_NETMAP)
- char ifn[IFNAMSIZ];
-#elif defined(HAVE_BPF)
- int enable = 1;
- int disable = 0;
- int blen;
-#endif /* HAVE_NETMAP */
-
#if defined(__FreeBSD__) || defined(__APPLE__)
+static int ifr_hwaddr_from_ifaddrs(struct ifreq * ifr)
+{
struct ifaddrs * ifaddr;
struct ifaddrs * ifa;
-#elif defined(__linux__)
- int skfd;
-#endif
-#ifndef SHM_RDRB_MULTI_BLOCK
- size_t maxsz;
-#endif
-#if defined(HAVE_RAW_SOCKETS)
- #if defined(IPCP_ETH_QDISC_BYPASS)
- int qdisc_bypass = 1;
- #endif /* ENABLE_QDISC_BYPASS */
- int flags;
-#endif
- assert(conf);
- assert(conf->type == THIS_TYPE);
+ int idx;
- if (conf->dev == NULL) {
- log_err("Device name is NULL.");
- return -1;
+ if (getifaddrs(&ifaddr) < 0) {
+ log_err("Could not get interfaces.");
+ goto fail_ifaddrs;
}
- if (strlen(conf->dev) >= IFNAMSIZ) {
- log_err("Invalid device name: %s.", conf->dev);
- return -1;
+ for (ifa = ifaddr, idx = 0; ifa != NULL; ifa = ifa->ifa_next, ++idx) {
+ if (strcmp(ifa->ifa_name, ifr->ifr_name) == 0)
+ break;
}
- memset(&ifr, 0, sizeof(ifr));
- strcpy(ifr.ifr_name, conf->dev);
-
-#ifdef BUILD_ETH_DIX
- if (conf->ethertype < 0x0600 || conf->ethertype == 0xFFFF) {
- log_err("Invalid Ethertype.");
- return -1;
+ if (ifa == NULL) {
+ log_err("Interface not found.");
+ goto fail_ifa;
}
- eth_data.ethertype = htons(conf->ethertype);
-#endif
-#if defined(__FreeBSD__) || defined(__APPLE__)
- if (getifaddrs(&ifaddr) < 0) {
- log_err("Could not get interfaces.");
- return -1;
- }
+ memcpy(&ifr->ifr_addr, ifa->ifa_addr, sizeof(*ifa->ifa_addr));
- for (ifa = ifaddr, idx = 0; ifa != NULL; ifa = ifa->ifa_next, ++idx) {
- if (strcmp(ifa->ifa_name, conf->dev))
- continue;
- log_dbg("Interface %s found.", conf->dev);
-
- #if defined(HAVE_NETMAP) || defined(HAVE_BPF)
- memcpy(eth_data.hw_addr,
- LLADDR((struct sockaddr_dl *) (ifa)->ifa_addr),
- MAC_SIZE);
- #elif defined (HAVE_RAW_SOCKETS)
- memcpy(&ifr.ifr_addr, ifa->ifa_addr, sizeof(*ifa->ifa_addr));
- #endif
- break;
- }
+ log_dbg("Interface %s hwaddr " MAC_FMT ".", ifr->ifr_name,
+ MAC_VAL(ifr->ifr_addr.sa_data));
freeifaddrs(ifaddr);
- if (ifa == NULL) {
- log_err("Interface not found.");
- return -1;
+ return 0;
+ fail_ifa:
+ freeifaddrs(ifaddr);
+ fail_ifaddrs:
+	return -1;
+}
+#elif defined(__linux__)
+static int ifr_hwaddr_from_socket(struct ifreq * ifr)
+{
+ int skfd;
+
+ skfd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (skfd < 0) {
+ log_err("Failed to open socket.");
+ goto fail_socket;
}
+ if (ioctl(skfd, SIOCGIFHWADDR, ifr)) {
+ log_err("Failed to get hwaddr.");
+ goto fail_ifr;
+ }
+
+ log_dbg("Interface %s hwaddr " MAC_FMT ".", ifr->ifr_name,
+ MAC_VAL(ifr->ifr_hwaddr.sa_data));
+
+ close(skfd);
+
+ return 0;
+
+ fail_ifr:
+ close(skfd);
+ fail_socket:
+ return -1;
+}
+#endif
+
+static int eth_ifr_hwaddr(struct ifreq * ifr)
+{
+#if defined(__FreeBSD__) || defined(__APPLE__)
+ return ifr_hwaddr_from_ifaddrs(ifr);
#elif defined(__linux__)
+ return ifr_hwaddr_from_socket(ifr);
+#else
+ return -1;
+#endif
+}
+
+static int eth_ifr_mtu(struct ifreq * ifr)
+{
+ int skfd;
+
skfd = socket(AF_UNIX, SOCK_STREAM, 0);
if (skfd < 0) {
log_err("Failed to open socket.");
- return -1;
+ goto fail_socket;
}
- if (ioctl(skfd, SIOCGIFMTU, &ifr)) {
+ if (ioctl(skfd, SIOCGIFMTU, ifr) < 0) {
log_err("Failed to get MTU.");
- close(skfd);
+ goto fail_mtu;
+ }
+ close(skfd);
+
+ return 0;
+
+ fail_mtu:
+ close(skfd);
+ fail_socket:
+ return -1;
+}
+
+static int eth_set_mtu(struct ifreq * ifr)
+{
+#ifndef SHM_RDRB_MULTI_BLOCK
+	size_t maxsz;
+#endif
+
+ if (eth_ifr_mtu(ifr) < 0) {
+ log_err("Failed to get interface MTU.");
return -1;
}
- log_dbg("Device MTU is %d.", ifr.ifr_mtu);
+ log_dbg("Device MTU is %d.", ifr->ifr_mtu);
- eth_data.mtu = MIN((int) ETH_MTU_MAX, ifr.ifr_mtu);
- if (memcmp(conf->dev, "lo", 2) == 0 && eth_data.mtu > IPCP_ETH_LO_MTU) {
+ eth_data.mtu = MIN((int) ETH_MTU_MAX, ifr->ifr_mtu);
+ if (memcmp(ifr->ifr_name, "lo", 2) == 0 &&
+ eth_data.mtu > IPCP_ETH_LO_MTU) {
log_dbg("Using loopback interface. MTU restricted to %d.",
IPCP_ETH_LO_MTU);
eth_data.mtu = IPCP_ETH_LO_MTU;
}
-
#ifndef SHM_RDRB_MULTI_BLOCK
maxsz = SHM_RDRB_BLOCK_SIZE - 5 * sizeof(size_t) -
(DU_BUFF_HEADSPACE + DU_BUFF_TAILSPACE);
@@ -1362,30 +1344,18 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
#endif
log_dbg("Layer MTU is %d.", eth_data.mtu);
- if (ioctl(skfd, SIOCGIFHWADDR, &ifr)) {
- log_err("Failed to get hwaddr.");
- close(skfd);
- return -1;
- }
-
- close(skfd);
-
- idx = if_nametoindex(conf->dev);
- if (idx == 0) {
- log_err("Failed to retrieve interface index.");
- return -1;
- }
- eth_data.if_idx = idx;
-#endif /* __FreeBSD__ */
-
+ return 0;
+}
#if defined(HAVE_NETMAP)
+static int eth_init_nmd(struct ifreq * ifr)
+{
+	char ifn[IFNAMSIZ];
+
strcpy(ifn, "netmap:");
- strcat(ifn, conf->dev);
+ strcat(ifn, ifr->ifr_name);
eth_data.nmd = nm_open(ifn, NULL, 0, NULL);
if (eth_data.nmd == NULL) {
log_err("Failed to open netmap device.");
- return -1;
+ goto fail_nmd;
}
memset(&eth_data.poll_in, 0, sizeof(eth_data.poll_in));
@@ -1397,11 +1367,22 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
eth_data.poll_out.events = POLLOUT;
log_info("Using netmap device.");
-#elif defined(HAVE_BPF) /* !HAVE_NETMAP */
+
+ return 0;
+ fail_nmd:
+ return -1;
+}
+#elif defined (HAVE_BPF)
+static int eth_init_bpf(struct ifreq * ifr)
+{
+ int enable = 1;
+ int disable = 0;
+ int blen;
+
eth_data.bpf = open_bpf_device();
if (eth_data.bpf < 0) {
log_err("Failed to open bpf device.");
- return -1;
+ goto fail_bpf;
}
ioctl(eth_data.bpf, BIOCGBLEN, &blen);
@@ -1411,7 +1392,7 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
goto fail_device;
}
- if (ioctl(eth_data.bpf, BIOCSETIF, &ifr) < 0) {
+ if (ioctl(eth_data.bpf, BIOCSETIF, ifr) < 0) {
log_err("Failed to set interface.");
goto fail_device;
}
@@ -1432,25 +1413,42 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
}
log_info("Using Berkeley Packet Filter.");
+
+ return 0;
+
+ fail_device:
+ close(eth_data.bpf);
+ fail_bpf:
+ return -1;
+}
#elif defined(HAVE_RAW_SOCKETS)
+static int eth_init_raw_socket(struct ifreq * ifr)
+{
+ int idx;
+ int flags;
+#if defined(IPCP_ETH_QDISC_BYPASS)
+ int qdisc_bypass = 1;
+#endif /* ENABLE_QDISC_BYPASS */
+
+ idx = if_nametoindex(ifr->ifr_name);
+ if (idx == 0) {
+ log_err("Failed to retrieve interface index.");
+ return -1;
+ }
memset(&(eth_data.device), 0, sizeof(eth_data.device));
eth_data.device.sll_ifindex = idx;
eth_data.device.sll_family = AF_PACKET;
- memcpy(eth_data.device.sll_addr, ifr.ifr_hwaddr.sa_data, MAC_SIZE);
+ memcpy(eth_data.device.sll_addr, ifr->ifr_hwaddr.sa_data, MAC_SIZE);
eth_data.device.sll_halen = MAC_SIZE;
eth_data.device.sll_protocol = htons(ETH_P_ALL);
-
- #if defined (BUILD_ETH_DIX)
+#if defined (BUILD_ETH_DIX)
eth_data.s_fd = socket(AF_PACKET, SOCK_RAW, eth_data.ethertype);
- #elif defined (BUILD_ETH_LLC)
+#elif defined (BUILD_ETH_LLC)
eth_data.s_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_802_2));
- #endif
-
- log_info("Using raw socket device.");
-
+#endif
if (eth_data.s_fd < 0) {
log_err("Failed to create socket.");
- return -1;
+ goto fail_socket;
}
flags = fcntl(eth_data.s_fd, F_GETFL, 0);
@@ -1464,80 +1462,140 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
goto fail_device;
}
- #if defined(IPCP_ETH_QDISC_BYPASS)
+#if defined(IPCP_ETH_QDISC_BYPASS)
if (setsockopt(eth_data.s_fd, SOL_PACKET, PACKET_QDISC_BYPASS,
&qdisc_bypass, sizeof(qdisc_bypass))) {
log_info("Qdisc bypass not supported.");
}
- #endif
+#endif
if (bind(eth_data.s_fd, (struct sockaddr *) &eth_data.device,
- sizeof(eth_data.device))) {
+ sizeof(eth_data.device)) < 0) {
log_err("Failed to bind socket to interface.");
goto fail_device;
}
+#ifdef __linux__
+ eth_data.if_idx = idx;
+#endif
+ log_info("Using raw socket device.");
+ return 0;
+ fail_device:
+ close(eth_data.s_fd);
+ fail_socket:
+ return -1;
+}
+#endif
+
+static int eth_ipcp_bootstrap(struct ipcp_config * conf)
+{
+ struct ifreq ifr;
+ int i;
- ipcp_set_state(IPCP_OPERATIONAL);
-#if defined(__linux__)
- if (pthread_create(&eth_data.if_monitor,
- NULL,
- eth_ipcp_if_monitor,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
- goto fail_device;
+ assert(conf);
+ assert(conf->type == THIS_TYPE);
+
+	if (strlen(conf->eth.dev) >= IFNAMSIZ) {
+		log_err("Invalid device name: %s.", conf->eth.dev);
+		return -1;
+	}
+
+	memset(&ifr, 0, sizeof(ifr));
+	strcpy(ifr.ifr_name, conf->eth.dev);
+#ifdef BUILD_ETH_DIX
+ if (conf->eth.ethertype < 0x0600 || conf->eth.ethertype == 0xFFFF) {
+ log_err("Invalid Ethertype: %d.", conf->eth.ethertype);
+ return -1;
+ }
+ eth_data.ethertype = htons(conf->eth.ethertype);
#endif
+ if (eth_set_mtu(&ifr) < 0) {
+ log_err("Failed to set MTU.");
+ return -1;
+ }
- if (pthread_create(&eth_data.mgmt_handler,
- NULL,
- eth_ipcp_mgmt_handler,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
+ if (eth_ifr_hwaddr(&ifr) < 0) {
+ log_err("Failed to get hardware addr.");
+ return -1;
+ }
+#if defined(HAVE_NETMAP) || defined(HAVE_BPF)
+ memcpy(eth_data.hw_addr, LLADDR((struct sockaddr_dl *) &ifr.ifr_addr),
+ MAC_SIZE);
+#endif
+#if defined(HAVE_NETMAP)
+ if (eth_init_nmd(&ifr) < 0) {
+ log_err("Failed to initialize netmap device.");
+ return -1;
+ }
+#elif defined(HAVE_BPF) /* !HAVE_NETMAP */
+ if (eth_init_bpf(&ifr) < 0) {
+ log_err("Failed to initialize BPF device.");
+ return -1;
+ }
+#elif defined(HAVE_RAW_SOCKETS)
+ if (eth_init_raw_socket(&ifr) < 0) {
+ log_err("Failed to initialize raw socket device.");
+ return -1;
+ }
+#endif /* HAVE_NETMAP */
+#if defined(__linux__)
+ if (pthread_create(&eth_data.if_monitor, NULL,
+ eth_ipcp_if_monitor, NULL)) {
+ log_err("Failed to create monitor thread: %s.",
+ strerror(errno));
+ goto fail_monitor;
+ }
+#endif
+ if (pthread_create(&eth_data.mgmt_handler, NULL,
+ eth_ipcp_mgmt_handler, NULL)) {
+ log_err("Failed to create mgmt handler thread: %s.",
+ strerror(errno));
goto fail_mgmt_handler;
}
- for (idx = 0; idx < IPCP_ETH_RD_THR; ++idx) {
- if (pthread_create(&eth_data.packet_reader[idx],
- NULL,
- eth_ipcp_packet_reader,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
+ for (i = 0; i < IPCP_ETH_RD_THR; i++) {
+ if (pthread_create(&eth_data.packet_reader[i], NULL,
+ eth_ipcp_packet_reader, NULL)) {
+ log_err("Failed to create packet reader thread: %s",
+ strerror(errno));
goto fail_packet_reader;
}
}
- for (idx = 0; idx < IPCP_ETH_WR_THR; ++idx) {
- if (pthread_create(&eth_data.packet_writer[idx],
- NULL,
- eth_ipcp_packet_writer,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
+ for (i = 0; i < IPCP_ETH_WR_THR; i++) {
+ if (pthread_create(&eth_data.packet_writer[i], NULL,
+ eth_ipcp_packet_writer, NULL)) {
+ log_err("Failed to create packet writer thread: %s",
+ strerror(errno));
goto fail_packet_writer;
}
}
#if defined(BUILD_ETH_DIX)
log_dbg("Bootstrapped IPCP over DIX Ethernet with pid %d "
- "and Ethertype 0x%X.", getpid(), conf->ethertype);
+ "and Ethertype 0x%X.", getpid(), conf->eth.ethertype);
#elif defined(BUILD_ETH_LLC)
log_dbg("Bootstrapped IPCP over Ethernet with LLC with pid %d.",
getpid());
#endif
-
return 0;
fail_packet_writer:
- while (idx > 0) {
- pthread_cancel(eth_data.packet_writer[--idx]);
- pthread_join(eth_data.packet_writer[idx], NULL);
+ while (i-- > 0) {
+ pthread_cancel(eth_data.packet_writer[i]);
+ pthread_join(eth_data.packet_writer[i], NULL);
}
- idx = IPCP_ETH_RD_THR;
+ i = IPCP_ETH_RD_THR;
fail_packet_reader:
- while (idx > 0) {
- pthread_cancel(eth_data.packet_reader[--idx]);
- pthread_join(eth_data.packet_reader[idx], NULL);
+ while (i-- > 0) {
+ pthread_cancel(eth_data.packet_reader[i]);
+ pthread_join(eth_data.packet_reader[i], NULL);
}
pthread_cancel(eth_data.mgmt_handler);
pthread_join(eth_data.mgmt_handler, NULL);
@@ -1546,8 +1604,8 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
pthread_cancel(eth_data.if_monitor);
pthread_join(eth_data.if_monitor, NULL);
#endif
-#if defined(__linux__) || !defined(HAVE_NETMAP)
- fail_device:
+#if defined(__linux__)
+ fail_monitor:
#endif
#if defined(HAVE_NETMAP)
nm_close(eth_data.nmd);
@@ -1562,13 +1620,11 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)
static int eth_ipcp_reg(const uint8_t * hash)
{
if (shim_data_reg_add_entry(eth_data.shim_data, hash)) {
- log_err("Failed to add " HASH_FMT " to local registry.",
- HASH_VAL(hash));
+ log_err("Failed to add " HASH_FMT32 " to local registry.",
+ HASH_VAL32(hash));
return -1;
}
- log_dbg("Registered " HASH_FMT ".", HASH_VAL(hash));
-
return 0;
}
@@ -1582,8 +1638,7 @@ static int eth_ipcp_unreg(const uint8_t * hash)
static int eth_ipcp_query(const uint8_t * hash)
{
uint8_t r_addr[MAC_SIZE];
- struct timespec timeout = {(NAME_QUERY_TIMEO / 1000),
- (NAME_QUERY_TIMEO % 1000) * MILLION};
+ struct timespec timeout = TIMESPEC_INIT_MS(NAME_QUERY_TIMEO);
struct dir_query * query;
int ret;
uint8_t * buf;
@@ -1635,11 +1690,10 @@ static int eth_ipcp_query(const uint8_t * hash)
return ret;
}
-static int eth_ipcp_flow_alloc(int fd,
- const uint8_t * hash,
- qosspec_t qs,
- const void * data,
- size_t len)
+static int eth_ipcp_flow_alloc(int fd,
+ const uint8_t * hash,
+ qosspec_t qs,
+ const buffer_t * data)
{
#ifdef BUILD_ETH_LLC
uint8_t ssap = 0;
@@ -1647,12 +1701,11 @@ static int eth_ipcp_flow_alloc(int fd,
uint8_t r_addr[MAC_SIZE];
uint64_t addr = 0;
- log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(hash));
-
assert(hash);
if (!shim_data_dir_has(eth_data.shim_data, hash)) {
- log_err("Destination unreachable.");
+ log_err("Destination "HASH_FMT32 "unreachable.",
+ HASH_VAL32(hash));
return -1;
}
addr = shim_data_dir_get_addr(eth_data.shim_data, hash);
@@ -1662,6 +1715,7 @@ static int eth_ipcp_flow_alloc(int fd,
ssap = bmp_allocate(eth_data.saps);
if (!bmp_is_id_valid(eth_data.saps, ssap)) {
pthread_rwlock_unlock(&eth_data.flows_lock);
+ log_err("Failed to allocate SSAP.");
return -1;
}
@@ -1680,34 +1734,29 @@ static int eth_ipcp_flow_alloc(int fd,
#endif
hash,
qs,
- data,
- len) < 0) {
+ data) < 0) {
#ifdef BUILD_ETH_LLC
pthread_rwlock_wrlock(&eth_data.flows_lock);
bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap);
eth_data.fd_to_ef[fd].sap = -1;
eth_data.ef_to_fd[ssap] = -1;
pthread_rwlock_unlock(&eth_data.flows_lock);
+ log_err("Failed to allocate with peer.");
#endif
return -1;
}
fset_add(eth_data.np1_flows, fd);
-#if defined(BUILD_ETH_DIX)
- log_dbg("Pending flow with fd %d.", fd);
-#elif defined(BUILD_ETH_LLC)
- log_dbg("Pending flow with fd %d on SAP %d.", fd, ssap);
+#if defined(BUILD_ETH_LLC)
+ log_dbg("Assigned SAP %d for fd %d.", ssap, fd);
#endif
return 0;
}
-static int eth_ipcp_flow_alloc_resp(int fd,
- int response,
- const void * data,
- size_t len)
+static int eth_ipcp_flow_alloc_resp(int fd,
+ int response,
+ const buffer_t * data)
{
- struct timespec ts = {0, ALLOC_TIMEO * MILLION};
- struct timespec abstime;
#if defined(BUILD_ETH_DIX)
uint16_t r_eid;
#elif defined(BUILD_ETH_LLC)
@@ -1716,27 +1765,11 @@ static int eth_ipcp_flow_alloc_resp(int fd,
#endif
uint8_t r_addr[MAC_SIZE];
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
+ if (ipcp_wait_flow_resp(fd) < 0) {
+ log_err("Failed to wait for flow response.");
return -1;
}
- ipcpi.alloc_id = -1;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
pthread_rwlock_wrlock(&eth_data.flows_lock);
#if defined(BUILD_ETH_DIX)
r_eid = eth_data.fd_to_ef[fd].r_eid;
@@ -1744,6 +1777,7 @@ static int eth_ipcp_flow_alloc_resp(int fd,
ssap = bmp_allocate(eth_data.saps);
if (!bmp_is_id_valid(eth_data.saps, ssap)) {
pthread_rwlock_unlock(&eth_data.flows_lock);
+ log_err("Failed to allocate SSAP.");
return -1;
}
@@ -1762,21 +1796,19 @@ static int eth_ipcp_flow_alloc_resp(int fd,
ssap, r_sap,
#endif
response,
- data,
- len) < 0) {
+ data) < 0) {
#ifdef BUILD_ETH_LLC
pthread_rwlock_wrlock(&eth_data.flows_lock);
bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap);
pthread_rwlock_unlock(&eth_data.flows_lock);
#endif
+ log_err("Failed to respond to peer.");
return -1;
}
fset_add(eth_data.np1_flows, fd);
-#if defined(BUILD_ETH_DIX)
- log_dbg("Accepted flow, fd %d.", fd);
-#elif defined(BUILD_ETH_LLC)
- log_dbg("Accepted flow, fd %d, SAP %d.", fd, (uint8_t)ssap);
+#if defined(BUILD_ETH_LLC)
+ log_dbg("Assigned SAP %d for fd %d.", ssap, fd);
#endif
return 0;
}
@@ -1788,10 +1820,10 @@ static int eth_ipcp_flow_dealloc(int fd)
#endif
ipcp_flow_fini(fd);
- pthread_rwlock_wrlock(&eth_data.flows_lock);
-
fset_del(eth_data.np1_flows, fd);
+ pthread_rwlock_wrlock(&eth_data.flows_lock);
+
#if defined(BUILD_ETH_DIX)
eth_data.fd_to_ef[fd].r_eid = -1;
#elif defined BUILD_ETH_LLC
@@ -1805,9 +1837,7 @@ static int eth_ipcp_flow_dealloc(int fd)
pthread_rwlock_unlock(&eth_data.flows_lock);
- flow_dealloc(fd);
-
- log_dbg("Flow with fd %d deallocated.", fd);
+ ipcp_flow_dealloc(fd);
return 0;
}
@@ -1831,9 +1861,6 @@ int main(int argc,
{
int i;
- if (ipcp_init(argc, argv, &eth_ops) < 0)
- goto fail_init;
-
if (eth_data_init() < 0) {
#if defined(BUILD_ETH_DIX)
log_err("Failed to init eth-llc data.");
@@ -1843,18 +1870,17 @@ int main(int argc,
goto fail_data_init;
}
- if (ipcp_boot() < 0) {
- log_err("Failed to boot IPCP.");
- goto fail_boot;
+ if (ipcp_init(argc, argv, &eth_ops, THIS_TYPE) < 0) {
+ log_err("Failed to initialize IPCP.");
+ goto fail_init;
}
- if (ipcp_create_r(0)) {
- log_err("Failed to notify IRMd we are initialized.");
- ipcp_set_state(IPCP_NULL);
- goto fail_create_r;
+ if (ipcp_start() < 0) {
+ log_err("Failed to start IPCP.");
+ goto fail_start;
}
- ipcp_shutdown();
+ ipcp_sigwait();
if (ipcp_get_state() == IPCP_SHUTDOWN) {
for (i = 0; i < IPCP_ETH_WR_THR; ++i)
@@ -1877,19 +1903,18 @@ int main(int argc,
#endif
}
- eth_data_fini();
+ ipcp_stop();
ipcp_fini();
+ eth_data_fini();
+
exit(EXIT_SUCCESS);
- fail_create_r:
- ipcp_shutdown();
- fail_boot:
- eth_data_fini();
- fail_data_init:
+ fail_start:
ipcp_fini();
fail_init:
- ipcp_create_r(-1);
+ eth_data_fini();
+ fail_data_init:
exit(EXIT_FAILURE);
}
diff --git a/src/ipcpd/eth/llc.c b/src/ipcpd/eth/llc.c
index 60abfdbb..c900dcab 100644
--- a/src/ipcpd/eth/llc.c
+++ b/src/ipcpd/eth/llc.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC processes over Ethernet - LLC
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c
index 95d2f783..7fe3e7eb 100644
--- a/src/ipcpd/ipcp.c
+++ b/src/ipcpd/ipcp.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC process main loop
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -24,7 +24,6 @@
#define _DEFAULT_SOURCE
#else
#define _POSIX_C_SOURCE 200112L
-#define __XSI_VISIBLE 500
#endif
#if defined(__linux__) && !defined(DISABLE_CORE_LOCK)
@@ -32,26 +31,25 @@
#define NPROC (sysconf(_SC_NPROCESSORS_ONLN))
#endif
-#if defined(__linux__) || defined(__CYGWIN__)
-#define _DEFAULT_SOURCE
-#else
-#define _POSIX_C_SOURCE 200112L
-#define __XSI_VISIBLE 500
-#endif
-
#include "config.h"
-#define OUROBOROS_PREFIX "ipcpd/ipcp"
+#define OUROBOROS_PREFIX "ipcpd/ipcp"
+#define IPCP_INFO "info"
+#define ALLOC_TIMEOUT 50 /* ms */
+#include <ouroboros/bitmap.h>
+#include <ouroboros/dev.h>
+#include <ouroboros/errno.h>
#include <ouroboros/hash.h>
+#include <ouroboros/ipcp-dev.h>
#include <ouroboros/logs.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/utils.h>
-#include <ouroboros/sockets.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/dev.h>
-#include <ouroboros/bitmap.h>
#include <ouroboros/np1_flow.h>
+#include <ouroboros/protobuf.h>
+#include <ouroboros/pthread.h>
+#include <ouroboros/rib.h>
+#include <ouroboros/sockets.h>
+#include <ouroboros/time.h>
+#include <ouroboros/utils.h>
#include "ipcp.h"
@@ -59,9 +57,78 @@
#include <string.h>
#include <sys/socket.h>
#include <stdlib.h>
-#if defined(__linux__) && !defined(DISABLE_CORE_LOCK)
+#if defined(__linux__)
+#include <sys/prctl.h>
+#ifndef DISABLE_CORE_LOCK
#include <unistd.h>
#endif
+#endif
+
+#ifndef CLOCK_REALTIME_COARSE
+#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
+#endif
+
+static char * ipcp_type_str[] = {
+ "local",
+ "unicast",
+ "broadcast",
+ "eth-llc",
+ "eth-dix",
+ "udp"
+};
+
+static char * dir_hash_str[] = {
+ "SHA3-224",
+ "SHA3-256",
+ "SHA3-384",
+ "SHA3-512",
+ "CRC32",
+ "MD5"
+};
+
+static char * ipcp_state_str[] = {
+ "null",
+ "init",
+ "boot",
+ "bootstrapped",
+ "enrolled",
+ "operational",
+ "shutdown"
+};
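+/* NB: the three tables above are indexed directly by their enums. */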
+
+struct {
+ pid_t irmd_pid;
+ char * name;
+
+ enum ipcp_type type;
+ char layer_name[LAYER_NAME_SIZE + 1];
+
+ uint64_t dt_addr;
+
+ enum hash_algo dir_hash_algo;
+
+ struct ipcp_ops * ops;
+ int irmd_fd;
+
+ enum ipcp_state state;
+ pthread_cond_t state_cond;
+ pthread_mutex_t state_mtx;
+
+ int sockfd;
+ char * sock_path;
+
+ struct list_head cmds;
+ pthread_cond_t cmd_cond;
+ pthread_mutex_t cmd_lock;
+
+ int alloc_id;
+ pthread_cond_t alloc_cond;
+ pthread_mutex_t alloc_lock;
+
+ struct tpm * tpm;
+
+ pthread_t acceptor;
+} ipcpd;
struct cmd {
struct list_head next;
@@ -71,9 +138,29 @@ struct cmd {
int fd;
};
+enum ipcp_type ipcp_get_type(void)
+{
+ return ipcpd.type;
+}
+
+const char * ipcp_get_name(void)
+{
+ return ipcpd.name;
+}
+
+void ipcp_set_dir_hash_algo(enum hash_algo algo)
+{
+ ipcpd.dir_hash_algo = algo;
+}
+
+size_t ipcp_dir_hash_len(void)
+{
+ return hash_len(ipcpd.dir_hash_algo);
+}
+
uint8_t * ipcp_hash_dup(const uint8_t * hash)
{
- uint8_t * dup = malloc(hash_len(ipcpi.dir_hash_algo));
+ uint8_t * dup = malloc(hash_len(ipcpd.dir_hash_algo));
if (dup == NULL)
return NULL;
@@ -97,16 +184,120 @@ void ipcp_hash_str(char * buf,
buf[2 * i] = '\0';
}
-static void close_ptr(void * o)
+static const char * info[] = {
+ "_state",
+ "_type",
+ "_layer",
+ NULL
+};
+
+static int ipcp_rib_read(const char * path,
+ char * buf,
+ size_t len)
{
- close(*((int *) o));
+ char * entry;
+
+ if (len < LAYER_NAME_SIZE + 2) /* trailing \n */
+ return 0;
+
+	entry = strstr(path, RIB_SEPARATOR);
+	assert(entry != NULL);
+	++entry;
+
+ if (strcmp(entry, info[0]) == 0) { /* _state */
+ enum ipcp_state state = ipcp_get_state();
+ if (state == IPCP_NULL)
+ strcpy(buf, "null\n");
+ else if (state == IPCP_INIT)
+ strcpy(buf, "init\n");
+ else if (state == IPCP_OPERATIONAL)
+ strcpy(buf, "operational\n");
+ else if (state == IPCP_SHUTDOWN)
+ strcpy(buf, "shutdown\n");
+ else
+ strcpy(buf, "bug\n");
+ }
+
+ if (strcmp(entry, info[1]) == 0) { /* _type */
+ if (ipcpd.type == IPCP_LOCAL)
+ strcpy(buf, "local\n");
+ else if (ipcpd.type == IPCP_UNICAST)
+ strcpy(buf, "unicast\n");
+ else if (ipcpd.type == IPCP_BROADCAST)
+ strcpy(buf, "broadcast\n");
+ else if (ipcpd.type == IPCP_ETH_LLC)
+ strcpy(buf, "eth-llc\n");
+ else if (ipcpd.type == IPCP_ETH_DIX)
+ strcpy(buf, "eth-dix\n");
+ else if (ipcpd.type == IPCP_UDP)
+ strcpy(buf, "udp\n");
+ else
+ strcpy(buf, "bug\n");
+ }
+
+ if (strcmp(entry, info[2]) == 0) { /* _layer */
+ memset(buf, 0, LAYER_NAME_SIZE + 1);
+ if (ipcp_get_state() < IPCP_OPERATIONAL)
+ strcpy(buf, "(null)");
+ else
+ strcpy(buf, ipcpd.layer_name);
+
+ buf[strlen(buf)] = '\n';
+ }
+
+ return strlen(buf);
}
+static int ipcp_rib_readdir(char *** buf)
+{
+ int i = 0;
+
+ while (info[i++] != NULL);
+
+ *buf = malloc(sizeof(**buf) * i);
+ if (*buf == NULL)
+ goto fail_entries;
+
+ i = 0;
+
+ while (info[i] != NULL) {
+ (*buf)[i] = strdup(info[i]);
+ if ((*buf)[i] == NULL)
+ goto fail_dup;
+ i++;
+ }
+
+ return i;
+ fail_dup:
+ while (i-- > 0)
+ free((*buf)[i]);
+ free(*buf);
+ fail_entries:
+ return -ENOMEM;
+}
+
+static int ipcp_rib_getattr(const char * path,
+ struct rib_attr * attr)
+{
+ char buf[LAYER_NAME_SIZE + 2];
+ struct timespec now;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ attr->size = ipcp_rib_read(path, buf, LAYER_NAME_SIZE + 2);
+ attr->mtime = now.tv_sec;
+
+ return 0;
+}
+
+static struct rib_ops r_ops = {
+ .read = ipcp_rib_read,
+ .readdir = ipcp_rib_readdir,
+ .getattr = ipcp_rib_getattr
+};
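
A read through these ops returns one of the fixed strings above, newline-terminated; for instance (the path shape is illustrative, assuming RIB_SEPARATOR is a one-character separator; only the entry name after it matters to ipcp_rib_read):

    char buf[LAYER_NAME_SIZE + 2];

    ipcp_rib_read("ipcpd" RIB_SEPARATOR "_state", buf, sizeof(buf));
    /* buf now holds e.g. "operational\n" */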
+
static void * acceptloop(void * o)
{
- int csockfd;
- struct timeval tv = {(SOCKET_TIMEOUT / 1000),
- (SOCKET_TIMEOUT % 1000) * 1000};
+ int csockfd;
(void) o;
@@ -114,14 +305,10 @@ static void * acceptloop(void * o)
ipcp_get_state() != IPCP_NULL) {
struct cmd * cmd;
- csockfd = accept(ipcpi.sockfd, 0, 0);
+ csockfd = accept(ipcpd.sockfd, 0, 0);
if (csockfd < 0)
continue;
- if (setsockopt(csockfd, SOL_SOCKET, SO_RCVTIMEO,
- (void *) &tv, sizeof(tv)))
- log_warn("Failed to set timeout on socket.");
-
cmd = malloc(sizeof(*cmd));
if (cmd == NULL) {
log_err("Out of memory");
@@ -129,7 +316,7 @@ static void * acceptloop(void * o)
break;
}
- pthread_cleanup_push(close_ptr, &csockfd);
+ pthread_cleanup_push(__cleanup_close_ptr, &csockfd);
pthread_cleanup_push(free, cmd);
cmd->len = read(csockfd, cmd->cbuf, SOCK_BUF_SIZE);
@@ -146,52 +333,464 @@ static void * acceptloop(void * o)
cmd->fd = csockfd;
- pthread_mutex_lock(&ipcpi.cmd_lock);
+ pthread_mutex_lock(&ipcpd.cmd_lock);
- list_add(&cmd->next, &ipcpi.cmds);
+ list_add(&cmd->next, &ipcpd.cmds);
- pthread_cond_signal(&ipcpi.cmd_cond);
+ pthread_cond_signal(&ipcpd.cmd_cond);
- pthread_mutex_unlock(&ipcpi.cmd_lock);
+ pthread_mutex_unlock(&ipcpd.cmd_lock);
}
return (void *) 0;
}
+int ipcp_wait_flow_req_arr(const uint8_t * dst,
+ qosspec_t qs,
+ time_t mpl,
+ const buffer_t * data)
+{
+ struct timespec ts = TIMESPEC_INIT_MS(ALLOC_TIMEOUT);
+ struct timespec abstime;
+ int fd;
+ buffer_t hash;
+
+ hash.data = (uint8_t *) dst;
+ hash.len = ipcp_dir_hash_len();
+
+ clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+
+ pthread_mutex_lock(&ipcpd.alloc_lock);
+
+ while (ipcpd.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
+ ts_add(&abstime, &ts, &abstime);
+ pthread_cond_timedwait(&ipcpd.alloc_cond,
+ &ipcpd.alloc_lock,
+ &abstime);
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+ log_err("Won't allocate over non-operational IPCP.");
+ return -EIPCPSTATE;
+ }
+
+ assert(ipcpd.alloc_id == -1);
+
+ fd = ipcp_flow_req_arr(&hash, qs, mpl, data);
+ if (fd < 0) {
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+ log_err("Failed to get fd for flow.");
+ return fd;
+ }
+
+ ipcpd.alloc_id = fd;
+ pthread_cond_broadcast(&ipcpd.alloc_cond);
+
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+
+	return fd;
+}
+
+int ipcp_wait_flow_resp(const int fd)
+{
+ struct timespec ts = TIMESPEC_INIT_MS(ALLOC_TIMEOUT);
+ struct timespec abstime;
+
+ clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+
+ pthread_mutex_lock(&ipcpd.alloc_lock);
+
+ while (ipcpd.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
+ ts_add(&abstime, &ts, &abstime);
+ pthread_cond_timedwait(&ipcpd.alloc_cond,
+ &ipcpd.alloc_lock,
+ &abstime);
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+ return -1;
+ }
+
+ assert(ipcpd.alloc_id == fd);
+
+ ipcpd.alloc_id = -1;
+ pthread_cond_broadcast(&ipcpd.alloc_cond);
+
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+
+ return 0;
+}
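
These two helpers replace the open-coded alloc_id handshakes deleted from eth.c above; a responder now pairs them as follows (shown with the eth constants, variables as in that diff):

    fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_ETH_MPL, data);
    if (fd < 0)
            return -1;

    /* ... later, once the local response is known ... */
    if (ipcp_wait_flow_resp(fd) < 0)
            return -1;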
+
static void free_msg(void * o)
{
ipcp_msg__free_unpacked((ipcp_msg_t *) o, NULL);
}
-static void * mainloop(void * o)
+
+static void do_bootstrap(ipcp_config_msg_t * conf_msg,
+ ipcp_msg_t * ret_msg)
{
- int sfd;
- buffer_t buffer;
struct ipcp_config conf;
- struct layer_info info;
- ipcp_config_msg_t * conf_msg;
- ipcp_msg_t * msg;
+ struct layer_info * info;
+
+ log_info("Bootstrapping...");
+
+ if (ipcpd.ops->ipcp_bootstrap == NULL) {
+ log_err("Failed to Bootstrap: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+	if (ipcp_get_state() != IPCP_INIT) {
+ log_err("Failed to bootstrap: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_INIT]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ conf = ipcp_config_msg_to_s(conf_msg);
+ switch(conf.type) { /* FIXED algorithms */
+ case IPCP_UDP:
+ conf.layer_info.dir_hash_algo = (enum pol_dir_hash) HASH_MD5;
+ break;
+ case IPCP_BROADCAST:
+ conf.layer_info.dir_hash_algo = DIR_HASH_SHA3_256;
+ break;
+ default:
+ break;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_bootstrap(&conf);
+ if (ret_msg->result < 0) {
+ log_err("Failed to bootstrap IPCP.");
+ return;
+ }
+
+ info = &conf.layer_info;
+
+ strcpy(ipcpd.layer_name, info->name);
+ ipcpd.dir_hash_algo = (enum hash_algo) info->dir_hash_algo;
+ ret_msg->layer_info = layer_info_s_to_msg(info);
+
+ log_info("Finished bootstrapping in %s.", info->name);
+ log_info(" type: %s", ipcp_type_str[ipcpd.type]);
+ log_info(" hash: %s [%zd bytes]",
+ dir_hash_str[ipcpd.dir_hash_algo],
+ ipcp_dir_hash_len());
+
+ ipcp_set_state(IPCP_OPERATIONAL);
+}
+
+static void do_enroll(const char * dst,
+ ipcp_msg_t * ret_msg)
+{
+ struct layer_info info;
+
+ log_info("Enrolling with %s...", dst);
+
+ if (ipcpd.ops->ipcp_enroll == NULL) {
+ log_err("Failed to enroll: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_INIT) {
+ log_err("Failed to enroll: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_INIT]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_enroll(dst, &info);
+ if (ret_msg->result < 0) {
+ log_err("Failed to bootstrap IPCP.");
+ return;
+ }
+
+ strcpy(ipcpd.layer_name, info.name);
+ ipcpd.dir_hash_algo = (enum hash_algo) info.dir_hash_algo;
+ ret_msg->layer_info = layer_info_s_to_msg(&info);
+ ipcp_set_state(IPCP_OPERATIONAL);
+
+ log_info("Finished enrolling with %s in layer %s.", dst, info.name);
+ log_info(" type: %s", ipcp_type_str[ipcpd.type]);
+ log_info(" hash: %s [%zd bytes]",
+ dir_hash_str[ipcpd.dir_hash_algo],
+ ipcp_dir_hash_len());
+}
+
+static void do_connect(const char * dst,
+ const char * comp,
+ qosspec_t qs,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Connecting %s to %s...", comp, dst);
+
+ if (ipcpd.ops->ipcp_connect == NULL) {
+ log_err("Failed to connect: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_connect(dst, comp, qs);
+
+ log_info("Finished connecting.");
+}
+
+static void do_disconnect(const char * dst,
+ const char * comp,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Disconnecting %s from %s...", comp, dst);
+
+ if (ipcpd.ops->ipcp_disconnect == NULL) {
+ log_err("Failed to disconnect: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_disconnect(dst, comp);
+
+ log_info("Finished disconnecting %s from %s.", comp, dst);
+}
+
+static void do_reg(const uint8_t * hash,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Registering " HASH_FMT32 "...", HASH_VAL32(hash));
+
+ if (ipcpd.ops->ipcp_reg == NULL) {
+ log_err("Failed to register: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_reg(hash);
+
+ log_info("Finished registering " HASH_FMT32 ".", HASH_VAL32(hash));
+}
+
+static void do_unreg(const uint8_t * hash,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Unregistering " HASH_FMT32 "...", HASH_VAL32(hash));
+
+ if (ipcpd.ops->ipcp_unreg == NULL) {
+ log_err("Failed to unregister: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_unreg(hash);
+
+ log_info("Finished unregistering " HASH_FMT32 ".", HASH_VAL32(hash));
+}
+
+static void do_query(const uint8_t * hash,
+ ipcp_msg_t * ret_msg)
+{
+ /* TODO: Log this operation when IRMd has internal caches. */
+
+ if (ipcpd.ops->ipcp_query == NULL) {
+ log_err("Failed to query: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_dbg("Failed to query: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_query(hash);
+}
+
+static void do_flow_alloc(pid_t pid,
+ int flow_id,
+ uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data,
+ ipcp_msg_t * ret_msg)
+{
+ int fd;
+
+ log_info("Allocating flow %d for %d to " HASH_FMT32 ".",
+ flow_id, pid, HASH_VAL32(dst));
+
+ if (ipcpd.ops->ipcp_flow_alloc == NULL) {
+ log_err("Flow allocation failed: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to enroll: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
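+	/* Bind the IRMd's flow_id to a local n + 1 fd. */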
+ fd = np1_flow_alloc(pid, flow_id);
+ if (fd < 0) {
+ log_err("Failed allocating n + 1 fd on flow_id %d: %d",
+ flow_id, fd);
+ ret_msg->result = -EFLOWDOWN;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_alloc(fd, dst, qs, data);
+
+ log_info("Finished allocating flow %d to " HASH_FMT32 ".",
+ flow_id, HASH_VAL32(dst));
+}
+
+static void do_flow_join(pid_t pid,
+ int flow_id,
+ const uint8_t * dst,
+ qosspec_t qs,
+ ipcp_msg_t * ret_msg)
+{
+ int fd;
+
+ log_info("Joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
+
+ if (ipcpd.ops->ipcp_flow_join == NULL) {
+ log_err("Failed to join: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to join: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ fd = np1_flow_alloc(pid, flow_id);
+ if (fd < 0) {
+ log_err("Failed allocating n + 1 fd on flow_id %d.", flow_id);
+ ret_msg->result = -1;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_join(fd, dst, qs);
+
+ log_info("Finished joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
+}
+
+static void do_flow_alloc_resp(int resp,
+ int flow_id,
+ const buffer_t * data,
+ ipcp_msg_t * ret_msg)
+{
+ int fd = -1;
+
+ log_info("Responding %d to alloc on flow_id %d.", resp, flow_id);
+
+ if (ipcpd.ops->ipcp_flow_alloc_resp == NULL) {
+ log_err("Failed to respond on flow %d: operation unsupported.",
+ flow_id);
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to respond to flow %d:"
+ "IPCP in state <%s>, need <%s>.",
+ flow_id,
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
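+	/* Only an accepted request (resp == 0) has an n + 1 fd to respond on. */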
+ if (resp == 0) {
+ fd = np1_flow_resp(flow_id);
+ if (fd < 0) {
+ log_warn("Flow_id %d is not known.", flow_id);
+ ret_msg->result = -1;
+ return;
+ }
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_alloc_resp(fd, resp, data);
+
+ log_info("Finished responding %d to allocation request.",
+ ret_msg->result);
+}
+
+static void do_flow_dealloc(int flow_id,
+ int timeo_sec,
+ ipcp_msg_t * ret_msg)
+{
+ int fd;
+
+ log_info("Deallocating flow %d.", flow_id);
+
+ if (ipcpd.ops->ipcp_flow_dealloc == NULL) {
+ log_err("Failed to dealloc: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to enroll: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ fd = np1_flow_dealloc(flow_id, timeo_sec);
+ if (fd < 0) {
+ log_warn("Could not deallocate flow_id %d.", flow_id);
+ ret_msg->result = -1;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_dealloc(fd);
+
+ log_info("Finished deallocating flow %d.", flow_id);
+}
+
+static void * mainloop(void * o)
+{
+ int sfd;
+ buffer_t buffer;
+ ipcp_msg_t * msg;
(void) o;
while (true) {
- ipcp_msg_t ret_msg = IPCP_MSG__INIT;
- layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT;
- int fd = -1;
- struct cmd * cmd;
- qosspec_t qs;
+ ipcp_msg_t ret_msg = IPCP_MSG__INIT;
+ qosspec_t qs;
+ struct cmd * cmd;
+ buffer_t data;
ret_msg.code = IPCP_MSG_CODE__IPCP_REPLY;
- pthread_mutex_lock(&ipcpi.cmd_lock);
+ pthread_mutex_lock(&ipcpd.cmd_lock);
- pthread_cleanup_push((void *)(void *) pthread_mutex_unlock,
- &ipcpi.cmd_lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpd.cmd_lock);
- while (list_is_empty(&ipcpi.cmds))
- pthread_cond_wait(&ipcpi.cmd_cond, &ipcpi.cmd_lock);
+ while (list_is_empty(&ipcpd.cmds))
+ pthread_cond_wait(&ipcpd.cmd_cond, &ipcpd.cmd_lock);
- cmd = list_last_entry(&ipcpi.cmds, struct cmd, next);
+ cmd = list_last_entry(&ipcpd.cmds, struct cmd, next);
list_del(&cmd->next);
pthread_cleanup_pop(true);
@@ -206,330 +805,70 @@ static void * mainloop(void * o)
continue;
}
- tpm_dec(ipcpi.tpm);
+ tpm_begin_work(ipcpd.tpm);
- pthread_cleanup_push(close_ptr, &sfd);
+ pthread_cleanup_push(__cleanup_close_ptr, &sfd);
pthread_cleanup_push(free_msg, msg);
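+	/* Every reply carries a result code; the handlers only set its value. */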
+ ret_msg.has_result = true;
+
switch (msg->code) {
case IPCP_MSG_CODE__IPCP_BOOTSTRAP:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_bootstrap == NULL) {
- log_err("Bootstrap unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_INIT) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- conf_msg = msg->conf;
- conf.type = conf_msg->ipcp_type;
- strcpy(conf.layer_info.layer_name,
- conf_msg->layer_info->layer_name);
-
- switch(conf_msg->ipcp_type) {
- case IPCP_LOCAL:
- break;
- case IPCP_UNICAST:
- conf.addr_size = conf_msg->addr_size;
- conf.eid_size = conf_msg->eid_size;
- conf.max_ttl = conf_msg->max_ttl;
- conf.addr_auth_type = conf_msg->addr_auth_type;
- conf.routing_type = conf_msg->routing_type;
- break;
- case IPCP_ETH_DIX:
- conf.ethertype = conf_msg->ethertype;
- /* FALLTHRU */
- case IPCP_ETH_LLC:
- conf.dev = conf_msg->dev;
- break;
- case IPCP_UDP:
- conf.ip_addr = conf_msg->ip_addr;
- conf.dns_addr = conf_msg->dns_addr;
- conf.clt_port = conf_msg->clt_port;
- conf.srv_port = conf_msg->srv_port;
- conf.layer_info.dir_hash_algo = HASH_MD5;
- layer_info.dir_hash_algo = HASH_MD5;
- break;
- case IPCP_BROADCAST:
- conf.layer_info.dir_hash_algo = HASH_SHA3_256;
- layer_info.dir_hash_algo = HASH_SHA3_256;
- break;
- default:
- log_err("Unknown IPCP type: %d.", conf_msg->ipcp_type);
- }
-
- /* UDP and broadcast use fixed hash algorithm. */
- if (conf_msg->ipcp_type != IPCP_UDP &&
- conf_msg->ipcp_type != IPCP_BROADCAST) {
- switch(conf_msg->layer_info->dir_hash_algo) {
- case DIR_HASH_SHA3_224:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_224;
- break;
- case DIR_HASH_SHA3_256:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_256;
- break;
- case DIR_HASH_SHA3_384:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_384;
- break;
- case DIR_HASH_SHA3_512:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_512;
- break;
- default:
- assert(false);
- }
-
- layer_info.dir_hash_algo =
- conf.layer_info.dir_hash_algo;
- }
-
- ipcpi.dir_hash_algo = conf.layer_info.dir_hash_algo;
-
- ret_msg.result = ipcpi.ops->ipcp_bootstrap(&conf);
- if (ret_msg.result == 0) {
- ret_msg.layer_info = &layer_info;
- layer_info.layer_name =
- conf.layer_info.layer_name;
- }
+ do_bootstrap(msg->conf, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_ENROLL:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_enroll == NULL) {
- log_err("Enroll unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_INIT) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- ret_msg.result = ipcpi.ops->ipcp_enroll(msg->dst,
- &info);
- if (ret_msg.result == 0) {
- ret_msg.layer_info = &layer_info;
- layer_info.dir_hash_algo = info.dir_hash_algo;
- layer_info.layer_name = info.layer_name;
- }
+ do_enroll(msg->dst, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_CONNECT:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_connect == NULL) {
- log_err("Connect unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- qs = msg_to_spec(msg->qosspec);
- ret_msg.result = ipcpi.ops->ipcp_connect(msg->dst,
- msg->comp,
- qs);
+ qs = qos_spec_msg_to_s(msg->qosspec);
+ do_connect(msg->dst, msg->comp, qs, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_DISCONNECT:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_disconnect == NULL) {
- log_err("Disconnect unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- ret_msg.result = ipcpi.ops->ipcp_disconnect(msg->dst,
- msg->comp);
+ do_disconnect(msg->dst, msg->comp, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_REG:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_reg == NULL) {
- log_err("Registration unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- ret_msg.result =
- ipcpi.ops->ipcp_reg(msg->hash.data);
+ do_reg(msg->hash.data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_UNREG:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_unreg == NULL) {
- log_err("Unregistration unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- ret_msg.result =
- ipcpi.ops->ipcp_unreg(msg->hash.data);
+ do_unreg(msg->hash.data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_QUERY:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_query == NULL) {
- log_err("Directory query unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_query(msg->hash.data);
+ do_query(msg->hash.data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_ALLOC:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_flow_alloc == NULL) {
- log_err("Flow allocation unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
assert(msg->pk.len > 0 ? msg->pk.data != NULL
: msg->pk.data == NULL);
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- qs = msg_to_spec(msg->qosspec);
- fd = np1_flow_alloc(msg->pid,
- msg->flow_id,
- qs);
- if (fd < 0) {
- log_err("Failed allocating fd on flow_id %d.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_alloc(fd,
- msg->hash.data,
- qs,
- msg->pk.data,
- msg->pk.len);
+ data.len = msg->pk.len;
+ data.data = msg->pk.data;
+ qs = qos_spec_msg_to_s(msg->qosspec);
+ do_flow_alloc(msg->pid, msg->flow_id,
+ msg->hash.data, qs,
+ &data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_JOIN:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_flow_join == NULL) {
- log_err("Broadcast unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- qs = msg_to_spec(msg->qosspec);
- fd = np1_flow_alloc(msg->pid,
- msg->flow_id,
- qs);
- if (fd < 0) {
- log_err("Failed allocating fd on flow_id %d.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_join(fd,
- msg->hash.data,
- qs);
+ qs = qos_spec_msg_to_s(msg->qosspec);
+ do_flow_join(msg->pid, msg->flow_id,
+ msg->hash.data, qs, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP:
- ret_msg.has_result = true;
- if (ipcpi.ops->ipcp_flow_alloc_resp == NULL) {
- log_err("Flow_alloc_resp unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- if (!msg->response) {
- fd = np1_flow_resp(msg->flow_id);
- if (fd < 0) {
- log_warn("Port_id %d is not known.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
- }
-
assert(msg->pk.len > 0 ? msg->pk.data != NULL
- : msg->pk.data == NULL);
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_alloc_resp(fd,
- msg->response,
- msg->pk.data,
- msg->pk.len);
+ : msg->pk.data == NULL);
+ data.len = msg->pk.len;
+ data.data = msg->pk.data;
+ do_flow_alloc_resp(msg->response, msg->flow_id,
+ &data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_DEALLOC:
- ret_msg.has_result = true;
- if (ipcpi.ops->ipcp_flow_dealloc == NULL) {
- log_err("Flow deallocation unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- fd = np1_flow_dealloc(msg->flow_id);
- if (fd < 0) {
- log_warn("Could not deallocate flow_id %d.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_dealloc(fd);
+ do_flow_dealloc(msg->flow_id, msg->timeo_sec, &ret_msg);
break;
default:
- ret_msg.has_result = true;
- ret_msg.result = -1;
- log_err("Don't know that message code");
+ ret_msg.result = -1;
+ log_err("Unknown message code: %d.", msg->code);
break;
}
@@ -540,7 +879,7 @@ static void * mainloop(void * o)
if (buffer.len == 0) {
log_err("Failed to pack reply message");
close(sfd);
- tpm_inc(ipcpi.tpm);
+ tpm_end_work(ipcpd.tpm);
continue;
}
@@ -548,21 +887,25 @@ static void * mainloop(void * o)
if (buffer.data == NULL) {
log_err("Failed to create reply buffer.");
close(sfd);
- tpm_inc(ipcpi.tpm);
+ tpm_end_work(ipcpd.tpm);
continue;
}
ipcp_msg__pack(&ret_msg, buffer.data);
- pthread_cleanup_push(close_ptr, &sfd);
+ if (ret_msg.layer_info != NULL)
+ layer_info_msg__free_unpacked(ret_msg.layer_info, NULL);
+
+ pthread_cleanup_push(free, buffer.data);
+ pthread_cleanup_push(__cleanup_close_ptr, &sfd);
if (write(sfd, buffer.data, buffer.len) == -1)
log_warn("Failed to send reply message");
- free(buffer.data);
- pthread_cleanup_pop(true);
+ pthread_cleanup_pop(true); /* close sfd */
+ pthread_cleanup_pop(true); /* free buffer.data */
- tpm_inc(ipcpi.tpm);
+ tpm_end_work(ipcpd.tpm);
}
return (void *) 0;
@@ -581,10 +924,10 @@ static int parse_args(int argc,
if (atoi(argv[1]) == 0)
return -1;
- ipcpi.irmd_pid = atoi(argv[1]);
+ ipcpd.irmd_pid = atoi(argv[1]);
/* argument 2: IPCP name */
- ipcpi.name = argv[2];
+ ipcpd.name = argv[2];
/* argument 3: syslog */
if (argv[3] != NULL)
@@ -595,140 +938,185 @@ static int parse_args(int argc,
int ipcp_init(int argc,
char ** argv,
- struct ipcp_ops * ops)
+ struct ipcp_ops * ops,
+ enum ipcp_type type)
{
bool log;
pthread_condattr_t cattr;
- int ret = -1;
if (parse_args(argc, argv, &log))
return -1;
log_init(log);
- ipcpi.irmd_fd = -1;
- ipcpi.state = IPCP_NULL;
+ ipcpd.state = IPCP_NULL;
+ ipcpd.type = type;
- ipcpi.sock_path = ipcp_sock_path(getpid());
- if (ipcpi.sock_path == NULL)
+#if defined (__linux__)
+ prctl(PR_SET_TIMERSLACK, IPCP_LINUX_SLACK_NS, 0, 0, 0);
+#endif
+ ipcpd.sock_path = sock_path(getpid(), IPCP_SOCK_PATH_PREFIX);
+ if (ipcpd.sock_path == NULL)
goto fail_sock_path;
- ipcpi.sockfd = server_socket_open(ipcpi.sock_path);
- if (ipcpi.sockfd < 0) {
- log_err("Could not open server socket.");
+ ipcpd.sockfd = server_socket_open(ipcpd.sock_path);
+ if (ipcpd.sockfd < 0) {
+ log_err("Failed to open server socket at %s.",
+ ipcpd.sock_path);
goto fail_serv_sock;
}
- ipcpi.ops = ops;
+ ipcpd.ops = ops;
- if (pthread_mutex_init(&ipcpi.state_mtx, NULL)) {
- log_err("Could not create mutex.");
+ if (pthread_mutex_init(&ipcpd.state_mtx, NULL)) {
+ log_err("Failed to create mutex.");
goto fail_state_mtx;
}
if (pthread_condattr_init(&cattr)) {
- log_err("Could not create condattr.");
+ log_err("Failed to create condattr.");
goto fail_cond_attr;
}
#ifndef __APPLE__
pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
#endif
- if (pthread_cond_init(&ipcpi.state_cond, &cattr)) {
- log_err("Could not init condvar.");
+ if (pthread_cond_init(&ipcpd.state_cond, &cattr)) {
+ log_err("Failed to init condvar.");
goto fail_state_cond;
}
- if (pthread_mutex_init(&ipcpi.alloc_lock, NULL)) {
+ if (pthread_mutex_init(&ipcpd.alloc_lock, NULL)) {
log_err("Failed to init mutex.");
goto fail_alloc_lock;
}
- if (pthread_cond_init(&ipcpi.alloc_cond, &cattr)) {
+ if (pthread_cond_init(&ipcpd.alloc_cond, &cattr)) {
log_err("Failed to init convar.");
goto fail_alloc_cond;
}
- if (pthread_mutex_init(&ipcpi.cmd_lock, NULL)) {
+ if (pthread_mutex_init(&ipcpd.cmd_lock, NULL)) {
log_err("Failed to init mutex.");
goto fail_cmd_lock;
}
- if (pthread_cond_init(&ipcpi.cmd_cond, &cattr)) {
+ if (pthread_cond_init(&ipcpd.cmd_cond, &cattr)) {
log_err("Failed to init convar.");
goto fail_cmd_cond;
}
- list_head_init(&ipcpi.cmds);
+ if (rib_init(ipcpd.name)) {
+ log_err("Failed to initialize RIB.");
+ goto fail_rib_init;
+ }
+
+ if (rib_reg(IPCP_INFO, &r_ops)) {
+ log_err("Failed to register rib.");
+ goto fail_rib_reg;
+ }
- ipcpi.alloc_id = -1;
+ list_head_init(&ipcpd.cmds);
+
+ ipcpd.tpm = tpm_create(IPCP_MIN_THREADS, IPCP_ADD_THREADS,
+ mainloop, NULL);
+ if (ipcpd.tpm == NULL) {
+ log_err("Failed to create threadpool manager.");
+ goto fail_tpm_create;
+ }
+
+ ipcpd.alloc_id = -1;
pthread_condattr_destroy(&cattr);
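+	/* Initialization done; the IRMd can now bootstrap or enroll this IPCP. */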
+ ipcp_set_state(IPCP_INIT);
+
return 0;
+ fail_tpm_create:
+ rib_unreg(IPCP_INFO);
+ fail_rib_reg:
+ rib_fini();
+ fail_rib_init:
+ pthread_cond_destroy(&ipcpd.cmd_cond);
fail_cmd_cond:
- pthread_mutex_destroy(&ipcpi.cmd_lock);
+ pthread_mutex_destroy(&ipcpd.cmd_lock);
fail_cmd_lock:
- pthread_cond_destroy(&ipcpi.alloc_cond);
+ pthread_cond_destroy(&ipcpd.alloc_cond);
fail_alloc_cond:
- pthread_mutex_destroy(&ipcpi.alloc_lock);
+ pthread_mutex_destroy(&ipcpd.alloc_lock);
fail_alloc_lock:
- pthread_cond_destroy(&ipcpi.state_cond);
+ pthread_cond_destroy(&ipcpd.state_cond);
fail_state_cond:
pthread_condattr_destroy(&cattr);
fail_cond_attr:
- pthread_mutex_destroy(&ipcpi.state_mtx);
+ pthread_mutex_destroy(&ipcpd.state_mtx);
fail_state_mtx:
- close(ipcpi.sockfd);
+ close(ipcpd.sockfd);
fail_serv_sock:
- free(ipcpi.sock_path);
+ free(ipcpd.sock_path);
fail_sock_path:
- return ret;
+ return -1;
}
-int ipcp_boot()
+int ipcp_start(void)
{
- sigset_t sigset;
+ sigset_t sigset;
+ struct ipcp_info info;
+
sigemptyset(&sigset);
sigaddset(&sigset, SIGINT);
sigaddset(&sigset, SIGQUIT);
sigaddset(&sigset, SIGHUP);
sigaddset(&sigset, SIGPIPE);
- ipcpi.tpm = tpm_create(IPCP_MIN_THREADS, IPCP_ADD_THREADS,
- mainloop, NULL);
- if (ipcpi.tpm == NULL)
- goto fail_tpm_create;
-
pthread_sigmask(SIG_BLOCK, &sigset, NULL);
- if (tpm_start(ipcpi.tpm))
- goto fail_tpm_start;
+ info.pid = getpid();
+ info.type = ipcpd.type;
+ strcpy(info.name, ipcpd.name);
+ info.state = IPCP_OPERATIONAL;
- ipcp_set_state(IPCP_INIT);
+ if (tpm_start(ipcpd.tpm)) {
+ log_err("Failed to start threadpool manager.");
+ goto fail_tpm_start;
+ }
- if (pthread_create(&ipcpi.acceptor, NULL, acceptloop, NULL)) {
+ if (pthread_create(&ipcpd.acceptor, NULL, acceptloop, NULL)) {
log_err("Failed to create acceptor thread.");
- ipcp_set_state(IPCP_NULL);
goto fail_acceptor;
}
+
+ if (ipcp_create_r(&info)) {
+ log_err("Failed to notify IRMd we are initialized.");
+ goto fail_create_r;
+ }
+
return 0;
+ fail_create_r:
+ pthread_cancel(ipcpd.acceptor);
+ pthread_join(ipcpd.acceptor, NULL);
fail_acceptor:
- tpm_stop(ipcpi.tpm);
+ tpm_stop(ipcpd.tpm);
fail_tpm_start:
- tpm_destroy(ipcpi.tpm);
- fail_tpm_create:
+ tpm_destroy(ipcpd.tpm);
+ ipcp_set_state(IPCP_NULL);
+ info.state = IPCP_NULL;
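+	/* Report the failed start to the IRMd so it can clean up. */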
+ ipcp_create_r(&info);
return -1;
}
-void ipcp_shutdown()
+void ipcp_sigwait(void)
{
siginfo_t info;
sigset_t sigset;
-
+#ifdef __APPLE__
+ int sig;
+#endif
sigemptyset(&sigset);
sigaddset(&sigset, SIGINT);
sigaddset(&sigset, SIGQUIT);
@@ -738,17 +1126,29 @@ void ipcp_shutdown()
while(ipcp_get_state() != IPCP_NULL &&
ipcp_get_state() != IPCP_SHUTDOWN) {
+#ifdef __APPLE__
+ if (sigwait(&sigset, &sig) < 0) {
+#else
if (sigwaitinfo(&sigset, &info) < 0) {
+#endif
log_warn("Bad signal.");
continue;
}
+#ifdef __APPLE__
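+	/* sigwait() returns no siginfo_t; assume the signal came from the IRMd. */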
+ memset(&info, 0, sizeof(info));
+ info.si_signo = sig;
+ info.si_pid = ipcpd.irmd_pid;
+#endif
switch(info.si_signo) {
case SIGINT:
+ /* FALLTHRU */
case SIGTERM:
+ /* FALLTHRU */
case SIGHUP:
+ /* FALLTHRU */
case SIGQUIT:
- if (info.si_pid == ipcpi.irmd_pid) {
+ if (info.si_pid == ipcpd.irmd_pid) {
if (ipcp_get_state() == IPCP_INIT)
ipcp_set_state(IPCP_NULL);
@@ -758,34 +1158,44 @@ void ipcp_shutdown()
break;
case SIGPIPE:
log_dbg("Ignored SIGPIPE.");
+ continue;
default:
continue;
}
}
+}
- pthread_cancel(ipcpi.acceptor);
+void ipcp_stop(void)
+{
+ log_info("IPCP %d shutting down.", getpid());
- pthread_join(ipcpi.acceptor, NULL);
- tpm_stop(ipcpi.tpm);
- tpm_destroy(ipcpi.tpm);
+ pthread_cancel(ipcpd.acceptor);
+ pthread_join(ipcpd.acceptor, NULL);
- log_info("IPCP %d shutting down.", getpid());
+ tpm_stop(ipcpd.tpm);
}
-void ipcp_fini()
+void ipcp_fini(void)
{
- close(ipcpi.sockfd);
- if (unlink(ipcpi.sock_path))
- log_warn("Could not unlink %s.", ipcpi.sock_path);
- free(ipcpi.sock_path);
+ tpm_destroy(ipcpd.tpm);
+
+ rib_unreg(IPCP_INFO);
- pthread_cond_destroy(&ipcpi.state_cond);
- pthread_mutex_destroy(&ipcpi.state_mtx);
- pthread_cond_destroy(&ipcpi.alloc_cond);
- pthread_mutex_destroy(&ipcpi.alloc_lock);
- pthread_cond_destroy(&ipcpi.cmd_cond);
- pthread_mutex_destroy(&ipcpi.cmd_lock);
+ rib_fini();
+
+ close(ipcpd.sockfd);
+ if (unlink(ipcpd.sock_path))
+ log_warn("Could not unlink %s.", ipcpd.sock_path);
+
+ free(ipcpd.sock_path);
+
+ pthread_cond_destroy(&ipcpd.state_cond);
+ pthread_mutex_destroy(&ipcpd.state_mtx);
+ pthread_cond_destroy(&ipcpd.alloc_cond);
+ pthread_mutex_destroy(&ipcpd.alloc_lock);
+ pthread_cond_destroy(&ipcpd.cmd_cond);
+ pthread_mutex_destroy(&ipcpd.cmd_lock);
log_info("IPCP %d out.", getpid());
@@ -794,59 +1204,27 @@ void ipcp_fini()
void ipcp_set_state(enum ipcp_state state)
{
- pthread_mutex_lock(&ipcpi.state_mtx);
+ pthread_mutex_lock(&ipcpd.state_mtx);
- ipcpi.state = state;
+ ipcpd.state = state;
- pthread_cond_broadcast(&ipcpi.state_cond);
- pthread_mutex_unlock(&ipcpi.state_mtx);
+ pthread_cond_broadcast(&ipcpd.state_cond);
+ pthread_mutex_unlock(&ipcpd.state_mtx);
}
-enum ipcp_state ipcp_get_state()
+enum ipcp_state ipcp_get_state(void)
{
enum ipcp_state state;
- pthread_mutex_lock(&ipcpi.state_mtx);
+ pthread_mutex_lock(&ipcpd.state_mtx);
- state = ipcpi.state;
+ state = ipcpd.state;
- pthread_mutex_unlock(&ipcpi.state_mtx);
+ pthread_mutex_unlock(&ipcpd.state_mtx);
return state;
}
-int ipcp_wait_state(enum ipcp_state state,
- const struct timespec * timeout)
-{
- struct timespec abstime;
- int ret = 0;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
- ts_add(&abstime, timeout, &abstime);
-
- pthread_mutex_lock(&ipcpi.state_mtx);
-
- pthread_cleanup_push((void *)(void *) pthread_mutex_unlock,
- &ipcpi.state_mtx);
-
- while (ipcpi.state != state
- && ipcpi.state != IPCP_SHUTDOWN
- && ipcpi.state != IPCP_NULL
- && ret != -ETIMEDOUT) {
- if (timeout == NULL)
- ret = -pthread_cond_wait(&ipcpi.state_cond,
- &ipcpi.state_mtx);
- else
- ret = -pthread_cond_timedwait(&ipcpi.state_cond,
- &ipcpi.state_mtx,
- &abstime);
- }
-
- pthread_cleanup_pop(true);
-
- return ret;
-}
-
void ipcp_lock_to_core(void)
{
#if defined(__linux__) && !defined(DISABLE_CORE_LOCK)
diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h
index 02c74f50..2c41f5b9 100644
--- a/src/ipcpd/ipcp.h
+++ b/src/ipcpd/ipcp.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC process structure
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,21 +26,18 @@
#include <ouroboros/hash.h>
#include <ouroboros/ipcp.h>
#include <ouroboros/list.h>
+#include <ouroboros/protobuf.h>
+#include <ouroboros/qos.h>
#include <ouroboros/sockets.h>
#include <ouroboros/tpm.h>
#include <pthread.h>
#include <time.h>
-enum ipcp_state {
- IPCP_NULL = 0,
- IPCP_INIT,
- IPCP_OPERATIONAL,
- IPCP_SHUTDOWN
-};
+#define ipcp_dir_hash_strlen() (ipcp_dir_hash_len() * 2)
struct ipcp_ops {
- int (* ipcp_bootstrap)(const struct ipcp_config * conf);
+ int (* ipcp_bootstrap)(struct ipcp_config * conf);
int (* ipcp_enroll)(const char * dst,
struct layer_info * info);
@@ -58,83 +55,60 @@ struct ipcp_ops {
int (* ipcp_query)(const uint8_t * hash);
- int (* ipcp_flow_alloc)(int fd,
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t len);
+ int (* ipcp_flow_alloc)(int fd,
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data);
int (* ipcp_flow_join)(int fd,
const uint8_t * dst,
qosspec_t qs);
- int (* ipcp_flow_alloc_resp)(int fd,
- int response,
- const void * data,
- size_t len);
+ int (* ipcp_flow_alloc_resp)(int fd,
+ int response,
+ const buffer_t * data);
int (* ipcp_flow_dealloc)(int fd);
};
-#define ipcp_dir_hash_strlen() (hash_len(ipcpi.dir_hash_algo) * 2)
-#define ipcp_dir_hash_len() (hash_len(ipcpi.dir_hash_algo))
-
-struct ipcp {
- pid_t irmd_pid;
- char * name;
-
- enum ipcp_type type;
- char * layer_name;
-
- uint64_t dt_addr;
-
- enum hash_algo dir_hash_algo;
-
- struct ipcp_ops * ops;
- int irmd_fd;
-
- enum ipcp_state state;
- pthread_rwlock_t state_lock;
- pthread_mutex_t state_mtx;
- pthread_cond_t state_cond;
-
- int sockfd;
- char * sock_path;
-
- struct list_head cmds;
- pthread_cond_t cmd_cond;
- pthread_mutex_t cmd_lock;
+int ipcp_init(int argc,
+ char ** argv,
+ struct ipcp_ops * ops,
+ enum ipcp_type type);
- int alloc_id;
- pthread_cond_t alloc_cond;
- pthread_mutex_t alloc_lock;
+int ipcp_start(void);
- struct tpm * tpm;
+void ipcp_sigwait(void);
- pthread_t acceptor;
-} ipcpi;
+void ipcp_stop(void);
-int ipcp_init(int argc,
- char ** argv,
- struct ipcp_ops * ops);
+void ipcp_fini(void);
-int ipcp_boot(void);
+enum ipcp_type ipcp_get_type(void);
-void ipcp_shutdown(void);
+const char * ipcp_get_name(void);
-void ipcp_fini(void);
+/* TODO: Only specify hash algorithm in directory policy */
+void ipcp_set_dir_hash_algo(enum hash_algo algo);
void ipcp_set_state(enum ipcp_state state);
enum ipcp_state ipcp_get_state(void);
-int ipcp_wait_state(enum ipcp_state state,
- const struct timespec * timeout);
+int ipcp_set_layer_info(const struct layer_info * info);
+
+/* Helper functions to handle races during flow allocation */
+int ipcp_wait_flow_req_arr(const uint8_t * dst,
+ qosspec_t qs,
+ time_t mpl,
+ const buffer_t * data);
+
+int ipcp_wait_flow_resp(const int fd);
-int ipcp_parse_arg(int argc,
- char * argv[]);
/* Helper functions for directory entries, could be moved */
+size_t ipcp_dir_hash_len(void);
+
uint8_t * ipcp_hash_dup(const uint8_t * hash);
void ipcp_hash_str(char buf[],
diff --git a/src/ipcpd/local/CMakeLists.txt b/src/ipcpd/local/CMakeLists.txt
index a84f4f1b..08abff57 100644
--- a/src/ipcpd/local/CMakeLists.txt
+++ b/src/ipcpd/local/CMakeLists.txt
@@ -13,6 +13,8 @@ include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_BINARY_DIR}/include)
set(IPCP_LOCAL_TARGET ipcpd-local CACHE INTERNAL "")
+set(IPCP_LOCAL_MPL 100 CACHE STRING
+ "Default maximum packet lifetime for the Ethernet IPCPs, in ms")
set(LOCAL_SOURCES
# Add source files here
diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c
index a2e20017..ffa6dc5a 100644
--- a/src/ipcpd/local/main.c
+++ b/src/ipcpd/local/main.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Local IPC process
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -48,8 +48,7 @@
#include <sys/wait.h>
#include <assert.h>
-#define THIS_TYPE IPCP_LOCAL
-#define ALLOC_TIMEOUT 10 /* ms */
+#define THIS_TYPE IPCP_LOCAL
struct {
struct shim_data * shim_data;
@@ -70,34 +69,39 @@ static int local_data_init(void)
local_data.flows = fset_create();
if (local_data.flows == NULL)
- return -ENFILE;
+ goto fail_fset;
local_data.fq = fqueue_create();
- if (local_data.fq == NULL) {
- fset_destroy(local_data.flows);
- return -ENOMEM;
- }
+ if (local_data.fq == NULL)
+ goto fail_fqueue;
local_data.shim_data = shim_data_create();
- if (local_data.shim_data == NULL) {
- fqueue_destroy(local_data.fq);
- fset_destroy(local_data.flows);
- return -ENOMEM;
- }
+ if (local_data.shim_data == NULL)
+ goto fail_shim_data;
- pthread_rwlock_init(&local_data.lock, NULL);
+ if (pthread_rwlock_init(&local_data.lock, NULL))
+ goto fail_rwlock_init;
return 0;
+
+ fail_rwlock_init:
+ shim_data_destroy(local_data.shim_data);
+ fail_shim_data:
+ fqueue_destroy(local_data.fq);
+ fail_fqueue:
+ fset_destroy(local_data.flows);
+ fail_fset:
+ return -ENOMEM;
}
static void local_data_fini(void){
+ pthread_rwlock_destroy(&local_data.lock);
shim_data_destroy(local_data.shim_data);
- fset_destroy(local_data.flows);
fqueue_destroy(local_data.fq);
- pthread_rwlock_destroy(&local_data.lock);
+ fset_destroy(local_data.flows);
}
-static void * ipcp_local_packet_loop(void * o)
+static void * local_ipcp_packet_loop(void * o)
{
(void) o;
@@ -133,49 +137,45 @@ static void * ipcp_local_packet_loop(void * o)
return (void *) 0;
}
-static int ipcp_local_bootstrap(const struct ipcp_config * conf)
+static int local_ipcp_bootstrap(struct ipcp_config * conf)
{
+
assert(conf);
assert(conf->type == THIS_TYPE);
(void) conf;
- ipcp_set_state(IPCP_OPERATIONAL);
-
if (pthread_create(&local_data.packet_loop, NULL,
- ipcp_local_packet_loop, NULL)) {
+ local_ipcp_packet_loop, NULL)) {
+ log_err("Failed to create pthread: %s", strerror(errno));
ipcp_set_state(IPCP_INIT);
return -1;
}
- log_info("Bootstrapped local IPCP with pid %d.", getpid());
-
return 0;
}
-static int ipcp_local_reg(const uint8_t * hash)
+static int local_ipcp_reg(const uint8_t * hash)
{
if (shim_data_reg_add_entry(local_data.shim_data, hash)) {
- log_dbg("Failed to add " HASH_FMT " to local registry.",
- HASH_VAL(hash));
+ log_err("Failed to add " HASH_FMT32 " to local registry.",
+ HASH_VAL32(hash));
return -1;
}
- log_info("Registered " HASH_FMT ".", HASH_VAL(hash));
-
return 0;
}
-static int ipcp_local_unreg(const uint8_t * hash)
+static int local_ipcp_unreg(const uint8_t * hash)
{
shim_data_reg_del_entry(local_data.shim_data, hash);
- log_info("Unregistered " HASH_FMT ".", HASH_VAL(hash));
+ log_info("Unregistered " HASH_FMT32 ".", HASH_VAL32(hash));
return 0;
}
-static int ipcp_local_query(const uint8_t * hash)
+static int local_ipcp_query(const uint8_t * hash)
{
int ret;
@@ -184,41 +184,19 @@ static int ipcp_local_query(const uint8_t * hash)
return ret;
}
-static int ipcp_local_flow_alloc(int fd,
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t len)
+static int local_ipcp_flow_alloc(int fd,
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data)
{
- struct timespec ts = {0, ALLOC_TIMEOUT * MILLION};
- struct timespec abstime;
- int out_fd = -1;
+ int out_fd = -1;
- log_dbg("Allocating flow to " HASH_FMT " on fd %d.", HASH_VAL(dst), fd);
+ log_dbg("Allocating flow to " HASH_FMT32 " on fd %d.",
+ HASH_VAL32(dst), fd);
assert(dst);
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_dbg("Won't allocate over non-operational IPCP.");
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- return -1;
- }
-
- assert(ipcpi.alloc_id == -1);
-
- out_fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len);
+ out_fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_LOCAL_MPL, data);
if (out_fd < 0) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
log_dbg("Flow allocation failed: %d", out_fd);
return -1;
}
@@ -230,11 +208,6 @@ static int ipcp_local_flow_alloc(int fd,
pthread_rwlock_unlock(&local_data.lock);
- ipcpi.alloc_id = out_fd;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
fset_add(local_data.flows, fd);
log_info("Pending local allocation request on fd %d.", fd);
@@ -242,40 +215,21 @@ static int ipcp_local_flow_alloc(int fd,
return 0;
}
-static int ipcp_local_flow_alloc_resp(int fd,
- int response,
- const void * data,
- size_t len)
+static int local_ipcp_flow_alloc_resp(int fd,
+ int response,
+ const buffer_t * data)
{
- struct timespec ts = {0, ALLOC_TIMEOUT * MILLION};
- struct timespec abstime;
- int out_fd = -1;
- int ret = -1;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
+ struct timespec wait = TIMESPEC_INIT_MS(1);
+ time_t mpl = IPCP_LOCAL_MPL;
+ int out_fd;
- while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
+ if (ipcp_wait_flow_resp(fd) < 0) {
+ log_err("Failed waiting for IRMd response.");
return -1;
}
- ipcpi.alloc_id = -1;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
- pthread_rwlock_wrlock(&local_data.lock);
-
- if (response) {
+ if (response < 0) {
+ pthread_rwlock_wrlock(&local_data.lock);
if (local_data.in_out[fd] != -1)
local_data.in_out[local_data.in_out[fd]] = fd;
local_data.in_out[fd] = -1;
@@ -283,25 +237,38 @@ static int ipcp_local_flow_alloc_resp(int fd,
return 0;
}
+ pthread_rwlock_rdlock(&local_data.lock);
+
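+	/* The allocating thread may not have set in_out yet; retry once. */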
out_fd = local_data.in_out[fd];
if (out_fd == -1) {
pthread_rwlock_unlock(&local_data.lock);
- return -1;
+ log_dbg("Potential race detected");
+ nanosleep(&wait, NULL);
+ pthread_rwlock_rdlock(&local_data.lock);
+ out_fd = local_data.in_out[fd];
}
pthread_rwlock_unlock(&local_data.lock);
+ if (out_fd == -1) {
+ log_err("Invalid out_fd.");
+ return -1;
+ }
+
fset_add(local_data.flows, fd);
- if ((ret = ipcp_flow_alloc_reply(out_fd, response, data, len)) < 0)
+ if (ipcp_flow_alloc_reply(out_fd, response, mpl, data) < 0) {
+ log_err("Failed to reply to allocation");
+ fset_del(local_data.flows, fd);
return -1;
+ }
log_info("Flow allocation completed, fds (%d, %d).", out_fd, fd);
return 0;
}
-static int ipcp_local_flow_dealloc(int fd)
+static int local_ipcp_flow_dealloc(int fd)
{
assert(!(fd < 0));
@@ -315,7 +282,7 @@ static int ipcp_local_flow_dealloc(int fd)
pthread_rwlock_unlock(&local_data.lock);
- flow_dealloc(fd);
+ ipcp_flow_dealloc(fd);
log_info("Flow with fd %d deallocated.", fd);
@@ -323,60 +290,54 @@ static int ipcp_local_flow_dealloc(int fd)
}
static struct ipcp_ops local_ops = {
- .ipcp_bootstrap = ipcp_local_bootstrap,
+ .ipcp_bootstrap = local_ipcp_bootstrap,
.ipcp_enroll = NULL,
.ipcp_connect = NULL,
.ipcp_disconnect = NULL,
- .ipcp_reg = ipcp_local_reg,
- .ipcp_unreg = ipcp_local_unreg,
- .ipcp_query = ipcp_local_query,
- .ipcp_flow_alloc = ipcp_local_flow_alloc,
+ .ipcp_reg = local_ipcp_reg,
+ .ipcp_unreg = local_ipcp_unreg,
+ .ipcp_query = local_ipcp_query,
+ .ipcp_flow_alloc = local_ipcp_flow_alloc,
.ipcp_flow_join = NULL,
- .ipcp_flow_alloc_resp = ipcp_local_flow_alloc_resp,
- .ipcp_flow_dealloc = ipcp_local_flow_dealloc
+ .ipcp_flow_alloc_resp = local_ipcp_flow_alloc_resp,
+ .ipcp_flow_dealloc = local_ipcp_flow_dealloc
};
int main(int argc,
char * argv[])
{
- if (ipcp_init(argc, argv, &local_ops) < 0)
- goto fail_init;
-
if (local_data_init() < 0) {
log_err("Failed to init local data.");
goto fail_data_init;
}
- if (ipcp_boot() < 0) {
- log_err("Failed to boot IPCP.");
- goto fail_boot;
- }
+ if (ipcp_init(argc, argv, &local_ops, THIS_TYPE) < 0)
+ goto fail_init;
- if (ipcp_create_r(0)) {
- log_err("Failed to notify IRMd we are initialized.");
- goto fail_create_r;
+ if (ipcp_start() < 0) {
+ log_err("Failed to start IPCP.");
+ goto fail_start;
}
- ipcp_shutdown();
+ ipcp_sigwait();
if (ipcp_get_state() == IPCP_SHUTDOWN) {
pthread_cancel(local_data.packet_loop);
pthread_join(local_data.packet_loop, NULL);
}
- local_data_fini();
+ ipcp_stop();
ipcp_fini();
- exit(EXIT_SUCCESS);
- fail_create_r:
- ipcp_set_state(IPCP_NULL);
- ipcp_shutdown();
- fail_boot:
local_data_fini();
- fail_data_init:
+
+ exit(EXIT_SUCCESS);
+
+ fail_start:
ipcp_fini();
fail_init:
- ipcp_create_r(-1);
+ local_data_fini();
+ fail_data_init:
exit(EXIT_FAILURE);
}
diff --git a/src/ipcpd/raptor/CMakeLists.txt b/src/ipcpd/raptor/CMakeLists.txt
deleted file mode 100644
index 1883d9bb..00000000
--- a/src/ipcpd/raptor/CMakeLists.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-get_filename_component(CURRENT_SOURCE_PARENT_DIR
- ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY)
-get_filename_component(CURRENT_BINARY_PARENT_DIR
- ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY)
-
-include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-include_directories(${CMAKE_CURRENT_BINARY_DIR})
-
-include_directories(${CURRENT_SOURCE_PARENT_DIR})
-include_directories(${CURRENT_BINARY_PARENT_DIR})
-
-include_directories(${CMAKE_SOURCE_DIR}/include)
-include_directories(${CMAKE_BINARY_DIR}/include)
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
- find_path(RAPTOR_KERNEL_MODULE
- NAMES
- raptor.ko.gz
- raptor.ko.xz
- HINTS
- /lib/modules/${CMAKE_SYSTEM_VERSION}/extra
- )
-
- mark_as_advanced(RAPTOR_KERNEL_MODULE)
-
- if (RAPTOR_KERNEL_MODULE)
- set(DISABLE_RAPTOR FALSE CACHE BOOL
- "Disable support for raptor devices")
- if (NOT DISABLE_RAPTOR)
- message(STATUS "Kernel module for raptor found. Building raptor.")
- set(IPCP_RAPTOR_TARGET ipcpd-raptor CACHE INTERNAL "")
-
- set(RAPTOR_SOURCES
- # Add source files here
- ${CMAKE_CURRENT_SOURCE_DIR}/main.c)
-
- add_executable(ipcpd-raptor ${RAPTOR_SOURCES} ${IPCP_SOURCES})
- target_link_libraries(ipcpd-raptor LINK_PUBLIC ouroboros-dev)
-
- include(AddCompileFlags)
- if (CMAKE_BUILD_TYPE MATCHES "Debug*")
- add_compile_flags(ipcpd-raptor -DCONFIG_OUROBOROS_DEBUG)
- endif ()
-
- install(TARGETS ipcpd-raptor RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
- else ()
- message(STATUS "Raptor support disabled by user")
- endif ()
- endif ()
-endif ()
diff --git a/src/ipcpd/raptor/main.c b/src/ipcpd/raptor/main.c
deleted file mode 100644
index 5cd7d50e..00000000
--- a/src/ipcpd/raptor/main.c
+++ /dev/null
@@ -1,1114 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * IPC process using the Raptor FPGA.
- *
- * Alexander D'hoore <dhoore.alexander@gmail.com>
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#define _DEFAULT_SOURCE
-
-#include "config.h"
-
-#define OUROBOROS_PREFIX "ipcpd/raptor"
-
-#include <ouroboros/hash.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/list.h>
-#include <ouroboros/utils.h>
-#include <ouroboros/bitmap.h>
-#include <ouroboros/dev.h>
-#include <ouroboros/local-dev.h>
-#include <ouroboros/ipcp-dev.h>
-#include <ouroboros/fqueue.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/time_utils.h>
-
-#include "ipcp.h"
-#include "shim-data.h"
-
-#include <net/if.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <pthread.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/ioctl.h>
-#include <netinet/in.h>
-#include <malloc.h>
-
-#ifdef __linux__
-#include <linux/if_packet.h>
-#include <linux/if_ether.h>
-#endif
-
-#include <poll.h>
-#include <sys/mman.h>
-
-#define THIS_TYPE IPCP_RAPTOR
-#define MGMT_EID 0x01
-#define MAC_SIZE 6
-#define MAX_EIDS 64
-
-#define EVENT_WAIT_TIMEOUT 100 /* us */
-#define NAME_QUERY_TIMEOUT 2000 /* ms */
-#define MGMT_TIMEOUT 100 /* ms */
-
-#define IOCTL_SEND 0xAD420000
-#define IOCTL_RECV 0xAD430000
-#define IOCTL_SEND_DONE 0xAD440000
-#define IOCTL_RECV_DONE 0xAD450000
-#define IOCTL_RECV_NEED 0xAD460000
-
-#define RAPTOR_PAGE ((1 << 12) - 200) /* 4kB - 200 */
-#define RAPTOR_PAGE_MASK (~0xFFF)
-
-#define RAPTOR_BATCH 100
-#define RAPTOR_HEADER 3
-
-#define FLOW_REQ 0
-#define FLOW_REPLY 1
-#define NAME_QUERY_REQ 2
-#define NAME_QUERY_REPLY 3
-
-struct mgmt_msg {
- uint8_t code;
- uint8_t seid;
- uint8_t deid;
- int8_t response;
- /* QoS parameters from spec, aligned */
- uint32_t loss;
- uint64_t bandwidth;
- uint32_t ber;
- uint32_t max_gap;
- uint32_t delay;
- uint8_t in_order;
- uint8_t availability;
-} __attribute__((packed));
-
-struct ef {
- int8_t eid;
- int8_t r_eid;
-};
-
-struct mgmt_frame {
- struct list_head next;
- uint8_t buf[RAPTOR_PAGE];
- size_t len;
-};
-
-struct {
- struct shim_data * shim_data;
-
- int ioctl_fd;
-
- struct bmp * eids;
- fset_t * np1_flows;
- fqueue_t * fq;
- int * ef_to_fd;
- struct ef * fd_to_ef;
- pthread_rwlock_t flows_lock;
-
- pthread_t send_thread;
- pthread_t recv_thread;
- pthread_t send_done_thread;
- pthread_t recv_done_thread;
-
- /* Handle mgmt frames in a different thread */
- pthread_t mgmt_handler;
- pthread_mutex_t mgmt_lock;
- pthread_cond_t mgmt_cond;
- struct list_head mgmt_frames;
-
-} raptor_data;
-
-static int raptor_data_init(void)
-{
- int i;
- int ret = -ENOMEM;
- pthread_condattr_t cattr;
-
- raptor_data.fd_to_ef =
- malloc(sizeof(*raptor_data.fd_to_ef) * SYS_MAX_FLOWS);
- if (raptor_data.fd_to_ef == NULL)
- goto fail_fd_to_ef;
-
- raptor_data.ef_to_fd =
- malloc(sizeof(*raptor_data.ef_to_fd) * MAX_EIDS);
- if (raptor_data.ef_to_fd == NULL)
- goto fail_ef_to_fd;
-
- raptor_data.eids = bmp_create(MAX_EIDS, 2);
- if (raptor_data.eids == NULL)
- goto fail_eids;
-
- raptor_data.np1_flows = fset_create();
- if (raptor_data.np1_flows == NULL)
- goto fail_np1_flows;
-
- raptor_data.fq = fqueue_create();
- if (raptor_data.fq == NULL)
- goto fail_fq;
-
- for (i = 0; i < MAX_EIDS; ++i)
- raptor_data.ef_to_fd[i] = -1;
-
- for (i = 0; i < SYS_MAX_FLOWS; ++i) {
- raptor_data.fd_to_ef[i].eid = -1;
- raptor_data.fd_to_ef[i].r_eid = -1;
- }
-
- raptor_data.shim_data = shim_data_create();
- if (raptor_data.shim_data == NULL)
- goto fail_shim_data;
-
- ret = -1;
-
- if (pthread_rwlock_init(&raptor_data.flows_lock, NULL))
- goto fail_flows_lock;
-
- if (pthread_mutex_init(&raptor_data.mgmt_lock, NULL))
- goto fail_mgmt_lock;
-
- if (pthread_condattr_init(&cattr))
- goto fail_condattr;
-
-#ifndef __APPLE__
- pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
-#endif
-
- if (pthread_cond_init(&raptor_data.mgmt_cond, &cattr))
- goto fail_mgmt_cond;
-
- pthread_condattr_destroy(&cattr);
-
- list_head_init(&raptor_data.mgmt_frames);
-
- return 0;
-
- fail_mgmt_cond:
- pthread_condattr_destroy(&cattr);
- fail_condattr:
- pthread_mutex_destroy(&raptor_data.mgmt_lock);
- fail_mgmt_lock:
- pthread_rwlock_destroy(&raptor_data.flows_lock);
- fail_flows_lock:
- shim_data_destroy(raptor_data.shim_data);
- fail_shim_data:
- fqueue_destroy(raptor_data.fq);
- fail_fq:
- fset_destroy(raptor_data.np1_flows);
- fail_np1_flows:
- bmp_destroy(raptor_data.eids);
- fail_eids:
- free(raptor_data.ef_to_fd);
- fail_ef_to_fd:
- free(raptor_data.fd_to_ef);
- fail_fd_to_ef:
- return ret;
-}
-
-static void raptor_data_fini(void)
-{
- close(raptor_data.ioctl_fd);
- pthread_cond_destroy(&raptor_data.mgmt_cond);
- pthread_mutex_destroy(&raptor_data.mgmt_lock);
- pthread_rwlock_destroy(&raptor_data.flows_lock);
- fqueue_destroy(raptor_data.fq);
- fset_destroy(raptor_data.np1_flows);
- bmp_destroy(raptor_data.eids);
- free(raptor_data.fd_to_ef);
- free(raptor_data.ef_to_fd);
-}
-
-static int raptor_send_frame(struct shm_du_buff * sdb,
- uint8_t deid)
-{
- uint8_t * frame;
- size_t frame_len;
- uint8_t * payload;
- size_t len;
-
- payload = shm_du_buff_head(sdb);
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
-
- frame_len = RAPTOR_HEADER + len;
-
- if (frame_len >= RAPTOR_PAGE) {
- log_err("Payload too large.");
- return -1;
- }
-
- frame = memalign(1 << 12, 1 << 12);
- if (frame == NULL) {
- log_err("frame == NULL");
- return -1;
- }
-
- if ((uint64_t)frame & 0xFFF) {
- log_err("page offset not zero");
- return -1;
- }
-
- frame[0] = (frame_len & 0x00FF) >> 0;
- frame[1] = (frame_len & 0xFF00) >> 8;
- frame[2] = deid;
-
- memcpy(&frame[RAPTOR_HEADER], payload, len);
-
- if (ioctl(raptor_data.ioctl_fd, IOCTL_SEND | 1, &frame) != 1) {
- log_err("Ioctl send failed.");
- free(frame);
- return -1;
- }
-
- return 0;
-}
-
-static int raptor_eid_alloc(uint8_t seid,
- const uint8_t * hash,
- qosspec_t qs)
-{
- struct mgmt_msg * msg;
- struct shm_du_buff * sdb;
-
- if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()) < 0) {
- log_err("failed to reserve sdb for management frame.");
- return -1;
- }
-
- msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
- msg->code = FLOW_REQ;
- msg->seid = seid;
- msg->delay = hton32(qs.delay);
- msg->bandwidth = hton64(qs.bandwidth);
- msg->availability = qs.availability;
- msg->loss = hton32(qs.loss);
- msg->ber = hton32(qs.ber);
- msg->in_order = qs.in_order;
- msg->max_gap = hton32(qs.max_gap);
-
- memcpy(msg + 1, hash, ipcp_dir_hash_len());
-
- if (raptor_send_frame(sdb, MGMT_EID)) {
- log_err("Failed to send management frame.");
- ipcp_sdb_release(sdb);
- return -1;
- }
-
- ipcp_sdb_release(sdb);
-
- return 0;
-}
-
-static int raptor_eid_alloc_resp(uint8_t seid,
- uint8_t deid,
- int response)
-{
- struct mgmt_msg * msg;
- struct shm_du_buff * sdb;
-
- if (ipcp_sdb_reserve(&sdb, sizeof(*msg)) < 0) {
- log_err("Failed to reserve sdb for management frame.");
- return -1;
- }
-
- msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
- msg->code = FLOW_REPLY;
- msg->seid = seid;
- msg->deid = deid;
- msg->response = response;
-
- if (raptor_send_frame(sdb, MGMT_EID)) {
- log_err("Failed to send management frame.");
- ipcp_sdb_release(sdb);
- return -1;
- }
-
- ipcp_sdb_release(sdb);
-
- return 0;
-}
-
-static int raptor_eid_req(uint8_t r_eid,
- const uint8_t * dst,
- qosspec_t qs)
-{
- struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000};
- struct timespec abstime;
- int fd;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_dbg("Won't allocate over non-operational IPCP.");
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- return -1;
- }
-
- /* reply to IRM, called under lock to prevent race */
- fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs);
- if (fd < 0) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- log_err("Could not get new flow from IRMd.");
- return -1;
- }
-
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
-
- raptor_data.fd_to_ef[fd].r_eid = r_eid;
-
- ipcpi.alloc_id = fd;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
- log_dbg("New flow request, fd %d, remote EID %d.", fd, r_eid);
-
- return 0;
-}
-
-static int raptor_eid_alloc_reply(uint8_t seid,
- int deid,
- int response)
-{
- int ret = 0;
- int fd = -1;
-
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
-
- fd = raptor_data.ef_to_fd[deid];
- if (fd < 0) {
- pthread_rwlock_unlock(& raptor_data.flows_lock);
- log_err("No flow found with that EID.");
- return -1; /* -EFLOWNOTFOUND */
- }
-
- if (response)
- bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid);
- else
- raptor_data.fd_to_ef[fd].r_eid = seid;
-
- pthread_rwlock_unlock(&raptor_data.flows_lock);
-
- log_dbg("Flow reply, fd %d, SEID %d, DEID %d.", fd, seid, deid);
-
- if ((ret = ipcp_flow_alloc_reply(fd, response)) < 0)
- return -1;
-
- return ret;
-
-}
-
-static int raptor_name_query_req(const uint8_t * hash)
-{
- struct mgmt_msg * msg;
- struct shm_du_buff * sdb;
-
- if (!shim_data_reg_has(raptor_data.shim_data, hash))
- return 0;
-
- if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()) < 0) {
- log_err("Failed to reserve sdb for management frame.");
- return -1;
- }
-
- msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
- msg->code = NAME_QUERY_REPLY;
-
- memcpy(msg + 1, hash, ipcp_dir_hash_len());
-
- if (raptor_send_frame(sdb, MGMT_EID)) {
- log_err("Failed to send management frame.");
- ipcp_sdb_release(sdb);
- return -1;
- }
-
- ipcp_sdb_release(sdb);
-
- return 0;
-}
-
-static int raptor_name_query_reply(const uint8_t * hash)
-{
- shim_data_dir_add_entry(raptor_data.shim_data, hash, 0);
-
- shim_data_dir_query_respond(raptor_data.shim_data, hash);
-
- return 0;
-}
-
-static int raptor_mgmt_frame(const uint8_t * buf,
- size_t len)
-{
- struct mgmt_msg * msg = (struct mgmt_msg *) buf;
- uint8_t * hash = (uint8_t *) (msg + 1);
- qosspec_t qs;
-
- switch (msg->code) {
- case FLOW_REQ:
- if (len != sizeof(*msg) + ipcp_dir_hash_len()) {
- log_err("Corrupt message received.");
- return -1;
- }
-
- qs.delay = ntoh32(msg->delay);
- qs.bandwidth = ntoh64(msg->bandwidth);
- qs.availability = msg->availability;
- qs.loss = ntoh32(msg->loss);
- qs.ber = ntoh32(msg->ber);
- qs.in_order = msg->in_order;
- qs.max_gap = ntoh32(msg->max_gap);
-
- if (shim_data_reg_has(raptor_data.shim_data, hash))
- raptor_eid_req(msg->seid, hash, qs);
- break;
- case FLOW_REPLY:
- if (len != sizeof(*msg)) {
- log_err("Corrupt message received.");
- return -1;
- }
-
- raptor_eid_alloc_reply(msg->seid, msg->deid, msg->response);
- break;
- case NAME_QUERY_REQ:
- if (len != sizeof(*msg) + ipcp_dir_hash_len()) {
- log_err("Corrupt message received.");
- return -1;
- }
-
- raptor_name_query_req(hash);
- break;
- case NAME_QUERY_REPLY:
- if (len != sizeof(*msg) + ipcp_dir_hash_len()) {
- log_err("Corrupt message received.");
- return -1;
- }
-
- raptor_name_query_reply(hash);
- break;
- default:
- log_err("Unknown message received %d.", msg->code);
- return -1;
- }
-
- return 0;
-}
-
-static void * raptor_mgmt_handler(void * o)
-{
- int ret;
- struct timespec timeout = {(MGMT_TIMEOUT / 1000),
- (MGMT_TIMEOUT % 1000) * MILLION};
- struct timespec abstime;
- struct mgmt_frame * frame;
-
- (void) o;
-
- while (true) {
- ret = 0;
-
- if (ipcp_get_state() != IPCP_OPERATIONAL)
- break;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
- ts_add(&abstime, &timeout, &abstime);
-
- pthread_mutex_lock(&raptor_data.mgmt_lock);
-
- while (list_is_empty(&raptor_data.mgmt_frames) &&
- ret != -ETIMEDOUT)
- ret = -pthread_cond_timedwait(&raptor_data.mgmt_cond,
- &raptor_data.mgmt_lock,
- &abstime);
-
- if (ret == -ETIMEDOUT) {
- pthread_mutex_unlock(&raptor_data.mgmt_lock);
- continue;
- }
-
- frame = list_first_entry((&raptor_data.mgmt_frames),
- struct mgmt_frame, next);
- if (frame == NULL) {
- pthread_mutex_unlock(&raptor_data.mgmt_lock);
- continue;
- }
-
- list_del(&frame->next);
- pthread_mutex_unlock(&raptor_data.mgmt_lock);
-
- raptor_mgmt_frame(frame->buf, frame->len);
- free(frame);
- }
-
- return NULL;
-}
-
-static void raptor_recv_frame(uint8_t * frame)
-{
- uint8_t deid;
- uint8_t * payload;
- size_t frame_len;
- size_t length;
- int fd;
- struct mgmt_frame * mgmt_frame;
- struct shm_du_buff * sdb;
- size_t idx;
-
- sdb = (struct shm_du_buff *)((uint64_t) frame & RAPTOR_PAGE_MASK);
- idx = shm_du_buff_get_idx(sdb);
-
- frame_len = frame[0] | (frame[1] << 8);
- if (frame_len < RAPTOR_HEADER) {
- log_err("Received packet smaller than header alone.");
- ipcp_sdb_release(sdb);
- return;
- }
-
- if (frame_len >= RAPTOR_PAGE) {
- log_err("Received packet too large.");
- ipcp_sdb_release(sdb);
- return;
- }
-
- deid = frame[2];
- payload = &frame[RAPTOR_HEADER];
- length = frame_len - RAPTOR_HEADER;
-
- shm_du_buff_head_release(sdb, RAPTOR_HEADER);
- shm_du_buff_tail_release(sdb, RAPTOR_PAGE - frame_len);
-
- if (deid == MGMT_EID) {
- pthread_mutex_lock(&raptor_data.mgmt_lock);
-
- mgmt_frame = malloc(sizeof(*mgmt_frame));
- if (mgmt_frame == NULL) {
- pthread_mutex_unlock(&raptor_data.mgmt_lock);
- ipcp_sdb_release(sdb);
- return;
- }
-
- memcpy(mgmt_frame->buf, payload, length);
- mgmt_frame->len = length;
- list_add(&mgmt_frame->next, &raptor_data.mgmt_frames);
- pthread_cond_signal(&raptor_data.mgmt_cond);
- pthread_mutex_unlock(&raptor_data.mgmt_lock);
-
- ipcp_sdb_release(sdb);
- } else {
- pthread_rwlock_rdlock(&raptor_data.flows_lock);
-
- fd = raptor_data.ef_to_fd[deid];
- if (fd < 0) {
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- ipcp_sdb_release(sdb);
- return;
- }
-
- pthread_rwlock_unlock(&raptor_data.flows_lock);
-
- local_flow_write(fd, idx);
- }
-}
-
-static void * raptor_recv_done_thread(void * o)
-{
- uint8_t * frames[RAPTOR_BATCH];
- int count;
- int i;
-
- (void) o;
-
- while (true) {
- if (ipcp_get_state() != IPCP_OPERATIONAL)
- break;
-
- count = ioctl(raptor_data.ioctl_fd,
- IOCTL_RECV_DONE | RAPTOR_BATCH, frames);
-
- if (count <= 0)
- continue;
-
- for (i = 0; i < count; i++)
- raptor_recv_frame(frames[i]);
- }
-
- return NULL;
-}
-
-static void * raptor_send_thread(void * o)
-{
- struct timespec timeout = {0, EVENT_WAIT_TIMEOUT * 1000};
- int fd;
- struct shm_du_buff * sdb;
- uint8_t deid;
-
- (void) o;
-
- while (fevent(raptor_data.np1_flows, raptor_data.fq, &timeout)) {
- if (ipcp_get_state() != IPCP_OPERATIONAL)
- break;
-
- pthread_rwlock_rdlock(&raptor_data.flows_lock);
- while ((fd = fqueue_next(raptor_data.fq)) >= 0) {
- if (ipcp_flow_read(fd, &sdb)) {
- log_err("Bad read from fd %d.", fd);
- continue;
- }
-
- deid = raptor_data.fd_to_ef[fd].r_eid;
-
- raptor_send_frame(sdb, deid);
- }
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- }
-
- return NULL;
-}
-
-static void * raptor_send_done_thread(void * o)
-{
- uint8_t * frames[RAPTOR_BATCH];
- int count;
- int i;
-
- (void) o;
-
- while (true) {
- if (ipcp_get_state() != IPCP_OPERATIONAL)
- break;
-
- count = ioctl(raptor_data.ioctl_fd,
- IOCTL_SEND_DONE | RAPTOR_BATCH, frames);
-
- if (count <= 0)
- continue;
-
- for (i = 0; i < count; i++)
- free(frames[i]);
- }
-
- return NULL;
-}
-
-static void * raptor_recv_thread(void * o)
-{
- struct shm_du_buff * sdb;
- uint8_t * frames[RAPTOR_BATCH];
- uint8_t ** head;
- int needed = 0;
- int count;
- int i;
-
- (void) o;
-
- while (true) {
- if (ipcp_get_state() != IPCP_OPERATIONAL)
- break;
-
- needed = ioctl(raptor_data.ioctl_fd,
- IOCTL_RECV_NEED | RAPTOR_BATCH, NULL);
-
- if (needed <= 0)
- continue;
-
- for (i = 0; i < needed; i++) {
- if (ipcp_sdb_reserve(&sdb, RAPTOR_PAGE) < 0) {
- log_err("Recv thread: reserve sdb failed.");
- return NULL;
- }
-
- if ((uint64_t)sdb & (~RAPTOR_PAGE_MASK)) {
- log_err("Recv thread: sdb not at offset 0.");
- return NULL;
- }
-
- frames[i] = shm_du_buff_head(sdb);
-
- if ((uint64_t)frames[i] & 0x7) {
- log_err("Recv thread: frame not aligned.");
- return NULL;
- }
- }
-
- head = frames;
-
- do {
- count = ioctl(raptor_data.ioctl_fd,
- IOCTL_RECV | needed, head);
- if (count <= 0)
- continue;
-
- assert(count <= needed);
-
- needed -= count;
- head += count;
-
- } while (needed > 0 && ipcp_get_state() == IPCP_OPERATIONAL);
- }
-
- return NULL;
-}
-
-static int raptor_bootstrap(const struct ipcp_config * conf)
-{
- assert(conf);
- assert(conf->type == THIS_TYPE);
-
- (void) conf;
-
- raptor_data.ioctl_fd = open("/dev/raptor", 0);
- if (raptor_data.ioctl_fd < 0) {
- log_err("Failed to open /dev/raptor.");
- goto fail_ioctl;
- }
-
- ipcp_set_state(IPCP_OPERATIONAL);
-
- if (pthread_create(&raptor_data.mgmt_handler,
- NULL,
- raptor_mgmt_handler,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
- goto fail_mgmt_handler;
- }
-
- if (pthread_create(&raptor_data.send_thread,
- NULL,
- raptor_send_thread,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
- goto fail_send_thread;
- }
-
- if (pthread_create(&raptor_data.recv_thread,
- NULL,
- raptor_recv_thread,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
- goto fail_recv_thread;
- }
-
- if (pthread_create(&raptor_data.send_done_thread,
- NULL,
- raptor_send_done_thread,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
- goto fail_send_done_thread;
- }
-
- if (pthread_create(&raptor_data.recv_done_thread,
- NULL,
- raptor_recv_done_thread,
- NULL)) {
- ipcp_set_state(IPCP_INIT);
- goto fail_recv_done_thread;
- }
-
- log_dbg("Bootstrapped raptor IPCP with api %d.", getpid());
-
- return 0;
-
- fail_recv_done_thread:
- pthread_join(raptor_data.send_done_thread, NULL);
- fail_send_done_thread:
- pthread_join(raptor_data.recv_thread, NULL);
- fail_recv_thread:
- pthread_join(raptor_data.send_thread, NULL);
- fail_send_thread:
- pthread_join(raptor_data.mgmt_handler, NULL);
- fail_mgmt_handler:
- close(raptor_data.ioctl_fd);
- fail_ioctl:
- return -1;
-}
-
-static int raptor_reg(const uint8_t * hash)
-{
- uint8_t * hash_dup;
-
- hash_dup = ipcp_hash_dup(hash);
- if (hash_dup == NULL) {
- log_err("Failed to duplicate hash.");
- return -ENOMEM;
- }
-
- if (shim_data_reg_add_entry(raptor_data.shim_data, hash_dup)) {
- log_err("Failed to add " HASH_FMT " to local registry.",
- HASH_VAL(hash));
- free(hash_dup);
- return -1;
- }
-
- log_dbg("Registered " HASH_FMT ".", HASH_VAL(hash));
-
- return 0;
-}
-
-static int raptor_unreg(const uint8_t * hash)
-{
- shim_data_reg_del_entry(raptor_data.shim_data, hash);
-
- return 0;
-}
-
-static int raptor_query(const uint8_t * hash)
-{
- struct timespec timeout = {(NAME_QUERY_TIMEOUT / 1000),
- (NAME_QUERY_TIMEOUT % 1000) * MILLION};
- struct mgmt_msg * msg;
- struct dir_query * query;
- int ret;
- struct shm_du_buff * sdb;
-
- if (shim_data_dir_has(raptor_data.shim_data, hash))
- return 0;
-
- if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()) < 0) {
- log_err("failed to reserve sdb for management frame.");
- return -1;
- }
-
- msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
- msg->code = NAME_QUERY_REQ;
-
- memcpy(msg + 1, hash, ipcp_dir_hash_len());
-
- query = shim_data_dir_query_create(raptor_data.shim_data, hash);
- if (query == NULL) {
- ipcp_sdb_release(sdb);
- return -1;
- }
-
- if (raptor_send_frame(sdb, MGMT_EID)) {
- log_err("Failed to send management frame.");
- ipcp_sdb_release(sdb);
- return -1;
- }
-
- ret = shim_data_dir_query_wait(query, &timeout);
-
- shim_data_dir_query_destroy(raptor_data.shim_data, query);
-
- return ret;
-}
-
-static int raptor_flow_alloc(int fd,
- const uint8_t * hash,
- qosspec_t qs)
-{
- uint8_t seid = 0;
-
- log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(hash));
-
- assert(hash);
-
- if (!shim_data_dir_has(raptor_data.shim_data, hash)) {
- log_err("Destination unreachable.");
- return -1;
- }
-
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
-
- seid = bmp_allocate(raptor_data.eids);
- if (!bmp_is_id_valid(raptor_data.eids, seid)) {
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- return -1;
- }
-
- raptor_data.fd_to_ef[fd].eid = seid;
- raptor_data.ef_to_fd[seid] = fd;
-
- pthread_rwlock_unlock(&raptor_data.flows_lock);
-
- if (raptor_eid_alloc(seid, hash, qs) < 0) {
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
- bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid);
- raptor_data.fd_to_ef[fd].eid = -1;
- raptor_data.ef_to_fd[seid] = -1;
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- return -1;
- }
-
- fset_add(raptor_data.np1_flows, fd);
-
- log_dbg("Pending flow with fd %d on EID %d.", fd, seid);
-
- return 0;
-}
-
-static int raptor_flow_alloc_resp(int fd,
- int response)
-{
- struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000};
- struct timespec abstime;
- uint8_t seid = 0;
- uint8_t r_eid = 0;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- return -1;
- }
-
- ipcpi.alloc_id = -1;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
-
- seid = bmp_allocate(raptor_data.eids);
- if (!bmp_is_id_valid(raptor_data.eids, seid)) {
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- return -1;
- }
-
- raptor_data.fd_to_ef[fd].eid = seid;
- r_eid = raptor_data.fd_to_ef[fd].r_eid;
- raptor_data.ef_to_fd[seid] = fd;
-
- pthread_rwlock_unlock(&raptor_data.flows_lock);
-
- if (raptor_eid_alloc_resp(seid, r_eid, response) < 0) {
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
- bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid);
- pthread_rwlock_unlock(&raptor_data.flows_lock);
- return -1;
- }
-
- fset_add(raptor_data.np1_flows, fd);
-
- log_dbg("Accepted flow, fd %d, EID %d.", fd, (uint8_t)seid);
-
- return 0;
-}
-
-static int raptor_flow_dealloc(int fd)
-{
- uint8_t eid;
-
- ipcp_flow_fini(fd);
-
- pthread_rwlock_wrlock(&raptor_data.flows_lock);
-
- fset_del(raptor_data.np1_flows, fd);
-
- eid = raptor_data.fd_to_ef[fd].eid;
- bmp_release(raptor_data.eids, eid);
- raptor_data.fd_to_ef[fd].eid = -1;
- raptor_data.fd_to_ef[fd].r_eid = -1;
-
- raptor_data.ef_to_fd[eid] = -1;
-
- pthread_rwlock_unlock(&raptor_data.flows_lock);
-
- flow_dealloc(fd);
-
- log_dbg("Flow with fd %d deallocated.", fd);
-
- return 0;
-}
-
-static struct ipcp_ops raptor_ops = {
- .ipcp_bootstrap = raptor_bootstrap,
- .ipcp_enroll = NULL,
- .ipcp_reg = raptor_reg,
- .ipcp_unreg = raptor_unreg,
- .ipcp_query = raptor_query,
- .ipcp_flow_alloc = raptor_flow_alloc,
- .ipcp_flow_join = NULL,
- .ipcp_flow_alloc_resp = raptor_flow_alloc_resp,
- .ipcp_flow_dealloc = raptor_flow_dealloc
-};
-
-int main(int argc,
- char * argv[])
-{
- if (ipcp_init(argc, argv, &raptor_ops) < 0) {
- log_err("Failed to init IPCP.");
- goto fail_init;
- }
-
- if (raptor_data_init() < 0) {
- log_err("Failed to init shim-eth-llc data.");
- goto fail_data_init;
- }
-
- if (ipcp_boot() < 0) {
- log_err("Failed to boot IPCP.");
- goto fail_boot;
- }
-
- if (ipcp_create_r(getpid(), 0)) {
- log_err("Failed to notify IRMd we are initialized.");
- ipcp_set_state(IPCP_NULL);
- goto fail_create_r;
- }
-
- log_info("Raptor created.");
-
- ipcp_shutdown();
-
- if (ipcp_get_state() == IPCP_SHUTDOWN) {
- pthread_join(raptor_data.send_thread, NULL);
- pthread_join(raptor_data.recv_thread, NULL);
- pthread_join(raptor_data.send_done_thread, NULL);
- pthread_join(raptor_data.recv_done_thread, NULL);
- pthread_join(raptor_data.mgmt_handler, NULL);
- }
-
- raptor_data_fini();
-
- ipcp_fini();
-
- exit(EXIT_SUCCESS);
-
- fail_create_r:
- ipcp_shutdown();
- fail_boot:
- raptor_data_fini();
- fail_data_init:
- ipcp_fini();
- fail_init:
- ipcp_create_r(getpid(), -1);
- exit(EXIT_FAILURE);
-}
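
The bootstrap above (and udp_ipcp_bootstrap below) uses the goto-ladder idiom: each successfully started thread gets a label in the unwind path, so a failure tears down exactly what was created, in reverse order. A condensed sketch of the shape, with hypothetical names; the raptor variant relies on the state change to stop threads, while this sketch cancels them the way the UDP fail path does:

    #include <pthread.h>

    static void * work(void * o) { (void) o; return NULL; }

    static int bootstrap(pthread_t * a, pthread_t * b, pthread_t * c)
    {
            if (pthread_create(a, NULL, work, NULL))
                    goto fail_a;            /* nothing started yet */

            if (pthread_create(b, NULL, work, NULL))
                    goto fail_b;            /* undo a */

            if (pthread_create(c, NULL, work, NULL))
                    goto fail_c;            /* undo b, then a */

            return 0;

     fail_c:
            pthread_cancel(*b);
            pthread_join(*b, NULL);
     fail_b:
            pthread_cancel(*a);
            pthread_join(*a, NULL);
     fail_a:
            return -1;
    }
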
diff --git a/src/ipcpd/shim-data.c b/src/ipcpd/shim-data.c
index 60f0245a..1fac63ac 100644
--- a/src/ipcpd/shim-data.c
+++ b/src/ipcpd/shim-data.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC process utilities
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,18 +30,18 @@
#define OUROBOROS_PREFIX "shim-data"
-#include <ouroboros/endian.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/list.h>
-#include <ouroboros/time_utils.h>
#include <ouroboros/errno.h>
+#include <ouroboros/hash.h>
+#include <ouroboros/list.h>
+#include <ouroboros/logs.h>
+#include <ouroboros/time.h>
#include "shim-data.h"
#include "ipcp.h"
-#include <string.h>
-#include <stdlib.h>
#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
struct reg_entry {
struct list_head list;
@@ -74,6 +74,9 @@ static void destroy_dir_query(struct dir_query * query)
case QUERY_DESTROY:
pthread_mutex_unlock(&query->lock);
return;
+ default:
+ assert(false);
+ return;
}
while (query->state != QUERY_DONE)
@@ -136,9 +139,11 @@ static void dir_entry_destroy(struct dir_entry * entry)
free(entry);
}
-struct shim_data * shim_data_create()
+struct shim_data * shim_data_create(void)
{
- struct shim_data * sd = malloc(sizeof(*sd));
+ struct shim_data * sd;
+
+ sd = malloc(sizeof(*sd));
if (sd == NULL)
return NULL;
@@ -148,11 +153,23 @@ struct shim_data * shim_data_create()
list_head_init(&sd->dir_queries);
/* init the locks */
- pthread_rwlock_init(&sd->reg_lock, NULL);
- pthread_rwlock_init(&sd->dir_lock, NULL);
- pthread_mutex_init(&sd->dir_queries_lock, NULL);
+ if (pthread_rwlock_init(&sd->reg_lock, NULL) < 0)
+ goto fail_reg_lock_init;
+
+ if (pthread_rwlock_init(&sd->dir_lock, NULL) < 0)
+ goto fail_dir_lock_init;
+
+ if (pthread_mutex_init(&sd->dir_queries_lock, NULL) < 0)
+ goto fail_mutex_init;
return sd;
+
+ fail_mutex_init:
+ pthread_rwlock_destroy(&sd->dir_lock);
+ fail_dir_lock_init:
+ pthread_rwlock_destroy(&sd->reg_lock);
+ fail_reg_lock_init:
+ return NULL;
}
static void clear_registry(struct shim_data * data)
@@ -280,8 +297,8 @@ int shim_data_reg_add_entry(struct shim_data * data,
if (find_reg_entry_by_hash(data, hash)) {
pthread_rwlock_unlock(&data->reg_lock);
- log_dbg(HASH_FMT " was already in the directory.",
- HASH_VAL(hash));
+ log_dbg(HASH_FMT32 " was already in the directory.",
+ HASH_VAL32(hash));
return 0;
}
@@ -429,9 +446,9 @@ uint64_t shim_data_dir_get_addr(struct shim_data * data,
pthread_rwlock_rdlock(&data->dir_lock);
entry = find_dir_entry_any(data, hash);
-
if (entry == NULL) {
pthread_rwlock_unlock(&data->dir_lock);
+ log_warn("No address for " HASH_FMT32 ".", HASH_VAL32(hash));
return 0; /* undefined behaviour, 0 may be a valid address */
}
diff --git a/src/ipcpd/shim-data.h b/src/ipcpd/shim-data.h
index af937f07..372b4ea7 100644
--- a/src/ipcpd/shim-data.h
+++ b/src/ipcpd/shim-data.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Utilities for building IPC processes
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/udp/CMakeLists.txt b/src/ipcpd/udp/CMakeLists.txt
index f1a29ef6..5abf5a00 100644
--- a/src/ipcpd/udp/CMakeLists.txt
+++ b/src/ipcpd/udp/CMakeLists.txt
@@ -58,6 +58,8 @@ set(IPCP_UDP_RD_THR 3 CACHE STRING
"Number of reader threads in UDP IPCP")
set(IPCP_UDP_WR_THR 3 CACHE STRING
"Number of writer threads in UDP IPCP")
+set(IPCP_UDP_MPL 5000 CACHE STRING
+ "Default maximum packet lifetime for the UDP IPCP, in ms")
include(AddCompileFlags)
if (CMAKE_BUILD_TYPE MATCHES "Debug*")
diff --git a/src/ipcpd/udp/main.c b/src/ipcpd/udp/main.c
index 04c21a8b..5f770a61 100644
--- a/src/ipcpd/udp/main.c
+++ b/src/ipcpd/udp/main.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC process over UDP
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,6 +31,7 @@
#define OUROBOROS_PREFIX "ipcpd/udp"
#include <ouroboros/bitmap.h>
+#include <ouroboros/endian.h>
#include <ouroboros/hash.h>
#include <ouroboros/list.h>
#include <ouroboros/utils.h>
@@ -39,6 +40,7 @@
#include <ouroboros/fqueue.h>
#include <ouroboros/errno.h>
#include <ouroboros/logs.h>
+#include <ouroboros/pthread.h>
#include "ipcp.h"
#include "shim-data.h"
@@ -51,7 +53,6 @@
#include <netinet/in.h>
#include <signal.h>
#include <stdlib.h>
-#include <pthread.h>
#include <sys/wait.h>
#include <fcntl.h>
@@ -65,18 +66,21 @@
#define IPCP_UDP_BUF_SIZE 8980
#define IPCP_UDP_MSG_SIZE 8980
#define DNS_TTL 86400
-#define FD_UPDATE_TIMEOUT 100 /* microseconds */
-#define SERV_PORT udp_data.s_saddr.sin_port;
-#define SERV_SADDR ((struct sockaddr *) &udp_data.s_saddr)
-#define CLNT_SADDR ((struct sockaddr *) &udp_data.c_saddr)
-#define SERV_SADDR_SIZE (sizeof(udp_data.s_saddr))
+#define SADDR ((struct sockaddr *) &udp_data.s_saddr)
+#define SADDR_SIZE (sizeof(udp_data.s_saddr))
#define LOCAL_IP (udp_data.s_saddr.sin_addr.s_addr)
#define MGMT_EID 0
#define MGMT_FRAME_SIZE (sizeof(struct mgmt_msg))
#define MGMT_FRAME_BUF_SIZE 2048
+#ifdef __linux__
+#define SENDTO_FLAGS MSG_CONFIRM
+#else
+#define SENDTO_FLAGS 0
+#endif
+
/* Keep order for alignment. */
struct mgmt_msg {
uint32_t eid;
@@ -92,7 +96,9 @@ struct mgmt_msg {
uint32_t loss;
uint32_t ber;
uint32_t max_gap;
+ uint32_t timeout;
uint16_t cypher_s;
} __attribute__((packed));
struct mgmt_frame {
@@ -104,20 +110,17 @@ struct mgmt_frame {
/* UDP flow */
struct uf {
- int d_eid;
- /* IP details are stored through connect(). */
- int skfd;
+ int d_eid;
+ struct sockaddr_in r_saddr;
};
struct {
struct shim_data * shim_data;
uint32_t dns_addr;
- /* server socket */
+
struct sockaddr_in s_saddr;
int s_fd;
- /* client port */
- int clt_port;
fset_t * np1_flows;
struct uf fd_to_uf[SYS_MAX_FLOWS];
@@ -135,21 +138,25 @@ struct {
static int udp_data_init(void)
{
- int i;
+ int i;
+ pthread_condattr_t cattr;
if (pthread_rwlock_init(&udp_data.flows_lock, NULL))
goto fail_rwlock_init;
- if (pthread_cond_init(&udp_data.mgmt_cond, NULL))
+ if (pthread_condattr_init(&cattr))
+ goto fail_condattr;
+#ifndef __APPLE__
+ pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
+#endif
+ if (pthread_cond_init(&udp_data.mgmt_cond, &cattr))
goto fail_mgmt_cond;
if (pthread_mutex_init(&udp_data.mgmt_lock, NULL))
goto fail_mgmt_lock;
- for (i = 0; i < SYS_MAX_FLOWS; ++i) {
- udp_data.fd_to_uf[i].skfd = -1;
+ for (i = 0; i < SYS_MAX_FLOWS; ++i)
udp_data.fd_to_uf[i].d_eid = -1;
- }
udp_data.np1_flows = fset_create();
if (udp_data.np1_flows == NULL)
@@ -159,9 +166,12 @@ static int udp_data_init(void)
if (udp_data.shim_data == NULL)
goto fail_data;
+ pthread_condattr_destroy(&cattr);
+
list_head_init(&udp_data.mgmt_frames);
return 0;
+
fail_data:
fset_destroy(udp_data.np1_flows);
fail_fset:
@@ -169,6 +179,8 @@ static int udp_data_init(void)
fail_mgmt_lock:
pthread_cond_destroy(&udp_data.mgmt_cond);
fail_mgmt_cond:
+ pthread_condattr_destroy(&cattr);
+ fail_condattr:
pthread_rwlock_destroy(&udp_data.flows_lock);
fail_rwlock_init:
return -1;
@@ -185,22 +197,21 @@ static void udp_data_fini(void)
pthread_mutex_destroy(&udp_data.mgmt_lock);
}
-static int ipcp_udp_port_alloc(int skfd,
- uint32_t s_eid,
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t dlen)
+static int udp_ipcp_port_alloc(const struct sockaddr_in * r_saddr,
+ uint32_t s_eid,
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data)
{
uint8_t * buf;
struct mgmt_msg * msg;
size_t len;
- assert(dlen > 0 ? data != NULL : data == NULL);
+ assert(data->len > 0 ? data->data != NULL : data->data == NULL);
len = sizeof(*msg) + ipcp_dir_hash_len();
- buf = malloc(len + dlen);
+ buf = malloc(len + data->len);
if (buf == NULL)
return -1;
@@ -216,11 +227,15 @@ static int ipcp_udp_port_alloc(int skfd,
msg->in_order = qs.in_order;
msg->max_gap = hton32(qs.max_gap);
msg->cypher_s = hton16(qs.cypher_s);
+ msg->timeout = hton32(qs.timeout);
memcpy(msg + 1, dst, ipcp_dir_hash_len());
- memcpy(buf + len, data, dlen);
+ if (data->len > 0)
+ memcpy(buf + len, data->data, data->len);
- if (write(skfd, msg, len + dlen) < 0) {
+ if (sendto(udp_data.s_fd, msg, len + data->len,
+ SENDTO_FLAGS,
+ (const struct sockaddr *) r_saddr, sizeof(*r_saddr)) < 0) {
free(buf);
return -1;
}
@@ -230,16 +245,15 @@ static int ipcp_udp_port_alloc(int skfd,
return 0;
}
-static int ipcp_udp_port_alloc_resp(int skfd,
- uint32_t s_eid,
- uint32_t d_eid,
- int8_t response,
- const void * data,
- size_t len)
+static int udp_ipcp_port_alloc_resp(const struct sockaddr_in * r_saddr,
+ uint32_t s_eid,
+ uint32_t d_eid,
+ int8_t response,
+ const buffer_t * data)
{
- struct mgmt_msg * msg;
+ struct mgmt_msg * msg;
- msg = malloc(sizeof(*msg) + len);
+ msg = malloc(sizeof(*msg) + data->len);
if (msg == NULL)
return -1;
@@ -249,9 +263,12 @@ static int ipcp_udp_port_alloc_resp(int skfd,
msg->d_eid = hton32(d_eid);
msg->response = response;
- memcpy(msg + 1, data, len);
+ if (data->len > 0)
+ memcpy(msg + 1, data->data, data->len);
- if (write(skfd, msg, sizeof(*msg) + len) < 0) {
+ if (sendto(udp_data.s_fd, msg, sizeof(*msg) + data->len,
+ SENDTO_FLAGS,
+ (const struct sockaddr *) r_saddr, sizeof(*r_saddr)) < 0) {
free(msg);
return -1;
}
@@ -261,134 +278,74 @@ static int ipcp_udp_port_alloc_resp(int skfd,
return 0;
}
-static int ipcp_udp_port_req(struct sockaddr_in * c_saddr,
+static int udp_ipcp_port_req(struct sockaddr_in * c_saddr,
int d_eid,
const uint8_t * dst,
qosspec_t qs,
- const void * data,
- size_t len)
+ const buffer_t * data)
{
- struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000};
- struct timespec abstime;
- int skfd;
- int fd;
-
- skfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- if (skfd < 0) {
- log_err("Could not create UDP socket.");
- return -1;
- }
+ int fd;
- /* Remote listens on server port. Mod of c_saddr allowed. */
- c_saddr->sin_port = udp_data.s_saddr.sin_port;
-
- /* Connect stores the remote address in the file descriptor. */
- if (connect(skfd, (struct sockaddr *) c_saddr, sizeof(*c_saddr)) < 0) {
- log_err("Could not connect to remote UDP client.");
- close(skfd);
- return -1;
- }
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond, &ipcpi.alloc_lock,
- &abstime);
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_dbg("Won't allocate over non-operational IPCP.");
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- close(skfd);
- return -1;
- }
-
- /* reply to IRM */
- fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len);
+ fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_UDP_MPL, data);
if (fd < 0) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Could not get new flow from IRMd.");
- close(skfd);
return -1;
}
pthread_rwlock_wrlock(&udp_data.flows_lock);
- udp_data.fd_to_uf[fd].skfd = skfd;
- udp_data.fd_to_uf[fd].d_eid = d_eid;
+ udp_data.fd_to_uf[fd].r_saddr = *c_saddr;
+ udp_data.fd_to_uf[fd].d_eid = d_eid;
pthread_rwlock_unlock(&udp_data.flows_lock);
- ipcpi.alloc_id = fd;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
log_dbg("Pending allocation request, fd %d, remote eid %d.",
fd, d_eid);
return 0;
}
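
The deleted block shows what ipcp_wait_flow_req_arr now hides: the alloc_id field is a single-slot rendezvous between the thread that fields a request and the thread that answers it. Stripped of the timeouts and state checks, the pattern is this sketch:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int             slot = -1;   /* alloc_id: -1 means free */

    /* Request side: claim the slot and publish the pending fd. */
    static void offer(int fd)
    {
            pthread_mutex_lock(&lock);
            while (slot != -1)
                    pthread_cond_wait(&cond, &lock);
            slot = fd;
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }

    /* Response side: wait for our fd to be offered, then free the slot. */
    static void take(int fd)
    {
            pthread_mutex_lock(&lock);
            while (slot != fd)
                    pthread_cond_wait(&cond, &lock);
            slot = -1;
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
    }
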
-static int ipcp_udp_port_alloc_reply(uint32_t s_eid,
- uint32_t d_eid,
- int8_t response,
- const void * data,
- size_t len)
+static int udp_ipcp_port_alloc_reply(const struct sockaddr_in * saddr,
+ uint32_t s_eid,
+ uint32_t d_eid,
+ int8_t response,
+ const buffer_t * data)
{
- struct sockaddr_in t_saddr;
- socklen_t t_saddr_len;
- int ret = 0;
- int skfd = -1;
-
- t_saddr_len = sizeof(t_saddr);
+ time_t mpl = IPCP_UDP_MPL;
pthread_rwlock_wrlock(&udp_data.flows_lock);
- skfd = udp_data.fd_to_uf[s_eid].skfd;
- if (skfd < 0) {
+ if (memcmp(&udp_data.fd_to_uf[s_eid].r_saddr, saddr, sizeof(*saddr))) {
pthread_rwlock_unlock(&udp_data.flows_lock);
- log_err("Got reply for unknown UDP eid: %u.", s_eid);
+ log_err("Flow allocation reply for %u from wrong source.",
+ s_eid);
return -1;
}
- udp_data.fd_to_uf[s_eid].d_eid = d_eid;
+ if (response == 0)
+ udp_data.fd_to_uf[s_eid].d_eid = d_eid;
pthread_rwlock_unlock(&udp_data.flows_lock);
- if (getpeername(skfd, (struct sockaddr *) &t_saddr, &t_saddr_len) < 0) {
- log_dbg("Flow with fd %d has no peer.", s_eid);
- close(skfd);
- return -1;
- }
-
- if (connect(skfd, (struct sockaddr *) &t_saddr, sizeof(t_saddr)) < 0) {
- log_dbg("Could not connect flow to remote.");
- close(skfd);
- return -1;
- }
-
- if (ipcp_flow_alloc_reply(s_eid, response, data, len) < 0) {
- log_dbg("Failed to reply to flow allocation.");
+ if (ipcp_flow_alloc_reply(s_eid, response, mpl, data) < 0) {
+ log_err("Failed to reply to flow allocation.");
return -1;
}
log_dbg("Flow allocation completed on eids (%d, %d).",
s_eid, d_eid);
- return ret;
+ return 0;
}
-static int ipcp_udp_mgmt_frame(const uint8_t * buf,
+static int udp_ipcp_mgmt_frame(const uint8_t * buf,
size_t len,
struct sockaddr_in c_saddr)
{
struct mgmt_msg * msg;
size_t msg_len;
qosspec_t qs;
+ buffer_t data;
msg = (struct mgmt_msg *) buf;
@@ -398,6 +355,10 @@ static int ipcp_udp_mgmt_frame(const uint8_t * buf,
assert(len >= msg_len);
+ data.len = len - msg_len;
+ data.data = (uint8_t *) buf + msg_len;
+
qs.delay = ntoh32(msg->delay);
qs.bandwidth = ntoh64(msg->bandwidth);
qs.availability = msg->availability;
@@ -406,31 +367,33 @@ static int ipcp_udp_mgmt_frame(const uint8_t * buf,
qs.in_order = msg->in_order;
qs.max_gap = ntoh32(msg->max_gap);
qs.cypher_s = ntoh16(msg->cypher_s);
+ qs.timeout = ntoh32(msg->timeout);
- return ipcp_udp_port_req(&c_saddr, ntoh32(msg->s_eid),
+ return udp_ipcp_port_req(&c_saddr, ntoh32(msg->s_eid),
(uint8_t *) (msg + 1), qs,
- buf + msg_len,
- len - msg_len);
+ &data);
case FLOW_REPLY:
assert(len >= sizeof(*msg));
- return ipcp_udp_port_alloc_reply(ntoh32(msg->s_eid),
+ data.len = len - sizeof(*msg);
+ data.data = (uint8_t *) buf + sizeof(*msg);
+
+ return udp_ipcp_port_alloc_reply(&c_saddr,
+ ntoh32(msg->s_eid),
ntoh32(msg->d_eid),
msg->response,
- buf + sizeof(*msg),
- len - sizeof(*msg));
+ &data);
default:
log_err("Unknown message received %d.", msg->code);
return -1;
}
}
-static void * ipcp_udp_mgmt_handler(void * o)
+static void * udp_ipcp_mgmt_handler(void * o)
{
(void) o;
- pthread_cleanup_push((void (*)(void *)) pthread_mutex_unlock,
- (void *) &udp_data.mgmt_lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, &udp_data.mgmt_lock);
while (true) {
struct mgmt_frame * frame;
@@ -448,7 +411,7 @@ static void * ipcp_udp_mgmt_handler(void * o)
pthread_mutex_unlock(&udp_data.mgmt_lock);
- ipcp_udp_mgmt_frame(frame->buf, frame->len, frame->r_saddr);
+ udp_ipcp_mgmt_frame(frame->buf, frame->len, frame->r_saddr);
free(frame);
}
@@ -458,7 +421,7 @@ static void * ipcp_udp_mgmt_handler(void * o)
return (void *) 0;
}
-static void * ipcp_udp_packet_reader(void * o)
+static void * udp_ipcp_packet_reader(void * o)
{
uint8_t buf[IPCP_UDP_MAX_PACKET_SIZE];
uint8_t * data;
@@ -468,13 +431,17 @@ static void * ipcp_udp_packet_reader(void * o)
(void) o;
+ ipcp_lock_to_core();
+
data = buf + sizeof(uint32_t);
eid_p = (uint32_t *) buf;
while (true) {
- struct mgmt_frame * frame;
- struct sockaddr_in r_saddr;
- socklen_t len;
+ struct mgmt_frame * frame;
+ struct sockaddr_in r_saddr;
+ socklen_t len;
+ struct shm_du_buff * sdb;
+ uint8_t * head;
len = sizeof(r_saddr);
@@ -515,18 +482,31 @@ static void * ipcp_udp_packet_reader(void * o)
continue;
}
- flow_write(eid, data, n - sizeof(eid));
+ n -= sizeof(eid);
+
+ if (ipcp_sdb_reserve(&sdb, n))
+ continue;
+
+ head = shm_du_buff_head(sdb);
+ memcpy(head, data, n);
+ if (np1_flow_write(eid, sdb) < 0)
+ ipcp_sdb_release(sdb);
}
- return 0;
+ return (void *) 0;
}
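
The reader now treats each datagram as a 4-byte network-order EID followed by payload, copies the payload into a freshly reserved sdb, and releases the sdb if np1_flow_write refuses it. A self-contained sketch of just the framing (the tree's ntoh32() is spelled out by hand here):

    #include <stdint.h>
    #include <stdio.h>

    /* Parse an EID-prefixed datagram: 4-byte network-order EID + payload. */
    static int parse_frame(const uint8_t * buf, size_t n,
                           uint32_t * eid, size_t * len)
    {
            if (n < 4)             /* runt datagram */
                    return -1;

            *eid = ((uint32_t) buf[0] << 24) | ((uint32_t) buf[1] << 16) |
                   ((uint32_t) buf[2] <<  8) |  (uint32_t) buf[3];
            *len = n - 4;          /* payload follows the header */

            return 0;
    }

    int main(void)
    {
            const uint8_t dgram[] = { 0, 0, 0, 7, 'h', 'i' };
            uint32_t      eid;
            size_t        len;

            if (parse_frame(dgram, sizeof(dgram), &eid, &len) == 0)
                    printf("eid %u, %zu payload byte(s)\n", eid, len);

            return 0;
    }
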
-static void cleanup_writer(void * o)
+static void cleanup_fqueue(void * fq)
{
- fqueue_destroy((fqueue_t *) o);
+ fqueue_destroy((fqueue_t *) fq);
}
-static void * ipcp_udp_packet_writer(void * o)
+static void cleanup_sdb(void * sdb)
+{
+ ipcp_sdb_release((struct shm_du_buff *) sdb);
+}
+
+static void * udp_ipcp_packet_writer(void * o)
{
fqueue_t * fq;
@@ -538,11 +518,12 @@ static void * ipcp_udp_packet_writer(void * o)
ipcp_lock_to_core();
- pthread_cleanup_push(cleanup_writer, fq);
+ pthread_cleanup_push(cleanup_fqueue, fq);
while (true) {
- int fd;
- int eid;
+ struct sockaddr_in saddr;
+ int eid;
+ int fd;
fevent(udp_data.np1_flows, fq, NULL);
while ((fd = fqueue_next(fq)) >= 0) {
struct shm_du_buff * sdb;
@@ -552,12 +533,12 @@ static void * ipcp_udp_packet_writer(void * o)
if (fqueue_type(fq) != FLOW_PKT)
continue;
- if (ipcp_flow_read(fd, &sdb)) {
+ if (np1_flow_read(fd, &sdb)) {
log_dbg("Bad read from fd %d.", fd);
continue;
}
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+ len = shm_du_buff_len(sdb);
if (len > IPCP_UDP_MAX_PACKET_SIZE) {
log_dbg("Packet length exceeds MTU.");
ipcp_sdb_release(sdb);
@@ -574,16 +555,18 @@ static void * ipcp_udp_packet_writer(void * o)
pthread_rwlock_rdlock(&udp_data.flows_lock);
eid = hton32(udp_data.fd_to_uf[fd].d_eid);
- fd = udp_data.fd_to_uf[fd].skfd;
+ saddr = udp_data.fd_to_uf[fd].r_saddr;
pthread_rwlock_unlock(&udp_data.flows_lock);
memcpy(buf, &eid, sizeof(eid));
- pthread_cleanup_push((void (*)(void *))
- ipcp_sdb_release, (void *) sdb);
+ pthread_cleanup_push(cleanup_sdb, sdb);
- if (write(fd, buf, len + OUR_HEADER_LEN) < 0)
+ if (sendto(udp_data.s_fd, buf, len + OUR_HEADER_LEN,
+ SENDTO_FLAGS,
+ (const struct sockaddr *) &saddr,
+ sizeof(saddr)) < 0)
log_err("Failed to send packet.");
pthread_cleanup_pop(true);
@@ -595,30 +578,34 @@ static void * ipcp_udp_packet_writer(void * o)
return (void *) 1;
}
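
Note the cleanup_sdb bracket around the blocking sendto: pthread_cleanup_push/pop guarantees the buffer is released whether the call completes or the writer thread is cancelled mid-send, which matters since main() cancels these threads on shutdown. The general shape:

    #include <pthread.h>
    #include <stdlib.h>

    static void cleanup_free(void * p)
    {
            free(p);               /* runs on pop(true) or cancellation */
    }

    static void send_one(void)
    {
            void * buf = malloc(64);

            if (buf == NULL)
                    return;

            pthread_cleanup_push(cleanup_free, buf);

            /* ...blocking send: a cancellation point on most systems... */

            pthread_cleanup_pop(true);  /* true: run the handler now too */
    }
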
-static int ipcp_udp_bootstrap(const struct ipcp_config * conf)
+static const char * inet4_ntop(const void * addr,
+ char * buf)
+{
+ return inet_ntop(AF_INET, addr, buf, INET_ADDRSTRLEN);
+}
+
+static int udp_ipcp_bootstrap(struct ipcp_config * conf)
{
char ipstr[INET_ADDRSTRLEN];
char dnsstr[INET_ADDRSTRLEN];
- char portstr[128]; /* port is max 65535 = 5 chars */
int i = 1;
- assert(conf);
+ assert(conf != NULL);
assert(conf->type == THIS_TYPE);
+ assert(conf->layer_info.dir_hash_algo == (enum pol_dir_hash) HASH_MD5);
- if (inet_ntop(AF_INET, &conf->ip_addr, ipstr, INET_ADDRSTRLEN)
- == NULL) {
- log_err("Failed to convert IP address");
+ if (inet4_ntop(&conf->udp.ip_addr, ipstr) == NULL) {
+ log_err("Failed to convert IP address.");
return -1;
}
- if (conf->dns_addr != 0) {
- if (inet_ntop(AF_INET, &conf->dns_addr, dnsstr, INET_ADDRSTRLEN)
- == NULL) {
- log_err("Failed to convert DNS address");
+ if (conf->udp.dns_addr != 0) {
+ if (inet4_ntop(&conf->udp.dns_addr, dnsstr) == NULL) {
+ log_err("Failed to convert DNS address.");
return -1;
}
#ifndef HAVE_DDNS
- log_warn("DNS disabled at compile time, address ignored");
+ log_warn("DNS disabled at compile time, address ignored.");
#endif
} else {
strcpy(dnsstr, "not set");
@@ -631,66 +618,60 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf)
goto fail_socket;
}
- if (setsockopt(udp_data.s_fd, SOL_SOCKET, SO_REUSEADDR,
- &i, sizeof(i)) < 0)
- log_warn("Failed to set SO_REUSEADDR.");
-
memset((char *) &udp_data.s_saddr, 0, sizeof(udp_data.s_saddr));
udp_data.s_saddr.sin_family = AF_INET;
- udp_data.s_saddr.sin_addr.s_addr = conf->ip_addr;
- udp_data.s_saddr.sin_port = htons(conf->srv_port);
+ udp_data.s_saddr.sin_addr.s_addr = conf->udp.ip_addr;
+ udp_data.s_saddr.sin_port = htons(conf->udp.port);
- if (bind(udp_data.s_fd, SERV_SADDR, SERV_SADDR_SIZE) < 0) {
- log_err("Couldn't bind to %s.", ipstr);
+ if (bind(udp_data.s_fd, SADDR, SADDR_SIZE) < 0) {
+ log_err("Couldn't bind to %s:%d. %s.",
+ ipstr, conf->udp.port, strerror(errno));
goto fail_bind;
}
- udp_data.dns_addr = conf->dns_addr;
- udp_data.clt_port = htons(conf->clt_port);
-
- ipcp_set_state(IPCP_OPERATIONAL);
+ udp_data.dns_addr = conf->udp.dns_addr;
if (pthread_create(&udp_data.mgmt_handler, NULL,
- ipcp_udp_mgmt_handler, NULL)) {
- ipcp_set_state(IPCP_INIT);
+ udp_ipcp_mgmt_handler, NULL)) {
+ log_err("Failed to create management thread.");
goto fail_bind;
}
for (i = 0; i < IPCP_UDP_RD_THR; ++i) {
if (pthread_create(&udp_data.packet_reader[i], NULL,
- ipcp_udp_packet_reader, NULL)) {
- ipcp_set_state(IPCP_INIT);
+ udp_ipcp_packet_reader, NULL)) {
+ log_err("Failed to create reader thread.");
goto fail_packet_reader;
}
}
for (i = 0; i < IPCP_UDP_WR_THR; ++i) {
if (pthread_create(&udp_data.packet_writer[i], NULL,
- ipcp_udp_packet_writer, NULL)) {
- ipcp_set_state(IPCP_INIT);
+ udp_ipcp_packet_writer, NULL)) {
+ log_err("Failed to create writer thread.");
goto fail_packet_writer;
}
}
- sprintf(portstr, "%d", conf->clt_port);
-
log_dbg("Bootstrapped IPCP over UDP with pid %d.", getpid());
log_dbg("Bound to IP address %s.", ipstr);
- log_dbg("Client port is %s.", conf->clt_port == 0 ? "random" : portstr);
- log_dbg("Server port is %u.", conf->srv_port);
- log_dbg("DNS server address is %s.", dnsstr);
+ log_dbg("Using port %u.", conf->udp.port);
+ if (conf->udp.dns_addr != 0)
+ log_dbg("DNS server address is %s.", dnsstr);
+ else
+ log_dbg("DNS server not in use.");
return 0;
fail_packet_writer:
- while (i > 0) {
- pthread_cancel(udp_data.packet_writer[--i]);
+ while (i-- > 0) {
+ pthread_cancel(udp_data.packet_writer[i]);
pthread_join(udp_data.packet_writer[i], NULL);
}
i = IPCP_UDP_RD_THR;
fail_packet_reader:
- while (i > 0) {
- pthread_cancel(udp_data.packet_reader[--i]);
+ while (i-- > 0) {
+ pthread_cancel(udp_data.packet_reader[i]);
pthread_join(udp_data.packet_reader[i], NULL);
}
pthread_cancel(udp_data.mgmt_handler);
@@ -706,20 +687,22 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf)
/* NOTE: Disgusted with this crap */
static int ddns_send(char * cmd)
{
- pid_t pid = -1;
+ pid_t pid;
int wstatus;
int pipe_fd[2];
char * argv[] = {NSUPDATE_EXEC, 0};
char * envp[] = {0};
if (pipe(pipe_fd)) {
- log_err("Failed to create pipe.");
+ log_err("Failed to create pipe: %s.", strerror(errno));
return -1;
}
pid = fork();
if (pid == -1) {
- log_err("Failed to fork.");
+ log_err("Failed to fork: %s.", strerror(errno));
+ close(pipe_fd[0]);
+ close(pipe_fd[1]);
return -1;
}
@@ -727,12 +710,15 @@ static int ddns_send(char * cmd)
close(pipe_fd[1]);
dup2(pipe_fd[0], 0);
execve(argv[0], &argv[0], envp);
+ log_err("Failed to execute: %s", strerror(errno));
+ exit(1);
}
close(pipe_fd[0]);
if (write(pipe_fd[1], cmd, strlen(cmd)) == -1) {
- log_err("Failed to communicate with nsupdate.");
+ log_err("Failed to communicate with nsupdate: %s.",
+ strerror(errno));
close(pipe_fd[1]);
return -1;
}
@@ -762,18 +748,20 @@ static uint32_t ddns_resolve(char * name,
char * addr_str = "Address:";
uint32_t ip_addr = 0;
- if (inet_ntop(AF_INET, &dns_addr, dnsstr, INET_ADDRSTRLEN) == NULL)
+ if (inet4_ntop(&dns_addr, dnsstr) == NULL)
return 0;
if (pipe(pipe_fd)) {
- log_err("Failed to create pipe.");
+ log_err("Failed to create pipe: %s.", strerror(errno));
return 0;
}
pid = fork();
if (pid == -1) {
- log_err("Failed to fork.");
- return 0;
+ log_err("Failed to fork: %s.", strerror(errno));
+ close(pipe_fd[0]);
+ close(pipe_fd[1]);
+ return 0;
}
if (pid == 0) {
@@ -783,11 +771,13 @@ static uint32_t ddns_resolve(char * name,
close(pipe_fd[0]);
dup2(pipe_fd[1], 1);
execve(argv[0], &argv[0], envp);
+ log_err("Failed to execute: %s", strerror(errno));
+ exit(1);
}
close(pipe_fd[1]);
- count = read(pipe_fd[0], buf, IPCP_UDP_BUF_SIZE);
+ count = read(pipe_fd[0], buf, IPCP_UDP_BUF_SIZE - 1);
if (count <= 0) {
log_err("Failed to communicate with nslookup.");
close(pipe_fd[0]);
@@ -798,7 +788,7 @@ static uint32_t ddns_resolve(char * name,
waitpid(pid, &wstatus, 0);
if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0 &&
- count != IPCP_UDP_BUF_SIZE)
+ count != IPCP_UDP_BUF_SIZE - 1)
log_dbg("Succesfully communicated with nslookup.");
else
log_err("Failed to resolve DNS address.");
@@ -824,7 +814,7 @@ static uint32_t ddns_resolve(char * name,
}
#endif
-static int ipcp_udp_reg(const uint8_t * hash)
+static int udp_ipcp_reg(const uint8_t * hash)
{
#ifdef HAVE_DDNS
char ipstr[INET_ADDRSTRLEN];
@@ -836,16 +826,18 @@ static int ipcp_udp_reg(const uint8_t * hash)
char * hashstr;
hashstr = malloc(ipcp_dir_hash_strlen() + 1);
- if (hashstr == NULL)
+ if (hashstr == NULL) {
+ log_err("Failed to malloc hashstr.");
return -1;
+ }
assert(hash);
ipcp_hash_str(hashstr, hash);
if (shim_data_reg_add_entry(udp_data.shim_data, hash)) {
- log_err("Failed to add " HASH_FMT " to local registry.",
- HASH_VAL(hash));
+ log_err("Failed to add " HASH_FMT32 " to local registry.",
+ HASH_VAL32(hash));
free(hashstr);
return -1;
}
@@ -858,14 +850,14 @@ static int ipcp_udp_reg(const uint8_t * hash)
if (dns_addr != 0) {
ip_addr = udp_data.s_saddr.sin_addr.s_addr;
- if (inet_ntop(AF_INET, &ip_addr,
- ipstr, INET_ADDRSTRLEN) == NULL) {
+ if (inet4_ntop(&ip_addr, ipstr) == NULL) {
+ log_err("Failed to convert IP address to string.");
free(hashstr);
return -1;
}
- if (inet_ntop(AF_INET, &dns_addr,
- dnsstr, INET_ADDRSTRLEN) == NULL) {
+ if (inet4_ntop(&dns_addr, dnsstr) == NULL) {
+ log_err("Failed to convert DNS address to string.");
free(hashstr);
return -1;
}
@@ -874,20 +866,19 @@ static int ipcp_udp_reg(const uint8_t * hash)
dnsstr, hashstr, DNS_TTL, ipstr);
if (ddns_send(cmd)) {
+ log_err("Failed to send DDNS message.");
shim_data_reg_del_entry(udp_data.shim_data, hash);
free(hashstr);
return -1;
}
}
#endif
- log_dbg("Registered " HASH_FMT ".", HASH_VAL(hash));
-
free(hashstr);
return 0;
}
-static int ipcp_udp_unreg(const uint8_t * hash)
+static int udp_ipcp_unreg(const uint8_t * hash)
{
#ifdef HAVE_DDNS
char dnsstr[INET_ADDRSTRLEN];
@@ -900,8 +891,10 @@ static int ipcp_udp_unreg(const uint8_t * hash)
assert(hash);
hashstr = malloc(ipcp_dir_hash_strlen() + 1);
- if (hashstr == NULL)
+ if (hashstr == NULL) {
+ log_err("Failed to malloc hashstr.");
return -1;
+ }
ipcp_hash_str(hashstr, hash);
@@ -911,8 +904,8 @@ static int ipcp_udp_unreg(const uint8_t * hash)
dns_addr = udp_data.dns_addr;
if (dns_addr != 0) {
- if (inet_ntop(AF_INET, &dns_addr, dnsstr, INET_ADDRSTRLEN)
- == NULL) {
+ if (inet4_ntop(&dns_addr, dnsstr) == NULL) {
+ log_err("Failed to convert DNS address to string.");
free(hashstr);
return -1;
}
@@ -925,14 +918,12 @@ static int ipcp_udp_unreg(const uint8_t * hash)
shim_data_reg_del_entry(udp_data.shim_data, hash);
- log_dbg("Unregistered " HASH_FMT ".", HASH_VAL(hash));
-
free(hashstr);
return 0;
}
-static int ipcp_udp_query(const uint8_t * hash)
+static int udp_ipcp_query(const uint8_t * hash)
{
uint32_t ip_addr = 0;
char * hashstr;
@@ -943,8 +934,10 @@ static int ipcp_udp_query(const uint8_t * hash)
assert(hash);
hashstr = malloc(ipcp_dir_hash_strlen() + 1);
- if (hashstr == NULL)
+ if (hashstr == NULL) {
+ log_err("Failed to malloc hashstr.");
return -ENOMEM;
+ }
ipcp_hash_str(hashstr, hash);
@@ -959,7 +952,7 @@ static int ipcp_udp_query(const uint8_t * hash)
if (dns_addr != 0) {
ip_addr = ddns_resolve(hashstr, dns_addr);
if (ip_addr == 0) {
- log_dbg("Could not resolve %s.", hashstr);
+ log_err("Could not resolve %s.", hashstr);
free(hashstr);
return -1;
}
@@ -967,7 +960,7 @@ static int ipcp_udp_query(const uint8_t * hash)
#endif
h = gethostbyname(hashstr);
if (h == NULL) {
- log_dbg("Could not resolve %s.", hashstr);
+ log_err("Could not resolve %s.", hashstr);
free(hashstr);
return -1;
}
@@ -988,190 +981,116 @@ static int ipcp_udp_query(const uint8_t * hash)
return 0;
}
-static int ipcp_udp_flow_alloc(int fd,
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t len)
+static int udp_ipcp_flow_alloc(int fd,
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data)
{
struct sockaddr_in r_saddr; /* Server address */
- struct sockaddr_in c_saddr; /* Client address */
- socklen_t c_saddr_len;
- int skfd;
uint32_t ip_addr = 0;
- char ip_str[INET_ADDRSTRLEN];
-
- c_saddr_len = sizeof(c_saddr);
-
- log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(dst));
+ char ipstr[INET_ADDRSTRLEN];
(void) qs;
assert(dst);
- skfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- if (skfd < 0) {
- log_err("Could not create socket.");
+ if (!shim_data_dir_has(udp_data.shim_data, dst)) {
+ log_err("Could not resolve destination.");
return -1;
}
- /* This socket is for the flow. */
- memset((char *) &c_saddr, 0, sizeof(c_saddr));
- c_saddr.sin_family = AF_INET;
- c_saddr.sin_addr.s_addr = LOCAL_IP;
- c_saddr.sin_port = udp_data.clt_port;
-
- if (bind(skfd, (struct sockaddr *) &c_saddr, sizeof(c_saddr)) < 0) {
- log_dbg("Could not bind socket to client address.");
- close(skfd);
- return -1;
- }
+ ip_addr = (uint32_t) shim_data_dir_get_addr(udp_data.shim_data, dst);
- if (getsockname(skfd, (struct sockaddr *) &c_saddr, &c_saddr_len) < 0) {
- log_err("Could not get address from fd.");
- close(skfd);
+ if (inet4_ntop(&ip_addr, ipstr) == NULL) {
+ log_err("Could not convert IP address.");
return -1;
}
- if (!shim_data_dir_has(udp_data.shim_data, dst)) {
- log_dbg("Could not resolve destination.");
- close(skfd);
- return -1;
- }
+ log_dbg("Destination " HASH_FMT32 " resolved at IP %s.",
+ HASH_VAL32(dst), ipstr);
- ip_addr = (uint32_t) shim_data_dir_get_addr(udp_data.shim_data, dst);
-
- inet_ntop(AF_INET, &ip_addr, ip_str, INET_ADDRSTRLEN);
- log_dbg("Destination UDP ipcp resolved at %s.", ip_str);
-
- /* Connect to server and store the remote IP address in the skfd. */
memset((char *) &r_saddr, 0, sizeof(r_saddr));
r_saddr.sin_family = AF_INET;
r_saddr.sin_addr.s_addr = ip_addr;
r_saddr.sin_port = udp_data.s_saddr.sin_port;
- if (connect(skfd, (struct sockaddr *) &r_saddr, sizeof(r_saddr)) < 0) {
- log_dbg("Could not connect socket to remote.");
- close(skfd);
- return -1;
- }
-
- if (ipcp_udp_port_alloc(skfd, fd, dst, qs, data, len) < 0) {
+ if (udp_ipcp_port_alloc(&r_saddr, fd, dst, qs, data) < 0) {
log_err("Could not allocate port.");
- close(skfd);
return -1;
}
pthread_rwlock_wrlock(&udp_data.flows_lock);
- udp_data.fd_to_uf[fd].d_eid = -1;
- udp_data.fd_to_uf[fd].skfd = skfd;
-
- fset_add(udp_data.np1_flows, fd);
+ udp_data.fd_to_uf[fd].d_eid = -1;
+ udp_data.fd_to_uf[fd].r_saddr = r_saddr;
pthread_rwlock_unlock(&udp_data.flows_lock);
- log_dbg("Flow pending on fd %d, UDP src port %d, dst port %d.",
- fd, ntohs(c_saddr.sin_port), ntohs(r_saddr.sin_port));
+ fset_add(udp_data.np1_flows, fd);
return 0;
}
-static int ipcp_udp_flow_alloc_resp(int fd,
- int resp,
- const void * data,
- size_t len)
+static int udp_ipcp_flow_alloc_resp(int fd,
+ int resp,
+ const buffer_t * data)
{
- struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000};
- struct timespec abstime;
- int skfd;
- int d_eid;
-
- if (resp)
- return 0;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
- pthread_mutex_lock(&ipcpi.alloc_lock);
-
- while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
+ struct sockaddr_in saddr;
+ int d_eid;
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
+ if (ipcp_wait_flow_resp(fd) < 0) {
+ log_err("Failed to wait for flow response.");
return -1;
}
- ipcpi.alloc_id = -1;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
-
- pthread_mutex_unlock(&ipcpi.alloc_lock);
-
pthread_rwlock_rdlock(&udp_data.flows_lock);
- skfd = udp_data.fd_to_uf[fd].skfd;
+ saddr = udp_data.fd_to_uf[fd].r_saddr;
d_eid = udp_data.fd_to_uf[fd].d_eid;
- fset_add(udp_data.np1_flows, fd);
-
pthread_rwlock_unlock(&udp_data.flows_lock);
- if (ipcp_udp_port_alloc_resp(skfd, d_eid, fd, resp, data, len) < 0) {
- pthread_rwlock_rdlock(&udp_data.flows_lock);
+ if (udp_ipcp_port_alloc_resp(&saddr, d_eid, fd, resp, data) < 0) {
fset_del(udp_data.np1_flows, fd);
- pthread_rwlock_unlock(&udp_data.flows_lock);
log_err("Failed to respond to flow request.");
return -1;
}
- log_dbg("Accepted flow, fd %d on eid %d.",
- fd, d_eid);
+ fset_add(udp_data.np1_flows, fd);
return 0;
}
-static int ipcp_udp_flow_dealloc(int fd)
+static int udp_ipcp_flow_dealloc(int fd)
{
- int skfd = -1;
-
ipcp_flow_fini(fd);
- pthread_rwlock_wrlock(&udp_data.flows_lock);
-
fset_del(udp_data.np1_flows, fd);
- skfd = udp_data.fd_to_uf[fd].skfd;
+ pthread_rwlock_wrlock(&udp_data.flows_lock);
udp_data.fd_to_uf[fd].d_eid = -1;
- udp_data.fd_to_uf[fd].skfd = -1;
-
- close(skfd);
+ memset(&udp_data.fd_to_uf[fd].r_saddr, 0, SADDR_SIZE);
pthread_rwlock_unlock(&udp_data.flows_lock);
- flow_dealloc(fd);
-
- log_dbg("Flow with fd %d deallocated.", fd);
+ ipcp_flow_dealloc(fd);
return 0;
}
static struct ipcp_ops udp_ops = {
- .ipcp_bootstrap = ipcp_udp_bootstrap,
+ .ipcp_bootstrap = udp_ipcp_bootstrap,
.ipcp_enroll = NULL,
.ipcp_connect = NULL,
.ipcp_disconnect = NULL,
- .ipcp_reg = ipcp_udp_reg,
- .ipcp_unreg = ipcp_udp_unreg,
- .ipcp_query = ipcp_udp_query,
- .ipcp_flow_alloc = ipcp_udp_flow_alloc,
+ .ipcp_reg = udp_ipcp_reg,
+ .ipcp_unreg = udp_ipcp_unreg,
+ .ipcp_query = udp_ipcp_query,
+ .ipcp_flow_alloc = udp_ipcp_flow_alloc,
.ipcp_flow_join = NULL,
- .ipcp_flow_alloc_resp = ipcp_udp_flow_alloc_resp,
- .ipcp_flow_dealloc = ipcp_udp_flow_dealloc
+ .ipcp_flow_alloc_resp = udp_ipcp_flow_alloc_resp,
+ .ipcp_flow_dealloc = udp_ipcp_flow_dealloc
};
int main(int argc,
@@ -1179,53 +1098,51 @@ int main(int argc,
{
int i;
- if (ipcp_init(argc, argv, &udp_ops) < 0)
- goto fail_init;
if (udp_data_init() < 0) {
log_err("Failed to init udp data.");
goto fail_data_init;
}
- if (ipcp_boot() < 0) {
- log_err("Failed to boot IPCP.");
- goto fail_boot;
+ if (ipcp_init(argc, argv, &udp_ops, THIS_TYPE) < 0) {
+ log_err("Failed to initialize IPCP.");
+ goto fail_init;
}
- if (ipcp_create_r(0)) {
- log_err("Failed to notify IRMd we are initialized.");
- goto fail_create_r;
+ if (ipcp_start() < 0) {
+ log_err("Failed to start IPCP.");
+ goto fail_start;
}
- ipcp_shutdown();
+ ipcp_sigwait();
if (ipcp_get_state() == IPCP_SHUTDOWN) {
- for (i = 0; i < IPCP_UDP_RD_THR; ++i)
- pthread_cancel(udp_data.packet_reader[i]);
for (i = 0; i < IPCP_UDP_WR_THR; ++i)
pthread_cancel(udp_data.packet_writer[i]);
+ for (i = 0; i < IPCP_UDP_RD_THR; ++i)
+ pthread_cancel(udp_data.packet_reader[i]);
pthread_cancel(udp_data.mgmt_handler);
- for (i = 0; i < IPCP_UDP_RD_THR; ++i)
- pthread_join(udp_data.packet_reader[i], NULL);
for (i = 0; i < IPCP_UDP_WR_THR; ++i)
pthread_join(udp_data.packet_writer[i], NULL);
+ for (i = 0; i < IPCP_UDP_RD_THR; ++i)
+ pthread_join(udp_data.packet_reader[i], NULL);
pthread_join(udp_data.mgmt_handler, NULL);
+ close(udp_data.s_fd);
}
- udp_data_fini();
+ ipcp_stop();
ipcp_fini();
- exit(EXIT_SUCCESS);
- fail_create_r:
- ipcp_set_state(IPCP_NULL);
- ipcp_shutdown();
- fail_boot:
udp_data_fini();
- fail_data_init:
+
+ exit(EXIT_SUCCESS);
+
+ fail_start:
ipcp_fini();
fail_init:
- ipcp_create_r(-1);
+ udp_data_fini();
+ fail_data_init:
exit(EXIT_FAILURE);
}
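
The net effect of this rewrite: the per-flow connected sockets (skfd) are gone, every flow shares the one bound server socket, and each flow keeps its peer's sockaddr_in, so sending is a single sendto with an explicit destination and an EID header. A trimmed sketch of that send path (buffer bound and names hypothetical):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>

    /* One shared socket; each flow stores its peer's address. */
    static ssize_t send_on_flow(int s_fd, const struct sockaddr_in * peer,
                                uint32_t d_eid, const void * pkt, size_t len)
    {
            uint8_t  buf[4 + 1500];          /* hypothetical MTU bound */
            uint32_t eid = htonl(d_eid);     /* hton32() in the tree */

            if (len > sizeof(buf) - 4)
                    return -1;

            memcpy(buf, &eid, 4);            /* 4-byte EID header... */
            memcpy(buf + 4, pkt, len);       /* ...then the payload */

            return sendto(s_fd, buf, len + 4, 0,
                          (const struct sockaddr *) peer, sizeof(*peer));
    }
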
diff --git a/src/ipcpd/unicast/CMakeLists.txt b/src/ipcpd/unicast/CMakeLists.txt
index c0c55519..2df0bae0 100644
--- a/src/ipcpd/unicast/CMakeLists.txt
+++ b/src/ipcpd/unicast/CMakeLists.txt
@@ -13,8 +13,14 @@ include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${CMAKE_BINARY_DIR}/include)
set(IPCP_UNICAST_TARGET ipcpd-unicast CACHE INTERNAL "")
+set(IPCP_UNICAST_MPL 10000 CACHE STRING
+ "Default maximum packet lifetime for the unicast IPCP, in ms")
+set(DEBUG_PROTO_DHT FALSE CACHE BOOL
+ "Add DHT protocol message output to debug logging")
+set(DEBUG_PROTO_LS FALSE CACHE BOOL
+ "Add link state protocol message output to debug logging")
-protobuf_generate_c(KAD_PROTO_SRCS KAD_PROTO_HDRS kademlia.proto)
+protobuf_generate_c(DHT_PROTO_SRCS DHT_PROTO_HDRS dir/dht.proto)
math(EXPR PFT_EXPR "1 << 12")
set(PFT_SIZE ${PFT_EXPR} CACHE STRING
@@ -29,32 +35,33 @@ if (HAVE_FUSE)
endif ()
endif ()
-
-set(SOURCE_FILES
+set(IPCP_UNICAST_SOURCE_FILES
# Add source files here
- addr_auth.c
+ addr-auth.c
+ ca.c
connmgr.c
- dht.c
dir.c
dt.c
- enroll.c
fa.c
main.c
pff.c
routing.c
psched.c
# Add policies last
- pol/pft.c
- pol/flat.c
- pol/link_state.c
- pol/graph.c
- pol/simple_pff.c
- pol/alternate_pff.c
- pol/multipath_pff.c
+ addr-auth/flat.c
+ ca/mb-ecn.c
+ ca/nop.c
+ dir/dht.c
+ pff/simple.c
+ pff/alternate.c
+ pff/multipath.c
+ pff/pft.c
+ routing/link-state.c
+ routing/graph.c
)
-add_executable(ipcpd-unicast ${SOURCE_FILES} ${IPCP_SOURCES}
- ${KAD_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS})
+add_executable(ipcpd-unicast ${IPCP_UNICAST_SOURCE_FILES} ${IPCP_SOURCES} ${COMMON_SOURCES}
+ ${DHT_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS})
target_link_libraries(ipcpd-unicast LINK_PUBLIC ouroboros-dev)
include(AddCompileFlags)
@@ -64,8 +71,9 @@ endif ()
install(TARGETS ipcpd-unicast RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
-add_subdirectory(pol/tests)
+add_subdirectory(pff/tests)
+add_subdirectory(routing/tests)
if (NOT GNU)
- add_subdirectory(tests)
+ add_subdirectory(dir/tests)
endif ()
diff --git a/src/ipcpd/unicast/addr_auth.c b/src/ipcpd/unicast/addr-auth.c
index e82ea254..908a4aa1 100644
--- a/src/ipcpd/unicast/addr_auth.c
+++ b/src/ipcpd/unicast/addr-auth.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Address authority
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -24,13 +24,12 @@
#include <ouroboros/logs.h>
-#include "addr_auth.h"
-#include "pol-addr-auth-ops.h"
-#include "pol/flat.h"
+#include "addr-auth.h"
+#include "addr-auth/pol.h"
#include <stdlib.h>
-struct pol_addr_auth_ops * ops;
+struct addr_auth_ops * ops;
int addr_auth_init(enum pol_addr_auth type,
const void * info)
diff --git a/src/ipcpd/unicast/addr_auth.h b/src/ipcpd/unicast/addr-auth.h
index 6bedf420..0d2cd4c0 100644
--- a/src/ipcpd/unicast/addr_auth.h
+++ b/src/ipcpd/unicast/addr-auth.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Address authority
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,6 +27,14 @@
#include <stdint.h>
+#define ADDR_FMT32 "%02x.%02x.%02x.%02x"
+#define ADDR_VAL32(a) \
+ ((uint8_t *) a)[0], ((uint8_t *) a)[1], \
+ ((uint8_t *) a)[2], ((uint8_t *) a)[3]
+
+#define ADDR_FMT64 ADDR_FMT32 "." ADDR_FMT32
+#define ADDR_VAL64(a) ADDR_VAL32(a), ADDR_VAL32(a + 4)
+
int addr_auth_init(enum pol_addr_auth type,
const void * info);
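
The new ADDR_FMT macros render 32- and 64-bit addresses as dotted hex, byte by byte; the argument is a byte pointer, so printing an integer address this way is endian-dependent. A small usage example built from the definitions above:

    #include <stdint.h>
    #include <stdio.h>

    #define ADDR_FMT32 "%02x.%02x.%02x.%02x"
    #define ADDR_VAL32(a) \
            ((uint8_t *) a)[0], ((uint8_t *) a)[1], \
            ((uint8_t *) a)[2], ((uint8_t *) a)[3]

    #define ADDR_FMT64 ADDR_FMT32 "." ADDR_FMT32
    #define ADDR_VAL64(a) ADDR_VAL32(a), ADDR_VAL32(a + 4)

    int main(void)
    {
            uint64_t addr = UINT64_C(0x0123456789abcdef);

            /* Bytes print in memory order: output differs by endianness. */
            printf(ADDR_FMT64 "\n", ADDR_VAL64((uint8_t *) &addr));

            return 0;
    }
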
diff --git a/src/ipcpd/unicast/pol/flat.c b/src/ipcpd/unicast/addr-auth/flat.c
index 6e5c96ab..34ca1cef 100644
--- a/src/ipcpd/unicast/pol/flat.c
+++ b/src/ipcpd/unicast/addr-auth/flat.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Policy for flat addresses in a distributed way
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -29,28 +29,21 @@
#define OUROBOROS_PREFIX "flat-addr-auth"
#include <ouroboros/logs.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/utils.h>
+#include <ouroboros/random.h>
+#include "addr-auth.h"
#include "ipcp.h"
#include "flat.h"
-#include <time.h>
-#include <stdlib.h>
-#include <math.h>
-#include <string.h>
-#include <assert.h>
-
-#define NAME_LEN 8
+#define NAME_LEN 8
+#define INVALID_ADDRESS 0
struct {
- uint8_t addr_size;
+ uint8_t addr_size;
+ uint32_t addr;
} flat;
-#define INVALID_ADDRESS 0
-
-struct pol_addr_auth_ops flat_ops = {
+struct addr_auth_ops flat_ops = {
.init = flat_init,
.fini = flat_fini,
.address = flat_address
@@ -65,6 +58,15 @@ int flat_init(const void * info)
return -1;
}
+#if defined (CONFIG_OUROBOROS_DEBUG) && defined (IPCP_DEBUG_LOCAL)
+ flat.addr = getpid();
+#else
+ while (flat.addr == INVALID_ADDRESS)
+ random_buffer(&flat.addr,sizeof(flat.addr));
+#endif
+ log_dbg("Flat address initialized to " ADDR_FMT32 ".",
+ ADDR_VAL32((uint8_t *) &flat.addr));
+
return 0;
}
@@ -75,13 +77,5 @@ int flat_fini(void)
uint64_t flat_address(void)
{
- struct timespec t;
- uint32_t addr;
-
- clock_gettime(CLOCK_REALTIME, &t);
- srand(t.tv_nsec);
-
- addr = (rand() % (RAND_MAX - 1) + 1) & 0xFFFFFFFF;
-
- return addr;
+ return (uint64_t) flat.addr;
}
diff --git a/src/ipcpd/unicast/pol/flat.h b/src/ipcpd/unicast/addr-auth/flat.h
index 54460bb3..d4b672c7 100644
--- a/src/ipcpd/unicast/pol/flat.h
+++ b/src/ipcpd/unicast/addr-auth/flat.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Policy for flat addresses in a distributed way
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,7 +23,7 @@
#ifndef OUROBOROS_IPCPD_UNICAST_FLAT_H
#define OUROBOROS_IPCPD_UNICAST_FLAT_H
-#include "pol-addr-auth-ops.h"
+#include "ops.h"
int flat_init(const void * info);
@@ -31,6 +31,6 @@ int flat_fini(void);
uint64_t flat_address(void);
-struct pol_addr_auth_ops flat_ops;
+extern struct addr_auth_ops flat_ops;
#endif /* OUROBOROS_IPCPD_UNICAST_FLAT_H */
diff --git a/src/ipcpd/unicast/pol-addr-auth-ops.h b/src/ipcpd/unicast/addr-auth/ops.h
index 1096eecb..06b24cec 100644
--- a/src/ipcpd/unicast/pol-addr-auth-ops.h
+++ b/src/ipcpd/unicast/addr-auth/ops.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Address authority policy ops
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,10 +20,10 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#ifndef OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H
-#define OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H
+#ifndef OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H
-struct pol_addr_auth_ops {
+struct addr_auth_ops {
int (* init)(const void * info);
int (* fini)(void);
@@ -31,4 +31,4 @@ struct pol_addr_auth_ops {
uint64_t (* address)(void);
};
-#endif /* OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H */
+#endif /* OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H */
diff --git a/src/ipcpd/unicast/addr-auth/pol.h b/src/ipcpd/unicast/addr-auth/pol.h
new file mode 100644
index 00000000..844308c6
--- /dev/null
+++ b/src/ipcpd/unicast/addr-auth/pol.h
@@ -0,0 +1,23 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Address Authority policies
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "flat.h"
diff --git a/src/ipcpd/unicast/ca.c b/src/ipcpd/unicast/ca.c
new file mode 100644
index 00000000..1fcc9bb2
--- /dev/null
+++ b/src/ipcpd/unicast/ca.c
@@ -0,0 +1,108 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Congestion Avoidance
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#define OUROBOROS_PREFIX "ca"
+
+#include <ouroboros/logs.h>
+
+#include "ca.h"
+#include "ca/pol.h"
+
+struct {
+ struct ca_ops * ops;
+} ca;
+
+int ca_init(enum pol_cong_avoid pol)
+{
+ switch(pol) {
+ case CA_NONE:
+ log_dbg("Disabling congestion control.");
+ ca.ops = &nop_ca_ops;
+ break;
+ case CA_MB_ECN:
+ log_dbg("Using multi-bit ECN.");
+ ca.ops = &mb_ecn_ca_ops;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+void ca_fini(void)
+{
+ ca.ops = NULL;
+}
+
+void * ca_ctx_create(void)
+{
+ return ca.ops->ctx_create();
+}
+
+void ca_ctx_destroy(void * ctx)
+{
+ return ca.ops->ctx_destroy(ctx);
+}
+
+ca_wnd_t ca_ctx_update_snd(void * ctx,
+ size_t len)
+{
+ return ca.ops->ctx_update_snd(ctx, len);
+}
+
+bool ca_ctx_update_rcv(void * ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece)
+{
+ return ca.ops->ctx_update_rcv(ctx, len, ecn, ece);
+}
+
+void ca_ctx_update_ece(void * ctx,
+ uint16_t ece)
+{
+ return ca.ops->ctx_update_ece(ctx, ece);
+}
+
+void ca_wnd_wait(ca_wnd_t wnd)
+{
+ return ca.ops->wnd_wait(wnd);
+}
+
+int ca_calc_ecn(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len)
+{
+ return ca.ops->calc_ecn(fd, ecn, qc, len);
+}
+
+ssize_t ca_print_stats(void * ctx,
+ char * buf,
+ size_t len)
+{
+ if (ca.ops->print_stats == NULL)
+ return 0;
+
+ return ca.ops->print_stats(ctx, buf, len);
+}
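
ca.c is a straight vtable indirection: ca_init selects an ops table once, and every other call forwards through it, so the no-op policy costs one function pointer call. A typical send-side call sequence, assuming only the API shown here:

    #include "ca.h"     /* the header introduced below */

    static int send_with_ca(void)
    {
            void *   ctx;
            ca_wnd_t wnd;

            if (ca_init(CA_MB_ECN) < 0)     /* or CA_NONE for no-ops */
                    return -1;

            ctx = ca_ctx_create();
            if (ctx == NULL) {
                    ca_fini();
                    return -1;
            }

            wnd = ca_ctx_update_snd(ctx, 1500); /* account one packet... */
            ca_wnd_wait(wnd);                   /* ...pace if congested */

            ca_ctx_destroy(ctx);
            ca_fini();

            return 0;
    }
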
diff --git a/src/ipcpd/unicast/ca.h b/src/ipcpd/unicast/ca.h
new file mode 100644
index 00000000..ea803e17
--- /dev/null
+++ b/src/ipcpd/unicast/ca.h
@@ -0,0 +1,68 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Congestion avoidance
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_UNICAST_CA_H
+#define OUROBOROS_IPCPD_UNICAST_CA_H
+
+#include <ouroboros/ipcp.h>
+#include <ouroboros/qoscube.h>
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+typedef union {
+ time_t wait;
+} ca_wnd_t;
+
+int ca_init(enum pol_cong_avoid ca);
+
+void ca_fini(void);
+
+
+/* OPS */
+void * ca_ctx_create(void);
+
+void ca_ctx_destroy(void * ctx);
+
+ca_wnd_t ca_ctx_update_snd(void * ctx,
+ size_t len);
+
+bool ca_ctx_update_rcv(void * ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece);
+
+void ca_ctx_update_ece(void * ctx,
+ uint16_t ece);
+
+void ca_wnd_wait(ca_wnd_t wnd);
+
+int ca_calc_ecn(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len);
+
+ssize_t ca_print_stats(void * ctx,
+ char * buf,
+ size_t len);
+
+#endif /* OUROBOROS_IPCPD_UNICAST_CA_H */
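
The header above fixes the intended per-packet order on the sender: account the packet with ca_ctx_update_snd(), then honor the returned window with ca_wnd_wait(); ca_calc_ecn() is the separate hook for stamping congestion bits on forwarded packets. A hedged sketch of both call sites (ctx, fd, qc and len are assumed to come from the flow-allocator and data-transfer paths, which are outside this diff):

        uint8_t  ecn = 0;
        ca_wnd_t wnd;

        /* send path */
        wnd = ca_ctx_update_snd(ctx, len); /* account this packet */
        ca_wnd_wait(wnd);                  /* block if over the byte limit */

        /* forwarding path */
        if (ca_calc_ecn(fd, &ecn, qc, len) == 0) {
                /* write ecn into the outgoing packet header */
        }
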
diff --git a/src/ipcpd/unicast/ca/mb-ecn.c b/src/ipcpd/unicast/ca/mb-ecn.c
new file mode 100644
index 00000000..d9a204b0
--- /dev/null
+++ b/src/ipcpd/unicast/ca/mb-ecn.c
@@ -0,0 +1,296 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Multi-bit ECN Congestion Avoidance
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#if defined(__linux__) || defined(__CYGWIN__)
+#define _DEFAULT_SOURCE
+#else
+#define _POSIX_C_SOURCE 200809L
+#endif
+
+#include "config.h"
+
+#include <ouroboros/ipcp-dev.h>
+#include <ouroboros/time.h>
+
+#include "mb-ecn.h"
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+/* congestion avoidance constants */
+#define CA_SHFT 5 /* Average over 32 pkts */
+#define CA_WND (1 << CA_SHFT) /* 32 pkts receiver wnd */
+#define CA_UPD (1 << (CA_SHFT - 2)) /* Update snd every 8 pkt */
+#define CA_SLOT 24 /* Initial slot ~16.8 ms (2^24 ns) */
+#define CA_INC (1UL << 16) /* ~4MiB/s^2 additive inc */
+#define CA_IWL (1UL << 16) /* Initial limit ~4MiB/s */
+#define CA_MINPS 8 /* Minimum pkts / slot */
+#define CA_MAXPS 64 /* Maximum pkts / slot */
+#define ECN_Q_SHFT 4
+#define ts_to_ns(ts) ((size_t) (ts).tv_sec * BILLION + (ts).tv_nsec)
+
+struct mb_ecn_ctx {
+ uint16_t rx_ece; /* Level of congestion (upstream) */
+ size_t rx_ctr; /* Receiver side packet counter */
+
+ uint16_t tx_ece; /* Level of congestion (downstream) */
+ size_t tx_ctr; /* Sender side packet counter */
+ size_t tx_wbc; /* Window byte count */
+ size_t tx_wpc; /* Window packet count */
+ size_t tx_wbl; /* Window byte limit */
+ bool tx_cav; /* Congestion avoidance */
+ size_t tx_mul; /* Slot size multiplier */
+ size_t tx_inc; /* Additive increase */
+ size_t tx_slot;
+};
+
+struct ca_ops mb_ecn_ca_ops = {
+ .ctx_create = mb_ecn_ctx_create,
+ .ctx_destroy = mb_ecn_ctx_destroy,
+ .ctx_update_snd = mb_ecn_ctx_update_snd,
+ .ctx_update_rcv = mb_ecn_ctx_update_rcv,
+ .ctx_update_ece = mb_ecn_ctx_update_ece,
+ .wnd_wait = mb_ecn_wnd_wait,
+ .calc_ecn = mb_ecn_calc_ecn,
+ .print_stats = mb_ecn_print_stats
+};
+
+void * mb_ecn_ctx_create(void)
+{
+ struct timespec now;
+ struct mb_ecn_ctx * ctx;
+
+ ctx = malloc(sizeof(*ctx));
+ if (ctx == NULL)
+ return NULL;
+
+ clock_gettime(PTHREAD_COND_CLOCK, &now);
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->tx_mul = CA_SLOT;
+ ctx->tx_wbl = CA_IWL;
+ ctx->tx_inc = CA_INC;
+ ctx->tx_slot = ts_to_ns(now) >> ctx->tx_mul;
+
+ return (void *) ctx;
+}
+
+void mb_ecn_ctx_destroy(void * ctx)
+{
+ free(ctx);
+}
+
+#define _slot_after(new, old) ((int64_t) ((old) - (new)) < 0)
+
+ca_wnd_t mb_ecn_ctx_update_snd(void * _ctx,
+ size_t len)
+{
+ struct timespec now;
+ size_t slot;
+ ca_wnd_t wnd;
+ struct mb_ecn_ctx * ctx = _ctx;
+
+ clock_gettime(PTHREAD_COND_CLOCK, &now);
+
+ slot = ts_to_ns(now) >> ctx->tx_mul;
+
+ ctx->tx_ctr++;
+ ctx->tx_wpc++;
+ ctx->tx_wbc += len;
+
+ if (ctx->tx_ctr > CA_WND)
+ ctx->tx_ece = 0;
+
+ if (_slot_after(slot, ctx->tx_slot)) {
+ bool carry = false; /* may carry over if window increases */
+
+ ctx->tx_slot = slot;
+
+ if (!ctx->tx_cav) { /* Slow start */
+ if (ctx->tx_wbc > ctx->tx_wbl)
+ ctx->tx_wbl <<= 1;
+ } else {
+ if (ctx->tx_ece) /* Mult. Decrease */
+ ctx->tx_wbl -= (ctx->tx_wbl * ctx->tx_ece)
+ >> (CA_SHFT + 8);
+ else /* Add. Increase */
+ ctx->tx_wbl = ctx->tx_wbc + ctx->tx_inc;
+ }
+
+ /* Window scaling */
+ if (ctx->tx_wpc < CA_MINPS) {
+ size_t fact = 0; /* factor to scale the window up */
+ size_t pkts = ctx->tx_wpc;
+ while (pkts < CA_MINPS) {
+ pkts <<= 1;
+ fact++;
+ }
+ ctx->tx_mul += fact;
+ ctx->tx_slot >>= fact;
+ if ((ctx->tx_slot & ((1 << fact) - 1)) == 0) {
+ carry = true;
+ ctx->tx_slot += 1;
+ }
+ ctx->tx_wbl <<= fact;
+ ctx->tx_inc <<= fact;
+ } else if (ctx->tx_wpc > CA_MAXPS) {
+ size_t fact = 0; /* factor to scale the window down */
+ size_t pkts = ctx->tx_wpc;
+ while (pkts > CA_MAXPS) {
+ pkts >>= 1;
+ fact++;
+ }
+ ctx->tx_mul -= fact;
+ ctx->tx_slot <<= fact;
+ ctx->tx_wbl >>= fact;
+ ctx->tx_inc >>= fact;
+ } else {
+ ctx->tx_slot = slot;
+ }
+
+ if (!carry) {
+ ctx->tx_wbc = 0;
+ ctx->tx_wpc = 0;
+ }
+ }
+
+ if (ctx->tx_wbc > ctx->tx_wbl)
+ wnd.wait = ((ctx->tx_slot + 1) << ctx->tx_mul) - ts_to_ns(now);
+ else
+ wnd.wait = 0;
+
+ return wnd;
+}
+
+void mb_ecn_wnd_wait(ca_wnd_t wnd)
+{
+ if (wnd.wait > 0) {
+ struct timespec s = TIMESPEC_INIT_S(0);
+ if (wnd.wait > BILLION) /* Cap a single sleep at 1 s */
+ s.tv_sec = 1;
+ else
+ s.tv_nsec = wnd.wait;
+
+ nanosleep(&s, NULL);
+ }
+}
+
+bool mb_ecn_ctx_update_rcv(void * _ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece)
+{
+ struct mb_ecn_ctx * ctx = _ctx;
+ bool update;
+
+ (void) len;
+
+ if ((ctx->rx_ece | ecn) == 0)
+ return false;
+
+ if (ecn == 0) { /* End of congestion */
+ ctx->rx_ece >>= 2;
+ update = ctx->rx_ece == 0;
+ } else {
+ if (ctx->rx_ece == 0) { /* Start of congestion */
+ ctx->rx_ece = ecn;
+ ctx->rx_ctr = 0;
+ update = true;
+ } else { /* Congestion update */
+ ctx->rx_ece -= ctx->rx_ece >> CA_SHFT;
+ ctx->rx_ece += ecn;
+ update = (ctx->rx_ctr++ & (CA_UPD - 1)) == true;
+ }
+ }
+
+ *ece = ctx->rx_ece;
+
+ return update;
+}
+
+
+void mb_ecn_ctx_update_ece(void * _ctx,
+ uint16_t ece)
+{
+ struct mb_ecn_ctx * ctx = _ctx;
+
+ ctx->tx_ece = ece;
+ ctx->tx_ctr = 0;
+ ctx->tx_cav = true;
+}
+
+int mb_ecn_calc_ecn(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len)
+{
+ size_t q;
+
+ (void) len;
+ (void) qc;
+
+ q = ipcp_flow_queued(fd);
+
+ *ecn |= (uint8_t) (q >> ECN_Q_SHFT);
+
+ return 0;
+}
+
+ssize_t mb_ecn_print_stats(void * _ctx,
+ char * buf,
+ size_t len)
+{
+ struct mb_ecn_ctx * ctx = _ctx;
+ char * regime;
+
+ if (len < 1024)
+ return 0;
+
+ if (!ctx->tx_cav)
+ regime = "Slow start";
+ else if (ctx->tx_ece)
+ regime = "Multiplicative dec";
+ else
+ regime = "Additive inc";
+
+ sprintf(buf,
+ "Congestion avoidance algorithm: %20s\n"
+ "Upstream congestion level: %20u\n"
+ "Upstream packet counter: %20zu\n"
+ "Downstream congestion level: %20u\n"
+ "Downstream packet counter: %20zu\n"
+ "Congestion window size (ns): %20" PRIu64 "\n"
+ "Packets in this window: %20zu\n"
+ "Bytes in this window: %20zu\n"
+ "Max bytes in this window: %20zu\n"
+ "Current congestion regime: %20s\n",
+ "Multi-bit ECN",
+ ctx->tx_ece, ctx->tx_ctr,
+ ctx->rx_ece, ctx->rx_ctr, (uint64_t) (1ULL << ctx->tx_mul),
+ ctx->tx_wpc, ctx->tx_wbc, ctx->tx_wbl,
+ regime);
+
+ return strlen(buf);
+}
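
For orientation on the constants above: a slot spans 1 << CA_SLOT = 2^24 ns ≈ 16.8 ms, so the initial byte limit CA_IWL = 2^16 B per slot works out to 2^16 B / 2^24 ns = 2^-8 B/ns ≈ 3.9 MB/s, which is the "~4MiB/s" in the comment; CA_INC then raises the limit by another 2^16 B each slot. In the multiplicative-decrease branch, tx_ece is an exponentially weighted sum over CA_WND = 32 packets of an 8-bit ECN field, so it stays below 32 * 255 < 2^13 = 2^(CA_SHFT + 8), and the shift in tx_wbl * tx_ece >> (CA_SHFT + 8) therefore scales the decrease to at most (nearly) the whole window.
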
diff --git a/src/ipcpd/unicast/ca/mb-ecn.h b/src/ipcpd/unicast/ca/mb-ecn.h
new file mode 100644
index 00000000..9a2c8b49
--- /dev/null
+++ b/src/ipcpd/unicast/ca/mb-ecn.h
@@ -0,0 +1,56 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Multi-bit ECN Congestion Avoidance
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H
+#define OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H
+
+#include "ops.h"
+
+void * mb_ecn_ctx_create(void);
+
+void mb_ecn_ctx_destroy(void * ctx);
+
+ca_wnd_t mb_ecn_ctx_update_snd(void * ctx,
+ size_t len);
+
+bool mb_ecn_ctx_update_rcv(void * ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece);
+
+void mb_ecn_ctx_update_ece(void * ctx,
+ uint16_t ece);
+
+void mb_ecn_wnd_wait(ca_wnd_t wnd);
+
+int mb_ecn_calc_ecn(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len);
+
+ssize_t mb_ecn_print_stats(void * ctx,
+ char * buf,
+ size_t len);
+
+extern struct ca_ops mb_ecn_ca_ops;
+
+#endif /* OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H */
diff --git a/src/ipcpd/unicast/ca/nop.c b/src/ipcpd/unicast/ca/nop.c
new file mode 100644
index 00000000..617fc15b
--- /dev/null
+++ b/src/ipcpd/unicast/ca/nop.c
@@ -0,0 +1,98 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Dummy Congestion Avoidance
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "nop.h"
+
+#include <string.h>
+
+struct ca_ops nop_ca_ops = {
+ .ctx_create = nop_ctx_create,
+ .ctx_destroy = nop_ctx_destroy,
+ .ctx_update_snd = nop_ctx_update_snd,
+ .ctx_update_rcv = nop_ctx_update_rcv,
+ .ctx_update_ece = nop_ctx_update_ece,
+ .wnd_wait = nop_wnd_wait,
+ .calc_ecn = nop_calc_ecn,
+ .print_stats = NULL
+};
+
+void * nop_ctx_create(void)
+{
+ return (void *) 1;
+}
+
+void nop_ctx_destroy(void * ctx)
+{
+ (void) ctx;
+}
+
+ca_wnd_t nop_ctx_update_snd(void * ctx,
+ size_t len)
+{
+ ca_wnd_t wnd;
+
+ (void) ctx;
+ (void) len;
+
+ memset(&wnd, 0, sizeof(wnd));
+
+ return wnd;
+}
+
+void nop_wnd_wait(ca_wnd_t wnd)
+{
+ (void) wnd;
+}
+
+bool nop_ctx_update_rcv(void * ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece)
+{
+ (void) ctx;
+ (void) len;
+ (void) ecn;
+ (void) ece;
+
+ return false;
+}
+
+void nop_ctx_update_ece(void * ctx,
+ uint16_t ece)
+{
+ (void) ctx;
+ (void) ece;
+}
+
+
+int nop_calc_ecn(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len)
+{
+ (void) fd;
+ (void) len;
+ (void) ecn;
+ (void) qc;
+
+ return 0;
+}
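
One deliberate detail in the nop policy: nop_ctx_create() returns (void *) 1 rather than NULL, because the generic ca_ctx_create() has no other failure signal, so a NULL from a policy reads as out-of-memory. A sketch of the assumed caller-side check (the actual call site lies outside this diff):

        void * ctx = ca_ctx_create();
        if (ctx == NULL)  /* would misfire if the nop policy returned NULL */
                return -ENOMEM;
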
diff --git a/src/ipcpd/unicast/ca/nop.h b/src/ipcpd/unicast/ca/nop.h
new file mode 100644
index 00000000..248b198d
--- /dev/null
+++ b/src/ipcpd/unicast/ca/nop.h
@@ -0,0 +1,52 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Dummy Congestion Avoidance
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_UNICAST_CA_NOP_H
+#define OUROBOROS_IPCPD_UNICAST_CA_NOP_H
+
+#include "ops.h"
+
+void * nop_ctx_create(void);
+
+void nop_ctx_destroy(void * ctx);
+
+ca_wnd_t nop_ctx_update_snd(void * ctx,
+ size_t len);
+
+bool nop_ctx_update_rcv(void * ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece);
+
+void nop_ctx_update_ece(void * ctx,
+ uint16_t ece);
+
+void nop_wnd_wait(ca_wnd_t wnd);
+
+int nop_calc_ecn(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len);
+
+extern struct ca_ops nop_ca_ops;
+
+#endif /* OUROBOROS_IPCPD_UNICAST_CA_NOP_H */
diff --git a/src/ipcpd/unicast/ca/ops.h b/src/ipcpd/unicast/ca/ops.h
new file mode 100644
index 00000000..3a7b7248
--- /dev/null
+++ b/src/ipcpd/unicast/ca/ops.h
@@ -0,0 +1,58 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Congestion avoidance policy ops
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_UNICAST_CA_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_CA_OPS_H
+
+#include "ca.h"
+
+struct ca_ops {
+ void * (* ctx_create)(void);
+
+ void (* ctx_destroy)(void * ctx);
+
+ ca_wnd_t (* ctx_update_snd)(void * ctx,
+ size_t len);
+
+ bool (* ctx_update_rcv)(void * ctx,
+ size_t len,
+ uint8_t ecn,
+ uint16_t * ece);
+
+ void (* ctx_update_ece)(void * ctx,
+ uint16_t ece);
+
+ void (* wnd_wait)(ca_wnd_t wnd);
+
+ int (* calc_ecn)(int fd,
+ uint8_t * ecn,
+ qoscube_t qc,
+ size_t len);
+
+ /* Optional, can be NULL */
+ ssize_t (* print_stats)(void * ctx,
+ char * buf,
+ size_t len);
+
+};
+
+#endif /* OUROBOROS_IPCPD_UNICAST_CA_OPS_H */
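
This ops table is the whole contract for a congestion-avoidance policy; only print_stats may be left NULL, since ca_print_stats() guards it. Adding a policy then comes down to filling in the table and registering it, sketched here with hypothetical my_* names (a real policy would also need an include in ca/pol.h and a case in the switch in ca.c):

        struct ca_ops my_ca_ops = {
                .ctx_create     = my_ctx_create,
                .ctx_destroy    = my_ctx_destroy,
                .ctx_update_snd = my_ctx_update_snd,
                .ctx_update_rcv = my_ctx_update_rcv,
                .ctx_update_ece = my_ctx_update_ece,
                .wnd_wait       = my_wnd_wait,
                .calc_ecn       = my_calc_ecn,
                .print_stats    = NULL /* optional */
        };
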
diff --git a/src/ipcpd/unicast/ca/pol.h b/src/ipcpd/unicast/ca/pol.h
new file mode 100644
index 00000000..db0a1a11
--- /dev/null
+++ b/src/ipcpd/unicast/ca/pol.h
@@ -0,0 +1,24 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Congestion avoidance policies
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "mb-ecn.h"
+#include "nop.h"
diff --git a/src/ipcpd/unicast/connmgr.c b/src/ipcpd/unicast/connmgr.c
index 3ebef7f9..07568fb5 100644
--- a/src/ipcpd/unicast/connmgr.c
+++ b/src/ipcpd/unicast/connmgr.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Handles connections between components
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,506 +20,16 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
+#include "config.h"
+
#if defined(__linux__) || defined(__CYGWIN__)
#define _DEFAULT_SOURCE
#else
#define _POSIX_C_SOURCE 200112L
#endif
-#define OUROBOROS_PREFIX "connection-manager"
-
-#include <ouroboros/dev.h>
-#include <ouroboros/cacep.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/list.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/notifier.h>
-
-#include "comp.h"
-#include "connmgr.h"
-#include "dir.h"
-#include "enroll.h"
-#include "ipcp.h"
-
-#include <pthread.h>
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-
-enum connmgr_state {
- CONNMGR_NULL = 0,
- CONNMGR_INIT,
- CONNMGR_RUNNING
-};
-
-struct conn_el {
- struct list_head next;
- struct conn conn;
-};
-
-struct comp {
- struct conn_info info;
-
- struct list_head conns;
- struct list_head pending;
-
- pthread_cond_t cond;
- pthread_mutex_t lock;
-};
-
-struct {
- struct comp comps[COMPID_MAX];
- enum connmgr_state state;
-
- pthread_t acceptor;
-} connmgr;
-
-static int get_id_by_name(const char * name)
-{
- enum comp_id i;
-
- for (i = 0; i < COMPID_MAX; ++i)
- if (strcmp(name, connmgr.comps[i].info.comp_name) == 0)
- return i;
-
- return -1;
-}
-
-static int get_conn_by_fd(int fd,
- enum comp_id id,
- struct conn * conn)
-{
- struct list_head * p;
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_for_each(p, &connmgr.comps[id].conns) {
- struct conn_el * c =
- list_entry(p, struct conn_el, next);
- if (c->conn.flow_info.fd == fd) {
- *conn = c->conn;
- pthread_mutex_unlock(&connmgr.comps[id].lock);
- return 0;
- }
- }
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return -1;
-}
-
-static int add_comp_conn(enum comp_id id,
- int fd,
- qosspec_t qs,
- struct conn_info * rcv_info)
-{
- struct conn_el * el;
-
- el = malloc(sizeof(*el));
- if (el == NULL) {
- log_err("Not enough memory.");
- return -1;
- }
-
- el->conn.conn_info = *rcv_info;
- el->conn.flow_info.fd = fd;
- el->conn.flow_info.qs = qs;
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_add(&el->next, &connmgr.comps[id].pending);
- pthread_cond_signal(&connmgr.comps[id].cond);
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return 0;
-}
-
-static void * flow_acceptor(void * o)
-{
- int fd;
- qosspec_t qs;
- struct conn_info rcv_info;
- struct conn_info fail_info;
-
- (void) o;
-
- memset(&fail_info, 0, sizeof(fail_info));
-
- while (true) {
- int id;
-
- fd = flow_accept(&qs, NULL);
- if (fd < 0) {
- if (fd != -EIRMD)
- log_warn("Flow accept failed: %d", fd);
- continue;
- }
-
- if (cacep_rcv(fd, &rcv_info)) {
- log_dbg("Error establishing application connection.");
- flow_dealloc(fd);
- continue;
- }
-
- id = get_id_by_name(rcv_info.comp_name);
- if (id < 0) {
- log_dbg("Connection request for unknown component %s.",
- rcv_info.comp_name);
- cacep_snd(fd, &fail_info);
- flow_dealloc(fd);
- continue;
- }
-
- assert(id < COMPID_MAX);
-
- if (cacep_snd(fd, &connmgr.comps[id].info)) {
- log_dbg("Failed to respond to request.");
- flow_dealloc(fd);
- continue;
- }
-
- if (add_comp_conn(id, fd, qs, &rcv_info)) {
- log_dbg("Failed to add new connection.");
- flow_dealloc(fd);
- continue;
- }
- }
-
- return (void *) 0;
-}
-
-static void handle_event(void * self,
- int event,
- const void * o)
-{
- struct conn conn;
-
- (void) self;
-
- if (!(event == NOTIFY_DT_FLOW_UP ||
- event == NOTIFY_DT_FLOW_DOWN ||
- event == NOTIFY_DT_FLOW_DEALLOC))
- return;
-
- if (get_conn_by_fd(*((int *) o), COMPID_DT, &conn))
- return;
-
- switch (event) {
- case NOTIFY_DT_FLOW_UP:
- notifier_event(NOTIFY_DT_CONN_UP, &conn);
- break;
- case NOTIFY_DT_FLOW_DOWN:
- notifier_event(NOTIFY_DT_CONN_DOWN, &conn);
- break;
- case NOTIFY_DT_FLOW_DEALLOC:
- notifier_event(NOTIFY_DT_CONN_DEL, &conn);
- break;
- default:
- break;
- }
-}
-
-int connmgr_init(void)
-{
- connmgr.state = CONNMGR_INIT;
-
- if (notifier_reg(handle_event, NULL))
- return -1;
-
- return 0;
-}
-
-void connmgr_fini(void)
-{
- int i;
-
- notifier_unreg(handle_event);
-
- if (connmgr.state == CONNMGR_RUNNING)
- pthread_join(connmgr.acceptor, NULL);
-
- for (i = 0; i < COMPID_MAX; ++i)
- connmgr_comp_fini(i);
-}
-
-int connmgr_start(void)
-{
- if (pthread_create(&connmgr.acceptor, NULL, flow_acceptor, NULL))
- return -1;
-
- connmgr.state = CONNMGR_RUNNING;
-
- return 0;
-}
-
-void connmgr_stop(void)
-{
- if (connmgr.state == CONNMGR_RUNNING)
- pthread_cancel(connmgr.acceptor);
-}
-
-int connmgr_comp_init(enum comp_id id,
- const struct conn_info * info)
-{
- struct comp * comp;
-
- assert(id >= 0 && id < COMPID_MAX);
-
- comp = connmgr.comps + id;
-
- if (pthread_mutex_init(&comp->lock, NULL))
- return -1;
-
- if (pthread_cond_init(&comp->cond, NULL)) {
- pthread_mutex_destroy(&comp->lock);
- return -1;
- }
-
- list_head_init(&comp->conns);
- list_head_init(&comp->pending);
-
- memcpy(&connmgr.comps[id].info, info, sizeof(connmgr.comps[id].info));
-
- return 0;
-}
-
-void connmgr_comp_fini(enum comp_id id)
-{
- struct list_head * p;
- struct list_head * h;
- struct comp * comp;
-
- assert(id >= 0 && id < COMPID_MAX);
-
- if (strlen(connmgr.comps[id].info.comp_name) == 0)
- return;
-
- comp = connmgr.comps + id;
-
- pthread_mutex_lock(&comp->lock);
-
- list_for_each_safe(p, h, &comp->conns) {
- struct conn_el * e = list_entry(p, struct conn_el, next);
- list_del(&e->next);
- free(e);
- }
-
- list_for_each_safe(p, h, &comp->pending) {
- struct conn_el * e = list_entry(p, struct conn_el, next);
- list_del(&e->next);
- free(e);
- }
-
- pthread_mutex_unlock(&comp->lock);
-
- pthread_cond_destroy(&comp->cond);
- pthread_mutex_destroy(&comp->lock);
-
- memset(&connmgr.comps[id].info, 0, sizeof(connmgr.comps[id].info));
-}
-
-int connmgr_ipcp_connect(const char * dst,
- const char * component,
- qosspec_t qs)
-{
- struct conn_el * ce;
- int id;
-
- assert(dst);
- assert(component);
-
- ce = malloc(sizeof(*ce));
- if (ce == NULL) {
- log_dbg("Out of memory.");
- return -1;
- }
-
- id = get_id_by_name(component);
- if (id < 0) {
- log_dbg("No such component: %s", component);
- free(ce);
- return -1;
- }
-
- if (connmgr_alloc(id, dst, &qs, &ce->conn)) {
- free(ce);
- return -1;
- }
-
- if (strlen(dst) > DST_MAX_STRLEN) {
- log_warn("Truncating dst length for connection.");
- memcpy(ce->conn.flow_info.dst, dst, DST_MAX_STRLEN);
- ce->conn.flow_info.dst[DST_MAX_STRLEN] = '\0';
- } else {
- strcpy(ce->conn.flow_info.dst, dst);
- }
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_add(&ce->next, &connmgr.comps[id].conns);
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return 0;
-}
-
-int connmgr_ipcp_disconnect(const char * dst,
- const char * component)
-{
- struct list_head * p;
- struct list_head * h;
- int id;
-
- assert(dst);
- assert(component);
-
- id = get_id_by_name(component);
- if (id < 0)
- return -1;
-
- pthread_mutex_lock(&connmgr.comps[id].lock);
-
- list_for_each_safe(p,h, &connmgr.comps[id].conns) {
- struct conn_el * el = list_entry(p, struct conn_el, next);
- if (strcmp(el->conn.flow_info.dst, dst) == 0) {
- int ret;
- pthread_mutex_unlock(&connmgr.comps[id].lock);
- list_del(&el->next);
- ret = connmgr_dealloc(id, &el->conn);
- free(el);
- return ret;
- }
- }
-
- pthread_mutex_unlock(&connmgr.comps[id].lock);
-
- return 0;
-}
-
-int connmgr_alloc(enum comp_id id,
- const char * dst,
- qosspec_t * qs,
- struct conn * conn)
-{
- assert(id >= 0 && id < COMPID_MAX);
- assert(dst);
-
- conn->flow_info.fd = flow_alloc(dst, qs, NULL);
- if (conn->flow_info.fd < 0) {
- log_dbg("Failed to allocate flow to %s.", dst);
- return -1;
- }
-
- if (qs != NULL)
- conn->flow_info.qs = *qs;
- else
- memset(&conn->flow_info.qs, 0, sizeof(conn->flow_info.qs));
-
- log_dbg("Sending cacep info for protocol %s to fd %d.",
- connmgr.comps[id].info.protocol, conn->flow_info.fd);
-
- if (cacep_snd(conn->flow_info.fd, &connmgr.comps[id].info)) {
- log_dbg("Failed to create application connection.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (cacep_rcv(conn->flow_info.fd, &conn->conn_info)) {
- log_dbg("Failed to connect to application.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (strcmp(connmgr.comps[id].info.protocol, conn->conn_info.protocol)) {
- log_dbg("Unknown protocol (requested %s, got %s).",
- connmgr.comps[id].info.protocol,
- conn->conn_info.protocol);
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (connmgr.comps[id].info.pref_version !=
- conn->conn_info.pref_version) {
- log_dbg("Unknown protocol version.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- if (connmgr.comps[id].info.pref_syntax != conn->conn_info.pref_syntax) {
- log_dbg("Unknown protocol syntax.");
- flow_dealloc(conn->flow_info.fd);
- return -1;
- }
-
- switch (id) {
- case COMPID_DT:
- notifier_event(NOTIFY_DT_CONN_ADD, conn);
-#ifdef IPCP_CONN_WAIT_DIR
- dir_wait_running();
-#endif
- break;
- case COMPID_MGMT:
- notifier_event(NOTIFY_MGMT_CONN_ADD, conn);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-int connmgr_dealloc(enum comp_id id,
- struct conn * conn)
-{
- switch (id) {
- case COMPID_DT:
- notifier_event(NOTIFY_DT_CONN_DEL, conn);
- break;
- case COMPID_MGMT:
- notifier_event(NOTIFY_MGMT_CONN_DEL, conn);
- break;
- default:
- break;
- }
-
- return flow_dealloc(conn->flow_info.fd);
-}
-
-
-int connmgr_wait(enum comp_id id,
- struct conn * conn)
-{
- struct conn_el * el;
- struct comp * comp;
-
- assert(id >= 0 && id < COMPID_MAX);
- assert(conn);
-
- comp = connmgr.comps + id;
-
- pthread_mutex_lock(&comp->lock);
-
- pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
- (void *) &comp->lock);
-
- while (list_is_empty(&comp->pending))
- pthread_cond_wait(&comp->cond, &comp->lock);
-
- pthread_cleanup_pop(false);
-
- el = list_first_entry((&comp->pending), struct conn_el, next);
- if (el == NULL) {
- pthread_mutex_unlock(&comp->lock);
- return -1;
- }
-
- *conn = el->conn;
-
- list_del(&el->next);
- list_add(&el->next, &connmgr.comps[id].conns);
- pthread_mutex_unlock(&comp->lock);
-
- return 0;
-}
+#include <ouroboros/ipcp.h>
+
+#define BUILD_IPCP_UNICAST
+
+#include "common/connmgr.c"
diff --git a/src/ipcpd/unicast/dht.c b/src/ipcpd/unicast/dht.c
deleted file mode 100644
index 8555312e..00000000
--- a/src/ipcpd/unicast/dht.c
+++ /dev/null
@@ -1,2840 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Distributed Hash Table based on Kademlia
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#if defined(__linux__) || defined(__CYGWIN__)
-#define _DEFAULT_SOURCE
-#else
-#define _POSIX_C_SOURCE 200112L
-#endif
-
-#include "config.h"
-
-#define DHT "dht"
-#define OUROBOROS_PREFIX DHT
-
-#include <ouroboros/hash.h>
-#include <ouroboros/ipcp-dev.h>
-#include <ouroboros/bitmap.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/list.h>
-#include <ouroboros/notifier.h>
-#include <ouroboros/random.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/tpm.h>
-#include <ouroboros/utils.h>
-
-#include "connmgr.h"
-#include "dht.h"
-#include "dt.h"
-
-#include <pthread.h>
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <inttypes.h>
-#include <limits.h>
-
-#include "kademlia.pb-c.h"
-typedef KadMsg kad_msg_t;
-typedef KadContactMsg kad_contact_msg_t;
-
-#ifndef CLOCK_REALTIME_COARSE
-#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
-#endif
-
-#define DHT_MAX_REQS 2048 /* KAD recommends rnd(), bmp can be changed. */
-#define KAD_ALPHA 3 /* Parallel factor, proven optimal value. */
-#define KAD_K 8 /* Replication factor, MDHT value. */
-#define KAD_T_REPL 900 /* Replication time, tied to k. MDHT value. */
-#define KAD_T_REFR 900 /* Refresh time stale bucket, MDHT value. */
-#define KAD_T_JOIN 8 /* Response time to wait for a join. */
-#define KAD_T_RESP 5 /* Response time to wait for a response. */
-#define KAD_R_PING 2 /* Ping retries before declaring peer dead. */
-#define KAD_QUEER 15 /* Time to declare peer questionable. */
-#define KAD_BETA 8 /* Bucket split factor, must be 1, 2, 4 or 8. */
-#define KAD_RESP_RETR 6 /* Number of retries on sending a response. */
-#define KAD_JOIN_RETR 8 /* Number of retries sending a join. */
-#define KAD_JOIN_INTV 1 /* Time (seconds) between join retries. */
-#define HANDLE_TIMEO 1000 /* Timeout for dht_handle_packet tpm check (ms) */
-#define DHT_RETR_ADDR 1 /* Number of addresses to return on retrieve */
-
-enum dht_state {
- DHT_INIT = 0,
- DHT_SHUTDOWN,
- DHT_JOINING,
- DHT_RUNNING,
-};
-
-enum kad_code {
- KAD_JOIN = 0,
- KAD_FIND_NODE,
- KAD_FIND_VALUE,
- /* Messages without a response below. */
- KAD_STORE,
- KAD_RESPONSE
-};
-
-enum kad_req_state {
- REQ_NULL = 0,
- REQ_INIT,
- REQ_PENDING,
- REQ_RESPONSE,
- REQ_DONE,
- REQ_DESTROY
-};
-
-enum lookup_state {
- LU_NULL = 0,
- LU_INIT,
- LU_PENDING,
- LU_UPDATE,
- LU_COMPLETE,
- LU_DESTROY
-};
-
-struct kad_req {
- struct list_head next;
-
- uint32_t cookie;
- enum kad_code code;
- uint8_t * key;
- uint64_t addr;
-
- enum kad_req_state state;
- pthread_cond_t cond;
- pthread_mutex_t lock;
-
- time_t t_exp;
-};
-
-struct cookie_el {
- struct list_head next;
-
- uint32_t cookie;
-};
-
-struct lookup {
- struct list_head next;
-
- struct list_head cookies;
-
- uint8_t * key;
-
- struct list_head contacts;
- size_t n_contacts;
-
- uint64_t * addrs;
- size_t n_addrs;
-
- enum lookup_state state;
- pthread_cond_t cond;
- pthread_mutex_t lock;
-};
-
-struct val {
- struct list_head next;
-
- uint64_t addr;
-
- time_t t_exp;
- time_t t_rep;
-};
-
-struct ref_entry {
- struct list_head next;
-
- uint8_t * key;
-
- time_t t_rep;
-};
-
-struct dht_entry {
- struct list_head next;
-
- uint8_t * key;
- size_t n_vals;
- struct list_head vals;
-};
-
-struct contact {
- struct list_head next;
-
- uint8_t * id;
- uint64_t addr;
-
- size_t fails;
- time_t t_seen;
-};
-
-struct bucket {
- struct list_head contacts;
- size_t n_contacts;
-
- struct list_head alts;
- size_t n_alts;
-
- time_t t_refr;
-
- size_t depth;
- uint8_t mask;
-
- struct bucket * parent;
- struct bucket * children[1L << KAD_BETA];
-};
-
-struct cmd {
- struct list_head next;
-
- struct shm_du_buff * sdb;
-};
-
-struct dht {
- size_t alpha;
- size_t b;
- size_t k;
-
- time_t t_expire;
- time_t t_refresh;
- time_t t_replic;
- time_t t_repub;
-
- uint8_t * id;
- uint64_t addr;
-
- struct bucket * buckets;
-
- struct list_head entries;
-
- struct list_head refs;
-
- struct list_head lookups;
-
- struct list_head requests;
- struct bmp * cookies;
-
- enum dht_state state;
- struct list_head cmds;
- pthread_cond_t cond;
- pthread_mutex_t mtx;
-
- pthread_rwlock_t lock;
-
- int fd;
-
- struct tpm * tpm;
-
- pthread_t worker;
-};
-
-struct join_info {
- struct dht * dht;
- uint64_t addr;
-};
-
-struct packet_info {
- struct dht * dht;
- struct shm_du_buff * sdb;
-};
-
-static uint8_t * dht_dup_key(const uint8_t * key,
- size_t len)
-{
- uint8_t * dup;
-
- dup = malloc(sizeof(*dup) * len);
- if (dup == NULL)
- return NULL;
-
- memcpy(dup, key, len);
-
- return dup;
-}
-
-static enum dht_state dht_get_state(struct dht * dht)
-{
- enum dht_state state;
-
- pthread_mutex_lock(&dht->mtx);
-
- state = dht->state;
-
- pthread_mutex_unlock(&dht->mtx);
-
- return state;
-}
-
-static int dht_set_state(struct dht * dht,
- enum dht_state state)
-{
- pthread_mutex_lock(&dht->mtx);
-
- if (state == DHT_JOINING && dht->state != DHT_INIT) {
- pthread_mutex_unlock(&dht->mtx);
- return -1;
- }
-
- dht->state = state;
-
- pthread_cond_broadcast(&dht->cond);
-
- pthread_mutex_unlock(&dht->mtx);
-
- return 0;
-}
-
-int dht_wait_running(struct dht * dht)
-{
- int ret = 0;
-
- pthread_mutex_lock(&dht->mtx);
-
- pthread_cleanup_push((void *)(void *) pthread_mutex_unlock,
- &dht->mtx);
-
- while (dht->state == DHT_JOINING)
- pthread_cond_wait(&dht->cond, &dht->mtx);
-
- if (dht->state != DHT_RUNNING)
- ret = -1;
-
- pthread_cleanup_pop(true);
-
- return ret;
-}
-
-static uint8_t * create_id(size_t len)
-{
- uint8_t * id;
-
- id = malloc(len);
- if (id == NULL)
- return NULL;
-
- if (random_buffer(id, len) < 0) {
- free(id);
- return NULL;
- }
-
- return id;
-}
-
-static void kad_req_create(struct dht * dht,
- kad_msg_t * msg,
- uint64_t addr)
-{
- struct kad_req * req;
- pthread_condattr_t cattr;
- struct timespec t;
- size_t b;
-
- req = malloc(sizeof(*req));
- if (req == NULL)
- return;
-
- list_head_init(&req->next);
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
- req->t_exp = t.tv_sec + KAD_T_RESP;
- req->addr = addr;
- req->state = REQ_INIT;
- req->cookie = msg->cookie;
- req->code = msg->code;
- req->key = NULL;
-
- pthread_rwlock_rdlock(&dht->lock);
- b = dht->b;
- pthread_rwlock_unlock(&dht->lock);
-
- if (msg->has_key) {
- req->key = dht_dup_key(msg->key.data, b);
- if (req->key == NULL) {
- free(req);
- return;
- }
- }
-
- if (pthread_mutex_init(&req->lock, NULL)) {
- free(req->key);
- free(req);
- return;
- }
-
- pthread_condattr_init(&cattr);
-#ifndef __APPLE__
- pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
-#endif
-
- if (pthread_cond_init(&req->cond, &cattr)) {
- pthread_condattr_destroy(&cattr);
- pthread_mutex_destroy(&req->lock);
- free(req->key);
- free(req);
- return;
- }
-
- pthread_condattr_destroy(&cattr);
-
- pthread_rwlock_wrlock(&dht->lock);
-
- list_add(&req->next, &dht->requests);
-
- pthread_rwlock_unlock(&dht->lock);
-}
-
-static void cancel_req_destroy(void * o)
-{
- struct kad_req * req = (struct kad_req *) o;
-
- pthread_mutex_unlock(&req->lock);
-
- pthread_cond_destroy(&req->cond);
- pthread_mutex_destroy(&req->lock);
-
- if (req->key != NULL)
- free(req->key);
-
- free(req);
-}
-
-static void kad_req_destroy(struct kad_req * req)
-{
- assert(req);
-
- pthread_mutex_lock(&req->lock);
-
- switch (req->state) {
- case REQ_DESTROY:
- pthread_mutex_unlock(&req->lock);
- return;
- case REQ_PENDING:
- req->state = REQ_DESTROY;
- pthread_cond_signal(&req->cond);
- break;
- case REQ_INIT:
- case REQ_DONE:
- req->state = REQ_NULL;
- break;
- case REQ_RESPONSE:
- case REQ_NULL:
- default:
- break;
- }
-
- pthread_cleanup_push(cancel_req_destroy, req);
-
- while (req->state != REQ_NULL && req->state != REQ_DONE)
- pthread_cond_wait(&req->cond, &req->lock);
-
- pthread_cleanup_pop(true);
-}
-
-static int kad_req_wait(struct kad_req * req,
- time_t t)
-{
- struct timespec timeo = {t, 0};
- struct timespec abs;
- int ret = 0;
-
- assert(req);
-
- clock_gettime(PTHREAD_COND_CLOCK, &abs);
-
- ts_add(&abs, &timeo, &abs);
-
- pthread_mutex_lock(&req->lock);
-
- req->state = REQ_PENDING;
-
- pthread_cleanup_push((void *)(void *) pthread_mutex_unlock,
- &req->lock);
-
- while (req->state == REQ_PENDING && ret != -ETIMEDOUT)
- ret = -pthread_cond_timedwait(&req->cond, &req->lock, &abs);
-
- switch(req->state) {
- case REQ_DESTROY:
- ret = -1;
- req->state = REQ_NULL;
- pthread_cond_signal(&req->cond);
- break;
- case REQ_PENDING: /* ETIMEDOUT */
- case REQ_RESPONSE:
- req->state = REQ_DONE;
- pthread_cond_broadcast(&req->cond);
- break;
- default:
- break;
- }
-
- pthread_cleanup_pop(true);
-
- return ret;
-}
-
-static void kad_req_respond(struct kad_req * req)
-{
- pthread_mutex_lock(&req->lock);
-
- req->state = REQ_RESPONSE;
- pthread_cond_signal(&req->cond);
-
- pthread_mutex_unlock(&req->lock);
-}
-
-static struct contact * contact_create(const uint8_t * id,
- size_t len,
- uint64_t addr)
-{
- struct contact * c;
- struct timespec t;
-
- c = malloc(sizeof(*c));
- if (c == NULL)
- return NULL;
-
- list_head_init(&c->next);
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
- c->addr = addr;
- c->fails = 0;
- c->t_seen = t.tv_sec;
- c->id = dht_dup_key(id, len);
- if (c->id == NULL) {
- free(c);
- return NULL;
- }
-
- return c;
-}
-
-static void contact_destroy(struct contact * c)
-{
- if (c != NULL)
- free(c->id);
-
- free(c);
-}
-
-static struct bucket * iter_bucket(struct bucket * b,
- const uint8_t * id)
-{
- uint8_t byte;
- uint8_t mask;
-
- assert(b);
-
- if (b->children[0] == NULL)
- return b;
-
- byte = id[(b->depth * KAD_BETA) / CHAR_BIT];
-
- mask = ((1L << KAD_BETA) - 1) & 0xFF;
-
- byte >>= (CHAR_BIT - KAD_BETA) -
- (((b->depth) * KAD_BETA) & (CHAR_BIT - 1));
-
- return iter_bucket(b->children[(byte & mask)], id);
-}
-
-static struct bucket * dht_get_bucket(struct dht * dht,
- const uint8_t * id)
-{
- assert(dht->buckets);
-
- return iter_bucket(dht->buckets, id);
-}
-
-/*
- * If someone builds a network where the n (n > k) closest nodes all
- * have IDs starting with the same 64 bits: by all means, change this.
- */
-static uint64_t dist(const uint8_t * src,
- const uint8_t * dst)
-{
- return betoh64(*((uint64_t *) src) ^ *((uint64_t *) dst));
-}
-
-static size_t list_add_sorted(struct list_head * l,
- struct contact * c,
- const uint8_t * key)
-{
- struct list_head * p;
-
- assert(l);
- assert(c);
- assert(key);
- assert(c->id);
-
- list_for_each(p, l) {
- struct contact * e = list_entry(p, struct contact, next);
- if (dist(c->id, key) > dist(e->id, key))
- break;
- }
-
- list_add_tail(&c->next, p);
-
- return 1;
-}
-
-static size_t dht_contact_list(struct dht * dht,
- struct list_head * l,
- const uint8_t * key)
-{
- struct list_head * p;
- struct bucket * b;
- size_t len = 0;
- size_t i;
- struct timespec t;
-
- assert(l);
- assert(dht);
- assert(key);
- assert(list_is_empty(l));
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
- b = dht_get_bucket(dht, key);
- if (b == NULL)
- return 0;
-
- b->t_refr = t.tv_sec + KAD_T_REFR;
-
- if (b->n_contacts == dht->k || b->parent == NULL) {
- list_for_each(p, &b->contacts) {
- struct contact * c;
- c = list_entry(p, struct contact, next);
- c = contact_create(c->id, dht->b, c->addr);
- if (list_add_sorted(l, c, key) == 1)
- if (++len == dht->k)
- break;
- }
- } else {
- struct bucket * d = b->parent;
- for (i = 0; i < (1L << KAD_BETA) && len < dht->k; ++i) {
- list_for_each(p, &d->children[i]->contacts) {
- struct contact * c;
- c = list_entry(p, struct contact, next);
- c = contact_create(c->id, dht->b, c->addr);
- if (c == NULL)
- continue;
- if (list_add_sorted(l, c, key) == 1)
- if (++len == dht->k)
- break;
- }
- }
- }
-
- assert(len == dht->k || b->parent == NULL);
-
- return len;
-}
-
-static struct lookup * lookup_create(struct dht * dht,
- const uint8_t * id)
-{
- struct lookup * lu;
- pthread_condattr_t cattr;
-
- assert(dht);
- assert(id);
-
- lu = malloc(sizeof(*lu));
- if (lu == NULL)
- goto fail_malloc;
-
- list_head_init(&lu->contacts);
- list_head_init(&lu->cookies);
-
- lu->state = LU_INIT;
- lu->addrs = NULL;
- lu->n_addrs = 0;
- lu->key = dht_dup_key(id, dht->b);
- if (lu->key == NULL)
- goto fail_id;
-
- if (pthread_mutex_init(&lu->lock, NULL))
- goto fail_mutex;
-
- pthread_condattr_init(&cattr);
-#ifndef __APPLE__
- pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
-#endif
-
- if (pthread_cond_init(&lu->cond, &cattr))
- goto fail_cond;
-
- pthread_condattr_destroy(&cattr);
-
- pthread_rwlock_wrlock(&dht->lock);
-
- list_add(&lu->next, &dht->lookups);
-
- lu->n_contacts = dht_contact_list(dht, &lu->contacts, id);
-
- pthread_rwlock_unlock(&dht->lock);
-
- return lu;
-
- fail_cond:
- pthread_condattr_destroy(&cattr);
- pthread_mutex_destroy(&lu->lock);
- fail_mutex:
- free(lu->key);
- fail_id:
- free(lu);
- fail_malloc:
- return NULL;
-}
-
-static void cancel_lookup_destroy(void * o)
-{
- struct lookup * lu;
- struct list_head * p;
- struct list_head * h;
-
- lu = (struct lookup *) o;
-
- if (lu->key != NULL)
- free(lu->key);
- if (lu->addrs != NULL)
- free(lu->addrs);
-
- list_for_each_safe(p, h, &lu->contacts) {
- struct contact * c = list_entry(p, struct contact, next);
- list_del(&c->next);
- contact_destroy(c);
- }
-
- list_for_each_safe(p, h, &lu->cookies) {
- struct cookie_el * c = list_entry(p, struct cookie_el, next);
- list_del(&c->next);
- free(c);
- }
-
- pthread_mutex_unlock(&lu->lock);
-
- pthread_mutex_destroy(&lu->lock);
-
- free(lu);
-}
-
-static void lookup_destroy(struct lookup * lu)
-{
- assert(lu);
-
- pthread_mutex_lock(&lu->lock);
-
- switch (lu->state) {
- case LU_DESTROY:
- pthread_mutex_unlock(&lu->lock);
- return;
- case LU_PENDING:
- lu->state = LU_DESTROY;
- pthread_cond_broadcast(&lu->cond);
- break;
- case LU_INIT:
- case LU_UPDATE:
- case LU_COMPLETE:
- lu->state = LU_NULL;
- break;
- case LU_NULL:
- default:
- break;
- }
-
- pthread_cleanup_push(cancel_lookup_destroy, lu);
-
- while (lu->state != LU_NULL)
- pthread_cond_wait(&lu->cond, &lu->lock);
-
- pthread_cleanup_pop(true);
-}
-
-static void lookup_update(struct dht * dht,
- struct lookup * lu,
- kad_msg_t * msg)
-{
- struct list_head * p = NULL;
- struct list_head * h;
- struct contact * c = NULL;
- size_t n;
- size_t pos = 0;
- bool mod = false;
-
- assert(lu);
- assert(msg);
-
- if (dht_get_state(dht) != DHT_RUNNING)
- return;
-
- pthread_mutex_lock(&lu->lock);
-
- list_for_each_safe(p, h, &lu->cookies) {
- struct cookie_el * e = list_entry(p, struct cookie_el, next);
- if (e->cookie == msg->cookie) {
- list_del(&e->next);
- free(e);
- break;
- }
- }
-
- if (lu->state == LU_COMPLETE) {
- pthread_mutex_unlock(&lu->lock);
- return;
- }
-
- if (msg->n_addrs > 0) {
- if (lu->addrs == NULL) {
- lu->addrs = malloc(sizeof(*lu->addrs) * msg->n_addrs);
- for (n = 0; n < msg->n_addrs; ++n)
- lu->addrs[n] = msg->addrs[n];
- lu->n_addrs = msg->n_addrs;
- }
-
- lu->state = LU_COMPLETE;
- pthread_cond_broadcast(&lu->cond);
- pthread_mutex_unlock(&lu->lock);
- return;
- }
-
- pthread_cleanup_push((void *)(void *) pthread_mutex_unlock,
- &lu->lock);
-
- while (lu->state == LU_INIT) {
- pthread_rwlock_unlock(&dht->lock);
- pthread_cond_wait(&lu->cond, &lu->lock);
- pthread_rwlock_rdlock(&dht->lock);
- }
-
- pthread_cleanup_pop(false);
-
- for (n = 0; n < msg->n_contacts; ++n) {
- c = contact_create(msg->contacts[n]->id.data,
- dht->b, msg->contacts[n]->addr);
- if (c == NULL)
- continue;
-
- pos = 0;
-
- list_for_each(p, &lu->contacts) {
- struct contact * e;
- e = list_entry(p, struct contact, next);
- if (!memcmp(e->id, c->id, dht->b)) {
- contact_destroy(c);
- c = NULL;
- break;
- }
-
- if (dist(c->id, lu->key) > dist(e->id, lu->key))
- break;
-
- pos++;
- }
-
- if (c == NULL)
- continue;
-
- if (lu->n_contacts < dht->k) {
- list_add_tail(&c->next, p);
- ++lu->n_contacts;
- mod = true;
- } else if (pos == dht->k) {
- contact_destroy(c);
- } else {
- struct contact * d;
- list_add_tail(&c->next, p);
- d = list_last_entry(&lu->contacts,
- struct contact, next);
- list_del(&d->next);
- assert(lu->contacts.prv != &d->next);
- contact_destroy(d);
- mod = true;
- }
- }
-
- if (list_is_empty(&lu->cookies) && !mod)
- lu->state = LU_COMPLETE;
- else
- lu->state = LU_UPDATE;
-
- pthread_cond_broadcast(&lu->cond);
- pthread_mutex_unlock(&lu->lock);
- return;
-}
-
-static ssize_t lookup_get_addrs(struct lookup * lu,
- uint64_t * addrs)
-{
- ssize_t n;
-
- assert(lu);
-
- pthread_mutex_lock(&lu->lock);
-
- for (n = 0; (size_t) n < lu->n_addrs; ++n)
- addrs[n] = lu->addrs[n];
-
- assert((size_t) n == lu->n_addrs);
-
- pthread_mutex_unlock(&lu->lock);
-
- return n;
-}
-
-static ssize_t lookup_contact_addrs(struct lookup * lu,
- uint64_t * addrs)
-{
- struct list_head * p;
- ssize_t n = 0;
-
- assert(lu);
- assert(addrs);
-
- pthread_mutex_lock(&lu->lock);
-
- list_for_each(p, &lu->contacts) {
- struct contact * c = list_entry(p, struct contact, next);
- addrs[n] = c->addr;
- n++;
- }
-
- pthread_mutex_unlock(&lu->lock);
-
- return n;
-}
-
-static void lookup_new_addrs(struct lookup * lu,
- uint64_t * addrs)
-{
- struct list_head * p;
- size_t n = 0;
-
- assert(lu);
- assert(addrs);
-
- pthread_mutex_lock(&lu->lock);
-
- /* Uses fails to check if the contact has been contacted. */
- list_for_each(p, &lu->contacts) {
- struct contact * c = list_entry(p, struct contact, next);
- if (c->fails == 0) {
- c->fails = 1;
- addrs[n] = c->addr;
- n++;
- }
-
- if (n == KAD_ALPHA)
- break;
- }
-
- assert(n <= KAD_ALPHA);
-
- addrs[n] = 0;
-
- pthread_mutex_unlock(&lu->lock);
-}
-
-static void lookup_set_state(struct lookup * lu,
- enum lookup_state state)
-{
- pthread_mutex_lock(&lu->lock);
-
- lu->state = state;
- pthread_cond_broadcast(&lu->cond);
-
- pthread_mutex_unlock(&lu->lock);
-}
-
-static void cleanup_wait(void * o)
-{
- struct lookup * lu = (struct lookup *) o;
- lu->state = LU_NULL;
- pthread_mutex_unlock(&lu->lock);
- lookup_destroy(lu);
-}
-
-static enum lookup_state lookup_wait(struct lookup * lu)
-{
- struct timespec timeo = {KAD_T_RESP, 0};
- struct timespec abs;
- enum lookup_state state;
- int ret = 0;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abs);
-
- ts_add(&abs, &timeo, &abs);
-
- pthread_mutex_lock(&lu->lock);
-
- if (lu->state == LU_INIT || lu->state == LU_UPDATE)
- lu->state = LU_PENDING;
-
- pthread_cleanup_push(cleanup_wait, lu);
-
- while (lu->state == LU_PENDING && ret != -ETIMEDOUT)
- ret = -pthread_cond_timedwait(&lu->cond, &lu->lock, &abs);
-
- pthread_cleanup_pop(false);
-
- if (ret == -ETIMEDOUT)
- lu->state = LU_COMPLETE;
-
- state = lu->state;
-
- pthread_mutex_unlock(&lu->lock);
-
- return state;
-}
-
-static struct kad_req * dht_find_request(struct dht * dht,
- kad_msg_t * msg)
-{
- struct list_head * p;
-
- assert(dht);
- assert(msg);
-
- list_for_each(p, &dht->requests) {
- struct kad_req * r = list_entry(p, struct kad_req, next);
- if (r->cookie == msg->cookie)
- return r;
- }
-
- return NULL;
-}
-
-static struct lookup * dht_find_lookup(struct dht * dht,
- uint32_t cookie)
-{
- struct list_head * p;
- struct list_head * p2;
- struct list_head * h2;
-
- assert(dht);
- assert(cookie > 0);
-
- list_for_each(p, &dht->lookups) {
- struct lookup * l = list_entry(p, struct lookup, next);
- pthread_mutex_lock(&l->lock);
- list_for_each_safe(p2, h2, &l->cookies) {
- struct cookie_el * e;
- e = list_entry(p2, struct cookie_el, next);
- if (e->cookie == cookie) {
- list_del(&e->next);
- free(e);
- pthread_mutex_unlock(&l->lock);
- return l;
- }
- }
- pthread_mutex_unlock(&l->lock);
- }
-
- return NULL;
-}
-
-static struct val * val_create(uint64_t addr,
- time_t exp)
-{
- struct val * v;
- struct timespec t;
-
- v = malloc(sizeof(*v));
- if (v == NULL)
- return NULL;
-
- list_head_init(&v->next);
- v->addr = addr;
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
- v->t_exp = t.tv_sec + exp;
- v->t_rep = t.tv_sec + KAD_T_REPL;
-
- return v;
-}
-
-static void val_destroy(struct val * v)
-{
- assert(v);
-
- free(v);
-}
-
-static struct ref_entry * ref_entry_create(struct dht * dht,
- const uint8_t * key)
-{
- struct ref_entry * e;
- struct timespec t;
-
- assert(dht);
- assert(key);
-
- e = malloc(sizeof(*e));
- if (e == NULL)
- return NULL;
-
- e->key = dht_dup_key(key, dht->b);
- if (e->key == NULL) {
- free(e);
- return NULL;
- }
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
- e->t_rep = t.tv_sec + dht->t_repub;
-
- return e;
-}
-
-static void ref_entry_destroy(struct ref_entry * e)
-{
- free(e->key);
- free(e);
-}
-
-static struct dht_entry * dht_entry_create(struct dht * dht,
- const uint8_t * key)
-{
- struct dht_entry * e;
-
- assert(dht);
- assert(key);
-
- e = malloc(sizeof(*e));
- if (e == NULL)
- return NULL;
-
- list_head_init(&e->next);
- list_head_init(&e->vals);
-
- e->n_vals = 0;
-
- e->key = dht_dup_key(key, dht->b);
- if (e->key == NULL) {
- free(e);
- return NULL;
- }
-
- return e;
-}
-
-static void dht_entry_destroy(struct dht_entry * e)
-{
- struct list_head * p;
- struct list_head * h;
-
- assert(e);
-
- list_for_each_safe(p, h, &e->vals) {
- struct val * v = list_entry(p, struct val, next);
- list_del(&v->next);
- val_destroy(v);
- }
-
- free(e->key);
-
- free(e);
-}
-
-static int dht_entry_add_addr(struct dht_entry * e,
- uint64_t addr,
- time_t exp)
-{
- struct list_head * p;
- struct val * val;
- struct timespec t;
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
- list_for_each(p, &e->vals) {
- struct val * v = list_entry(p, struct val, next);
- if (v->addr == addr) {
- if (v->t_exp < t.tv_sec + exp) {
- v->t_exp = t.tv_sec + exp;
- v->t_rep = t.tv_sec + KAD_T_REPL;
- }
-
- return 0;
- }
- }
-
- val = val_create(addr, exp);
- if (val == NULL)
- return -ENOMEM;
-
- list_add(&val->next, &e->vals);
- ++e->n_vals;
-
- return 0;
-}
-
-
-static void dht_entry_del_addr(struct dht_entry * e,
- uint64_t addr)
-{
- struct list_head * p;
- struct list_head * h;
-
- assert(e);
-
- list_for_each_safe(p, h, &e->vals) {
- struct val * v = list_entry(p, struct val, next);
- if (v->addr == addr) {
- list_del(&v->next);
- val_destroy(v);
- --e->n_vals;
- }
- }
-
- if (e->n_vals == 0) {
- list_del(&e->next);
- dht_entry_destroy(e);
- }
-}
-
-static uint64_t dht_entry_get_addr(struct dht * dht,
- struct dht_entry * e)
-{
- struct list_head * p;
-
- assert(e);
- assert(!list_is_empty(&e->vals));
-
- list_for_each(p, &e->vals) {
- struct val * v = list_entry(p, struct val, next);
- if (v->addr != dht->addr)
- return v->addr;
- }
-
- return 0;
-}
-
-/* Forward declaration. */
-static struct lookup * kad_lookup(struct dht * dht,
- const uint8_t * key,
- enum kad_code code);
-
-
-/* Build a refresh list. */
-static void bucket_refresh(struct dht * dht,
- struct bucket * b,
- time_t t,
- struct list_head * r)
-{
- size_t i;
-
- if (*b->children != NULL)
- for (i = 0; i < (1L << KAD_BETA); ++i)
- bucket_refresh(dht, b->children[i], t, r);
-
- if (b->n_contacts == 0)
- return;
-
- if (t > b->t_refr) {
- struct contact * c;
- struct contact * d;
- c = list_first_entry(&b->contacts, struct contact, next);
- d = contact_create(c->id, dht->b, c->addr);
- if (c != NULL)
- list_add(&d->next, r);
- return;
- }
-}
-
-
-static struct bucket * bucket_create(void)
-{
- struct bucket * b;
- struct timespec t;
- size_t i;
-
- b = malloc(sizeof(*b));
- if (b == NULL)
- return NULL;
-
- list_head_init(&b->contacts);
- b->n_contacts = 0;
-
- list_head_init(&b->alts);
- b->n_alts = 0;
-
- clock_gettime(CLOCK_REALTIME_COARSE, &t);
- b->t_refr = t.tv_sec + KAD_T_REFR;
-
- for (i = 0; i < (1L << KAD_BETA); ++i)
- b->children[i] = NULL;
-
- b->parent = NULL;
- b->depth = 0;
-
- return b;
-}
-
-static void bucket_destroy(struct bucket * b)
-{
- struct list_head * p;
- struct list_head * h;
- size_t i;
-
- assert(b);
-
- for (i = 0; i < (1L << KAD_BETA); ++i)
- if (b->children[i] != NULL)
- bucket_destroy(b->children[i]);
-
- list_for_each_safe(p, h, &b->contacts) {
- struct contact * c = list_entry(p, struct contact, next);
- list_del(&c->next);
- contact_destroy(c);
- --b->n_contacts;
- }
-
- list_for_each_safe(p, h, &b->alts) {
- struct contact * c = list_entry(p, struct contact, next);
- list_del(&c->next);
- contact_destroy(c);
- --b->n_contacts;
- }
-
- free(b);
-}
-
-static bool bucket_has_id(struct bucket * b,
- const uint8_t * id)
-{
- uint8_t mask;
- uint8_t byte;
-
- if (b->depth == 0)
- return true;
-
- byte = id[(b->depth * KAD_BETA) / CHAR_BIT];
-
- mask = ((1L << KAD_BETA) - 1) & 0xFF;
-
- byte >>= (CHAR_BIT - KAD_BETA) -
- (((b->depth - 1) * KAD_BETA) & (CHAR_BIT - 1));
-
- return ((byte & mask) == b->mask);
-}
-
-static int split_bucket(struct bucket * b)
-{
- struct list_head * p;
- struct list_head * h;
- uint8_t mask = 0;
- size_t i;
- size_t c;
-
- assert(b);
- assert(b->n_alts == 0);
- assert(b->n_contacts);
- assert(b->children[0] == NULL);
-
- c = b->n_contacts;
-
- for (i = 0; i < (1L << KAD_BETA); ++i) {
- b->children[i] = bucket_create();
- if (b->children[i] == NULL) {
- size_t j;
- for (j = 0; j < i; ++j)
- bucket_destroy(b->children[j]);
- return -1;
- }
-
- b->children[i]->depth = b->depth + 1;
- b->children[i]->mask = mask;
- b->children[i]->parent = b;
-
- list_for_each_safe(p, h, &b->contacts) {
- struct contact * c;
- c = list_entry(p, struct contact, next);
- if (bucket_has_id(b->children[i], c->id)) {
- list_del(&c->next);
- --b->n_contacts;
- list_add(&c->next, &b->children[i]->contacts);
- ++b->children[i]->n_contacts;
- }
- }
-
- mask++;
- }
-
- for (i = 0; i < (1L << KAD_BETA); ++i)
- if (b->children[i]->n_contacts == c)
- split_bucket(b->children[i]);
-
- return 0;
-}
-
-/* Locked externally to mandate update as (final) part of join transaction. */
-static int dht_update_bucket(struct dht * dht,
- const uint8_t * id,
- uint64_t addr)
-{
- struct list_head * p;
- struct list_head * h;
- struct bucket * b;
- struct contact * c;
-
- assert(dht);
-
- b = dht_get_bucket(dht, id);
- if (b == NULL)
- return -1;
-
- c = contact_create(id, dht->b, addr);
- if (c == NULL)
- return -1;
-
- list_for_each_safe(p, h, &b->contacts) {
- struct contact * d = list_entry(p, struct contact, next);
- if (d->addr == addr) {
- list_del(&d->next);
- contact_destroy(d);
- --b->n_contacts;
- }
- }
-
- if (b->n_contacts == dht->k) {
- if (bucket_has_id(b, dht->id)) {
- list_add_tail(&c->next, &b->contacts);
- ++b->n_contacts;
- if (split_bucket(b)) {
- list_del(&c->next);
- contact_destroy(c);
- --b->n_contacts;
- }
- } else if (b->n_alts == dht->k) {
- struct contact * d;
- d = list_first_entry(&b->alts, struct contact, next);
- list_del(&d->next);
- contact_destroy(d);
- list_add_tail(&c->next, &b->alts);
- } else {
- list_add_tail(&c->next, &b->alts);
- ++b->n_alts;
- }
- } else {
- list_add_tail(&c->next, &b->contacts);
- ++b->n_contacts;
- }
-
- return 0;
-}
-
-static int send_msg(struct dht * dht,
- kad_msg_t * msg,
- uint64_t addr)
-{
-#ifndef __DHT_TEST__
- struct shm_du_buff * sdb;
- size_t len;
-#endif
- int retr = 0;
-
- if (msg->code == KAD_RESPONSE)
- retr = KAD_RESP_RETR;
-
- pthread_rwlock_wrlock(&dht->lock);
-
- if (dht->id != NULL) {
- msg->has_s_id = true;
- msg->s_id.data = dht->id;
- msg->s_id.len = dht->b;
- }
-
- msg->s_addr = dht->addr;
-
- if (msg->code < KAD_STORE) {
- msg->cookie = bmp_allocate(dht->cookies);
- if (!bmp_is_id_valid(dht->cookies, msg->cookie)) {
- pthread_rwlock_unlock(&dht->lock);
- goto fail_bmp_alloc;
- }
- }
-
- pthread_rwlock_unlock(&dht->lock);
-
-#ifndef __DHT_TEST__
- len = kad_msg__get_packed_size(msg);
- if (len == 0)
- goto fail_msg;
-
- while (true) {
- if (ipcp_sdb_reserve(&sdb, len))
- goto fail_msg;
-
- kad_msg__pack(msg, shm_du_buff_head(sdb));
-
- if (dt_write_packet(addr, QOS_CUBE_BE, dht->fd, sdb) == 0)
- break;
-
- ipcp_sdb_release(sdb);
-
- sleep(1);
-
- if (--retr < 0)
- goto fail_msg;
- }
-
-#else
- (void) addr;
- (void) retr;
-#endif /* __DHT_TEST__ */
-
- if (msg->code < KAD_STORE && dht_get_state(dht) != DHT_SHUTDOWN)
- kad_req_create(dht, msg, addr);
-
- return msg->cookie;
-#ifndef __DHT_TEST__
- fail_msg:
- pthread_rwlock_wrlock(&dht->lock);
- bmp_release(dht->cookies, msg->cookie);
- pthread_rwlock_unlock(&dht->lock);
-#endif /* !__DHT_TEST__ */
- fail_bmp_alloc:
- return -1;
-}
-
-static struct dht_entry * dht_find_entry(struct dht * dht,
- const uint8_t * key)
-{
- struct list_head * p;
-
- list_for_each(p, &dht->entries) {
- struct dht_entry * e = list_entry(p, struct dht_entry, next);
- if (!memcmp(key, e->key, dht->b))
- return e;
- }
-
- return NULL;
-}
-
-static int kad_add(struct dht * dht,
- const kad_contact_msg_t * contacts,
- ssize_t n,
- time_t exp)
-{
- struct dht_entry * e;
-
- pthread_rwlock_wrlock(&dht->lock);
-
- while (n-- > 0) {
- if (contacts[n].id.len != dht->b)
- log_warn("Bad key length in contact data.");
-
- e = dht_find_entry(dht, contacts[n].id.data);
- if (e != NULL) {
- if (dht_entry_add_addr(e, contacts[n].addr, exp))
- goto fail;
- } else {
- e = dht_entry_create(dht, contacts[n].id.data);
- if (e == NULL)
- goto fail;
-
- if (dht_entry_add_addr(e, contacts[n].addr, exp)) {
- dht_entry_destroy(e);
- goto fail;
- }
-
- list_add(&e->next, &dht->entries);
- }
- }
-
- pthread_rwlock_unlock(&dht->lock);
- return 0;
-
- fail:
- pthread_rwlock_unlock(&dht->lock);
- return -ENOMEM;
-}
-
-static int wait_resp(struct dht * dht,
- kad_msg_t * msg,
- time_t timeo)
-{
- struct kad_req * req;
-
- assert(dht);
- assert(msg);
-
- pthread_rwlock_rdlock(&dht->lock);
-
- req = dht_find_request(dht, msg);
- if (req == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return -EPERM;
- }
-
- pthread_rwlock_unlock(&dht->lock);
-
- return kad_req_wait(req, timeo);
-}
-
-static int kad_store(struct dht * dht,
- const uint8_t * key,
- uint64_t addr,
- uint64_t r_addr,
- time_t ttl)
-{
- kad_msg_t msg = KAD_MSG__INIT;
- kad_contact_msg_t cmsg = KAD_CONTACT_MSG__INIT;
- kad_contact_msg_t * cmsgp[1];
-
- cmsg.id.data = (uint8_t *) key;
- cmsg.addr = addr;
-
- pthread_rwlock_rdlock(&dht->lock);
-
- cmsg.id.len = dht->b;
-
- pthread_rwlock_unlock(&dht->lock);
-
- cmsgp[0] = &cmsg;
-
- msg.code = KAD_STORE;
- msg.has_t_expire = true;
- msg.t_expire = ttl;
- msg.n_contacts = 1;
- msg.contacts = cmsgp;
-
- if (send_msg(dht, &msg, r_addr) < 0)
- return -1;
-
- return 0;
-}
-
-static ssize_t kad_find(struct dht * dht,
- struct lookup * lu,
- const uint64_t * addrs,
- enum kad_code code)
-{
- kad_msg_t msg = KAD_MSG__INIT;
- ssize_t sent = 0;
-
- assert(dht);
- assert(lu->key);
-
- msg.code = code;
-
- msg.has_key = true;
- msg.key.data = (uint8_t *) lu->key;
- msg.key.len = dht->b;
-
- while (*addrs != 0) {
- struct cookie_el * c;
- int ret;
-
- if (*addrs == dht->addr) {
- ++addrs;
- continue;
- }
-
- ret = send_msg(dht, &msg, *addrs);
- if (ret < 0)
- break;
-
- c = malloc(sizeof(*c));
- if (c == NULL)
- break;
-
- c->cookie = (uint32_t) ret;
-
- pthread_mutex_lock(&lu->lock);
-
- list_add_tail(&c->next, &lu->cookies);
-
- pthread_mutex_unlock(&lu->lock);
-
- ++sent;
- ++addrs;
- }
-
- return sent;
-}
-
-static void lookup_detach(struct dht * dht,
- struct lookup * lu)
-{
- pthread_rwlock_wrlock(&dht->lock);
-
- list_del(&lu->next);
-
- pthread_rwlock_unlock(&dht->lock);
-}
-
-static struct lookup * kad_lookup(struct dht * dht,
- const uint8_t * id,
- enum kad_code code)
-{
- uint64_t addrs[KAD_ALPHA + 1];
- enum lookup_state state;
- struct lookup * lu;
-
- lu = lookup_create(dht, id);
- if (lu == NULL)
- return NULL;
-
- lookup_new_addrs(lu, addrs);
-
- if (addrs[0] == 0) {
- lookup_detach(dht, lu);
- lookup_destroy(lu);
- return NULL;
- }
-
- if (kad_find(dht, lu, addrs, code) == 0) {
- lookup_detach(dht, lu);
- return lu;
- }
-
- while ((state = lookup_wait(lu)) != LU_COMPLETE) {
- switch (state) {
- case LU_UPDATE:
- lookup_new_addrs(lu, addrs);
- if (addrs[0] == 0)
- break;
-
- kad_find(dht, lu, addrs, code);
- break;
- case LU_DESTROY:
- lookup_detach(dht, lu);
- lookup_set_state(lu, LU_NULL);
- return NULL;
- default:
- break;
- }
- }
-
- assert(state == LU_COMPLETE);
-
- lookup_detach(dht, lu);
-
- return lu;
-}
-
-static void kad_publish(struct dht * dht,
- const uint8_t * key,
- uint64_t addr,
- time_t exp)
-{
- struct lookup * lu;
- uint64_t * addrs;
- ssize_t n;
- size_t k;
- time_t t_expire;
-
-
- assert(dht);
- assert(key);
-
- pthread_rwlock_rdlock(&dht->lock);
-
- k = dht->k;
- t_expire = dht->t_expire;
-
- pthread_rwlock_unlock(&dht->lock);
-
- addrs = malloc(k * sizeof(*addrs));
- if (addrs == NULL)
- return;
-
- lu = kad_lookup(dht, key, KAD_FIND_NODE);
- if (lu == NULL) {
- free(addrs);
- return;
- }
-
- n = lookup_contact_addrs(lu, addrs);
-
- while (n-- > 0) {
- if (addrs[n] == dht->addr) {
- kad_contact_msg_t msg = KAD_CONTACT_MSG__INIT;
- msg.id.data = (uint8_t *) key;
- msg.id.len = dht->b;
- msg.addr = addr;
- kad_add(dht, &msg, 1, exp);
- } else {
- if (kad_store(dht, key, addr, addrs[n], t_expire))
- log_warn("Failed to send store message.");
- }
- }
-
- lookup_destroy(lu);
-
- free(addrs);
-}
-
-static int kad_join(struct dht * dht,
- uint64_t addr)
-{
- kad_msg_t msg = KAD_MSG__INIT;
-
- msg.code = KAD_JOIN;
-
- msg.has_alpha = true;
- msg.has_b = true;
- msg.has_k = true;
- msg.has_t_refresh = true;
- msg.has_t_replicate = true;
- msg.alpha = KAD_ALPHA;
- msg.k = KAD_K;
- msg.t_refresh = KAD_T_REFR;
- msg.t_replicate = KAD_T_REPL;
-
- pthread_rwlock_rdlock(&dht->lock);
-
- msg.b = dht->b;
-
- pthread_rwlock_unlock(&dht->lock);
-
- if (send_msg(dht, &msg, addr) < 0)
- return -1;
-
- if (wait_resp(dht, &msg, KAD_T_JOIN) < 0)
- return -1;
-
- dht->id = create_id(dht->b);
- if (dht->id == NULL)
- return -1;
-
- pthread_rwlock_wrlock(&dht->lock);
-
- dht_update_bucket(dht, dht->id, dht->addr);
-
- pthread_rwlock_unlock(&dht->lock);
-
- return 0;
-}
-
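-/*
- * Drop an unresponsive peer from its bucket, promoting alternates
- * once enough replacements are available; with too few entries to
- * spare, only the peer's failure count is increased.
- */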
-static void dht_dead_peer(struct dht * dht,
- uint8_t * key,
- uint64_t addr)
-{
- struct list_head * p;
- struct list_head * h;
- struct bucket * b;
-
- b = dht_get_bucket(dht, key);
-
- list_for_each_safe(p, h, &b->contacts) {
- struct contact * c = list_entry(p, struct contact, next);
- if (c->addr != addr)
- continue;
-
- if (b->n_contacts + b->n_alts <= dht->k) {
- ++c->fails;
- return;
- }
-
- list_del(&c->next);
- contact_destroy(c);
- --b->n_contacts;
- break;
- }
-
- while (b->n_contacts < dht->k && b->n_alts > 0) {
- struct contact * c;
- c = list_first_entry(&b->alts, struct contact, next);
- list_del(&c->next);
- --b->n_alts;
- list_add(&c->next, &b->contacts);
- ++b->n_contacts;
- }
-}
-
-static int dht_del(struct dht * dht,
- const uint8_t * key,
- uint64_t addr)
-{
- struct dht_entry * e;
-
- pthread_rwlock_wrlock(&dht->lock);
-
- e = dht_find_entry(dht, key);
- if (e == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return -EPERM;
- }
-
- dht_entry_del_addr(e, addr);
-
- pthread_rwlock_unlock(&dht->lock);
-
- return 0;
-}
-
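-/* Return up to DHT_RETR_ADDR addresses stored under a key. */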
-static buffer_t dht_retrieve(struct dht * dht,
- const uint8_t * key)
-{
- struct dht_entry * e;
- struct list_head * p;
- buffer_t buf;
- uint64_t * pos;
- size_t addrs = 0;
-
- pthread_rwlock_rdlock(&dht->lock);
-
- e = dht_find_entry(dht, key);
- if (e == NULL)
- goto fail;
-
- buf.len = MIN(DHT_RETR_ADDR, e->n_vals);
- if (buf.len == 0)
- goto fail;
-
- pos = malloc(sizeof(dht->addr) * buf.len);
- if (pos == NULL)
- goto fail;
-
- buf.data = (uint8_t *) pos;
-
- list_for_each(p, &e->vals) {
- struct val * v = list_entry(p, struct val, next);
- *pos++ = v->addr;
- if (++addrs >= buf.len)
- break;
- }
-
- pthread_rwlock_unlock(&dht->lock);
-
- return buf;
-
- fail:
- pthread_rwlock_unlock(&dht->lock);
- buf.len = 0;
-
- return buf;
-}
-
-static ssize_t dht_get_contacts(struct dht * dht,
- const uint8_t * key,
- kad_contact_msg_t *** msgs)
-{
- struct list_head l;
- struct list_head * p;
- struct list_head * h;
- size_t len;
- size_t i = 0;
-
- list_head_init(&l);
-
- pthread_rwlock_wrlock(&dht->lock);
-
- len = dht_contact_list(dht, &l, key);
- if (len == 0) {
- pthread_rwlock_unlock(&dht->lock);
- return 0;
- }
-
- *msgs = malloc(len * sizeof(**msgs));
- if (*msgs == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return 0;
- }
-
- list_for_each_safe(p, h, &l) {
- struct contact * c = list_entry(p, struct contact, next);
- (*msgs)[i] = malloc(sizeof(***msgs));
- if ((*msgs)[i] == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- while (i > 0)
- free((*msgs)[--i]);
- free(*msgs);
- return 0;
- }
-
- kad_contact_msg__init((*msgs)[i]);
-
- (*msgs)[i]->id.data = c->id;
- (*msgs)[i]->id.len = dht->b;
- (*msgs)[i++]->addr = c->addr;
- list_del(&c->next);
- free(c);
- }
-
- pthread_rwlock_unlock(&dht->lock);
-
- return i;
-}
-
-static time_t gcd(time_t a,
- time_t b)
-{
- if (a == 0)
- return b;
-
- return gcd(b % a, a);
-}
-
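-/*
- * Housekeeping worker: wakes up at half the gcd of the periodic
- * timers so republish, expiry, request and refresh deadlines are
- * all checked often enough.
- */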
-static void * work(void * o)
-{
- struct dht * dht;
- struct timespec now;
- struct list_head * p;
- struct list_head * h;
- struct list_head reflist;
- time_t intv;
- struct lookup * lu;
-
- dht = (struct dht *) o;
-
- pthread_rwlock_rdlock(&dht->lock);
-
- intv = gcd(dht->t_expire, dht->t_repub);
- intv = gcd(intv, gcd(KAD_T_REPL, KAD_T_REFR)) / 2;
-
- pthread_rwlock_unlock(&dht->lock);
-
- list_head_init(&reflist);
-
- while (true) {
- clock_gettime(CLOCK_REALTIME_COARSE, &now);
-
- pthread_rwlock_wrlock(&dht->lock);
-
- /* Republish registered hashes. */
- list_for_each(p, &dht->refs) {
- struct ref_entry * e;
- uint8_t * key;
- uint64_t addr;
- time_t t_expire;
- e = list_entry(p, struct ref_entry, next);
- if (now.tv_sec > e->t_rep) {
- key = dht_dup_key(e->key, dht->b);
- if (key == NULL)
- continue;
- addr = dht->addr;
- t_expire = dht->t_expire;
- e->t_rep = now.tv_sec + dht->t_repub;
-
- pthread_rwlock_unlock(&dht->lock);
- kad_publish(dht, key, addr, t_expire);
- pthread_rwlock_wrlock(&dht->lock);
- free(key);
- }
- }
-
- /* Remove stale entries and republish if necessary. */
- list_for_each_safe(p, h, &dht->entries) {
- struct list_head * p1;
- struct list_head * h1;
- struct dht_entry * e;
- uint8_t * key;
- time_t t_expire;
- e = list_entry(p, struct dht_entry, next);
- list_for_each_safe(p1, h1, &e->vals) {
- struct val * v;
- uint64_t addr;
- v = list_entry(p1, struct val, next);
- if (now.tv_sec > v->t_exp) {
- list_del(&v->next);
- val_destroy(v);
- continue;
- }
-
- if (now.tv_sec > v->t_rep) {
- key = dht_dup_key(e->key, dht->b);
- addr = v->addr;
- t_expire = dht->t_expire;
- v->t_rep = now.tv_sec + dht->t_replic;
- pthread_rwlock_unlock(&dht->lock);
- kad_publish(dht, key, addr, t_expire);
- pthread_rwlock_wrlock(&dht->lock);
- free(key);
- }
- }
- }
-
- /* Check the requests list for unresponsive nodes. */
- list_for_each_safe(p, h, &dht->requests) {
- struct kad_req * r;
- r = list_entry(p, struct kad_req, next);
- if (now.tv_sec > r->t_exp) {
- list_del(&r->next);
- bmp_release(dht->cookies, r->cookie);
- dht_dead_peer(dht, r->key, r->addr);
- kad_req_destroy(r);
- }
- }
-
- /* Refresh unaccessed buckets. */
- bucket_refresh(dht, dht->buckets, now.tv_sec, &reflist);
-
- pthread_rwlock_unlock(&dht->lock);
-
- list_for_each_safe(p, h, &reflist) {
- struct contact * c;
- c = list_entry(p, struct contact, next);
- lu = kad_lookup(dht, c->id, KAD_FIND_NODE);
- if (lu != NULL)
- lookup_destroy(lu);
- list_del(&c->next);
- contact_destroy(c);
- }
-
- sleep(intv);
- }
-
- return (void *) 0;
-}
-
-static int kad_handle_join_resp(struct dht * dht,
- struct kad_req * req,
- kad_msg_t * msg)
-{
- assert(dht);
- assert(req);
- assert(msg);
-
- /* We might send version numbers later to warn of updates if needed. */
- if (!(msg->has_alpha && msg->has_b && msg->has_k && msg->has_t_expire &&
- msg->has_t_refresh && msg->has_t_replicate)) {
- log_warn("Join refused by remote.");
- return -1;
- }
-
- if (msg->b < sizeof(uint64_t)) {
- log_err("Hash sizes less than 8 bytes unsupported.");
- return -1;
- }
-
- pthread_rwlock_wrlock(&dht->lock);
-
- dht->buckets = bucket_create();
- if (dht->buckets == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return -1;
- }
-
- /* Likely a corrupt packet. The member will refuse it; we might refuse here too. */
- if (msg->alpha != KAD_ALPHA || msg->k != KAD_K)
- log_warn("Different kademlia parameters detected.");
-
- if (msg->t_replicate != KAD_T_REPL)
- log_warn("Different kademlia replication time detected.");
-
- if (msg->t_refresh != KAD_T_REFR)
- log_warn("Different kademlia refresh time detected.");
-
- dht->k = msg->k;
- dht->b = msg->b;
- dht->t_expire = msg->t_expire;
- dht->t_repub = MAX(1, dht->t_expire - 10);
-
- if (pthread_create(&dht->worker, NULL, work, dht)) {
- bucket_destroy(dht->buckets);
- pthread_rwlock_unlock(&dht->lock);
- return -1;
- }
-
- kad_req_respond(req);
-
- dht_update_bucket(dht, msg->s_id.data, msg->s_addr);
-
- pthread_rwlock_unlock(&dht->lock);
-
- log_dbg("Enrollment of DHT completed.");
-
- return 0;
-}
-
-static int kad_handle_find_resp(struct dht * dht,
- struct kad_req * req,
- kad_msg_t * msg)
-{
- struct lookup * lu;
-
- assert(dht);
- assert(req);
- assert(msg);
-
- pthread_rwlock_rdlock(&dht->lock);
-
- lu = dht_find_lookup(dht, req->cookie);
- if (lu == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return -1;
- }
-
- lookup_update(dht, lu, msg);
-
- pthread_rwlock_unlock(&dht->lock);
-
- return 0;
-}
-
-static void kad_handle_response(struct dht * dht,
- kad_msg_t * msg)
-{
- struct kad_req * req;
-
- assert(dht);
- assert(msg);
-
- pthread_rwlock_wrlock(&dht->lock);
-
- req = dht_find_request(dht, msg);
- if (req == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return;
- }
-
- bmp_release(dht->cookies, req->cookie);
- list_del(&req->next);
-
- pthread_rwlock_unlock(&dht->lock);
-
- switch(req->code) {
- case KAD_JOIN:
- if (kad_handle_join_resp(dht, req, msg))
- log_err("Enrollment of DHT failed.");
- break;
- case KAD_FIND_VALUE:
- case KAD_FIND_NODE:
- if (dht_get_state(dht) != DHT_RUNNING)
- break;
- kad_handle_find_resp(dht, req, msg);
- break;
- default:
- break;
- }
-
- kad_req_destroy(req);
-}
-
-int dht_bootstrap(struct dht * dht,
- size_t b,
- time_t t_expire)
-{
- assert(dht);
-
- pthread_rwlock_wrlock(&dht->lock);
-
- dht->id = create_id(b);
- if (dht->id == NULL)
- goto fail_id;
-
- dht->buckets = bucket_create();
- if (dht->buckets == NULL)
- goto fail_buckets;
-
- dht->buckets->depth = 0;
- dht->buckets->mask = 0;
-
- dht->b = b / CHAR_BIT;
- dht->t_expire = MAX(2, t_expire);
- dht->t_repub = MAX(1, t_expire - 10);
- dht->k = KAD_K;
-
- if (pthread_create(&dht->worker, NULL, work, dht))
- goto fail_pthread_create;
-
- dht->state = DHT_RUNNING;
-
- dht_update_bucket(dht, dht->id, dht->addr);
-
- pthread_rwlock_unlock(&dht->lock);
-
- return 0;
-
- fail_pthread_create:
- bucket_destroy(dht->buckets);
- dht->buckets = NULL;
- fail_buckets:
- free(dht->id);
- dht->id = NULL;
- fail_id:
- pthread_rwlock_unlock(&dht->lock);
- return -1;
-}
-
-static struct ref_entry * ref_entry_get(struct dht * dht,
- const uint8_t * key)
-{
- struct list_head * p;
-
- list_for_each(p, &dht->refs) {
- struct ref_entry * r = list_entry(p, struct ref_entry, next);
- if (!memcmp(key, r->key, dht->b))
- return r;
- }
-
- return NULL;
-}
-
-int dht_reg(struct dht * dht,
- const uint8_t * key)
-{
- struct ref_entry * e;
- uint64_t addr;
- time_t t_expire;
-
- assert(dht);
- assert(key);
- assert(dht->addr != 0);
-
- if (dht_wait_running(dht))
- return -1;
-
- pthread_rwlock_wrlock(&dht->lock);
-
- if (ref_entry_get(dht, key) != NULL) {
- log_dbg("Name already registered.");
- pthread_rwlock_unlock(&dht->lock);
- return 0;
- }
-
- e = ref_entry_create(dht, key);
- if (e == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- return -ENOMEM;
- }
-
- list_add(&e->next, &dht->refs);
-
- t_expire = dht->t_expire;
- addr = dht->addr;
-
- pthread_rwlock_unlock(&dht->lock);
-
- kad_publish(dht, key, addr, t_expire);
-
- return 0;
-}
-
-int dht_unreg(struct dht * dht,
- const uint8_t * key)
-{
- struct list_head * p;
- struct list_head * h;
-
- assert(dht);
- assert(key);
-
- if (dht_get_state(dht) != DHT_RUNNING)
- return -1;
-
- pthread_rwlock_wrlock(&dht->lock);
-
- list_for_each_safe(p, h, &dht->refs) {
- struct ref_entry * r = list_entry(p, struct ref_entry, next);
- if (!memcmp(key, r->key, dht->b)) {
- list_del(&r->next);
- ref_entry_destroy(r);
- }
- }
-
- dht_del(dht, key, dht->addr);
-
- pthread_rwlock_unlock(&dht->lock);
-
- return 0;
-}
-
-uint64_t dht_query(struct dht * dht,
- const uint8_t * key)
-{
- struct dht_entry * e;
- struct lookup * lu;
- uint64_t addrs[KAD_K];
- size_t n;
-
- addrs[0] = 0;
-
- if (dht_wait_running(dht))
- return 0;
-
- pthread_rwlock_rdlock(&dht->lock);
-
- e = dht_find_entry(dht, key);
- if (e != NULL)
- addrs[0] = dht_entry_get_addr(dht, e);
-
- pthread_rwlock_unlock(&dht->lock);
-
- if (addrs[0] != 0)
- return addrs[0];
-
- lu = kad_lookup(dht, key, KAD_FIND_VALUE);
- if (lu == NULL)
- return 0;
-
- n = lookup_get_addrs(lu, addrs);
- if (n == 0) {
- lookup_destroy(lu);
- return 0;
- }
-
- lookup_destroy(lu);
-
- /* Current behaviour is anycast: return the first peer address. */
- if (addrs[0] != dht->addr)
- return addrs[0];
-
- if (n > 1)
- return addrs[1];
-
- return 0;
-}
-
-static void * dht_handle_packet(void * o)
-{
- struct dht * dht = (struct dht *) o;
-
- assert(dht);
-
- while (true) {
- kad_msg_t * msg;
- kad_contact_msg_t ** cmsgs;
- kad_msg_t resp_msg = KAD_MSG__INIT;
- uint64_t addr;
- buffer_t buf;
- size_t i;
- size_t b;
- time_t t_expire;
- struct cmd * cmd;
-
- pthread_mutex_lock(&dht->mtx);
-
- pthread_cleanup_push((void (*)(void *)) pthread_mutex_unlock,
- &dht->mtx);
-
- while (list_is_empty(&dht->cmds))
- pthread_cond_wait(&dht->cond, &dht->mtx);
-
- cmd = list_last_entry(&dht->cmds, struct cmd, next);
- list_del(&cmd->next);
-
- pthread_cleanup_pop(true);
-
- i = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb);
-
- msg = kad_msg__unpack(NULL, i, shm_du_buff_head(cmd->sdb));
-#ifndef __DHT_TEST__
- ipcp_sdb_release(cmd->sdb);
-#endif
- free(cmd);
-
- if (msg == NULL) {
- log_err("Failed to unpack message.");
- continue;
- }
-
- if (msg->code != KAD_RESPONSE && dht_wait_running(dht)) {
- kad_msg__free_unpacked(msg, NULL);
- log_dbg("Got a request message when not running.");
- continue;
- }
-
- pthread_rwlock_rdlock(&dht->lock);
-
- b = dht->b;
- t_expire = dht->t_expire;
-
- pthread_rwlock_unlock(&dht->lock);
-
- if (msg->has_key && msg->key.len != b) {
- kad_msg__free_unpacked(msg, NULL);
- log_warn("Bad key in message.");
- continue;
- }
-
- if (msg->has_s_id && !msg->has_b && msg->s_id.len != b) {
- kad_msg__free_unpacked(msg, NULL);
- log_warn("Bad source ID in message of type %d.",
- msg->code);
- continue;
- }
-
- tpm_dec(dht->tpm);
-
- addr = msg->s_addr;
-
- resp_msg.code = KAD_RESPONSE;
- resp_msg.cookie = msg->cookie;
-
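- /* Handle the request and build the response in one pass. */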
- switch(msg->code) {
- case KAD_JOIN:
- /* Refuse the enrollee if any check fails. */
- if (msg->alpha != KAD_ALPHA || msg->k != KAD_K) {
- log_warn("Parameter mismatch. "
- "DHT enrolment refused.");
- break;
- }
-
- if (msg->t_replicate != KAD_T_REPL) {
- log_warn("Replication time mismatch. "
- "DHT enrolment refused.");
-
- break;
- }
-
- if (msg->t_refresh != KAD_T_REFR) {
- log_warn("Refresh time mismatch. "
- "DHT enrolment refused.");
- break;
- }
-
- resp_msg.has_alpha = true;
- resp_msg.has_b = true;
- resp_msg.has_k = true;
- resp_msg.has_t_expire = true;
- resp_msg.has_t_refresh = true;
- resp_msg.has_t_replicate = true;
- resp_msg.alpha = KAD_ALPHA;
- resp_msg.b = b;
- resp_msg.k = KAD_K;
- resp_msg.t_expire = t_expire;
- resp_msg.t_refresh = KAD_T_REFR;
- resp_msg.t_replicate = KAD_T_REPL;
- break;
- case KAD_FIND_VALUE:
- buf = dht_retrieve(dht, msg->key.data);
- if (buf.len != 0) {
- resp_msg.n_addrs = buf.len;
- resp_msg.addrs = (uint64_t *) buf.data;
- break;
- }
- /* FALLTHRU */
- case KAD_FIND_NODE:
- /* Return k closest contacts. */
- resp_msg.n_contacts =
- dht_get_contacts(dht, msg->key.data, &cmsgs);
- resp_msg.contacts = cmsgs;
- break;
- case KAD_STORE:
- if (msg->n_contacts < 1) {
- log_warn("No contacts in store message.");
- break;
- }
-
- if (!msg->has_t_expire) {
- log_warn("No expiry time in store message.");
- break;
- }
-
- kad_add(dht, *msg->contacts, msg->n_contacts,
- msg->t_expire);
- break;
- case KAD_RESPONSE:
- kad_handle_response(dht, msg);
- break;
- default:
- assert(false);
- break;
- }
-
- if (msg->code != KAD_JOIN) {
- pthread_rwlock_wrlock(&dht->lock);
- if (dht_get_state(dht) == DHT_JOINING &&
- dht->buckets == NULL) {
- pthread_rwlock_unlock(&dht->lock);
- break;
- }
-
- if (dht_update_bucket(dht, msg->s_id.data, addr))
- log_warn("Failed to update bucket.");
- pthread_rwlock_unlock(&dht->lock);
- }
-
- if (msg->code < KAD_STORE && send_msg(dht, &resp_msg, addr) < 0)
- log_warn("Failed to send response.");
-
- kad_msg__free_unpacked(msg, NULL);
-
- if (resp_msg.n_addrs > 0)
- free(resp_msg.addrs);
-
- if (resp_msg.n_contacts == 0) {
- tpm_inc(dht->tpm);
- continue;
- }
-
- for (i = 0; i < resp_msg.n_contacts; ++i)
- kad_contact_msg__free_unpacked(resp_msg.contacts[i],
- NULL);
- free(resp_msg.contacts);
-
- tpm_inc(dht->tpm);
- }
-
- return (void *) 0;
-}
-
-static void dht_post_packet(void * comp,
- struct shm_du_buff * sdb)
-{
- struct cmd * cmd;
- struct dht * dht = (struct dht *) comp;
-
- if (dht_get_state(dht) == DHT_SHUTDOWN) {
-#ifndef __DHT_TEST__
- ipcp_sdb_release(sdb);
-#endif
- return;
- }
-
- cmd = malloc(sizeof(*cmd));
- if (cmd == NULL) {
- log_err("Command failed. Out of memory.");
- return;
- }
-
- cmd->sdb = sdb;
-
- pthread_mutex_lock(&dht->mtx);
-
- list_add(&cmd->next, &dht->cmds);
-
- pthread_cond_signal(&dht->cond);
-
- pthread_mutex_unlock(&dht->mtx);
-}
-
-void dht_destroy(struct dht * dht)
-{
- struct list_head * p;
- struct list_head * h;
-
- if (dht == NULL)
- return;
-
-#ifndef __DHT_TEST__
- tpm_stop(dht->tpm);
-
- tpm_destroy(dht->tpm);
-#endif
- if (dht_get_state(dht) == DHT_RUNNING) {
- dht_set_state(dht, DHT_SHUTDOWN);
- pthread_cancel(dht->worker);
- pthread_join(dht->worker, NULL);
- }
-
- pthread_rwlock_wrlock(&dht->lock);
-
- list_for_each_safe(p, h, &dht->cmds) {
- struct cmd * c = list_entry(p, struct cmd, next);
- list_del(&c->next);
-#ifndef __DHT_TEST__
- ipcp_sdb_release(c->sdb);
-#endif
- free(c);
- }
-
- list_for_each_safe(p, h, &dht->entries) {
- struct dht_entry * e = list_entry(p, struct dht_entry, next);
- list_del(&e->next);
- dht_entry_destroy(e);
- }
-
- list_for_each_safe(p, h, &dht->requests) {
- struct kad_req * r = list_entry(p, struct kad_req, next);
- list_del(&r->next);
- kad_req_destroy(r);
- }
-
- list_for_each_safe(p, h, &dht->refs) {
- struct ref_entry * e = list_entry(p, struct ref_entry, next);
- list_del(&e->next);
- ref_entry_destroy(e);
- }
-
- list_for_each_safe(p, h, &dht->lookups) {
- struct lookup * l = list_entry(p, struct lookup, next);
- list_del(&l->next);
- lookup_destroy(l);
- }
-
- pthread_rwlock_unlock(&dht->lock);
-
- if (dht->buckets != NULL)
- bucket_destroy(dht->buckets);
-
- bmp_destroy(dht->cookies);
-
- pthread_mutex_destroy(&dht->mtx);
-
- pthread_rwlock_destroy(&dht->lock);
-
- free(dht->id);
-
- free(dht);
-}
-
-static void * join_thr(void * o)
-{
- struct join_info * info = (struct join_info *) o;
- struct lookup * lu;
- size_t retr = 0;
-
- assert(info);
-
- while (kad_join(info->dht, info->addr)) {
- if (dht_get_state(info->dht) == DHT_SHUTDOWN) {
- log_dbg("DHT enrollment aborted.");
- goto finish;
- }
-
- if (retr++ == KAD_JOIN_RETR) {
- dht_set_state(info->dht, DHT_INIT);
- log_warn("DHT enrollment attempt failed.");
- goto finish;
- }
-
- sleep(KAD_JOIN_INTV);
- }
-
- dht_set_state(info->dht, DHT_RUNNING);
-
- lu = kad_lookup(info->dht, info->dht->id, KAD_FIND_NODE);
- if (lu != NULL)
- lookup_destroy(lu);
-
- finish:
- free(info);
-
- return (void *) 0;
-}
-
-static void handle_event(void * self,
- int event,
- const void * o)
-{
- struct dht * dht = (struct dht *) self;
-
- if (event == NOTIFY_DT_CONN_ADD) {
- pthread_t thr;
- struct join_info * inf;
- struct conn * c = (struct conn *) o;
- struct timespec slack = {0, DHT_ENROLL_SLACK * MILLION};
-
- /* Give the pff some time to update for the new link. */
- nanosleep(&slack, NULL);
-
- switch(dht_get_state(dht)) {
- case DHT_INIT:
- inf = malloc(sizeof(*inf));
- if (inf == NULL)
- break;
-
- inf->dht = dht;
- inf->addr = c->conn_info.addr;
-
- if (dht_set_state(dht, DHT_JOINING) == 0 ||
- dht_wait_running(dht)) {
- if (pthread_create(&thr, NULL, join_thr, inf)) {
- dht_set_state(dht, DHT_INIT);
- free(inf);
- return;
- }
- pthread_detach(thr);
- } else {
- free(inf);
- }
- break;
- case DHT_RUNNING:
- /*
- * FIXME: this lookup, done for efficiency
- * reasons, causes a SEGV when stressed
- * with rapid enrollments.
- * lu = kad_lookup(dht, dht->id, KAD_FIND_NODE);
- * if (lu != NULL)
- * lookup_destroy(lu);
- */
- break;
- default:
- break;
- }
- }
-}
-
-struct dht * dht_create(uint64_t addr)
-{
- struct dht * dht;
-
- dht = malloc(sizeof(*dht));
- if (dht == NULL)
- goto fail_malloc;
-
- dht->buckets = NULL;
-
- list_head_init(&dht->entries);
- list_head_init(&dht->requests);
- list_head_init(&dht->refs);
- list_head_init(&dht->lookups);
- list_head_init(&dht->cmds);
-
- if (pthread_rwlock_init(&dht->lock, NULL))
- goto fail_rwlock;
-
- if (pthread_mutex_init(&dht->mtx, NULL))
- goto fail_mutex;
-
- if (pthread_cond_init(&dht->cond, NULL))
- goto fail_cond;
-
- dht->cookies = bmp_create(DHT_MAX_REQS, 1);
- if (dht->cookies == NULL)
- goto fail_bmp;
-
- dht->b = 0;
- dht->addr = addr;
- dht->id = NULL;
-#ifndef __DHT_TEST__
- dht->tpm = tpm_create(2, 1, dht_handle_packet, dht);
- if (dht->tpm == NULL)
- goto fail_tpm_create;
-
- if (tpm_start(dht->tpm))
- goto fail_tpm_start;
-
- dht->fd = dt_reg_comp(dht, &dht_post_packet, DHT);
- notifier_reg(handle_event, dht);
-#else
- (void) handle_event;
- (void) dht_handle_packet;
- (void) dht_post_packet;
-#endif
- dht->state = DHT_INIT;
-
- return dht;
-#ifndef __DHT_TEST__
- fail_tpm_start:
- tpm_destroy(dht->tpm);
- fail_tpm_create:
- bmp_destroy(dht->cookies);
-#endif
- fail_bmp:
- pthread_cond_destroy(&dht->cond);
- fail_cond:
- pthread_mutex_destroy(&dht->mtx);
- fail_mutex:
- pthread_rwlock_destroy(&dht->lock);
- fail_rwlock:
- free(dht);
- fail_malloc:
- return NULL;
-}
diff --git a/src/ipcpd/unicast/dht.h b/src/ipcpd/unicast/dht.h
deleted file mode 100644
index 39dfc07e..00000000
--- a/src/ipcpd/unicast/dht.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Distributed Hash Table based on Kademlia
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#ifndef OUROBOROS_IPCPD_UNICAST_DHT_H
-#define OUROBOROS_IPCPD_UNICAST_DHT_H
-
-#include <ouroboros/ipcp-dev.h>
-
-#include <stdint.h>
-#include <sys/types.h>
-
-struct dht;
-
-struct dht * dht_create(uint64_t addr);
-
-int dht_bootstrap(struct dht * dht,
- size_t b,
- time_t t_expire);
-
-void dht_destroy(struct dht * dht);
-
-int dht_reg(struct dht * dht,
- const uint8_t * key);
-
-int dht_unreg(struct dht * dht,
- const uint8_t * key);
-
-uint64_t dht_query(struct dht * dht,
- const uint8_t * key);
-
-int dht_wait_running(struct dht * dht);
-
-#endif /* OUROBOROS_IPCPD_UNICAST_DHT_H */
diff --git a/src/ipcpd/unicast/dir.c b/src/ipcpd/unicast/dir.c
index 43ee94f0..2b305626 100644
--- a/src/ipcpd/unicast/dir.c
+++ b/src/ipcpd/unicast/dir.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
- * Directory
+ * Directory Management
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -34,8 +34,7 @@
#include <ouroboros/utils.h>
#include "dir.h"
-#include "dht.h"
-#include "ipcp.h"
+#include "dir/pol.h"
#include <stdlib.h>
#include <string.h>
@@ -43,59 +42,59 @@
#include <inttypes.h>
#include <limits.h>
-#define KAD_B (hash_len(ipcpi.dir_hash_algo) * CHAR_BIT)
+struct {
+ struct dir_ops * ops;
+} dir;
-struct dht * dht;
-
-int dir_init(void)
+int dir_init(struct dir_config * conf)
{
- dht = dht_create(ipcpi.dt_addr);
- if (dht == NULL)
- return -ENOMEM;
+ void * cfg;
+
+ assert(conf != NULL);
+
+ switch (conf->pol) {
+ case DIR_DHT:
+ log_info("Using DHT policy.");
+ dir.ops = &dht_dir_ops;
+ cfg = &conf->dht;
+ break;
+ default: /* DIR_INVALID */
+ log_err("Invalid directory policy %d.", conf->pol);
+ return -EINVAL;
+ }
- return 0;
+ assert(dir.ops->init != NULL);
+
+ return dir.ops->init(cfg);
}
void dir_fini(void)
{
- dht_destroy(dht);
+ dir.ops->fini();
+ dir.ops = NULL;
}
-int dir_bootstrap(void) {
- log_dbg("Bootstrapping directory.");
-
- /* TODO: get parameters for bootstrap from IRM tool. */
- if (dht_bootstrap(dht, KAD_B, 86400)) {
- dht_destroy(dht);
- return -ENOMEM;
- }
-
- log_info("Directory bootstrapped.");
+int dir_start(void)
+{
+ return dir.ops->start();
+}
- return 0;
+void dir_stop(void)
+{
+ dir.ops->stop();
}
int dir_reg(const uint8_t * hash)
{
- return dht_reg(dht, hash);
+ return dir.ops->reg(hash);
}
int dir_unreg(const uint8_t * hash)
{
- return dht_unreg(dht, hash);
+ return dir.ops->unreg(hash);
}
uint64_t dir_query(const uint8_t * hash)
{
- return dht_query(dht, hash);
-}
-
-int dir_wait_running(void)
-{
- if (dht_wait_running(dht)) {
- log_warn("Directory did not bootstrap.");
- return -1;
- }
-
- return 0;
+ return dir.ops->query(hash);
}
diff --git a/src/ipcpd/unicast/dir.h b/src/ipcpd/unicast/dir.h
index 1b67a08c..dbfde19f 100644
--- a/src/ipcpd/unicast/dir.h
+++ b/src/ipcpd/unicast/dir.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Directory
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,11 +23,16 @@
#ifndef OUROBOROS_IPCPD_UNICAST_DIR_H
#define OUROBOROS_IPCPD_UNICAST_DIR_H
-int dir_init(void);
+#include <inttypes.h>
+
+/* may update the config! */
+int dir_init(struct dir_config * conf);
void dir_fini(void);
-int dir_bootstrap(void);
+int dir_start(void);
+
+void dir_stop(void);
int dir_reg(const uint8_t * hash);
@@ -35,6 +40,4 @@ int dir_unreg(const uint8_t * hash);
uint64_t dir_query(const uint8_t * hash);
-int dir_wait_running(void);
-
#endif /* OUROBOROS_IPCPD_UNICAST_DIR_H */
diff --git a/src/ipcpd/unicast/dir/dht.c b/src/ipcpd/unicast/dir/dht.c
new file mode 100644
index 00000000..f7de7bb7
--- /dev/null
+++ b/src/ipcpd/unicast/dir/dht.c
@@ -0,0 +1,4035 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Distributed Hash Table based on Kademlia
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#if !defined (__DHT_TEST__)
+ #if defined(__linux__) || defined(__CYGWIN__)
+ #define _DEFAULT_SOURCE
+ #else
+ #define _POSIX_C_SOURCE 200112L
+ #endif
+#endif
+
+#include "config.h"
+
+#define DHT "dht"
+#define OUROBOROS_PREFIX DHT
+
+#include <ouroboros/endian.h>
+#include <ouroboros/hash.h>
+#include <ouroboros/ipcp-dev.h>
+#include <ouroboros/bitmap.h>
+#include <ouroboros/errno.h>
+#include <ouroboros/logs.h>
+#include <ouroboros/list.h>
+#include <ouroboros/notifier.h>
+#include <ouroboros/random.h>
+#include <ouroboros/rib.h>
+#include <ouroboros/time.h>
+#include <ouroboros/tpm.h>
+#include <ouroboros/utils.h>
+#include <ouroboros/pthread.h>
+
+#include "addr-auth.h"
+#include "common/connmgr.h"
+#include "dht.h"
+#include "dt.h"
+#include "ipcp.h"
+#include "ops.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <limits.h>
+
+#include "dht.pb-c.h"
+typedef DhtMsg dht_msg_t;
+typedef DhtContactMsg dht_contact_msg_t;
+typedef DhtStoreMsg dht_store_msg_t;
+typedef DhtFindReqMsg dht_find_req_msg_t;
+typedef DhtFindNodeRspMsg dht_find_node_rsp_msg_t;
+typedef DhtFindValueRspMsg dht_find_value_rsp_msg_t;
+typedef ProtobufCBinaryData binary_data_t;
+
+#ifndef CLOCK_REALTIME_COARSE
+#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
+#endif
+
+#define DHT_MAX_REQS 128 /* KAD recommends rnd(), bmp can be changed. */
+#define DHT_WARN_REQS 100 /* Warn if number of requests exceeds this. */
+#define DHT_MAX_VALS 8 /* Max number of values to return for a key. */
+#define DHT_T_CACHE 60 /* Max cache time for values (s) */
+#define DHT_T_RESP 2 /* Response time to wait for a response (s). */
+#define DHT_N_REPUB 5 /* Republish if expiry within n replications. */
+#define DHT_R_PING 2 /* Ping retries before declaring peer dead. */
+#define DHT_QUEER 15 /* Time to declare peer questionable. */
+#define DHT_BETA 8 /* Bucket split factor, must be 1, 2, 4 or 8. */
+#define DHT_RESP_RETR 6 /* Number of retries on sending a response. */
+#define HANDLE_TIMEO 1000 /* Timeout for dht_handle_packet tpm check (ms) */
+#define DHT_INVALID 0 /* Invalid cookie value. */
+
+#define KEY_FMT "K<" HASH_FMT64 ">"
+#define KEY_VAL(key) HASH_VAL64(key)
+
+#define VAL_FMT "V<" HASH_FMT64 ">"
+#define VAL_VAL(val) HASH_VAL64((val).data)
+
+#define KV_FMT "<" HASH_FMT64 ", " HASH_FMT64 ">"
+#define KV_VAL(key, val) HASH_VAL64(key), HASH_VAL64((val).data)
+
+#define PEER_FMT "[" HASH_FMT64 "|" ADDR_FMT32 "]"
+#define PEER_VAL(id, addr) HASH_VAL64(id), ADDR_VAL32(&(addr))
+
+#define DHT_CODE(msg) dht_code_str[(msg)->code]
+
+#define TX_HDR_FMT "%s --> " PEER_FMT
+#define TX_HDR_VAL(msg, id, addr) DHT_CODE(msg), PEER_VAL(id, addr)
+
+#define RX_HDR_FMT "%s <-- " PEER_FMT
+#define RX_HDR_VAL(msg) DHT_CODE(msg), \
+ PEER_VAL(msg->src->id.data, msg->src->addr)
+
+#define CK_FMT "|" HASH_FMT64 "|"
+#define CK_VAL(cookie) HASH_VAL64(&(cookie))
+
+#define IS_REQUEST(code) \
+ (code == DHT_FIND_NODE_REQ || code == DHT_FIND_VALUE_REQ)
+
+enum dht_code {
+ DHT_STORE,
+ DHT_FIND_NODE_REQ,
+ DHT_FIND_NODE_RSP,
+ DHT_FIND_VALUE_REQ,
+ DHT_FIND_VALUE_RSP
+};
+
+const char * dht_code_str[] = {
+ "DHT_STORE",
+ "DHT_FIND_NODE_REQ",
+ "DHT_FIND_NODE_RSP",
+ "DHT_FIND_VALUE_REQ",
+ "DHT_FIND_VALUE_RSP"
+};
+
+enum dht_state {
+ DHT_NULL = 0,
+ DHT_INIT,
+ DHT_RUNNING
+};
+
+struct val_entry {
+ struct list_head next;
+
+ buffer_t val;
+
+ time_t t_exp; /* Expiry time */
+ time_t t_repl; /* Last replication time */
+};
+
+struct dht_entry {
+ struct list_head next;
+
+ uint8_t * key;
+
+ struct {
+ struct list_head list;
+ size_t len;
+ } vals; /* We don't own these, only replicate */
+
+ struct {
+ struct list_head list;
+ size_t len;
+ } lvals; /* We own these, must be republished */
+};
+
+struct contact {
+ struct list_head next;
+
+ uint8_t * id;
+ uint64_t addr;
+
+ size_t fails;
+ time_t t_seen;
+};
+
+struct peer_entry {
+ struct list_head next;
+
+ uint64_t cookie;
+ uint8_t * id;
+ uint64_t addr;
+ enum dht_code code;
+
+ time_t t_sent;
+};
+
+struct dht_req {
+ struct list_head next;
+
+ uint8_t * key;
+ time_t t_exp;
+
+ struct {
+ struct list_head list;
+ size_t len;
+ } peers;
+
+ struct {
+ struct list_head list;
+ size_t len;
+ } cache;
+};
+
+struct bucket {
+ struct {
+ struct list_head list;
+ size_t len;
+ } contacts;
+
+ struct {
+ struct list_head list;
+ size_t len;
+ } alts;
+
+ time_t t_refr;
+
+ size_t depth;
+ uint8_t mask;
+
+ struct bucket * parent;
+ struct bucket * children[1L << DHT_BETA];
+};
+
+struct cmd {
+ struct list_head next;
+ buffer_t cbuf;
+};
+
+struct dir_ops dht_dir_ops = {
+ .init = (int (*)(void *)) dht_init,
+ .fini = dht_fini,
+ .start = dht_start,
+ .stop = dht_stop,
+ .reg = dht_reg,
+ .unreg = dht_unreg,
+ .query = dht_query
+};
+
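+/* Global DHT state: Kademlia parameters, database and request table. */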
+struct {
+ struct { /* Kademlia parameters */
+ uint32_t alpha; /* Number of concurrent requests */
+ size_t k; /* Number of replicas to store */
+ time_t t_expire; /* Expiry time for values (s) */
+ time_t t_refresh; /* Refresh time for contacts (s) */
+ time_t t_repl; /* Replication time for values (s) */
+ };
+
+ buffer_t id;
+
+ time_t t0; /* Creation time */
+ uint64_t addr; /* Our own address */
+ uint64_t peer; /* Enrollment peer address */
+ uint64_t magic; /* Magic cookie for retransmit */
+
+ uint64_t eid; /* Entity ID */
+
+ struct tpm * tpm;
+ pthread_t worker;
+
+ enum dht_state state;
+
+ struct {
+ struct {
+ struct bucket * root;
+ } contacts;
+
+ struct {
+ struct list_head list;
+ size_t len;
+ size_t vals;
+ size_t lvals;
+ } kv;
+
+ pthread_rwlock_t lock;
+ } db;
+
+ struct {
+ struct list_head list;
+ size_t len;
+ pthread_cond_t cond;
+ pthread_mutex_t mtx;
+ } reqs;
+
+ struct {
+ struct list_head list;
+ pthread_cond_t cond;
+ pthread_mutex_t mtx;
+ } cmds;
+} dht;
+
+
+/* DHT RIB */
+
+static const char * dht_dir[] = {
+ "database",
+ "stats",
+ NULL
+};
+
+const char * dht_stats = \
+ "DHT: " HASH_FMT64 "\n"
+ " Created: %s\n"
+ " Address: " ADDR_FMT32 "\n"
+ " Kademlia parameters:\n"
+ " Number of concurrent requests (alpha): %10zu\n"
+ " Number of replicas (k): %10zu\n"
+ " Expiry time for values (s): %10ld\n"
+ " Refresh time for contacts (s): %10ld\n"
+ " Replication time for values (s): %10ld\n"
+ " Number of keys: %10zu\n"
+ " Number of local values: %10zu\n"
+ " Number of non-local values: %10zu\n";
+
+static int dht_rib_statfile(char * buf,
+ size_t len)
+{
+ struct tm * tm;
+ char tmstr[RIB_TM_STRLEN];
+ size_t keys;
+ size_t vals;
+ size_t lvals;
+
+ assert(buf != NULL);
+ assert(len > 0);
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ keys = dht.db.kv.len;
+ lvals = dht.db.kv.lvals;
+ vals = dht.db.kv.vals;
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ tm = gmtime(&dht.t0);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+ snprintf(buf, len, dht_stats,
+ HASH_VAL64(dht.id.data),
+ tmstr,
+ ADDR_VAL32(&dht.addr),
+ dht.alpha, dht.k,
+ dht.t_expire, dht.t_refresh, dht.t_repl,
+ keys, lvals, vals);
+
+ return strlen(buf);
+}
+
+static size_t dht_db_file_len(void)
+{
+ size_t sz;
+ size_t vals;
+
+ sz = 18; /* DHT database + 2 * \n */
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ if (dht.db.kv.len == 0) {
+ pthread_rwlock_unlock(&dht.db.lock);
+ sz += 14; /* No entries */
+ return sz;
+ }
+
+ sz += 39 * 3 + 1; /* tally + extra newline */
+ sz += dht.db.kv.len * (25 + 19 + 23 + 1);
+
+ vals = dht.db.kv.vals + dht.db.kv.lvals;
+
+ sz += vals * (48 + 2 * RIB_TM_STRLEN);
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return sz;
+}
+
+static int dht_rib_dbfile(char * buf,
+ size_t len)
+{
+ struct tm * tm;
+ char tmstr[RIB_TM_STRLEN];
+ char exstr[RIB_TM_STRLEN];
+ size_t i = 0;
+ struct list_head * p;
+
+ assert(buf != NULL);
+ assert(len > 0);
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ if (dht.db.kv.len == 0) {
+ i += snprintf(buf, len, " No entries.\n");
+ pthread_rwlock_unlock(&dht.db.lock);
+ return i;
+ }
+
+ i += snprintf(buf + i, len - i, "DHT database:\n\n");
+ i += snprintf(buf + i, len - i,
+ "Number of keys: %10zu\n"
+ "Number of local values: %10zu\n"
+ "Number of non-local values: %10zu\n\n",
+ dht.db.kv.len, dht.db.kv.lvals, dht.db.kv.vals);
+
+ list_for_each(p, &dht.db.kv.list) {
+ struct dht_entry * e = list_entry(p, struct dht_entry, next);
+ struct list_head * h;
+
+ i += snprintf(buf + i, len - i, "Key: " KEY_FMT "\n",
+ KEY_VAL(e->key));
+ i += snprintf(buf + i, len - i, " Local entries:\n");
+
+ list_for_each(h, &e->vals.list) {
+ struct val_entry * v;
+
+ v = list_entry(h, struct val_entry, next);
+
+ tm = gmtime(&v->t_repl);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+ tm = gmtime(&v->t_exp);
+ strftime(exstr, sizeof(exstr), RIB_TM_FORMAT, tm);
+
+ i += snprintf(buf + i, len - i,
+ " " VAL_FMT
+ ", t_replicated=%.*s, t_expire=%.*s\n",
+ VAL_VAL(v->val),
+ RIB_TM_STRLEN, tmstr,
+ RIB_TM_STRLEN, exstr);
+ }
+
+ i += snprintf(buf + i, len - i, "\n");
+
+ i += snprintf(buf + i, len - i, " Non-local entries:\n");
+
+ list_for_each(h, &e->lvals.list) {
+ struct val_entry * v;
+
+ v = list_entry(h, struct val_entry, next);
+
+ tm = gmtime(&v->t_repl);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+ tm = gmtime(&v->t_exp);
+ strftime(exstr, sizeof(exstr), RIB_TM_FORMAT, tm);
+
+ i += snprintf(buf + i, len - i,
+ " " VAL_FMT
+ ", t_replicated=%.*s, t_expire=%.*s\n",
+ VAL_VAL(v->val),
+ RIB_TM_STRLEN, tmstr,
+ RIB_TM_STRLEN, exstr);
+
+ }
+ }
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ printf("DHT RIB DB file generated (%zu bytes).\n", i);
+
+ return i;
+}
+
+static int dht_rib_read(const char * path,
+ char * buf,
+ size_t len)
+{
+ char * entry;
+
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+
+ if (strcmp(entry, "database") == 0) {
+ return dht_rib_dbfile(buf, len);
+ } else if (strcmp(entry, "stats") == 0) {
+ return dht_rib_statfile(buf, len);
+ }
+
+ return 0;
+}
+
+static int dht_rib_readdir(char *** buf)
+{
+ int i = 0;
+
+ while (dht_dir[i++] != NULL);
+
+ *buf = malloc(sizeof(**buf) * i);
+ if (*buf == NULL)
+ goto fail_buf;
+
+ i = 0;
+
+ while (dht_dir[i] != NULL) {
+ (*buf)[i] = strdup(dht_dir[i]);
+ if ((*buf)[i] == NULL)
+ goto fail_dup;
+ i++;
+ }
+
+ return i;
+ fail_dup:
+ freepp(char, *buf, i);
+ fail_buf:
+ return -ENOMEM;
+}
+
+static int dht_rib_getattr(const char * path,
+ struct rib_attr * attr)
+{
+ struct timespec now;
+ char * entry;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ attr->mtime = now.tv_sec;
+
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+
+ if (strcmp(entry, "database") == 0) {
+ attr->size = dht_db_file_len();
+ } else if (strcmp(entry, "stats") == 0) {
+ attr->size = 545;
+ }
+
+ return 0;
+}
+
+static struct rib_ops r_ops = {
+ .read = dht_rib_read,
+ .readdir = dht_rib_readdir,
+ .getattr = dht_rib_getattr
+};
+
+/* Helper functions */
+
+static uint8_t * generate_id(void)
+{
+ uint8_t * id;
+
+ if (dht.id.len < sizeof(uint64_t)) {
+ log_err("DHT ID length is too short (%zu < %zu).",
+ dht.id.len, sizeof(uint64_t));
+ return NULL;
+ }
+
+ id = malloc(dht.id.len);
+ if (id == NULL) {
+ log_err("Failed to malloc ID.");
+ goto fail_id;
+ }
+
+ if (random_buffer(id, dht.id.len) < 0) {
+ log_err("Failed to generate random ID.");
+ goto fail_rnd;
+ }
+
+ return id;
+ fail_rnd:
+ free(id);
+ fail_id:
+ return NULL;
+}
+
+static uint64_t generate_cookie(void)
+{
+ uint64_t cookie = DHT_INVALID;
+
+ while (cookie == DHT_INVALID)
+ random_buffer((uint8_t *) &cookie, sizeof(cookie));
+
+ return cookie;
+}
+
+/*
+ * If someone builds a network where the n (n > k) closest nodes all
+ * have IDs starting with the same 64 bits: by all means, change this.
+ */
+static uint64_t dist(const uint8_t * src,
+ const uint8_t * dst)
+{
+ assert(dht.id.len >= sizeof(uint64_t));
+
+ return betoh64(*((uint64_t *) src) ^ *((uint64_t *) dst));
+}
+
+#define IS_CLOSER(x, y) (dist((x), dht.id.data) < dist((y), dht.id.data))
+
+static int addr_to_buf(const uint64_t addr,
+ buffer_t * buf)
+{
+ size_t len;
+ uint64_t _addr;
+
+ len = sizeof(addr);
+ _addr = hton64(addr);
+
+ assert(buf != NULL);
+
+ buf->data = malloc(len);
+ if (buf->data == NULL)
+ goto fail_malloc;
+
+ buf->len = sizeof(_addr);
+ memcpy(buf->data, &_addr, sizeof(_addr));
+
+ return 0;
+ fail_malloc:
+ return -ENOMEM;
+}
+
+static int buf_to_addr(const buffer_t buf,
+ uint64_t * addr)
+{
+ assert(addr != NULL);
+ assert(buf.data != NULL);
+
+ if (buf.len != sizeof(*addr))
+ return -EINVAL;
+
+ *addr = ntoh64(*((uint64_t *) buf.data));
+
+ if (*addr == dht.addr)
+ *addr = INVALID_ADDR;
+
+ return 0;
+}
+
+static uint8_t * dht_dup_key(const uint8_t * key)
+{
+ uint8_t * dup;
+
+ assert(key != NULL);
+ assert(dht.id.len != 0);
+
+ dup = malloc(dht.id.len);
+ if (dup == NULL)
+ return NULL;
+
+ memcpy(dup, key, dht.id.len);
+
+ return dup;
+}
+
+/* DHT */
+
+static struct val_entry * val_entry_create(const buffer_t val,
+ time_t exp)
+{
+ struct val_entry * e;
+ struct timespec now;
+
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+#ifndef __DHT_TEST_ALLOW_EXPIRED__
+ if (exp < now.tv_sec)
+ return NULL; /* Refuse to add expired values */
+#endif
+ e = malloc(sizeof(*e));
+ if (e == NULL)
+ goto fail_entry;
+
+ list_head_init(&e->next);
+
+ e->val.len = val.len;
+ e->val.data = malloc(val.len);
+ if (e->val.data == NULL)
+ goto fail_val;
+
+ memcpy(e->val.data, val.data, val.len);
+
+ e->t_repl = 0;
+ e->t_exp = exp;
+
+ return e;
+
+ fail_val:
+ free(e);
+ fail_entry:
+ return NULL;
+}
+
+static void val_entry_destroy(struct val_entry * v)
+{
+ assert(v->val.data != NULL);
+
+ freebuf(v->val);
+ free(v);
+}
+
+static struct dht_entry * dht_entry_create(const uint8_t * key)
+{
+ struct dht_entry * e;
+
+ assert(key != NULL);
+
+ e = malloc(sizeof(*e));
+ if (e == NULL)
+ goto fail_entry;
+
+ list_head_init(&e->next);
+ list_head_init(&e->vals.list);
+ list_head_init(&e->lvals.list);
+
+ e->vals.len = 0;
+ e->lvals.len = 0;
+
+ e->key = dht_dup_key(key);
+ if (e->key == NULL)
+ goto fail_key;
+
+ return e;
+ fail_key:
+ free(e);
+ fail_entry:
+ return NULL;
+}
+
+static void dht_entry_destroy(struct dht_entry * e)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ assert(e != NULL);
+
+ list_for_each_safe(p, h, &e->vals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ list_del(&v->next);
+ val_entry_destroy(v);
+ --e->vals.len;
+ --dht.db.kv.vals;
+ }
+
+ list_for_each_safe(p, h, &e->lvals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ list_del(&v->next);
+ val_entry_destroy(v);
+ --e->lvals.len;
+ --dht.db.kv.lvals;
+ }
+
+ free(e->key);
+
+ assert(e->vals.len == 0 && e->lvals.len == 0);
+
+ free(e);
+}
+
+static struct val_entry * dht_entry_get_lval(const struct dht_entry * e,
+ const buffer_t val)
+{
+ struct list_head * p;
+
+ assert(e != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ list_for_each(p, &e->lvals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ if (bufcmp(&v->val, &val) == 0)
+ return v;
+ }
+
+ return NULL;
+}
+
+static struct val_entry * dht_entry_get_val(const struct dht_entry * e,
+ const buffer_t val)
+{
+ struct list_head * p;
+
+ assert(e != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ list_for_each(p, &e->vals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ if (bufcmp(&v->val, &val) == 0)
+ return v;
+
+ }
+
+ return NULL;
+}
+
+static int dht_entry_update_val(struct dht_entry * e,
+ buffer_t val,
+ time_t exp)
+{
+ struct val_entry * v;
+ struct timespec now;
+
+ assert(e != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ if (exp < now.tv_sec)
+ return -EINVAL; /* Refuse to add expired values */
+
+ if (dht_entry_get_lval(e, val) != NULL) {
+ log_dbg(KV_FMT " Val already in lvals.", KV_VAL(e->key, val));
+ return 0; /* Refuse to add local values */
+ }
+
+ v = dht_entry_get_val(e, val);
+ if (v == NULL) {
+ v = val_entry_create(val, exp);
+ if (v == NULL)
+ return -ENOMEM;
+
+ list_add_tail(&v->next, &e->vals.list);
+ ++e->vals.len;
+ ++dht.db.kv.vals;
+
+ return 0;
+ }
+
+ if (v->t_exp < exp)
+ v->t_exp = exp;
+
+ return 0;
+}
+
+static int dht_entry_update_lval(struct dht_entry * e,
+ buffer_t val)
+{
+ struct val_entry * v;
+ struct timespec now;
+
+ assert(e != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ v = dht_entry_get_lval(e, val);
+ if (v == NULL) {
+ log_dbg(KV_FMT " Adding lval.", KV_VAL(e->key, val));
+ v = val_entry_create(val, now.tv_sec + dht.t_expire);
+ if (v == NULL)
+ return -ENOMEM;
+
+ list_add_tail(&v->next, &e->lvals.list);
+ ++e->lvals.len;
+ ++dht.db.kv.lvals;
+
+ return 0;
+ }
+
+ return 0;
+}
+
+static int dht_entry_remove_lval(struct dht_entry * e,
+ buffer_t val)
+{
+ struct val_entry * v;
+
+ assert(e != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ v = dht_entry_get_lval(e, val);
+ if (v == NULL)
+ return -ENOENT;
+
+ log_dbg(KV_FMT " Removing lval.", KV_VAL(e->key, val));
+
+ list_del(&v->next);
+ val_entry_destroy(v);
+ --e->lvals.len;
+ --dht.db.kv.lvals;
+
+ return 0;
+}
+
+#define IS_EXPIRED(v, now) ((now)->tv_sec > (v)->t_exp)
+static void dht_entry_remove_expired_vals(struct dht_entry * e)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct timespec now;
+
+ assert(e != NULL);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ list_for_each_safe(p, h, &e->vals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ if (!IS_EXPIRED(v, &now))
+ continue;
+
+ log_dbg(KV_FMT " Value expired." , KV_VAL(e->key, v->val));
+ list_del(&v->next);
+ val_entry_destroy(v);
+ --e->vals.len;
+ --dht.db.kv.vals;
+ }
+}
+
+static struct dht_entry * __dht_kv_find_entry(const uint8_t * key)
+{
+ struct list_head * p;
+
+ assert(key != NULL);
+
+ list_for_each(p, &dht.db.kv.list) {
+ struct dht_entry * e = list_entry(p, struct dht_entry, next);
+ if (!memcmp(key, e->key, dht.id.len))
+ return e;
+ }
+
+ return NULL;
+}
+
+static void dht_kv_remove_expired_entries(void)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct timespec now;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ list_for_each_safe(p, h, &dht.db.kv.list) {
+ struct dht_entry * e = list_entry(p, struct dht_entry, next);
+ dht_entry_remove_expired_vals(e);
+ if (e->lvals.len > 0 || e->vals.len > 0)
+ continue;
+
+ log_dbg(KEY_FMT " Entry removed. ", KEY_VAL(e->key));
+ list_del(&e->next);
+ dht_entry_destroy(e);
+ --dht.db.kv.len;
+ }
+
+ pthread_rwlock_unlock(&dht.db.lock);
+}
+
+
+static struct contact * contact_create(const uint8_t * id,
+ uint64_t addr)
+{
+ struct contact * c;
+ struct timespec t;
+
+ c = malloc(sizeof(*c));
+ if (c == NULL)
+ return NULL;
+
+ list_head_init(&c->next);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &t);
+
+ c->addr = addr;
+ c->fails = 0;
+ c->t_seen = t.tv_sec;
+ c->id = dht_dup_key(id);
+ if (c->id == NULL) {
+ free(c);
+ return NULL;
+ }
+
+ return c;
+}
+
+static void contact_destroy(struct contact * c)
+{
+ assert(c != NULL);
+ assert(list_is_empty(&c->next));
+
+ free(c->id);
+ free(c);
+}
+
+static struct dht_req * dht_req_create(const uint8_t * key)
+{
+ struct dht_req * req;
+ struct timespec now;
+
+ assert(key != NULL);
+
+ clock_gettime(PTHREAD_COND_CLOCK, &now);
+
+ req = malloc(sizeof(*req));
+ if (req == NULL)
+ goto fail_malloc;
+
+ list_head_init(&req->next);
+
+ req->t_exp = now.tv_sec + DHT_T_RESP;
+
+ list_head_init(&req->peers.list);
+ req->peers.len = 0;
+
+ req->key = dht_dup_key(key);
+ if (req->key == NULL)
+ goto fail_dup_key;
+
+ list_head_init(&req->cache.list);
+ req->cache.len = 0;
+
+ return req;
+
+ fail_dup_key:
+ free(req);
+ fail_malloc:
+ return NULL;
+}
+
+static void dht_req_destroy(struct dht_req * req)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ assert(req);
+ assert(req->key);
+
+ list_for_each_safe(p, h, &req->peers.list) {
+ struct peer_entry * e = list_entry(p, struct peer_entry, next);
+ list_del(&e->next);
+ free(e->id);
+ free(e);
+ --req->peers.len;
+ }
+
+ list_for_each_safe(p, h, &req->cache.list) {
+ struct val_entry * e = list_entry(p, struct val_entry, next);
+ list_del(&e->next);
+ val_entry_destroy(e);
+ --req->cache.len;
+ }
+
+ free(req->key);
+
+ assert(req->peers.len == 0);
+
+ free(req);
+}
+
+static struct peer_entry * dht_req_get_peer(struct dht_req * req,
+ struct peer_entry * e)
+{
+ struct list_head * p;
+
+ list_for_each(p, &req->peers.list) {
+ struct peer_entry * x = list_entry(p, struct peer_entry, next);
+ if (x->addr == e->addr)
+ return x;
+ }
+
+ return NULL;
+}
+
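+/* The magic cookie marks an emergency peer whose messages bypass
+ * the usual deduplication and ranking rules. */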
+#define IS_MAGIC(peer) ((peer)->cookie == dht.magic)
+static void dht_req_add_peer(struct dht_req * req,
+ struct peer_entry * e)
+{
+ struct peer_entry * x; /* existing */
+ struct list_head * p; /* iterator */
+ size_t pos = 0;
+
+ assert(req != NULL);
+ assert(e != NULL);
+ assert(e->id != NULL);
+
+ /*
+ * Dedupe messages to the same peer, unless
+ * 1) The previous request was FIND_NODE and now it's FIND_VALUE
+ * 2) We urgently need contacts from emergency peer (magic cookie)
+ */
+ x = dht_req_get_peer(req, e);
+ if (x != NULL && x->code >= e->code && !IS_MAGIC(e))
+ goto skip;
+
+ /* Find how this contact ranks in distance to the key */
+ list_for_each(p, &req->peers.list) {
+ struct peer_entry * y = list_entry(p, struct peer_entry, next);
+ if (IS_CLOSER(y->id, e->id)) {
+ pos++;
+ continue;
+ }
+ break;
+ }
+
+ /* Add a new peer to this request if we need to */
+ if (pos < dht.alpha || IS_MAGIC(e)) {
+ x = malloc(sizeof(*x));
+ if (x == NULL) {
+ log_err("Failed to malloc peer entry.");
+ goto skip;
+ }
+
+ x->cookie = e->cookie;
+ x->addr = e->addr;
+ x->code = e->code;
+ x->t_sent = e->t_sent;
+ x->id = dht_dup_key(e->id);
+ if (x->id == NULL) {
+ log_err("Failed to dup peer ID.");
+ free(x);
+ goto skip;
+ }
+
+ if (IS_MAGIC(e))
+ list_add(&x->next, p);
+ else
+ list_add_tail(&x->next, p);
+ ++req->peers.len;
+ return;
+ }
+ skip:
+ list_del(&e->next);
+ free(e->id);
+ free(e);
+}
+
+static size_t dht_req_add_peers(struct dht_req * req,
+ struct list_head * pl)
+{
+ struct list_head * p;
+ struct list_head * h;
+ size_t n = 0;
+
+ assert(req != NULL);
+ assert(pl != NULL);
+
+ list_for_each_safe(p, h, pl) {
+ struct peer_entry * e = list_entry(p, struct peer_entry, next);
+ dht_req_add_peer(req, e);
+ ++n;
+ }
+
+ return n;
+}
+
+static bool dht_req_has_peer(struct dht_req * req,
+ uint64_t cookie)
+{
+ struct list_head * p;
+
+ assert(req != NULL);
+
+ list_for_each(p, &req->peers.list) {
+ struct peer_entry * e = list_entry(p, struct peer_entry, next);
+ if (e->cookie == cookie)
+ return true;
+ }
+
+ return false;
+}
+
+static void peer_list_destroy(struct list_head * pl)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ assert(pl != NULL);
+
+ list_for_each_safe(p, h, pl) {
+ struct peer_entry * e = list_entry(p, struct peer_entry, next);
+ list_del(&e->next);
+ free(e->id);
+ free(e);
+ }
+}
+
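+/*
+ * Turn up to alpha contacts into peer entries with fresh cookies,
+ * consuming the contact list in the process.
+ */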
+static int dht_kv_create_peer_list(struct list_head * cl,
+ struct list_head * pl,
+ enum dht_code code)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct timespec now;
+ size_t len;
+
+ assert(cl != NULL);
+ assert(pl != NULL);
+ assert(list_is_empty(pl));
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ len = 0;
+
+ list_for_each_safe(p, h, cl) {
+ struct contact * c = list_entry(p, struct contact, next);
+ struct peer_entry * e;
+ if (len++ == dht.alpha)
+ break;
+
+ e = malloc(sizeof(*e));
+ if (e == NULL)
+ return -ENOMEM;
+
+ e->cookie = generate_cookie();
+ e->code = code;
+ e->addr = c->addr;
+ e->t_sent = now.tv_sec;
+
+ e->id = c->id;
+
+ list_add_tail(&e->next, pl);
+
+ list_del(&c->next);
+ c->id = NULL; /* we stole the id */
+ contact_destroy(c);
+ }
+
+ return 0;
+}
+
+static struct dht_req * __dht_kv_req_get_req(const uint8_t * key)
+{
+ struct list_head * p;
+
+ list_for_each(p, &dht.reqs.list) {
+ struct dht_req * r = list_entry(p, struct dht_req, next);
+ if (memcmp(r->key, key, dht.id.len) == 0)
+ return r;
+ }
+
+ return NULL;
+}
+
+static struct dht_req * __dht_kv_get_req_cache(const uint8_t * key)
+{
+ struct dht_req * req;
+
+ assert(key != NULL);
+
+ req = __dht_kv_req_get_req(key);
+ if (req == NULL)
+ return NULL;
+
+ if (req->cache.len == 0)
+ return NULL;
+
+ return req;
+}
+
+static void __dht_kv_req_remove(const uint8_t * key)
+{
+ struct dht_req * req;
+
+ assert(key != NULL);
+
+ req = __dht_kv_req_get_req(key);
+ if (req == NULL)
+ return;
+
+ list_del(&req->next);
+ --dht.reqs.len;
+
+ dht_req_destroy(req);
+}
+
+static struct dht_req * __dht_kv_get_req_peer(const uint8_t * key,
+ uint64_t cookie)
+{
+ struct dht_req * req;
+
+ assert(key != NULL);
+
+ req = __dht_kv_req_get_req(key);
+ if (req == NULL)
+ return NULL;
+
+ if (!dht_req_has_peer(req, cookie))
+ return NULL;
+
+ return req;
+}
+
+static bool dht_kv_has_req(const uint8_t * key,
+ uint64_t cookie)
+{
+ bool found;
+
+ pthread_mutex_lock(&dht.reqs.mtx);
+
+ found = __dht_kv_get_req_peer(key, cookie) != NULL;
+
+ pthread_mutex_unlock(&dht.reqs.mtx);
+
+ return found;
+}
+
+/*
+ * This will filter the peer list for addresses that still need to be
+ * contacted.
+ */
+static int dht_kv_update_req(const uint8_t * key,
+ struct list_head * pl)
+{
+ struct dht_req * req;
+ struct timespec now;
+
+ assert(key != NULL);
+ assert(pl != NULL);
+ assert(!list_is_empty(pl));
+
+ clock_gettime(PTHREAD_COND_CLOCK, &now);
+
+ pthread_mutex_lock(&dht.reqs.mtx);
+
+ req = __dht_kv_req_get_req(key);
+ if (req == NULL) {
+ if (dht.reqs.len == DHT_MAX_REQS) {
+ log_err(KEY_FMT " Max reqs reached (%zu).",
+ KEY_VAL(key), dht.reqs.len);
+ peer_list_destroy(pl);
+ goto fail_req;
+ }
+ req = dht_req_create(key);
+ if (req == NULL) {
+ log_err(KEY_FMT "Failed to create req.", KEY_VAL(key));
+ goto fail_req;
+ }
+ list_add_tail(&req->next, &dht.reqs.list);
+ ++dht.reqs.len;
+ }
+
+ if (req->cache.len > 0) /* Already have values */
+ peer_list_destroy(pl);
+
+ dht_req_add_peers(req, pl);
+ req->t_exp = now.tv_sec + DHT_T_RESP;
+
+ if (dht.reqs.len > DHT_WARN_REQS) {
+ log_warn("Number of outstanding requests (%zu) exceeds %u.",
+ dht.reqs.len, DHT_WARN_REQS);
+ }
+
+ pthread_mutex_unlock(&dht.reqs.mtx);
+
+ return 0;
+ fail_req:
+ pthread_mutex_unlock(&dht.reqs.mtx);
+ return -1;
+}
+
+static int dht_kv_respond_req(uint8_t * key,
+ binary_data_t * vals,
+ size_t len)
+{
+ struct dht_req * req;
+ struct timespec now;
+ size_t i;
+
+ assert(key != NULL);
+ assert(vals != NULL);
+ assert(len > 0);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ pthread_mutex_lock(&dht.reqs.mtx);
+
+ req = __dht_kv_req_get_req(key);
+ if (req == NULL) {
+ log_warn(KEY_FMT " Failed to find req.", KEY_VAL(key));
+ goto fail_req;
+ }
+
+ for (i = 0; i < len; ++i) {
+ struct val_entry * e;
+ buffer_t val;
+ val.data = vals[i].data;
+ val.len = vals[i].len;
+ e = val_entry_create(val, now.tv_sec + DHT_T_CACHE);
+ if (e == NULL) {
+ log_err(" Failed to create val_entry.");
+ continue;
+ }
+
+ list_add_tail(&e->next, &req->cache.list);
+ ++req->cache.len;
+ }
+
+ pthread_cond_broadcast(&dht.reqs.cond);
+
+ pthread_mutex_unlock(&dht.reqs.mtx);
+
+ return 0;
+ fail_req:
+ pthread_mutex_unlock(&dht.reqs.mtx);
+ return -1;
+}
+
+static ssize_t dht_kv_wait_req(const uint8_t * key,
+ buffer_t ** vals)
+{
+ struct list_head * p;
+ struct dht_req * req;
+ struct timespec t;
+#ifdef __DHT_TEST__
+ struct timespec intv = TIMESPEC_INIT_MS(10);
+#else
+ struct timespec intv = TIMESPEC_INIT_S(DHT_T_RESP);
+#endif
+ size_t max;
+ size_t i = 0;
+ int ret = 0;
+
+ assert(key != NULL);
+ assert(vals != NULL);
+
+ clock_gettime(PTHREAD_COND_CLOCK, &t);
+
+ ts_add(&t, &intv, &t);
+
+ pthread_mutex_lock(&dht.reqs.mtx);
+
+ pthread_cleanup_push(__cleanup_mutex_unlock, &dht.reqs.mtx);
+
+ while ((req = __dht_kv_get_req_cache(key)) == NULL) {
+ ret = pthread_cond_timedwait(&dht.reqs.cond, &dht.reqs.mtx, &t);
+ if (ret == ETIMEDOUT)
+ break;
+ }
+
+ pthread_cleanup_pop(false);
+
+ if (ret == ETIMEDOUT) {
+ log_warn(KEY_FMT " Req timed out.", KEY_VAL(key));
+ __dht_kv_req_remove(key);
+ goto timedout;
+ }
+
+ max = MIN(req->cache.len, DHT_MAX_VALS);
+ if (max == 0)
+ goto no_vals;
+
+ *vals = malloc(max * sizeof(**vals));
+ if (*vals == NULL) {
+ log_err(KEY_FMT "Failed to malloc val buffer.", KEY_VAL(key));
+ goto fail_vals;
+ }
+
+ memset(*vals, 0, max * sizeof(**vals));
+
+ list_for_each(p, &req->cache.list) {
+ struct val_entry * v;
+ if (i == max)
+ break; /* We have enough values */
+ v = list_entry(p, struct val_entry, next);
+ (*vals)[i].data = malloc(v->val.len);
+ if ((*vals)[i].data == NULL)
+ goto fail_val_data;
+
+ (*vals)[i].len = v->val.len;
+ memcpy((*vals)[i++].data, v->val.data, v->val.len);
+ }
+
+ pthread_mutex_unlock(&dht.reqs.mtx);
+
+ return i;
+ no_vals:
+ pthread_mutex_unlock(&dht.reqs.mtx);
+ return 0;
+ fail_val_data:
+ freebufs(*vals, i);
+ fail_vals:
+ pthread_mutex_unlock(&dht.reqs.mtx);
+ return -ENOMEM;
+ timedout:
+ pthread_mutex_unlock(&dht.reqs.mtx);
+ return -ETIMEDOUT;
+}
+
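+/*
+ * The contact trie consumes DHT_BETA bits of the id per level: a
+ * bucket at depth d selects its child with bits [d * DHT_BETA,
+ * (d + 1) * DHT_BETA) of the id, counted from the most significant
+ * bit. E.g. with DHT_BETA == 2, a bucket at depth 3 indexes its
+ * children with bits 6-7, i.e. the two least significant bits of the
+ * first byte.
+ */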
+static struct bucket * iter_bucket(struct bucket * b,
+ const uint8_t * id)
+{
+ uint8_t byte;
+ uint8_t mask;
+
+ assert(b != NULL);
+
+ if (b->children[0] == NULL)
+ return b;
+
+ byte = id[(b->depth * DHT_BETA) / CHAR_BIT];
+
+ mask = ((1L << DHT_BETA) - 1) & 0xFF;
+
+ byte >>= (CHAR_BIT - DHT_BETA) -
+ (((b->depth) * DHT_BETA) & (CHAR_BIT - 1));
+
+ return iter_bucket(b->children[(byte & mask)], id);
+}
+
+static struct bucket * __dht_kv_get_bucket(const uint8_t * id)
+{
+ assert(dht.db.contacts.root != NULL);
+
+ return iter_bucket(dht.db.contacts.root, id);
+}
+
+static void contact_list_add(struct list_head * l,
+ struct contact * c)
+{
+ struct list_head * p;
+
+ assert(l != NULL);
+ assert(c != NULL);
+
+ list_for_each(p, l) {
+ struct contact * e = list_entry(p, struct contact, next);
+ if (IS_CLOSER(e->id, c->id))
+ continue;
+ break; /* insert before the first contact that is not closer */
+ }
+
+ list_add_tail(&c->next, p);
+}
+
+static ssize_t dht_kv_contact_list(const uint8_t * key,
+ struct list_head * l,
+ size_t max)
+{
+ struct list_head * p;
+ struct bucket * b;
+ struct timespec t;
+ size_t i;
+ size_t len = 0;
+
+ assert(l != NULL);
+ assert(key != NULL);
+ assert(list_is_empty(l));
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &t);
+
+ max = MIN(max, dht.k);
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ b = __dht_kv_get_bucket(key);
+ if (b == NULL) {
+ log_err(KEY_FMT " Failed to get bucket.", KEY_VAL(key));
+ goto fail_bucket;
+ }
+
+ b->t_refr = t.tv_sec + dht.t_refresh;
+
+ if (b->contacts.len == dht.k || b->parent == NULL) {
+ list_for_each(p, &b->contacts.list) {
+ struct contact * c;
+ struct contact * d;
+ c = list_entry(p, struct contact, next);
+ if (c->addr == dht.addr)
+ continue;
+ d = contact_create(c->id, c->addr);
+ if (d == NULL)
+ continue;
+ contact_list_add(l, d);
+ if (++len == max)
+ break;
+ }
+ } else {
+ struct bucket * d = b->parent;
+ for (i = 0; i < (1L << DHT_BETA) && len < dht.k; ++i) {
+ list_for_each(p, &d->children[i]->contacts.list) {
+ struct contact * c;
+ struct contact * d;
+ c = list_entry(p, struct contact, next);
+ if (c->addr == dht.addr)
+ continue;
+ d = contact_create(c->id, c->addr);
+ if (d == NULL)
+ continue;
+ contact_list_add(l, d);
+ if (++len == max)
+ break;
+ }
+ }
+ }
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return len;
+ fail_bucket:
+ pthread_rwlock_unlock(&dht.db.lock);
+ return -1;
+}
+
+static void contact_list_destroy(struct list_head * l)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ assert(l != NULL);
+
+ list_for_each_safe(p, h, l) {
+ struct contact * c = list_entry(p, struct contact, next);
+ list_del(&c->next);
+ contact_destroy(c);
+ }
+}
+
+static ssize_t dht_kv_get_contacts(const uint8_t * key,
+ dht_contact_msg_t *** msgs)
+{
+ struct list_head cl;
+ struct list_head * p;
+ struct list_head * h;
+ ssize_t len;
+ size_t i = 0;
+
+ assert(key != NULL);
+ assert(msgs != NULL);
+
+ list_head_init(&cl);
+
+ len = dht_kv_contact_list(key, &cl, dht.k);
+ if (len == 0) {
+ *msgs = NULL;
+ return 0;
+ }
+
+ *msgs = malloc(len * sizeof(**msgs));
+ if (*msgs == NULL)
+ goto fail_msgs;
+
+ list_for_each_safe(p, h, &cl) {
+ struct contact * c;
+ (*msgs)[i] = malloc(sizeof(***msgs));
+ if ((*msgs)[i] == NULL)
+ goto fail_contact;
+
+ dht_contact_msg__init((*msgs)[i]);
+ c = list_entry(p, struct contact, next);
+ list_del(&c->next);
+ (*msgs)[i]->id.data = c->id;
+ (*msgs)[i]->id.len = dht.id.len;
+ (*msgs)[i++]->addr = c->addr;
+ free(c);
+ }
+
+ return i;
+ fail_contact:
+ while (i-- > 0)
+ dht_contact_msg__free_unpacked((*msgs)[i], NULL);
+ free(*msgs);
+ *msgs = NULL;
+ fail_msgs:
+ contact_list_destroy(&cl);
+ return -ENOMEM;
+}
+
+/* Build a refresh list. */
+static void __dht_kv_bucket_refresh_list(struct bucket * b,
+ time_t t,
+ struct list_head * r)
+{
+ struct contact * c;
+ struct contact * d;
+
+ assert(b != NULL);
+
+ if (t < b->t_refr)
+ return;
+
+ if (*b->children != NULL) {
+ size_t i;
+ for (i = 0; i < (1L << DHT_BETA); ++i)
+ __dht_kv_bucket_refresh_list(b->children[i], t, r);
+ }
+
+ if (b->contacts.len == 0)
+ return;
+
+ c = list_first_entry(&b->contacts.list, struct contact, next);
+ if (t > c->t_seen + dht.t_refresh) {
+ d = contact_create(c->id, c->addr);
+ if (d != NULL)
+ list_add(&d->next, r);
+ }
+}
+
+static struct bucket * bucket_create(void)
+{
+ struct bucket * b;
+ struct timespec t;
+ size_t i;
+
+ b = malloc(sizeof(*b));
+ if (b == NULL)
+ return NULL;
+
+ list_head_init(&b->contacts.list);
+ b->contacts.len = 0;
+
+ list_head_init(&b->alts.list);
+ b->alts.len = 0;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &t);
+ b->t_refr = t.tv_sec + dht.t_refresh;
+
+ for (i = 0; i < (1L << DHT_BETA); ++i)
+ b->children[i] = NULL;
+
+ b->parent = NULL;
+ b->depth = 0;
+ b->mask = 0;
+
+ return b;
+}
+
+static void bucket_destroy(struct bucket * b)
+{
+ struct list_head * p;
+ struct list_head * h;
+ size_t i;
+
+ assert(b != NULL);
+
+ for (i = 0; i < (1L << DHT_BETA); ++i)
+ if (b->children[i] != NULL)
+ bucket_destroy(b->children[i]);
+
+ list_for_each_safe(p, h, &b->contacts.list) {
+ struct contact * c = list_entry(p, struct contact, next);
+ list_del(&c->next);
+ contact_destroy(c);
+ --b->contacts.len;
+ }
+
+ list_for_each_safe(p, h, &b->alts.list) {
+ struct contact * c = list_entry(p, struct contact, next);
+ list_del(&c->next);
+ contact_destroy(c);
+ --b->alts.len;
+ }
+
+ free(b);
+}
+
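+/*
+ * A bucket at depth d covers exactly the ids whose (d - 1)-th group
+ * of DHT_BETA bits equals the mask it was assigned when its parent
+ * split; the root (depth 0) covers every id.
+ */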
+static bool bucket_has_id(struct bucket * b,
+ const uint8_t * id)
+{
+ uint8_t mask;
+ uint8_t byte;
+
+ if (b->depth == 0)
+ return true;
+
+ byte = id[(b->depth * DHT_BETA) / CHAR_BIT];
+
+ mask = ((1L << DHT_BETA) - 1) & 0xFF;
+
+ byte >>= (CHAR_BIT - DHT_BETA) -
+ (((b->depth - 1) * DHT_BETA) & (CHAR_BIT - 1));
+
+ return ((byte & mask) == b->mask);
+}
+
+static int move_contacts(struct bucket * b,
+ struct bucket * c)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct contact * d;
+
+ assert(b != NULL);
+ assert(c != NULL);
+
+ list_for_each_safe(p, h, &b->contacts.list) {
+ d = list_entry(p, struct contact, next);
+ if (bucket_has_id(c, d->id)) {
+ list_del(&d->next);
+ --b->contacts.len;
+ list_add_tail(&d->next, &c->contacts.list);
+ ++c->contacts.len;
+ }
+ }
+
+ return 0;
+}
+
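+/*
+ * Splitting creates 2^DHT_BETA children and redistributes the
+ * contacts according to their next DHT_BETA id bits. If all contacts
+ * land in the same child, that child is split again, so an overfull
+ * leaf is divided until no leaf holds more than k contacts.
+ */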
+static int split_bucket(struct bucket * b)
+{
+ uint8_t mask = 0;
+ size_t i;
+ size_t b_len;
+
+ assert(b);
+ assert(b->alts.len == 0);
+ assert(b->contacts.len != 0);
+ assert(b->children[0] == NULL);
+
+ b_len = b->contacts.len;
+
+ for (i = 0; i < (1L << DHT_BETA); ++i) {
+ b->children[i] = bucket_create();
+ if (b->children[i] == NULL)
+ goto fail_child;
+
+ b->children[i]->depth = b->depth + 1;
+ b->children[i]->mask = mask;
+ b->children[i]->parent = b;
+
+ move_contacts(b, b->children[i]);
+
+ mask++;
+ }
+
+ for (i = 0; i < (1L << DHT_BETA); ++i)
+ if (b->children[i]->contacts.len == b_len)
+ split_bucket(b->children[i]);
+
+ return 0;
+ fail_child:
+ while (i-- > 0)
+ bucket_destroy(b->children[i]);
+ return -1;
+}
+
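+/*
+ * Kademlia-style bucket maintenance: an existing entry for the
+ * address is removed first (refresh), only the bucket covering our
+ * own id is split when full, and contacts that do not fit are kept
+ * in the alts list, evicting the oldest alternative when that list
+ * is full as well.
+ */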
+static int dht_kv_update_contacts(const uint8_t * id,
+ uint64_t addr)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct bucket * b;
+ struct contact * c;
+
+ assert(id != NULL);
+ assert(addr != INVALID_ADDR);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ b = __dht_kv_get_bucket(id);
+ if (b == NULL) {
+ log_err(PEER_FMT " Failed to get bucket.", PEER_VAL(id, addr));
+ goto fail_update;
+ }
+
+ c = contact_create(id, addr);
+ if (c == NULL) {
+ log_err(PEER_FMT " Failed to create contact.",
+ PEER_VAL(id, addr));
+ goto fail_update;
+ }
+
+ list_for_each_safe(p, h, &b->contacts.list) {
+ struct contact * d = list_entry(p, struct contact, next);
+ if (d->addr == addr) {
+ list_del(&d->next);
+ contact_destroy(d);
+ --b->contacts.len;
+ }
+ }
+
+ if (b->contacts.len == dht.k) {
+ if (bucket_has_id(b, dht.id.data)) {
+ list_add_tail(&c->next, &b->contacts.list);
+ ++b->contacts.len;
+ if (split_bucket(b)) {
+ list_del(&c->next);
+ contact_destroy(c);
+ --b->contacts.len;
+ }
+ } else if (b->alts.len == dht.k) {
+ struct contact * d;
+ d = list_first_entry(&b->alts.list,
+ struct contact, next);
+ list_del(&d->next);
+ contact_destroy(d);
+ list_add_tail(&c->next, &b->alts.list);
+ ++b->alts.len;
+ } else {
+ list_add_tail(&c->next, &b->alts.list);
+ ++b->alts.len;
+ }
+ } else {
+ list_add_tail(&c->next, &b->contacts.list);
+ ++b->contacts.len;
+ }
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return 0;
+ fail_update:
+ pthread_rwlock_unlock(&dht.db.lock);
+ return -1;
+}
+
+static time_t gcd(time_t a,
+ time_t b)
+{
+ if (a == 0)
+ return b;
+
+ return gcd(b % a, a);
+}
+
+static dht_contact_msg_t * dht_kv_src_contact_msg(void)
+{
+ dht_contact_msg_t * src;
+
+ src = malloc(sizeof(*src));
+ if (src == NULL)
+ goto fail_malloc;
+
+ dht_contact_msg__init(src);
+
+ src->id.data = dht_dup_key(dht.id.data);
+ if (src->id.data == NULL)
+ goto fail_id;
+
+ src->id.len = dht.id.len;
+ src->addr = dht.addr;
+
+ return src;
+ fail_id:
+ dht_contact_msg__free_unpacked(src, NULL);
+ fail_malloc:
+ return NULL;
+}
+
+static dht_msg_t * dht_kv_find_req_msg(const uint8_t * key,
+ enum dht_code code)
+{
+ dht_msg_t * msg;
+
+ assert(key != NULL);
+
+ msg = malloc(sizeof(*msg));
+ if (msg == NULL)
+ goto fail_malloc;
+
+ dht_msg__init(msg);
+ msg->code = code;
+
+ msg->src = dht_kv_src_contact_msg();
+ if (msg->src == NULL)
+ goto fail_msg;
+
+ msg->find = malloc(sizeof(*msg->find));
+ if (msg->find == NULL)
+ goto fail_msg;
+
+ dht_find_req_msg__init(msg->find);
+
+ msg->find->key.data = dht_dup_key(key);
+ if (msg->find->key.data == NULL)
+ goto fail_msg;
+
+ msg->find->key.len = dht.id.len;
+ msg->find->cookie = DHT_INVALID;
+
+ return msg;
+
+ fail_msg:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_malloc:
+ return NULL;
+}
+
+static dht_msg_t * dht_kv_find_node_req_msg(const uint8_t * key)
+{
+ return dht_kv_find_req_msg(key, DHT_FIND_NODE_REQ);
+}
+
+static dht_msg_t * dht_kv_find_value_req_msg(const uint8_t * key)
+{
+ return dht_kv_find_req_msg(key, DHT_FIND_VALUE_REQ);
+}
+
+static dht_msg_t * dht_kv_find_node_rsp_msg(uint8_t * key,
+ uint64_t cookie,
+ dht_contact_msg_t *** contacts,
+ size_t len)
+{
+ dht_msg_t * msg;
+
+ msg = malloc(sizeof(*msg));
+ if (msg == NULL)
+ goto fail_malloc;
+
+ dht_msg__init(msg);
+ msg->code = DHT_FIND_NODE_RSP;
+
+ msg->src = dht_kv_src_contact_msg();
+ if (msg->src == NULL)
+ goto fail_msg;
+
+ msg->node = malloc(sizeof(*msg->node));
+ if (msg->node == NULL)
+ goto fail_msg;
+
+ dht_find_node_rsp_msg__init(msg->node);
+
+ msg->node->key.data = dht_dup_key(key);
+ if (msg->node->key.data == NULL)
+ goto fail_msg;
+
+ msg->node->cookie = cookie;
+ msg->node->key.len = dht.id.len;
+ msg->node->n_contacts = len;
+ if (len != 0) { /* Steal the ptr */
+ msg->node->contacts = *contacts;
+ *contacts = NULL;
+ }
+
+ return msg;
+
+ fail_msg:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_malloc:
+ return NULL;
+}
+
+static dht_msg_t * dht_kv_find_value_rsp_msg(uint8_t * key,
+ uint64_t cookie,
+ dht_contact_msg_t *** contacts,
+ size_t n_contacts,
+ buffer_t ** vals,
+ size_t n_vals)
+{
+ dht_msg_t * msg;
+
+ msg = dht_kv_find_node_rsp_msg(key, cookie, contacts, n_contacts);
+ if (msg == NULL)
+ goto fail_node_rsp;
+
+ msg->code = DHT_FIND_VALUE_RSP;
+
+ msg->val = malloc(sizeof(*msg->val));
+ if (msg->val == NULL)
+ goto fail_msg;
+
+ dht_find_value_rsp_msg__init(msg->val);
+
+ msg->val->n_values = n_vals;
+ if (n_vals != 0) /* Steal the ptr */
+ msg->val->values = (binary_data_t *) *vals;
+
+ return msg;
+
+ fail_msg:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_node_rsp:
+ return NULL;
+}
+
+static dht_msg_t * dht_kv_store_msg(const uint8_t * key,
+ const buffer_t val,
+ time_t exp)
+{
+ dht_msg_t * msg;
+
+ assert(key != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ msg = malloc(sizeof(*msg));
+ if (msg == NULL)
+ goto fail_malloc;
+
+ dht_msg__init(msg);
+
+ msg->code = DHT_STORE;
+
+ msg->src = dht_kv_src_contact_msg();
+ if (msg->src == NULL)
+ goto fail_msg;
+
+ msg->store = malloc(sizeof(*msg->store));
+ if (msg->store == NULL)
+ goto fail_msg;
+
+ dht_store_msg__init(msg->store);
+
+ msg->store->key.data = dht_dup_key(key);
+ if (msg->store->key.data == NULL)
+ goto fail_msg;
+
+ msg->store->key.len = dht.id.len;
+ msg->store->val.data = malloc(val.len);
+ if (msg->store->val.data == NULL)
+ goto fail_msg;
+
+ memcpy(msg->store->val.data, val.data, val.len);
+
+ msg->store->val.len = val.len;
+ msg->store->exp = exp;
+
+ return msg;
+
+ fail_msg:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_malloc:
+ return NULL;
+}
+
+static ssize_t dht_kv_retrieve(const uint8_t * key,
+ buffer_t ** vals)
+{
+ struct dht_entry * e;
+ struct list_head * p;
+ size_t n;
+ size_t i;
+
+ assert(key != NULL);
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ e = __dht_kv_find_entry(key);
+ if (e == NULL)
+ goto no_vals;
+
+ n = MIN(DHT_MAX_VALS, e->vals.len + e->lvals.len);
+ if (n == 0)
+ goto no_vals;
+
+ *vals = malloc(n * sizeof(**vals));
+ if (*vals == NULL)
+ goto fail_vals;
+
+ memset(*vals, 0, n * sizeof(**vals));
+
+ i = 0;
+
+ list_for_each(p, &e->vals.list) {
+ struct val_entry * v;
+ if (i == n)
+ break; /* We have enough values */
+ v = list_entry(p, struct val_entry, next);
+ (*vals)[i].data = malloc(v->val.len);
+ if ((*vals)[i].data == NULL)
+ goto fail_val_data;
+
+ (*vals)[i].len = v->val.len;
+ memcpy((*vals)[i++].data, v->val.data, v->val.len);
+ }
+
+ list_for_each(p, &e->lvals.list) {
+ struct val_entry * v;
+ if (i == n)
+ break; /* We have enough values */
+ v = list_entry(p, struct val_entry, next);
+ (*vals)[i].data = malloc(v->val.len);
+ if ((*vals)[i].data == NULL)
+ goto fail_val_data;
+
+ (*vals)[i].len = v->val.len;
+ memcpy((*vals)[i++].data, v->val.data, v->val.len);
+ }
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return (ssize_t) i;
+
+ fail_val_data:
+ pthread_rwlock_unlock(&dht.db.lock);
+ freebufs(*vals, i);
+ *vals = NULL;
+ return -ENOMEM;
+ fail_vals:
+ pthread_rwlock_unlock(&dht.db.lock);
+ return -ENOMEM;
+ no_vals:
+ pthread_rwlock_unlock(&dht.db.lock);
+ *vals = NULL;
+ return 0;
+}
+
+static void __cleanup_dht_msg(void * msg)
+{
+ dht_msg__free_unpacked((dht_msg_t *) msg, NULL);
+}
+
+#ifdef DEBUG_PROTO_DHT
+static void dht_kv_debug_msg(dht_msg_t * msg)
+{
+ struct tm * tm;
+ char tmstr[RIB_TM_STRLEN];
+ time_t stamp;
+ size_t i;
+
+ if (msg == NULL)
+ return;
+
+ pthread_cleanup_push(__cleanup_dht_msg, msg);
+
+ switch (msg->code) {
+ case DHT_STORE:
+ log_proto(" key: " HASH_FMT64 " [%zu bytes]",
+ HASH_VAL64(msg->store->key.data),
+ msg->store->key.len);
+ log_proto(" val: " HASH_FMT64 " [%zu bytes]",
+ HASH_VAL64(msg->store->val.data),
+ msg->store->val.len);
+ stamp = msg->store->exp;
+ tm = gmtime(&stamp);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+ log_proto(" exp: %s.", tmstr);
+ break;
+ case DHT_FIND_NODE_REQ:
+ /* FALLTHRU */
+ case DHT_FIND_VALUE_REQ:
+ log_proto(" cookie: " HASH_FMT64,
+ HASH_VAL64(&msg->find->cookie));
+ log_proto(" key: " HASH_FMT64 " [%zu bytes]",
+ HASH_VAL64(msg->find->key.data),
+ msg->find->key.len);
+ break;
+ case DHT_FIND_VALUE_RSP:
+ log_proto(" cookie: " HASH_FMT64,
+ HASH_VAL64(&msg->node->cookie));
+ log_proto(" key: " HASH_FMT64 " [%zu bytes]",
+ HASH_VAL64(msg->node->key.data),
+ msg->node->key.len);
+ log_proto(" values: [%zd]", msg->val->n_values);
+ for (i = 0; i < msg->val->n_values; i++)
+ log_proto(" " HASH_FMT64 " [%zu bytes]",
+ HASH_VAL64(msg->val->values[i].data),
+ msg->val->values[i].len);
+ log_proto(" contacts: [%zd]", msg->node->n_contacts);
+ for (i = 0; i < msg->node->n_contacts; i++) {
+ dht_contact_msg_t * c = msg->node->contacts[i];
+ log_proto(" " PEER_FMT,
+ PEER_VAL(c->id.data, c->addr));
+ }
+ break;
+ case DHT_FIND_NODE_RSP:
+ log_proto(" cookie: " HASH_FMT64,
+ HASH_VAL64(&msg->node->cookie));
+ log_proto(" key: " HASH_FMT64 " [%zu bytes]",
+ HASH_VAL64(msg->node->key.data), msg->node->key.len);
+ log_proto(" contacts: [%zd]", msg->node->n_contacts);
+ for (i = 0; i < msg->node->n_contacts; i++) {
+ dht_contact_msg_t * c = msg->node->contacts[i];
+ log_proto(" " PEER_FMT,
+ PEER_VAL(c->id.data, c->addr));
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ pthread_cleanup_pop(false);
+}
+
+static void dht_kv_debug_msg_snd(dht_msg_t * msg,
+ uint8_t * id,
+ uint64_t addr)
+{
+ if (msg == NULL)
+ return;
+
+ log_proto(TX_HDR_FMT ".", TX_HDR_VAL(msg, id, addr));
+
+ dht_kv_debug_msg(msg);
+}
+
+static void dht_kv_debug_msg_rcv(dht_msg_t * msg)
+{
+ if (msg == NULL)
+ return;
+
+ log_proto(RX_HDR_FMT ".", RX_HDR_VAL(msg));
+
+ dht_kv_debug_msg(msg);
+}
+#endif
+
+#ifndef __DHT_TEST__
+static int dht_send_msg(dht_msg_t * msg,
+ uint64_t addr)
+{
+ size_t len;
+ struct shm_du_buff * sdb;
+
+ if (msg == NULL)
+ return 0;
+
+ assert(addr != INVALID_ADDR && addr != dht.addr);
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ log_warn("%s failed to pack.", DHT_CODE(msg));
+ goto fail_msg;
+ }
+
+ if (ipcp_sdb_reserve(&sdb, len)) {
+ log_warn("%s failed to get sdb.", DHT_CODE(msg));
+ goto fail_msg;
+ }
+
+ dht_msg__pack(msg, shm_du_buff_head(sdb));
+
+ if (dt_write_packet(addr, QOS_CUBE_BE, dht.eid, sdb) < 0) {
+ log_warn("%s write failed", DHT_CODE(msg));
+ goto fail_send;
+ }
+
+ return 0;
+ fail_send:
+ ipcp_sdb_release(sdb);
+ fail_msg:
+ return -1;
+}
+#else /* function for testing */
+static int dht_send_msg(dht_msg_t * msg,
+ uint64_t addr)
+{
+ buffer_t buf;
+
+ assert(msg != NULL);
+ assert(addr != INVALID_ADDR && addr != dht.addr);
+
+ buf.len = dht_msg__get_packed_size(msg);
+ if (buf.len == 0) {
+ log_warn("%s failed to pack.", DHT_CODE(msg));
+ goto fail_msg;
+ }
+
+ buf.data = malloc(buf.len);
+ if (buf.data == NULL) {
+ log_warn("%s failed to malloc buf.", DHT_CODE(msg));
+ goto fail_msg;
+ }
+
+ dht_msg__pack(msg, buf.data);
+
+ if (sink_send_msg(&buf, addr) < 0) {
+ log_warn("%s write failed", DHT_CODE(msg));
+ goto fail_send;
+ }
+
+ return 0;
+ fail_send:
+ freebuf(buf);
+ fail_msg:
+ return -1;
+}
+#endif /* __DHT_TEST__ */
+
+static void __cleanup_peer_list(void * pl)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ assert(pl != NULL);
+
+ list_for_each_safe(p, h, (struct list_head *) pl) {
+ struct peer_entry * e = list_entry(p, struct peer_entry, next);
+ list_del(&e->next);
+ free(e->id);
+ free(e);
+ }
+}
+
+static int dht_kv_send_msgs(dht_msg_t * msg,
+ struct list_head * pl)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ pthread_cleanup_push(__cleanup_dht_msg, msg);
+ pthread_cleanup_push(__cleanup_peer_list, pl);
+
+ list_for_each_safe(p, h, pl) {
+ struct peer_entry * e = list_entry(p, struct peer_entry, next);
+ if (IS_REQUEST(msg->code)) {
+ msg->find->cookie = e->cookie;
+ assert(msg->find->cookie != DHT_INVALID);
+ }
+ if (dht_send_msg(msg, e->addr) < 0)
+ continue;
+
+#ifdef DEBUG_PROTO_DHT
+ dht_kv_debug_msg_snd(msg, e->id, e->addr);
+#endif
+ list_del(&e->next);
+ free(e->id);
+ free(e);
+ }
+
+ pthread_cleanup_pop(false);
+ pthread_cleanup_pop(false);
+
+ return list_is_empty(pl) ? 0 : -1;
+}
+
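+/*
+ * Request fan-out, as in Kademlia: lookups (FIND_NODE / FIND_VALUE)
+ * go to the alpha closest known contacts, STORE goes to the k
+ * closest.
+ */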
+static int dht_kv_get_peer_list_for_msg(dht_msg_t * msg,
+ struct list_head * pl)
+{
+ struct list_head cl; /* contact list */
+ uint8_t * key; /* key in the request */
+ size_t max;
+
+ assert(msg != NULL);
+
+ assert(list_is_empty(pl));
+
+ max = msg->code == DHT_STORE ? dht.k : dht.alpha;
+
+ switch (msg->code) {
+ case DHT_FIND_NODE_REQ:
+ /* FALLTHRU */
+ case DHT_FIND_VALUE_REQ:
+ key = msg->find->key.data;
+ break;
+ case DHT_STORE:
+ key = msg->store->key.data;
+ break;
+ default:
+ log_err("Invalid DHT msg code (%d).", msg->code);
+ return -1;
+ }
+
+ list_head_init(&cl);
+
+ if (dht_kv_contact_list(key, &cl, max) < 0) {
+ log_err(KEY_FMT " Failed to get contact list.", KEY_VAL(key));
+ goto fail_contacts;
+ }
+
+ if (list_is_empty(&cl)) {
+ log_warn(KEY_FMT " No available contacts.", KEY_VAL(key));
+ goto fail_contacts;
+ }
+
+ if (dht_kv_create_peer_list(&cl, pl, msg->code) < 0) {
+ log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key));
+ goto fail_peers;
+ }
+
+ contact_list_destroy(&cl);
+ return 0;
+ fail_peers:
+ contact_list_destroy(&cl);
+ fail_contacts:
+ return -1;
+}
+
+static int dht_kv_store_remote(const uint8_t * key,
+ const buffer_t val,
+ time_t exp)
+{
+ dht_msg_t * msg;
+ struct list_head pl;
+
+ assert(key != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ msg = dht_kv_store_msg(key, val, exp);
+ if (msg == NULL) {
+ log_err(KV_FMT " Failed to create %s.",
+ KV_VAL(key, val), dht_code_str[DHT_STORE]);
+ goto fail_msg;
+ }
+
+ list_head_init(&pl);
+
+ if (dht_kv_get_peer_list_for_msg(msg, &pl) < 0) {
+ log_dbg(KV_FMT " Failed to get peer list.", KV_VAL(key, val));
+ goto fail_peer_list;
+ }
+
+ if (dht_kv_send_msgs(msg, &pl) < 0) {
+ log_warn(KV_FMT " Failed to send any %s msg.",
+ KV_VAL(key, val), DHT_CODE(msg));
+ goto fail_msgs;
+ }
+
+ dht_msg__free_unpacked(msg, NULL);
+
+ return 0;
+ fail_msgs:
+ peer_list_destroy(&pl);
+ fail_peer_list:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+ return -1;
+}
+
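+/*
+ * Iterative lookup: send the request to the closest known peers and
+ * record them in the request table; the response handlers
+ * (do_dht_kv_find_node_rsp / do_dht_kv_find_value_rsp) re-enter
+ * these functions with any closer peers learned from each reply,
+ * until values arrive or the request expires after DHT_T_RESP
+ * seconds.
+ */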
+/* recursive lookup, start with pl NULL */
+static int dht_kv_query_contacts(const uint8_t * key,
+ struct list_head * pl)
+{
+ struct list_head p;
+
+ dht_msg_t * msg;
+
+ assert(key != NULL);
+
+ msg = dht_kv_find_node_req_msg(key);
+ if (msg == NULL) {
+ log_err(KEY_FMT " Failed to create %s msg.",
+ KEY_VAL(key), dht_code_str[DHT_FIND_NODE_REQ]);
+ goto fail_msg;
+ }
+
+ if (pl == NULL) {
+ list_head_init(&p);
+ pl = &p;
+ }
+
+ if (list_is_empty(pl) && dht_kv_get_peer_list_for_msg(msg, pl) < 0) {
+ log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key));
+ goto fail_peer_list;
+ }
+
+ if (dht_kv_update_req(key, pl) < 0) {
+ log_warn(KEY_FMT " Failed to update req.", KEY_VAL(key));
+ goto fail_update;
+ }
+
+ if (dht_kv_send_msgs(msg, pl)) {
+ log_warn(KEY_FMT " Failed to send any %s msg.",
+ KEY_VAL(key), DHT_CODE(msg));
+ goto fail_update;
+ }
+
+ dht_msg__free_unpacked(msg, NULL);
+
+ return 0;
+ fail_update:
+ peer_list_destroy(pl);
+ fail_peer_list:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+ return -1;
+}
+
+/* recursive lookup, start with pl NULL */
+static ssize_t dht_kv_query_remote(const uint8_t * key,
+ buffer_t ** vals,
+ struct list_head * pl)
+{
+ struct list_head p;
+ dht_msg_t * msg;
+
+ assert(key != NULL);
+
+ msg = dht_kv_find_value_req_msg(key);
+ if (msg == NULL) {
+ log_err(KEY_FMT " Failed to create value req.", KEY_VAL(key));
+ goto fail_msg;
+ }
+
+ if (pl == NULL) {
+ list_head_init(&p);
+ pl = &p;
+ }
+
+ if (list_is_empty(pl) && dht_kv_get_peer_list_for_msg(msg, pl) < 0) {
+ log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key));
+ goto fail_peer_list;
+ }
+
+ if (dht_kv_update_req(key, pl) < 0) {
+ log_err(KEY_FMT " Failed to update request.", KEY_VAL(key));
+ goto fail_update;
+ }
+
+ if (dht_kv_send_msgs(msg, pl)) {
+ log_warn(KEY_FMT " Failed to send %s msg.",
+ KEY_VAL(key), DHT_CODE(msg));
+ goto fail_update;
+ }
+
+ dht_msg__free_unpacked(msg, NULL);
+
+ if (vals == NULL) /* recursive lookup, already waiting */
+ return 0;
+
+ return dht_kv_wait_req(key, vals);
+ fail_update:
+ peer_list_destroy(pl);
+ fail_peer_list:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+ return -1;
+}
+
+static void __add_dht_kv_entry(struct dht_entry * e)
+{
+ struct list_head * p;
+
+ assert(e != NULL);
+
+ list_for_each(p, &dht.db.kv.list) {
+ struct dht_entry * d = list_entry(p, struct dht_entry, next);
+ if (IS_CLOSER(d->key, e->key))
+ continue;
+ break;
+ }
+
+ list_add_tail(&e->next, p);
+ ++dht.db.kv.len;
+}
+
+/* incoming store message */
+static int dht_kv_store(const uint8_t * key,
+ const buffer_t val,
+ time_t exp)
+{
+ struct dht_entry * e;
+ bool new = false;
+
+ assert(key != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ e = __dht_kv_find_entry(key);
+ if (e == NULL) {
+ log_dbg(KV_FMT " Adding entry (store).", KV_VAL(key, val));
+ e = dht_entry_create(key);
+ if (e == NULL)
+ goto fail;
+
+ new = true;
+
+ __add_dht_kv_entry(e);
+ }
+
+ if (dht_entry_update_val(e, val, exp) < 0)
+ goto fail_add;
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return 0;
+ fail_add:
+ if (new) {
+ list_del(&e->next);
+ dht_entry_destroy(e);
+ --dht.db.kv.len;
+ }
+ fail:
+ pthread_rwlock_unlock(&dht.db.lock);
+ return -1;
+}
+
+static int dht_kv_publish(const uint8_t * key,
+ const buffer_t val)
+{
+ struct dht_entry * e;
+ struct timespec now;
+ bool new = false;
+
+ assert(key != NULL);
+ assert(val.data != NULL);
+ assert(val.len > 0);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ e = __dht_kv_find_entry(key);
+ if (e == NULL) {
+ log_dbg(KV_FMT " Adding entry (publish).", KV_VAL(key, val));
+ e = dht_entry_create(key);
+ if (e == NULL)
+ goto fail;
+
+ __add_dht_kv_entry(e);
+ new = true;
+ }
+
+ if (dht_entry_update_lval(e, val) < 0)
+ goto fail_add;
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ dht_kv_store_remote(key, val, now.tv_sec + dht.t_expire);
+
+ return 0;
+ fail_add:
+ if (new) {
+ list_del(&e->next);
+ dht_entry_destroy(e);
+ --dht.db.kv.len;
+ }
+ fail:
+ pthread_rwlock_unlock(&dht.db.lock);
+ return -1;
+}
+
+static int dht_kv_unpublish(const uint8_t * key,
+ const buffer_t val)
+{
+ struct dht_entry * e;
+ int rc;
+
+ assert(key != NULL);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ e = __dht_kv_find_entry(key);
+ if (e == NULL)
+ goto no_entry;
+
+ rc = dht_entry_remove_lval(e, val);
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return rc;
+ no_entry:
+ pthread_rwlock_unlock(&dht.db.lock);
+ return -ENOENT;
+
+}
+
+/* message validation */
+static int dht_kv_validate_store_msg(const dht_store_msg_t * store)
+{
+ if (store == NULL) {
+ log_warn("Store in msg is NULL.");
+ return -EINVAL;
+ }
+
+ if (store->key.data == NULL || store->key.len == 0) {
+ log_warn("Invalid key in DHT store msg.");
+ return -EINVAL;
+ }
+
+ if (store->key.len != dht.id.len) {
+ log_warn("Invalid key length in DHT store msg.");
+ return -EINVAL;
+ }
+
+ if (store->val.data == NULL || store->val.len == 0) {
+ log_warn("Invalid value in DHT store msg.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_find_req_msg(const dht_find_req_msg_t * req)
+{
+ if (req == NULL) {
+ log_warn("Request in msg is NULL.");
+ return -EINVAL;
+ }
+
+ if (req->key.data == NULL || req->key.len == 0) {
+ log_warn("Find request without key.");
+ return -EINVAL;
+ }
+
+ if (req->key.len != dht.id.len) {
+ log_warn("Invalid key length in request msg.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_node_rsp_msg(const dht_find_node_rsp_msg_t * rsp)
+{
+ if (rsp == NULL) {
+ log_warn("Node rsp in msg is NULL.");
+ return -EINVAL;
+ }
+
+ if (rsp->key.data == NULL) {
+ log_warn("Invalid key in DHT response msg.");
+ return -EINVAL;
+ }
+
+ if (rsp->key.len != dht.id.len) {
+ log_warn("Invalid key length in DHT response msg.");
+ return -EINVAL;
+ }
+
+ if (!dht_kv_has_req(rsp->key.data, rsp->cookie)) {
+ log_warn(KEY_FMT " No request " CK_FMT ".",
+ KEY_VAL(rsp->key.data), CK_VAL(rsp->cookie));
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_value_rsp_msg(const dht_find_value_rsp_msg_t * rsp)
+{
+ if (rsp == NULL) {
+ log_warn("Invalid DHT find value response msg.");
+ return -EINVAL;
+ }
+
+ if (rsp->values == NULL && rsp->n_values > 0) {
+ log_dbg("No values in DHT response msg.");
+ return 0;
+ }
+
+ if (rsp->n_values == 0 && rsp->values != NULL) {
+ log_dbg("DHT response did not set values NULL.");
+ return 0;
+ }
+
+ return 0;
+}
+
+static int dht_kv_validate_msg(dht_msg_t * msg)
+{
+ assert(msg != NULL);
+
+ if (msg->src->id.len != dht.id.len) {
+ log_warn("%s Invalid source contact ID.", DHT_CODE(msg));
+ return -EINVAL;
+ }
+
+ if (msg->src->addr == INVALID_ADDR) {
+ log_warn("%s Invalid source address.", DHT_CODE(msg));
+ return -EINVAL;
+ }
+
+ switch (msg->code) {
+ case DHT_FIND_VALUE_REQ:
+ /* FALLTHRU */
+ case DHT_FIND_NODE_REQ:
+ if (validate_find_req_msg(msg->find) < 0)
+ return -EINVAL;
+ break;
+ case DHT_FIND_VALUE_RSP:
+ if (validate_value_rsp_msg(msg->val) < 0)
+ return -EINVAL;
+ /* FALLTHRU */
+ case DHT_FIND_NODE_RSP:
+ if (validate_node_rsp_msg(msg->node) < 0)
+ return -EINVAL;
+ break;
+ case DHT_STORE:
+ if (dht_kv_validate_store_msg(msg->store) < 0)
+ return -EINVAL;
+ break;
+ default:
+ log_warn("Invalid DHT msg code (%d).", msg->code);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void do_dht_kv_store(const dht_store_msg_t * store)
+{
+ struct tm * tm;
+ char tmstr[RIB_TM_STRLEN];
+ buffer_t val;
+ uint8_t * key;
+ time_t exp;
+
+ assert(store != NULL);
+
+ val.data = store->val.data;
+ val.len = store->val.len;
+ key = store->key.data;
+ exp = store->exp;
+
+ if (dht_kv_store(store->key.data, val, store->exp) < 0) {
+ log_err(KV_FMT " Failed to store.", KV_VAL(key, val));
+ return;
+ }
+
+ tm = gmtime(&exp);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+ log_dbg(KV_FMT " Stored value until %s.", KV_VAL(key, val), tmstr);
+}
+
+static dht_msg_t * do_dht_kv_find_node_req(const dht_find_req_msg_t * req)
+{
+ dht_contact_msg_t ** contacts;
+ dht_msg_t * rsp;
+ uint8_t * key;
+ uint64_t cookie;
+ ssize_t len;
+
+ assert(req != NULL);
+
+ key = req->key.data;
+ cookie = req->cookie;
+
+ len = dht_kv_get_contacts(key, &contacts);
+ if (len < 0) {
+ log_warn(KEY_FMT " Failed to get contacts.", KEY_VAL(key));
+ goto fail_contacts;
+ }
+
+ rsp = dht_kv_find_node_rsp_msg(key, cookie, &contacts, len);
+ if (rsp == NULL) {
+ log_err(KEY_FMT " Failed to create %s.", KEY_VAL(key),
+ dht_code_str[DHT_FIND_NODE_RSP]);
+ goto fail_msg;
+ }
+
+ assert(rsp->code == DHT_FIND_NODE_RSP);
+
+ log_info(KEY_FMT " Responding with %zd contacts", KEY_VAL(key), len);
+
+ return rsp;
+ fail_msg:
+ while (len-- > 0)
+ dht_contact_msg__free_unpacked(contacts[len], NULL);
+ free(contacts);
+ fail_contacts:
+ return NULL;
+}
+
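+/*
+ * Every contact learned from a response is inserted into the routing
+ * table and also queued as a candidate peer for the next round of
+ * the ongoing lookup; our own address is skipped.
+ */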
+static void dht_kv_process_node_rsp(dht_contact_msg_t ** contacts,
+ size_t len,
+ struct list_head * pl,
+ enum dht_code code)
+{
+ struct timespec now;
+ size_t i;
+
+ assert(contacts != NULL);
+ assert(len > 0);
+ assert(pl != NULL);
+ assert(list_is_empty(pl));
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ for (i = 0; i < len; i++) {
+ dht_contact_msg_t * c = contacts[i];
+ struct peer_entry * e;
+ if (c->addr == dht.addr)
+ continue;
+
+ if (dht_kv_update_contacts(c->id.data, c->addr) < 0)
+ log_warn(PEER_FMT " Failed to update contacts.",
+ PEER_VAL(c->id.data, c->addr));
+
+ e = malloc(sizeof(*e));
+ if (e == NULL) {
+ log_err(PEER_FMT " Failed to malloc entry.",
+ PEER_VAL(c->id.data, c->addr));
+ continue;
+ }
+
+ e->id = dht_dup_key(c->id.data);
+ if (e->id == NULL) {
+ log_warn(PEER_FMT " Failed to duplicate id.",
+ PEER_VAL(c->id.data, c->addr));
+ free(e);
+ continue;
+ }
+
+ e->cookie = generate_cookie();
+ e->code = code;
+ e->addr = c->addr;
+ e->t_sent = now.tv_sec;
+
+ list_add_tail(&e->next, pl);
+ }
+}
+
+static dht_msg_t * do_dht_kv_find_value_req(const dht_find_req_msg_t * req)
+{
+ dht_contact_msg_t ** contacts;
+ ssize_t n_contacts;
+ buffer_t * vals;
+ ssize_t n_vals;
+ dht_msg_t * rsp;
+ uint8_t * key;
+ uint64_t cookie;
+
+ assert(req != NULL);
+
+ key = req->key.data;
+ cookie = req->cookie;
+
+ n_contacts = dht_kv_get_contacts(key, &contacts);
+ if (n_contacts < 0) {
+ log_warn(KEY_FMT " Failed to get contacts.", KEY_VAL(key));
+ goto fail_contacts;
+ }
+
+ assert(n_contacts > 0 || contacts == NULL);
+
+ n_vals = dht_kv_retrieve(key, &vals);
+ if (n_vals < 0) {
+ log_dbg(KEY_FMT " Failed to get values.", KEY_VAL(key));
+ goto fail_vals;
+ }
+
+ if (n_vals == 0)
+ log_dbg(KEY_FMT " No values found.", KEY_VAL(key));
+
+ rsp = dht_kv_find_value_rsp_msg(key, cookie, &contacts, n_contacts,
+ &vals, n_vals);
+ if (rsp == NULL) {
+ log_err(KEY_FMT " Failed to create %s msg.",
+ KEY_VAL(key), dht_code_str[DHT_FIND_VALUE_RSP]);
+ goto fail_msg;
+ }
+
+ log_info(KEY_FMT " Responding with %zd contacts, %zd values.",
+ KEY_VAL(req->key.data), n_contacts, n_vals);
+
+ return rsp;
+
+ fail_msg:
+ freebufs(vals, n_vals);
+ fail_vals:
+ while (n_contacts-- > 0)
+ dht_contact_msg__free_unpacked(contacts[n_contacts], NULL);
+ free(contacts);
+ fail_contacts:
+ return NULL;
+}
+
+static void do_dht_kv_find_node_rsp(const dht_find_node_rsp_msg_t * rsp)
+{
+ struct list_head pl;
+
+ assert(rsp != NULL);
+
+ list_head_init(&pl);
+
+ dht_kv_process_node_rsp(rsp->contacts, rsp->n_contacts, &pl,
+ DHT_FIND_NODE_REQ);
+
+ if (list_is_empty(&pl))
+ goto no_contacts;
+
+ if (dht_kv_update_req(rsp->key.data, &pl) < 0) {
+ log_err(KEY_FMT " Failed to update request.",
+ KEY_VAL(rsp->key.data));
+ goto fail_update;
+ }
+
+ dht_kv_query_contacts(rsp->key.data, &pl);
+
+ return;
+
+ fail_update:
+ peer_list_destroy(&pl);
+ no_contacts:
+ return;
+}
+
+static void do_dht_kv_find_value_rsp(const dht_find_node_rsp_msg_t * node,
+ const dht_find_value_rsp_msg_t * val)
+{
+ struct list_head pl;
+ uint8_t * key;
+
+ assert(node != NULL);
+ assert(val != NULL);
+
+ list_head_init(&pl);
+
+ key = node->key.data;
+
+ dht_kv_process_node_rsp(node->contacts, node->n_contacts, &pl,
+ DHT_FIND_VALUE_REQ);
+
+ if (val->n_values > 0) {
+ log_dbg(KEY_FMT " %zd new values received.",
+ KEY_VAL(key), val->n_values);
+ dht_kv_respond_req(key, val->values, val->n_values);
+ peer_list_destroy(&pl);
+ return; /* done! */
+ }
+
+ if (list_is_empty(&pl))
+ goto no_contacts;
+
+ if (dht_kv_update_req(key, &pl) < 0) {
+ log_err(KEY_FMT " Failed to update request.", KEY_VAL(key));
+ goto fail_update;
+ }
+
+ dht_kv_query_remote(key, NULL, &pl);
+
+ return;
+ fail_update:
+ peer_list_destroy(&pl);
+ no_contacts:
+ return;
+}
+
+static dht_msg_t * dht_wait_for_dht_msg(void)
+{
+ dht_msg_t * msg;
+ struct cmd * cmd;
+
+ pthread_mutex_lock(&dht.cmds.mtx);
+
+ pthread_cleanup_push(__cleanup_mutex_unlock, &dht.cmds.mtx);
+
+ while (list_is_empty(&dht.cmds.list))
+ pthread_cond_wait(&dht.cmds.cond, &dht.cmds.mtx);
+
+ cmd = list_last_entry(&dht.cmds.list, struct cmd, next);
+ list_del(&cmd->next);
+
+ pthread_cleanup_pop(true);
+
+ msg = dht_msg__unpack(NULL, cmd->cbuf.len, cmd->cbuf.data);
+ if (msg == NULL)
+ log_warn("Failed to unpack DHT msg.");
+
+ freebuf(cmd->cbuf);
+ free(cmd);
+
+ return msg;
+}
+
+static void do_dht_msg(dht_msg_t * msg)
+{
+ dht_msg_t * rsp = NULL;
+ uint8_t * id;
+ uint64_t addr;
+
+#ifdef DEBUG_PROTO_DHT
+ dht_kv_debug_msg_rcv(msg);
+#endif
+ if (dht_kv_validate_msg(msg) < 0) {
+ log_warn("%s Validation failed.", DHT_CODE(msg));
+ dht_msg__free_unpacked(msg, NULL);
+ return;
+ }
+
+ id = msg->src->id.data;
+ addr = msg->src->addr;
+
+ if (dht_kv_update_contacts(id, addr) < 0)
+ log_warn(PEER_FMT " Failed to update contact from msg src.",
+ PEER_VAL(id, addr));
+
+ pthread_cleanup_push(__cleanup_dht_msg, msg);
+
+ switch(msg->code) {
+ case DHT_FIND_VALUE_REQ:
+ rsp = do_dht_kv_find_value_req(msg->find);
+ break;
+ case DHT_FIND_NODE_REQ:
+ rsp = do_dht_kv_find_node_req(msg->find);
+ break;
+ case DHT_STORE:
+ do_dht_kv_store(msg->store);
+ break;
+ case DHT_FIND_NODE_RSP:
+ do_dht_kv_find_node_rsp(msg->node);
+ break;
+ case DHT_FIND_VALUE_RSP:
+ do_dht_kv_find_value_rsp(msg->node, msg->val);
+ break;
+ default:
+ assert(false); /* already validated */
+ }
+
+ pthread_cleanup_pop(true);
+
+ if (rsp == NULL)
+ return;
+
+ pthread_cleanup_push(__cleanup_dht_msg, rsp);
+
+ dht_send_msg(rsp, addr);
+
+ pthread_cleanup_pop(true); /* free rsp */
+}
+
+static void * dht_handle_packet(void * o)
+{
+ (void) o;
+
+ while (true) {
+ dht_msg_t * msg;
+
+ msg = dht_wait_for_dht_msg();
+ if (msg == NULL)
+ continue;
+
+ tpm_begin_work(dht.tpm);
+
+ do_dht_msg(msg);
+
+ tpm_end_work(dht.tpm);
+ }
+
+ return (void *) 0;
+}
+#ifndef __DHT_TEST__
+static void dht_post_packet(void * comp,
+ struct shm_du_buff * sdb)
+{
+ struct cmd * cmd;
+
+ (void) comp;
+
+ cmd = malloc(sizeof(*cmd));
+ if (cmd == NULL) {
+ log_err("Command malloc failed.");
+ goto fail_cmd;
+ }
+
+ cmd->cbuf.data = malloc(shm_du_buff_len(sdb));
+ if (cmd->cbuf.data == NULL) {
+ log_err("Command buffer malloc failed.");
+ goto fail_buf;
+ }
+
+ cmd->cbuf.len = shm_du_buff_len(sdb);
+
+ memcpy(cmd->cbuf.data, shm_du_buff_head(sdb), cmd->cbuf.len);
+
+ ipcp_sdb_release(sdb);
+
+ pthread_mutex_lock(&dht.cmds.mtx);
+
+ list_add(&cmd->next, &dht.cmds.list);
+
+ pthread_cond_signal(&dht.cmds.cond);
+
+ pthread_mutex_unlock(&dht.cmds.mtx);
+
+ return;
+
+ fail_buf:
+ free(cmd);
+ fail_cmd:
+ ipcp_sdb_release(sdb);
+ return;
+}
+#endif
+
+int dht_reg(const uint8_t * key)
+{
+ buffer_t val;
+
+ if (addr_to_buf(dht.addr, &val) < 0) {
+ log_err("Failed to convert address to buffer.");
+ goto fail_a2b;
+ }
+
+ if (dht_kv_publish(key, val)) {
+ log_err(KV_FMT " Failed to publish.", KV_VAL(key, val));
+ goto fail_publish;
+ }
+
+ freebuf(val);
+
+ return 0;
+ fail_publish:
+ freebuf(val);
+ fail_a2b:
+ return -1;
+}
+
+int dht_unreg(const uint8_t * key)
+{
+ buffer_t val;
+
+ if (addr_to_buf(dht.addr, &val) < 0) {
+ log_err("Failed to convert address to buffer.");
+ goto fail_a2b;
+ }
+
+ if (dht_kv_unpublish(key, val)) {
+ log_err(KV_FMT " Failed to unpublish.", KV_VAL(key, val));
+ goto fail_unpublish;
+ }
+
+ freebuf(val);
+
+ return 0;
+ fail_unpublish:
+ freebuf(val);
+ fail_a2b:
+ return -1;
+}
+
+uint64_t dht_query(const uint8_t * key)
+{
+ buffer_t * vals;
+ ssize_t n;
+ uint64_t addr;
+
+ n = dht_kv_retrieve(key, &vals);
+ if (n < 0) {
+ log_err(KEY_FMT " Failed to query db.", KEY_VAL(key));
+ goto fail_vals;
+ }
+
+ if (n == 0) {
+ log_dbg(KEY_FMT " No local values.", KEY_VAL(key));
+ n = dht_kv_query_remote(key, &vals, NULL);
+ if (n < 0) {
+ log_warn(KEY_FMT " Failed to query DHT.", KEY_VAL(key));
+ goto fail_vals;
+ }
+ if (n == 0) {
+ log_dbg(KEY_FMT " No values.", KEY_VAL(key));
+ goto no_vals;
+ }
+ }
+
+ if (buf_to_addr(vals[0], &addr) < 0) {
+ log_err(VAL_FMT " Failed addr conversion.", VAL_VAL(vals[0]));
+ goto fail_b2a;
+ }
+
+ if (n > 1 && addr == INVALID_ADDR && buf_to_addr(vals[1], &addr) < 0) {
+ log_err(VAL_FMT " Failed addr conversion.", VAL_VAL(vals[1]));
+ goto fail_b2a;
+ }
+
+ freebufs(vals, n);
+
+ return addr;
+ fail_b2a:
+ freebufs(vals, n);
+ return INVALID_ADDR;
+ no_vals:
+ free(vals);
+ fail_vals:
+ return INVALID_ADDR;
+}
+
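+/*
+ * Fallback when the routing table is empty: target the peer learned
+ * at enrollment (dht.peer). The entry borrows our own id, since the
+ * peer's id is not yet known, and carries the magic cookie that
+ * marks bootstrap entries (see IS_MAGIC above).
+ */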
+static int emergency_peer(struct list_head * pl)
+{
+ struct peer_entry * e;
+ struct timespec now;
+
+ assert(pl != NULL);
+ assert(list_is_empty(pl));
+
+ if (dht.peer == INVALID_ADDR)
+ return -1;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ e = malloc(sizeof(*e));
+ if (e == NULL) {
+ log_err("Failed to malloc emergency peer entry.");
+ goto fail_malloc;
+ }
+
+ e->id = dht_dup_key(dht.id.data);
+ if (e->id == NULL) {
+ log_err("Failed to duplicate DHT ID for emergency peer.");
+ goto fail_id;
+ }
+
+ e->addr = dht.peer;
+ e->cookie = dht.magic;
+ e->code = DHT_FIND_NODE_REQ;
+ e->t_sent = now.tv_sec;
+
+ list_add_tail(&e->next, pl);
+
+ return 0;
+ fail_id:
+ free(e);
+ fail_malloc:
+ return -ENOMEM;
+}
+
+static int dht_kv_seed_bootstrap_peer(void)
+{
+ struct list_head pl;
+
+ list_head_init(&pl);
+
+ if (dht.peer == INVALID_ADDR) {
+ log_dbg("No-one to contact.");
+ return 0;
+ }
+
+ if (emergency_peer(&pl) < 0) {
+ log_err("Could not create emergency peer.");
+ goto fail_peer;
+ }
+
+ log_dbg("Pinging emergency peer " ADDR_FMT32 ".",
+ ADDR_VAL32(&dht.peer));
+
+ if (dht_kv_query_contacts(dht.id.data, &pl) < 0) {
+ log_warn("Failed to bootstrap peer.");
+ goto fail_query;
+ }
+
+ peer_list_destroy(&pl);
+
+ return 0;
+ fail_query:
+ peer_list_destroy(&pl);
+ fail_peer:
+ return -EAGAIN;
+}
+
+static void dht_kv_check_contacts(void)
+{
+ struct list_head cl;
+ struct list_head pl;
+
+ list_head_init(&cl);
+
+ dht_kv_contact_list(dht.id.data, &cl, dht.k);
+
+ if (!list_is_empty(&cl))
+ goto success;
+
+ contact_list_destroy(&cl);
+
+ list_head_init(&pl);
+
+ if (dht.peer == INVALID_ADDR) {
+ log_dbg("No-one to contact.");
+ return;
+ }
+
+ if (emergency_peer(&pl) < 0) {
+ log_err("Could not create emergency peer.");
+ goto fail_peer;
+ }
+
+ log_dbg("No contacts found, using emergency peer " ADDR_FMT32 ".",
+ ADDR_VAL32(&dht.peer));
+
+ dht_kv_query_contacts(dht.id.data, &pl);
+
+ peer_list_destroy(&pl);
+
+ return;
+ success:
+ contact_list_destroy(&cl);
+ return;
+ fail_peer:
+ return;
+}
+
+static void dht_kv_remove_expired_reqs(void)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct timespec now;
+
+ clock_gettime(PTHREAD_COND_CLOCK, &now);
+
+ pthread_mutex_lock(&dht.reqs.mtx);
+
+ list_for_each_safe(p, h, &dht.reqs.list) {
+ struct dht_req * e;
+ e = list_entry(p, struct dht_req, next);
+ if (IS_EXPIRED(e, &now)) {
+ log_dbg(KEY_FMT " Removing expired request.",
+ KEY_VAL(e->key));
+ list_del(&e->next);
+ dht_req_destroy(e);
+ --dht.reqs.len;
+ }
+ }
+
+ pthread_mutex_unlock(&dht.reqs.mtx);
+}
+
+static void value_list_destroy(struct list_head * vl)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ assert(vl != NULL);
+
+ list_for_each_safe(p, h, vl) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ list_del(&v->next);
+ val_entry_destroy(v);
+ }
+}
+
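+/*
+ * Replication vs. republishing: remote values (vals) are pushed to
+ * the k closest peers every t_repl seconds until they expire;
+ * locally published values (lvals) are re-announced with a fresh
+ * expiry once their deadline comes within DHT_N_REPUB replication
+ * periods.
+ */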
+#define MUST_REPLICATE(v, now) ((now)->tv_sec > (v)->t_repl + dht.t_repl)
+#define MUST_REPUBLISH(v, now) /* Close to expiry deadline */ \
+ (((v)->t_exp - (now)->tv_sec) < (DHT_N_REPUB * dht.t_repl))
+static void dht_entry_get_repl_lists(const struct dht_entry * e,
+ struct list_head * repl,
+ struct list_head * rebl,
+ struct timespec * now)
+{
+ struct list_head * p;
+ struct val_entry * n;
+
+ list_for_each(p, &e->vals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ if (MUST_REPLICATE(v, now) && !IS_EXPIRED(v, now)) {
+ n = val_entry_create(v->val, v->t_exp);
+ if (n == NULL)
+ continue;
+
+ list_add_tail(&n->next, repl);
+ }
+ }
+
+ list_for_each(p, &e->lvals.list) {
+ struct val_entry * v = list_entry(p, struct val_entry, next);
+ if (MUST_REPLICATE(v, now) && MUST_REPUBLISH(v, now)) {
+ /* Add expire time here, to allow creating val_entry */
+ n = val_entry_create(v->val, now->tv_sec + dht.t_expire);
+ if (n == NULL)
+ continue;
+
+ list_add_tail(&n->next, rebl);
+ }
+ }
+}
+
+static int dht_kv_next_values(uint8_t * key,
+ struct list_head * repl,
+ struct list_head * rebl)
+{
+ struct timespec now;
+ struct list_head * p;
+ struct list_head * h;
+ struct dht_entry * e = NULL;
+
+ assert(key != NULL);
+ assert(repl != NULL);
+ assert(rebl != NULL);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ assert(list_is_empty(repl));
+ assert(list_is_empty(rebl));
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ if (dht.db.kv.len == 0)
+ goto no_entries;
+
+ list_for_each_safe(p, h, &dht.db.kv.list) {
+ e = list_entry(p, struct dht_entry, next);
+ if (IS_CLOSER(e->key, key))
+ continue; /* Already processed */
+ }
+
+ if (e != NULL) {
+ memcpy(key, e->key, dht.id.len);
+ dht_entry_get_repl_lists(e, repl, rebl, &now);
+ }
+ no_entries:
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ return list_is_empty(repl) && list_is_empty(rebl) ? -ENOENT : 0;
+}
+
+static void dht_kv_replicate_value(const uint8_t * key,
+ struct val_entry * v,
+ const struct timespec * now)
+{
+ assert(MUST_REPLICATE(v, now));
+
+ (void) now;
+
+ if (dht_kv_store_remote(key, v->val, v->t_exp) == 0) {
+ log_dbg(KV_FMT " Replicated.", KV_VAL(key, v->val));
+ return;
+ }
+
+ log_dbg(KV_FMT " Replication failed.", KV_VAL(key, v->val));
+
+ list_del(&v->next);
+ val_entry_destroy(v);
+}
+
+static void dht_kv_republish_value(const uint8_t * key,
+ struct val_entry * v,
+ const struct timespec * now)
+{
+ assert(MUST_REPLICATE(v, now));
+
+ if (MUST_REPUBLISH(v, now))
+ assert(v->t_exp >= now->tv_sec + dht.t_expire);
+
+ if (dht_kv_store_remote(key, v->val, v->t_exp) == 0) {
+ log_dbg(KV_FMT " Republished.", KV_VAL(key, v->val));
+ return;
+ }
+
+ if (MUST_REPUBLISH(v, now))
+ log_warn(KV_FMT " Republish failed.", KV_VAL(key, v->val));
+ else
+ log_dbg(KV_FMT " Replication failed.", KV_VAL(key, v->val));
+
+ list_del(&v->next);
+ val_entry_destroy(v);
+}
+
+static void dht_kv_update_replication_times(const uint8_t * key,
+ struct list_head * repl,
+ struct list_head * rebl,
+ const struct timespec * now)
+{
+ struct dht_entry * e;
+ struct list_head * p;
+ struct list_head * h;
+ struct val_entry * v;
+
+ assert(key != NULL);
+ assert(repl != NULL);
+ assert(rebl != NULL);
+ assert(now != NULL);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ e = __dht_kv_find_entry(key);
+ if (e == NULL) {
+ pthread_rwlock_unlock(&dht.db.lock);
+ return;
+ }
+
+ list_for_each_safe(p, h, repl) {
+ struct val_entry * x;
+ v = list_entry(p, struct val_entry, next);
+ x = dht_entry_get_val(e, v->val);
+ if (x == NULL) {
+ log_err(KV_FMT " Not in vals.", KV_VAL(key, v->val));
+ continue;
+ }
+
+ x->t_repl = now->tv_sec;
+
+ list_del(&v->next);
+ val_entry_destroy(v);
+ }
+
+ list_for_each_safe(p, h, rebl) {
+ struct val_entry * x;
+ v = list_entry(p, struct val_entry, next);
+ x = dht_entry_get_lval(e, v->val);
+ if (x == NULL) {
+ log_err(KV_FMT " Not in lvals.", KV_VAL(key, v->val));
+ continue;
+ }
+
+ x->t_repl = now->tv_sec;
+ if (v->t_exp > x->t_exp) {
+ x->t_exp = v->t_exp; /* update expiration time */
+ }
+
+ list_del(&v->next);
+ val_entry_destroy(v);
+ }
+
+ pthread_rwlock_unlock(&dht.db.lock);
+}
+
+static void dht_kv_replicate_values(const uint8_t * key,
+ struct list_head * repl,
+ struct list_head * rebl)
+{
+ struct timespec now;
+ struct list_head * p;
+ struct list_head * h;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ list_for_each_safe(p, h, repl) {
+ struct val_entry * v;
+ v = list_entry(p, struct val_entry, next);
+ dht_kv_replicate_value(key, v, &now);
+ }
+
+ list_for_each_safe(p, h, rebl) {
+ struct val_entry * v;
+ v = list_entry(p, struct val_entry, next);
+ dht_kv_republish_value(key, v, &now);
+ }
+
+ /* removes non-replicated items from the list */
+ dht_kv_update_replication_times(key, repl, rebl, &now);
+
+ if (list_is_empty(repl) && list_is_empty(rebl))
+ return;
+
+ log_warn(KEY_FMT " Failed to update replication times.", KEY_VAL(key));
+}
+
+static void dht_kv_replicate(void)
+{
+ struct list_head repl; /* list of values to replicate */
+ struct list_head rebl; /* list of local values to republish */
+ uint8_t * key;
+
+ key = dht_dup_key(dht.id.data); /* dist == 0 */
+ if (key == NULL) {
+ log_err("Replicate: Failed to duplicate DHT ID.");
+ return;
+ }
+
+ list_head_init(&repl);
+ list_head_init(&rebl);
+
+ pthread_cleanup_push(free, key);
+
+ while (dht_kv_next_values(key, &repl, &rebl) == 0) {
+ dht_kv_replicate_values(key, &repl, &rebl);
+ if (!list_is_empty(&repl)) {
+ log_warn(KEY_FMT " Replication items left.",
+ KEY_VAL(key));
+ value_list_destroy(&repl);
+ }
+
+ if (!list_is_empty(&rebl)) {
+ log_warn(KEY_FMT " Republish items left.",
+ KEY_VAL(key));
+ value_list_destroy(&rebl);
+ }
+ }
+
+ pthread_cleanup_pop(true);
+}
+
+static void dht_kv_refresh_contacts(void)
+{
+ struct list_head * p;
+ struct list_head * h;
+ struct list_head rl; /* refresh list */
+ struct timespec now;
+
+ list_head_init(&rl);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ pthread_rwlock_rdlock(&dht.db.lock);
+
+ __dht_kv_bucket_refresh_list(dht.db.contacts.root, now.tv_sec, &rl);
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ list_for_each_safe(p, h, &rl) {
+ struct contact * c;
+ c = list_entry(p, struct contact, next);
+ log_dbg(PEER_FMT " Refreshing contact.",
+ PEER_VAL(c->id, c->addr));
+ dht_kv_query_contacts(c->id, NULL);
+ list_del(&c->next);
+ contact_destroy(c);
+ }
+
+ assert(list_is_empty(&rl));
+}
+
+static void (*tasks[])(void) = {
+ dht_kv_check_contacts,
+ dht_kv_remove_expired_entries,
+ dht_kv_remove_expired_reqs,
+ dht_kv_replicate,
+ dht_kv_refresh_contacts,
+ NULL
+};
+
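+/*
+ * The worker wakes up once per tick and runs a single maintenance
+ * task, so each task runs roughly every (tick * n) seconds. The tick
+ * is the gcd of the configured timer periods, halved and spread over
+ * the n tasks, so that every period is honoured to within one cycle.
+ */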
+static void * work(void * o)
+{
+ struct timespec now = TIMESPEC_INIT_MS(1);
+ time_t intv;
+ size_t n; /* number of tasks */
+
+ n = sizeof(tasks) / sizeof(tasks[0]) - 1; /* last is NULL */
+
+ (void) o;
+
+ while (dht_kv_seed_bootstrap_peer() == -EAGAIN) {
+ ts_add(&now, &now, &now); /* exponential backoff */
+ if (now.tv_sec > 1) /* cap at 1 second */
+ now.tv_sec = 1;
+ nanosleep(&now, NULL);
+ }
+
+ intv = gcd(dht.t_expire, (dht.t_expire - DHT_N_REPUB * dht.t_repl));
+ intv = gcd(intv, gcd(dht.t_repl, dht.t_refresh)) / 2;
+ intv = MAX(1, intv / n);
+
+ log_dbg("DHT worker starting %ld seconds interval.", intv * n);
+
+ while (true) {
+ int i = 0;
+ while (tasks[i] != NULL) {
+ tasks[i++]();
+ sleep(intv);
+ }
+ }
+
+ return (void *) 0;
+}
+
+int dht_start(void)
+{
+ dht.state = DHT_RUNNING;
+
+ if (tpm_start(dht.tpm))
+ goto fail_tpm_start;
+
+#ifndef __DHT_TEST__
+ if (pthread_create(&dht.worker, NULL, work, NULL)) {
+ log_err("Failed to create DHT worker thread.");
+ goto fail_worker;
+ }
+
+ dht.eid = dt_reg_comp(&dht, &dht_post_packet, DHT);
+ if ((int) dht.eid < 0) {
+ log_err("Failed to register DHT component.");
+ goto fail_reg;
+ }
+#else
+ (void) work;
+#endif
+ return 0;
+#ifndef __DHT_TEST__
+ fail_reg:
+ pthread_cancel(dht.worker);
+ pthread_join(dht.worker, NULL);
+ fail_worker:
+ tpm_stop(dht.tpm);
+#endif
+ fail_tpm_start:
+ dht.state = DHT_INIT;
+ return -1;
+}
+
+void dht_stop(void)
+{
+ assert(dht.state == DHT_RUNNING);
+
+#ifndef __DHT_TEST__
+ dt_unreg_comp(dht.eid);
+
+ pthread_cancel(dht.worker);
+ pthread_join(dht.worker, NULL);
+#endif
+ tpm_stop(dht.tpm);
+
+ dht.state = DHT_INIT;
+}
+
+int dht_init(struct dir_dht_config * conf)
+{
+ struct timespec now;
+ pthread_condattr_t cattr;
+
+ assert(conf != NULL);
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+#ifndef __DHT_TEST__
+ dht.id.len = ipcp_dir_hash_len();
+ dht.addr = addr_auth_address();
+#else
+ dht.id.len = DHT_TEST_KEY_LEN;
+ dht.addr = DHT_TEST_ADDR;
+#endif
+ dht.t0 = now.tv_sec;
+ dht.alpha = conf->params.alpha;
+ dht.k = conf->params.k;
+ dht.t_expire = conf->params.t_expire;
+ dht.t_refresh = conf->params.t_refresh;
+ dht.t_repl = conf->params.t_replicate;
+ dht.peer = conf->peer;
+
+ dht.magic = generate_cookie();
+
+ /* Send my address on enrollment */
+ conf->peer = dht.addr;
+
+ dht.id.data = generate_id();
+ if (dht.id.data == NULL) {
+ log_err("Failed to create DHT ID.");
+ goto fail_id;
+ }
+
+ list_head_init(&dht.cmds.list);
+
+ if (pthread_mutex_init(&dht.cmds.mtx, NULL)) {
+ log_err("Failed to initialize command mutex.");
+ goto fail_cmds_mutex;
+ }
+
+ if (pthread_cond_init(&dht.cmds.cond, NULL)) {
+ log_err("Failed to initialize command condvar.");
+ goto fail_cmds_cond;
+ }
+
+ list_head_init(&dht.reqs.list);
+ dht.reqs.len = 0;
+
+ if (pthread_mutex_init(&dht.reqs.mtx, NULL)) {
+ log_err("Failed to initialize request mutex.");
+ goto fail_reqs_mutex;
+ }
+
+ if (pthread_condattr_init(&cattr)) {
+ log_err("Failed to initialize request condvar attributes.");
+ goto fail_cattr;
+ }
+#ifndef __APPLE__
+ if (pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK)) {
+ log_err("Failed to set request condvar clock.");
+ goto fail_cattr;
+ }
+#endif
+ if (pthread_cond_init(&dht.reqs.cond, &cattr)) {
+ log_err("Failed to initialize request condvar.");
+ goto fail_reqs_cond;
+ }
+
+ list_head_init(&dht.db.kv.list);
+ dht.db.kv.len = 0;
+ dht.db.kv.vals = 0;
+ dht.db.kv.lvals = 0;
+
+ if (pthread_rwlock_init(&dht.db.lock, NULL)) {
+ log_err("Failed to initialize store rwlock.");
+ goto fail_rwlock;
+ }
+
+ dht.db.contacts.root = bucket_create();
+ if (dht.db.contacts.root == NULL) {
+ log_err("Failed to create DHT buckets.");
+ goto fail_buckets;
+ }
+
+ if (rib_reg(DHT, &r_ops) < 0) {
+ log_err("Failed to register DHT RIB operations.");
+ goto fail_rib_reg;
+ }
+
+ dht.tpm = tpm_create(2, 1, dht_handle_packet, NULL);
+ if (dht.tpm == NULL) {
+ log_err("Failed to create TPM for DHT.");
+ goto fail_tpm_create;
+ }
+
+ if (dht_kv_update_contacts(dht.id.data, dht.addr) < 0)
+ log_warn("Failed to update contacts with DHT ID.");
+
+ pthread_condattr_destroy(&cattr);
+#ifndef __DHT_TEST__
+ log_info("DHT initialized.");
+ log_dbg(" ID: " HASH_FMT64 " [%zu bytes].",
+ HASH_VAL64(dht.id.data), dht.id.len);
+ log_dbg(" address: " ADDR_FMT32 ".", ADDR_VAL32(&dht.addr));
+ log_dbg(" peer: " ADDR_FMT32 ".", ADDR_VAL32(&dht.peer));
+ log_dbg(" magic cookie: " HASH_FMT64 ".", HASH_VAL64(&dht.magic));
+ log_info(" parameters: alpha=%u, k=%zu, t_expire=%ld, "
+ "t_refresh=%ld, t_replicate=%ld.",
+ dht.alpha, dht.k, dht.t_expire, dht.t_refresh, dht.t_repl);
+#endif
+ dht.state = DHT_INIT;
+
+ return 0;
+
+ fail_tpm_create:
+ rib_unreg(DHT);
+ fail_rib_reg:
+ bucket_destroy(dht.db.contacts.root);
+ fail_buckets:
+ pthread_rwlock_destroy(&dht.db.lock);
+ fail_rwlock:
+ pthread_cond_destroy(&dht.reqs.cond);
+ fail_reqs_cond:
+ pthread_condattr_destroy(&cattr);
+ fail_cattr:
+ pthread_mutex_destroy(&dht.reqs.mtx);
+ fail_reqs_mutex:
+ pthread_cond_destroy(&dht.cmds.cond);
+ fail_cmds_cond:
+ pthread_mutex_destroy(&dht.cmds.mtx);
+ fail_cmds_mutex:
+ freebuf(dht.id);
+ fail_id:
+ return -1;
+}
+
+void dht_fini(void)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ rib_unreg(DHT);
+
+ tpm_destroy(dht.tpm);
+
+ pthread_mutex_lock(&dht.cmds.mtx);
+
+ list_for_each_safe(p, h, &dht.cmds.list) {
+ struct cmd * c = list_entry(p, struct cmd, next);
+ list_del(&c->next);
+ freebuf(c->cbuf);
+ free(c);
+ }
+
+ pthread_mutex_unlock(&dht.cmds.mtx);
+
+ pthread_cond_destroy(&dht.cmds.cond);
+ pthread_mutex_destroy(&dht.cmds.mtx);
+
+ pthread_mutex_lock(&dht.reqs.mtx);
+
+ list_for_each_safe(p, h, &dht.reqs.list) {
+ struct dht_req * r = list_entry(p, struct dht_req, next);
+ list_del(&r->next);
+ dht_req_destroy(r);
+ dht.reqs.len--;
+ }
+
+ pthread_mutex_unlock(&dht.reqs.mtx);
+
+ pthread_cond_destroy(&dht.reqs.cond);
+ pthread_mutex_destroy(&dht.reqs.mtx);
+
+ pthread_rwlock_wrlock(&dht.db.lock);
+
+ list_for_each_safe(p, h, &dht.db.kv.list) {
+ struct dht_entry * e = list_entry(p, struct dht_entry, next);
+ list_del(&e->next);
+ dht_entry_destroy(e);
+ dht.db.kv.len--;
+ }
+
+ if (dht.db.contacts.root != NULL)
+ bucket_destroy(dht.db.contacts.root);
+
+ pthread_rwlock_unlock(&dht.db.lock);
+
+ pthread_rwlock_destroy(&dht.db.lock);
+
+ assert(dht.db.kv.len == 0);
+ assert(dht.db.kv.vals == 0);
+ assert(dht.db.kv.lvals == 0);
+ assert(dht.reqs.len == 0);
+
+ freebuf(dht.id);
+}
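The dht_init()/dht_fini() pair above uses the mirrored goto-unwind idiom: every resource acquired in dht_init() gets a matching fail_* label, and the labels run in reverse acquisition order so a failure at any step releases exactly what was already set up. A minimal sketch of the same idiom, with illustrative names that are not part of this patch:

    #include <pthread.h>

    static pthread_mutex_t mtx;
    static pthread_cond_t  cnd;

    int obj_init(void)
    {
            if (pthread_mutex_init(&mtx, NULL))
                    goto fail_mtx;          /* nothing to undo yet */

            if (pthread_cond_init(&cnd, NULL))
                    goto fail_cnd;          /* undo the mutex only */

            return 0;

     fail_cnd:
            pthread_mutex_destroy(&mtx);
     fail_mtx:
            return -1;
    }

    void obj_fini(void)
    {
            /* teardown mirrors init in reverse order */
            pthread_cond_destroy(&cnd);
            pthread_mutex_destroy(&mtx);
    }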
diff --git a/src/ipcpd/unicast/dir/dht.h b/src/ipcpd/unicast/dir/dht.h
new file mode 100644
index 00000000..852a5130
--- /dev/null
+++ b/src/ipcpd/unicast/dir/dht.h
@@ -0,0 +1,49 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Distributed Hash Table based on Kademlia
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_UNICAST_DIR_DHT_H
+#define OUROBOROS_IPCPD_UNICAST_DIR_DHT_H
+
+#include <ouroboros/ipcp-dev.h>
+
+#include "ops.h"
+
+#include <stdint.h>
+#include <sys/types.h>
+
+int dht_init(struct dir_dht_config * conf);
+
+void dht_fini(void);
+
+int dht_start(void);
+
+void dht_stop(void);
+
+int dht_reg(const uint8_t * key);
+
+int dht_unreg(const uint8_t * key);
+
+uint64_t dht_query(const uint8_t * key);
+
+extern struct dir_ops dht_dir_ops;
+
+#endif /* OUROBOROS_IPCPD_UNICAST_DIR_DHT_H */
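Taken together, these calls form the lifecycle of the directory policy: initialize with a dir_dht_config, start the worker threads, then register, query and unregister hashes. A minimal sketch, assuming the config values used by the unit tests further down and eliding error handling:

    #include "dht.h"

    /* Illustrative values; error handling elided for brevity. */
    int dir_example(const uint8_t * hash)
    {
            struct dir_dht_config cfg = {
                    .params = {
                            .alpha       = 3,     /* lookup parallelism */
                            .k           = 8,     /* bucket size */
                            .t_expire    = 86400, /* value lifetime (s) */
                            .t_refresh   = 900,   /* bucket refresh (s) */
                            .t_replicate = 900    /* replication period (s) */
                    }
            };

            if (dht_init(&cfg) < 0 || dht_start() < 0)
                    return -1;

            dht_reg(hash);            /* publish hash -> own address */
            (void) dht_query(hash);   /* resolve hash to an address */
            dht_unreg(hash);

            dht_stop();
            dht_fini();

            return 0;
    }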
diff --git a/src/ipcpd/unicast/dir/dht.proto b/src/ipcpd/unicast/dir/dht.proto
new file mode 100644
index 00000000..ea74805f
--- /dev/null
+++ b/src/ipcpd/unicast/dir/dht.proto
@@ -0,0 +1,58 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * DHT protocol, based on Kademlia
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+syntax = "proto2";
+
+message dht_contact_msg {
+ required bytes id = 1;
+ required uint64 addr = 2;
+}
+
+message dht_find_req_msg {
+ required uint64 cookie = 1;
+ required bytes key = 2;
+}
+
+message dht_find_node_rsp_msg {
+ required uint64 cookie = 1;
+ required bytes key = 2;
+ repeated dht_contact_msg contacts = 3;
+}
+
+message dht_find_value_rsp_msg {
+ repeated bytes values = 1;
+}
+
+message dht_store_msg {
+ required bytes key = 1;
+ required bytes val = 2;
+ required uint32 exp = 3;
+}
+
+message dht_msg {
+ required uint32 code = 1;
+ required dht_contact_msg src = 2;
+ optional dht_store_msg store = 3;
+ optional dht_find_req_msg find = 4;
+ optional dht_find_node_rsp_msg node = 5;
+ optional dht_find_value_rsp_msg val = 6;
+}
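Every message carries the sender as src, and requests carry a random cookie that the matching response echoes, so the receiver can pair replies with outstanding lookups. The protobuf-c compiler generates pack/unpack helpers from this file; a sketch of a serialization round trip, using only calls that also appear in the unit tests below:

    #include "dht.pb-c.h"

    #include <stdlib.h>

    int roundtrip(dht_msg_t * msg)
    {
            size_t      len;
            uint8_t *   buf;
            dht_msg_t * upk;

            len = dht_msg__get_packed_size(msg);
            buf = malloc(len);
            if (buf == NULL)
                    return -1;

            dht_msg__pack(msg, buf);               /* serialize to wire format */

            upk = dht_msg__unpack(NULL, len, buf); /* NULL: default allocator */
            free(buf);
            if (upk == NULL)
                    return -1;

            dht_msg__free_unpacked(upk, NULL);

            return 0;
    }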
diff --git a/src/ipcpd/unicast/dir/ops.h b/src/ipcpd/unicast/dir/ops.h
new file mode 100644
index 00000000..8c6e5eb5
--- /dev/null
+++ b/src/ipcpd/unicast/dir/ops.h
@@ -0,0 +1,42 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Directory policy ops
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_UNICAST_DIR_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_DIR_OPS_H
+
+#include <stdint.h>
+
+struct dir_ops {
+ int (* init)(void * config);
+
+ void (* fini)(void);
+
+ int (* start)(void);
+
+ void (* stop)(void);
+
+ int (* reg)(const uint8_t * hash);
+
+ int (* unreg)(const uint8_t * hash);
+
+ uint64_t (* query)(const uint8_t * hash);
+};
+
+#endif /* OUROBOROS_IPCPD_UNICAST_DIR_OPS_H */
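A directory policy fills in this table and exports it; dht.h above exposes dht_dir_ops for the DHT policy. A hypothetical sketch of such an export follows; the cast wrapper for the untyped config argument is illustrative, and the real wiring may differ:

    /* Hypothetical export; names here are illustrative. */
    static int my_dir_init(void * config)
    {
            return dht_init((struct dir_dht_config *) config);
    }

    struct dir_ops example_dir_ops = {
            .init  = my_dir_init,
            .fini  = dht_fini,
            .start = dht_start,
            .stop  = dht_stop,
            .reg   = dht_reg,
            .unreg = dht_unreg,
            .query = dht_query
    };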
diff --git a/src/ipcpd/unicast/dir/pol.h b/src/ipcpd/unicast/dir/pol.h
new file mode 100644
index 00000000..eae4b2e7
--- /dev/null
+++ b/src/ipcpd/unicast/dir/pol.h
@@ -0,0 +1,23 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Directory policies
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "dht.h"
diff --git a/src/ipcpd/unicast/dir/tests/CMakeLists.txt b/src/ipcpd/unicast/dir/tests/CMakeLists.txt
new file mode 100644
index 00000000..f62ed993
--- /dev/null
+++ b/src/ipcpd/unicast/dir/tests/CMakeLists.txt
@@ -0,0 +1,40 @@
+get_filename_component(CURRENT_SOURCE_PARENT_DIR
+ ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY)
+get_filename_component(CURRENT_BINARY_PARENT_DIR
+ ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+include_directories(${CURRENT_SOURCE_PARENT_DIR})
+include_directories(${CURRENT_BINARY_PARENT_DIR})
+
+include_directories(${CMAKE_SOURCE_DIR}/include)
+include_directories(${CMAKE_BINARY_DIR}/include)
+
+get_filename_component(PARENT_PATH ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY)
+get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)
+
+create_test_sourcelist(${PARENT_DIR}_tests test_suite.c
+ # Add new tests here
+ dht_test.c
+ )
+
+protobuf_generate_c(DHT_PROTO_SRCS KAD_PROTO_HDRS ../dht.proto)
+add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests}
+ ${DHT_PROTO_SRCS})
+target_link_libraries(${PARENT_DIR}_test ouroboros-common)
+
+add_dependencies(check ${PARENT_DIR}_test)
+
+set(tests_to_run ${${PARENT_DIR}_tests})
+if(CMAKE_VERSION VERSION_LESS "3.29.0")
+ remove(tests_to_run test_suite.c)
+else ()
+ list(POP_FRONT tests_to_run)
+endif()
+
+foreach (test ${tests_to_run})
+ get_filename_component(test_name ${test} NAME_WE)
+ add_test(${test_name} ${C_TEST_PATH}/${PARENT_DIR}_test ${test_name})
+endforeach (test)
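create_test_sourcelist() emits test_suite.c, a driver whose main() dispatches to the listed test functions by name; this is why dht_test.c below exports an entry point named after the file. Roughly, and only as an illustration of the shape rather than the generated source:

    #include <string.h>

    int dht_test(int argc, char ** argv);  /* from dht_test.c */

    typedef int (* test_fn)(int, char **);

    static struct { const char * name; test_fn fn; } tests[] = {
            { "dht_test", dht_test },
    };

    int main(int argc, char ** argv)
    {
            size_t i;

            if (argc < 2)
                    return 1;

            for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                    if (strcmp(argv[1], tests[i].name) == 0)
                            return tests[i].fn(argc - 1, argv + 1);

            return 1; /* unknown test name */
    }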
diff --git a/src/ipcpd/unicast/dir/tests/dht_test.c b/src/ipcpd/unicast/dir/tests/dht_test.c
new file mode 100644
index 00000000..b6563a03
--- /dev/null
+++ b/src/ipcpd/unicast/dir/tests/dht_test.c
@@ -0,0 +1,1925 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Unit tests of the DHT
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#define __DHT_TEST__
+
+#if defined(__linux__) || defined(__CYGWIN__)
+#define _DEFAULT_SOURCE
+#else
+#define _POSIX_C_SOURCE 200112L
+#endif
+
+#include <ouroboros/test.h>
+#include <ouroboros/list.h>
+#include <ouroboros/utils.h>
+
+#include "dht.pb-c.h"
+
+#include <assert.h>
+#include <inttypes.h>
+#include <time.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define DHT_MAX_RAND_SIZE 64
+#define DHT_TEST_KEY_LEN 32
+#define DHT_TEST_ADDR 0x1234567890abcdefULL
+
+/* Packet sink for DHT tests, defined before dht.c is included below
+ * so the DHT's outgoing packets can be captured and inspected. */
+struct {
+ bool enabled;
+
+ struct list_head list;
+ size_t len;
+} sink;
+
+struct message {
+ struct list_head next;
+ void * msg;
+ uint64_t dst;
+};
+
+static int sink_send_msg(buffer_t * pkt,
+ uint64_t addr)
+{
+ struct message * m;
+
+ assert(pkt != NULL);
+ assert(addr != 0);
+
+ assert(!list_is_empty(&sink.list) || sink.len == 0);
+
+ if (!sink.enabled)
+ goto finish;
+
+ m = malloc(sizeof(*m));
+ if (m == NULL) {
+ printf("Failed to malloc message.");
+ goto fail_malloc;
+ }
+
+ m->msg = dht_msg__unpack(NULL, pkt->len, pkt->data);
+ if (m->msg == NULL)
+ goto fail_unpack;
+
+ m->dst = addr;
+
+ list_add_tail(&m->next, &sink.list);
+
+ ++sink.len;
+ finish:
+ freebuf(*pkt);
+
+ return 0;
+ fail_unpack:
+ free(m);
+ fail_malloc:
+ freebuf(*pkt);
+ return -1;
+}
+
+#include "dht.c"
+
+/* Test helpers */
+
+static void sink_init(void)
+{
+ list_head_init(&sink.list);
+ sink.len = 0;
+ sink.enabled = true;
+}
+
+static void sink_clear(void)
+{
+ struct list_head * p;
+ struct list_head * h;
+
+ list_for_each_safe(p, h, &sink.list) {
+ struct message * m = list_entry(p, struct message, next);
+ list_del(&m->next);
+ dht_msg__free_unpacked((dht_msg_t *) m->msg, NULL);
+ free(m);
+ --sink.len;
+ }
+
+ assert(list_is_empty(&sink.list));
+}
+
+static void sink_fini(void)
+{
+ sink_clear();
+
+ assert(list_is_empty(&sink.list) && sink.len == 0);
+}
+
+static dht_msg_t * sink_read(void)
+{
+ struct message * m;
+ dht_msg_t * msg;
+
+ assert(!list_is_empty(&sink.list) || sink.len == 0);
+
+ if (list_is_empty(&sink.list))
+ return NULL;
+
+ m = list_first_entry(&sink.list, struct message, next);
+
+ --sink.len;
+
+ list_del(&m->next);
+
+ msg = m->msg;
+
+ free(m);
+
+ return (dht_msg_t *) msg;
+}
+
+static const buffer_t test_val = {
+ .data = (uint8_t *) "test_value",
+ .len = 10
+};
+
+static const buffer_t test_val2 = {
+ .data = (uint8_t *) "test_value_2",
+ .len = 12
+};
+
+static int random_value_len(buffer_t * b)
+{
+ assert(b != NULL);
+ assert(b->len > 0 && b->len <= DHT_MAX_RAND_SIZE);
+
+ b->data = malloc(b->len);
+ if (b->data == NULL)
+ goto fail_malloc;
+
+ random_buffer(b->data, b->len);
+
+ return 0;
+
+ fail_malloc:
+ return -ENOMEM;
+}
+
+static int random_value(buffer_t * b)
+{
+ assert(b != NULL);
+
+ b->len = rand() % DHT_MAX_RAND_SIZE + 1;
+
+ return random_value_len(b);
+}
+
+static int fill_dht_with_contacts(size_t n)
+{
+ size_t i;
+ uint8_t * id;
+
+ for (i = 0; i < n; i++) {
+ uint64_t addr = generate_cookie();
+ id = generate_id();
+ if (id == NULL)
+ goto fail_id;
+
+ if (dht_kv_update_contacts(id, addr) < 0)
+ goto fail_update;
+ free(id);
+ }
+
+ return 0;
+
+ fail_update:
+ free(id);
+ fail_id:
+ return -1;
+}
+
+static int fill_store_with_random_values(const uint8_t * key,
+ size_t len,
+ size_t n_values)
+{
+ buffer_t val;
+ struct timespec now;
+ size_t i;
+ uint8_t * _key;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ for (i = 0; i < n_values; ++i) {
+ if (key != NULL)
+ _key = (uint8_t *) key;
+ else {
+ _key = generate_id();
+ if (_key == NULL)
+ goto fail_key;
+ }
+
+ if (len == 0)
+ val.len = rand() % DHT_MAX_RAND_SIZE + 1;
+ else
+ val.len = len;
+
+ if (random_value_len(&val) < 0)
+ goto fail_value;
+
+ if (dht_kv_store(_key, val, now.tv_sec + 10) < 0)
+ goto fail_store;
+
+ freebuf(val);
+ if (key == NULL)
+ free(_key);
+ }
+
+ return 0;
+
+ fail_store:
+ freebuf(val);
+ fail_value:
+ free(_key);
+ fail_key:
+ return -1;
+}
+
+static int random_contact_list(dht_contact_msg_t *** contacts,
+ size_t max)
+{
+ size_t i;
+
+ assert(contacts != NULL);
+ assert(max > 0);
+
+ *contacts = malloc(max * sizeof(**contacts));
+ if (*contacts == NULL)
+ goto fail_malloc;
+
+ for (i = 0; i < max; i++) {
+ (*contacts)[i] = malloc(sizeof(*(*contacts)[i]));
+ if ((*contacts)[i] == NULL)
+ goto fail_contacts;
+
+ dht_contact_msg__init((*contacts)[i]);
+
+ (*contacts)[i]->id.data = generate_id();
+ if ((*contacts)[i]->id.data == NULL)
+ goto fail_contact;
+
+ (*contacts)[i]->id.len = dht.id.len;
+ (*contacts)[i]->addr = generate_cookie();
+ }
+
+ return 0;
+
+ fail_contact:
+ dht_contact_msg__free_unpacked((*contacts)[i], NULL);
+ fail_contacts:
+ while (i-- > 0)
+ free((*contacts)[i]);
+ free(*contacts);
+ fail_malloc:
+ return -ENOMEM;
+}
+
+static void clear_contacts(dht_contact_msg_t ** contacts,
+ size_t len)
+{
+ size_t i;
+
+ assert(contacts != NULL);
+
+ for (i = 0; i < len; ++i)
+ dht_contact_msg__free_unpacked(contacts[i], NULL);
+
+ free(contacts);
+}
+
+/* Start of actual tests */
+static struct dir_dht_config test_dht_config = {
+ .params = {
+ .alpha = 3,
+ .k = 8,
+ .t_expire = 86400,
+ .t_refresh = 900,
+ .t_replicate = 900
+ }
+};
+
+static int test_dht_init_fini(void)
+{
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_start_stop(void)
+{
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (dht_start() < 0) {
+ printf("Failed to start dht.\n");
+ goto fail_start;
+ }
+
+ dht_stop();
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_start:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_val_entry_create_destroy(void)
+{
+ struct val_entry * e;
+ struct timespec now;
+
+ TEST_START();
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ e = val_entry_create(test_val, now.tv_sec + 10);
+ if (e == NULL) {
+ printf("Failed to create val entry.\n");
+ goto fail_entry;
+ }
+
+ val_entry_destroy(e);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_entry:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_entry_create_destroy(void)
+{
+ struct dht_entry * e;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ e = dht_entry_create(dht.id.data);
+ if (e == NULL) {
+ printf("Failed to create dht entry.\n");
+ goto fail_entry;
+ }
+
+ dht_entry_destroy(e);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_entry:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_entry_update_get_val(void)
+{
+ struct dht_entry * e;
+ struct val_entry * v;
+ struct timespec now;
+
+ TEST_START();
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ e = dht_entry_create(dht.id.data);
+ if (e == NULL) {
+ printf("Failed to create dht entry.\n");
+ goto fail_entry;
+ }
+
+ if (dht_entry_get_val(e, test_val) != NULL) {
+ printf("Found value in empty dht entry.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_update_val(e, test_val, now.tv_sec + 10) < 0) {
+ printf("Failed to update dht entry value.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_get_val(e, test_val2) != NULL) {
+ printf("Found value in dht entry with different key.\n");
+ goto fail_get;
+ }
+
+ v = dht_entry_get_val(e, test_val);
+ if (v == NULL) {
+ printf("Failed to get value from dht entry.\n");
+ goto fail_get;
+ }
+
+ if (v->val.len != test_val.len) {
+ printf("Length in dht entry does not match expected.\n");
+ goto fail_get;
+ }
+
+ if (memcmp(v->val.data, test_val.data, test_val.len) != 0) {
+ printf("Data in dht entry does not match expected.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_update_val(e, test_val, now.tv_sec + 15) < 0) {
+ printf("Failed to update exsting dht entry value.\n");
+ goto fail_get;
+ }
+
+ if (v->t_exp != now.tv_sec + 15) {
+ printf("Expiration time in dht entry value not updated.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_update_val(e, test_val, now.tv_sec + 5) < 0) {
+ printf("Failed to update existing dht entry value (5).\n");
+ goto fail_get;
+ }
+
+ if (v->t_exp != now.tv_sec + 15) {
+ printf("Expiration time in dht entry shortened.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_get_val(e, test_val) != v) {
+ printf("Wrong value in dht entry found after update.\n");
+ goto fail_get;
+ }
+
+ dht_entry_destroy(e);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_get:
+ dht_entry_destroy(e);
+ fail_entry:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_entry_update_get_lval(void)
+{
+ struct dht_entry * e;
+ struct val_entry * v;
+ struct timespec now;
+
+ TEST_START();
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ e = dht_entry_create(dht.id.data);
+ if (e == NULL) {
+ printf("Failed to create dht entry.\n");
+ goto fail_entry;
+ }
+
+ if (dht_entry_get_lval(e, test_val) != NULL) {
+ printf("Found value in empty dht entry.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_update_lval(e, test_val) < 0) {
+ printf("Failed to update dht entry value.\n");
+ goto fail_get;
+ }
+
+ v = dht_entry_get_lval(e, test_val);
+ if (v == NULL) {
+ printf("Failed to get value from dht entry.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_get_lval(e, test_val2) != NULL) {
+ printf("Found value in dht entry in vals.\n");
+ goto fail_get;
+ }
+
+ if (v->val.len != test_val.len) {
+ printf("Length in dht entry does not match expected.\n");
+ goto fail_get;
+ }
+
+ if (memcmp(v->val.data, test_val.data, test_val.len) != 0) {
+ printf("Data in dht entry does not match expected.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_update_lval(e, test_val) < 0) {
+ printf("Failed to update existing dht entry value.\n");
+ goto fail_get;
+ }
+
+ if (dht_entry_get_lval(e, test_val) != v) {
+ printf("Wrong value in dht entry found after update.\n");
+ goto fail_get;
+ }
+
+ dht_entry_destroy(e);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_get:
+ dht_entry_destroy(e);
+ fail_entry:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_contact_create_destroy(void)
+{
+ struct contact * c;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ c = contact_create(dht.id.data, dht.addr);
+ if (c == NULL) {
+ printf("Failed to create contact.\n");
+ goto fail_contact;
+ }
+
+ contact_destroy(c);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_contact:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_update_bucket(void)
+{
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_dht_with_contacts(1000) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_update;
+ }
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_update:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_contact_list(void)
+{
+ struct list_head cl;
+ ssize_t len;
+ ssize_t items;
+
+ TEST_START();
+
+ list_head_init(&cl);
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ items = 5;
+
+ if (fill_dht_with_contacts(items) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_fill;
+ }
+
+ len = dht_kv_contact_list(dht.id.data, &cl, dht.k);
+ if (len < 0) {
+ printf("Failed to get contact list.\n");
+ goto fail_fill;
+ }
+
+ if (len != items) {
+ printf("Failed to get contacts (%zu != %zu).\n", len, items);
+ goto fail_contact_list;
+ }
+
+ contact_list_destroy(&cl);
+
+ items = 100;
+
+ if (fill_dht_with_contacts(items) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_fill;
+ }
+
+ len = dht_kv_contact_list(dht.id.data, &cl, items);
+ if (len < 0) {
+ printf("Failed to get contact list.\n");
+ goto fail_fill;
+ }
+
+ if ((size_t) len < dht.k) {
+ printf("Failed to get contacts (%zu < %zu).\n", len, dht.k);
+ goto fail_contact_list;
+ }
+
+ contact_list_destroy(&cl);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_contact_list:
+ contact_list_destroy(&cl);
+ fail_fill:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_get_values(void)
+{
+ buffer_t * vals;
+ ssize_t len;
+ size_t n = sizeof(uint64_t);
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_store_with_random_values(dht.id.data, n, 3) < 0) {
+ printf("Failed to fill store with random values.\n");
+ goto fail_fill;
+ }
+
+ len = dht_kv_retrieve(dht.id.data, &vals);
+ if (len < 0) {
+ printf("Failed to get values from store.\n");
+ goto fail_fill;
+ }
+
+ if (len != 3) {
+ printf("Failed to get %ld values (%zu).\n", 3L, len);
+ goto fail_get_values;
+ }
+
+ freebufs(vals, len);
+
+ if (fill_store_with_random_values(dht.id.data, n, 20) < 0) {
+ printf("Failed to fill store with random values.\n");
+ goto fail_fill;
+ }
+
+ len = dht_kv_retrieve(dht.id.data, &vals);
+ if (len < 0) {
+ printf("Failed to get values from store.\n");
+ goto fail_fill;
+ }
+
+ if (len != DHT_MAX_VALS) {
+ printf("Failed to get %d values.\n", DHT_MAX_VALS);
+ goto fail_get_values;
+ }
+
+ freebufs(vals, len);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_get_values:
+ freebufs(vals, len);
+ fail_fill:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_node_req_msg(void)
+{
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ size_t len;
+ uint8_t * buf;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ msg = dht_kv_find_node_req_msg(dht.id.data);
+ if (msg == NULL) {
+ printf("Failed to get find node request message.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_FIND_NODE_REQ) {
+ printf("Wrong code in find_node_req message (%s != %s).\n",
+ dht_code_str[msg->code],
+ dht_code_str[DHT_FIND_NODE_REQ]);
+ goto fail_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_node_req.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_node_req buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_node_req message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_value_req message.\n");
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_node_rsp_msg(void)
+{
+ dht_contact_msg_t ** contacts;
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ size_t len;
+ uint8_t * buf;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ msg = dht_kv_find_node_rsp_msg(dht.id.data, 0, &contacts, 0);
+ if (msg == NULL) {
+ printf("Failed to get find node response message.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_FIND_NODE_RSP) {
+ printf("Wrong code in find_node_rsp message (%s != %s).\n",
+ dht_code_str[msg->code],
+ dht_code_str[DHT_FIND_NODE_RSP]);
+ goto fail_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_node_rsp.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_node_rsp buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_node_rsp message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_node_rsp message.\n");
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_node_rsp_msg_contacts(void)
+{
+ dht_contact_msg_t ** contacts;
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ uint8_t * buf;
+ size_t len;
+ ssize_t n;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_dht_with_contacts(100) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_fill;
+ }
+
+ n = dht_kv_get_contacts(dht.id.data, &contacts);
+ if (n < 0) {
+ printf("Failed to get contacts.\n");
+ goto fail_fill;
+ }
+
+ if ((size_t) n < dht.k) {
+ printf("Failed to get enough contacts (%zu < %zu).\n", n, dht.k);
+ goto fail_fill;
+ }
+
+ msg = dht_kv_find_node_rsp_msg(dht.id.data, 0, &contacts, n);
+ if (msg == NULL) {
+ printf("Failed to build find node response message.\n");
+ goto fail_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_node_rsp.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_node_rsp buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_node_rsp message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_node_rsp message.\n");
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ clear_contacts(contacts, n);
+ fail_fill:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_req_msg(void)
+{
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ size_t len;
+ uint8_t * buf;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ msg = dht_kv_find_value_req_msg(dht.id.data);
+ if (msg == NULL) {
+ printf("Failed to build find value request message.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_FIND_VALUE_REQ) {
+ printf("Wrong code in find_value_req message (%s != %s).\n",
+ dht_code_str[msg->code],
+ dht_code_str[DHT_FIND_VALUE_REQ]);
+ goto fail_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_value_req.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_node_req buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_value_req message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_value_req message.\n");
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg(void)
+{
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ size_t len;
+ uint8_t * buf;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ msg = dht_kv_find_value_rsp_msg(dht.id.data, 0, NULL, 0, NULL, 0);
+ if (msg == NULL) {
+ printf("Failed to build find value response message.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_FIND_VALUE_RSP) {
+ printf("Wrong code in find_value_rsp message (%s != %s).\n",
+ dht_code_str[msg->code],
+ dht_code_str[DHT_FIND_VALUE_RSP]);
+ goto fail_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_value_rsp.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_value_rsp buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_value_rsp message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_value_rsp message.\n");
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg_contacts(void)
+{
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ size_t len;
+ uint8_t * buf;
+ dht_contact_msg_t ** contacts;
+ ssize_t n;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_dht_with_contacts(100) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_fill;
+ }
+
+ n = dht_kv_get_contacts(dht.id.data, &contacts);
+ if (n < 0) {
+ printf("Failed to get contacts.\n");
+ goto fail_fill;
+ }
+
+ if ((size_t) n < dht.k) {
+ printf("Failed to get enough contacts (%zu < %zu).\n", n, dht.k);
+ goto fail_fill;
+ }
+
+ msg = dht_kv_find_value_rsp_msg(dht.id.data, 0, &contacts, n, NULL, 0);
+ if (msg == NULL) {
+ printf("Failed to build find value response message.\n");
+ goto fail_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_value_rsp.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_value_rsp buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_value_rsp message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_value_rsp message.\n");
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ clear_contacts(contacts, n);
+ fail_fill:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg_values(void)
+{
+ dht_msg_t * msg;
+ dht_msg_t * upk;
+ size_t len;
+ uint8_t * buf;
+ buffer_t * values;
+ size_t i;
+ uint64_t ck;
+
+ TEST_START();
+
+ ck = generate_cookie();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ values = malloc(sizeof(*values) * 8);
+ if (values == NULL) {
+ printf("Failed to malloc values.\n");
+ goto fail_values;
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (random_value(&values[i]) < 0) {
+ printf("Failed to create random value.\n");
+ goto fail_fill;
+ }
+ }
+
+ msg = dht_kv_find_value_rsp_msg(dht.id.data, ck, NULL, 0, &values, 8);
+ if (msg == NULL) {
+ printf("Failed to build find value response message.\n");
+ goto fail_msg;
+ }
+
+ values = NULL; /* msg owns the values now */
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed length of find_value_rsp.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc find_value_rsp buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack find_value_rsp message.\n");
+ goto fail_pack;
+ }
+
+ upk = dht_msg__unpack(NULL, len, buf);
+ if (upk == NULL) {
+ printf("Failed to unpack find_value_rsp message.\n");
+ goto fail_unpack;
+ }
+
+ if (upk->code != DHT_FIND_VALUE_RSP) {
+ printf("Wrong code in find_value_rsp message (%s != %s).\n",
+ dht_code_str[upk->code],
+ dht_code_str[DHT_FIND_VALUE_RSP]);
+ goto fail_unpack;
+ }
+
+ if (upk->val == NULL) {
+ printf("No values in find_value_rsp message.\n");
+ goto fail_unpack;
+ }
+
+ if (upk->val->n_values != 8) {
+ printf("Not enough values in find_value_rsp (%zu != %zu).\n",
+ upk->val->n_values, (size_t) 8);
+ goto fail_unpack;
+ }
+
+ free(buf);
+ dht_msg__free_unpacked(msg, NULL);
+ dht_msg__free_unpacked(upk, NULL);
+
+ free(values);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_unpack:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+ free(buf);
+ fail_msg:
+ fail_fill:
+ while (i-- > 0)
+ freebuf(values[i]);
+ free(values);
+ fail_values:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_store_msg(void)
+{
+ dht_msg_t * msg;
+ size_t len;
+ uint8_t * buf;
+ struct timespec now;
+
+ TEST_START();
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ msg = dht_kv_store_msg(dht.id.data, test_val, now.tv_sec + 10);
+ if (msg == NULL) {
+ printf("Failed to get store message.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_STORE) {
+ printf("Wrong code in store message (%s != %s).\n",
+ dht_code_str[msg->code],
+ dht_code_str[DHT_STORE]);
+ goto fail_store_msg;
+ }
+
+ if (dht_kv_validate_msg(msg) < 0) {
+ printf("Failed to validate store message.\n");
+ goto fail_store_msg;
+ }
+
+ len = dht_msg__get_packed_size(msg);
+ if (len == 0) {
+ printf("Failed to get packed msg length.\n");
+ goto fail_msg;
+ }
+
+ buf = malloc(len);
+ if (buf == NULL) {
+ printf("Failed to malloc store msg buf.\n");
+ goto fail_msg;
+ }
+
+ if (dht_msg__pack(msg, buf) != len) {
+ printf("Failed to pack store message.\n");
+ goto fail_pack;
+ }
+
+ free(buf);
+
+ dht_msg__free_unpacked(msg, NULL);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_pack:
+ free(buf);
+ fail_store_msg:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_query_contacts_req_rsp(void)
+{
+ dht_msg_t * req;
+ dht_msg_t * rsp;
+ dht_contact_msg_t ** contacts;
+ size_t len = 2;
+ uint8_t * key;
+
+ TEST_START();
+
+ sink_init();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_dht_with_contacts(1) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_prep;
+ }
+
+ key = generate_id();
+ if (key == NULL) {
+ printf("Failed to generate key.\n");
+ goto fail_prep;
+ }
+
+ if (dht_kv_query_contacts(key, NULL) < 0) {
+ printf("Failed to query contacts.\n");
+ goto fail_query;
+ }
+
+ req = sink_read();
+ if (req == NULL) {
+ printf("Failed to read request from sink.\n");
+ goto fail_query;
+ }
+
+ if (dht_kv_validate_msg(req) < 0) {
+ printf("Failed to validate find node req.\n");
+ goto fail_val_req;
+ }
+
+ if (random_contact_list(&contacts, len) < 0) {
+ printf("Failed to create random contact.\n");
+ goto fail_val_req;
+ }
+
+ rsp = dht_kv_find_node_rsp_msg(key, req->find->cookie, &contacts, len);
+ if (rsp == NULL) {
+ printf("Failed to create find node response message.\n");
+ goto fail_rsp;
+ }
+
+ memcpy(rsp->src->id.data, dht.id.data, dht.id.len);
+ rsp->src->addr = generate_cookie();
+
+ if (dht_kv_validate_msg(rsp) < 0) {
+ printf("Failed to validate find node response message.\n");
+ goto fail_val_rsp;
+ }
+
+ do_dht_kv_find_node_rsp(rsp->node);
+
+ /* do_dht_kv_find_node_rsp() consumed the entries and set them to NULL */
+
+ free(contacts);
+
+ dht_msg__free_unpacked(rsp, NULL);
+
+ free(key);
+
+ dht_msg__free_unpacked(req, NULL);
+
+ sink_fini();
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_val_rsp:
+ dht_msg__free_unpacked(rsp, NULL);
+ fail_rsp:
+ while (len-- > 0)
+ dht_contact_msg__free_unpacked(contacts[len], NULL);
+ free(contacts);
+ fail_val_req:
+ dht_msg__free_unpacked(req, NULL);
+ fail_query:
+ free(key);
+ fail_prep:
+ dht_fini();
+ fail_init:
+ sink_fini();
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_req_create_destroy(void)
+{
+ struct dht_req * req;
+
+ TEST_START();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ req = dht_req_create(dht.id.data);
+ if (req == NULL) {
+ printf("Failed to create kad request.\n");
+ goto fail_req;
+ }
+
+ dht_req_destroy(req);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_req:
+ dht_fini();
+ fail_init:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_unreg(void)
+{
+ TEST_START();
+
+ sink_init();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (dht_reg(dht.id.data) < 0) {
+ printf("Failed to register own id.\n");
+ goto fail_reg;
+ }
+
+ if (sink.len != 0) {
+ printf("Packet sent without contacts!");
+ goto fail_msg;
+ }
+
+ if (dht_unreg(dht.id.data) < 0) {
+ printf("Failed to unregister own id.\n");
+ goto fail_msg;
+ }
+
+ dht_fini();
+
+ sink_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_msg:
+ dht_unreg(dht.id.data);
+ fail_reg:
+ dht_fini();
+ fail_init:
+ sink_fini();
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_unreg_contacts(void)
+{
+ dht_msg_t * msg;
+
+ TEST_START();
+
+ sink_init();
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_dht_with_contacts(4) < 0) {
+ printf("Failed to fill bucket with contacts.\n");
+ goto fail_reg;
+ }
+
+ if (dht_reg(dht.id.data) < 0) {
+ printf("Failed to register own id.\n");
+ goto fail_reg;
+ }
+
+ if (sink.len != dht.alpha) {
+ printf("Packet sent to too few contacts!\n");
+ goto fail_msg;
+ }
+
+ msg = sink_read();
+ if (msg == NULL) {
+ printf("Failed to read message from sink.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_STORE) {
+ printf("Wrong code in dht reg message (%s != %s).\n",
+ dht_code_str[msg->code],
+ dht_code_str[DHT_STORE]);
+ goto fail_validation;
+ }
+
+ if (dht_kv_validate_msg(msg) < 0) {
+ printf("Failed to validate dht message.\n");
+ goto fail_validation;
+ }
+
+ if (dht_unreg(dht.id.data) < 0) {
+ printf("Failed to unregister own id.\n");
+ goto fail_validation;
+ }
+
+ dht_msg__free_unpacked(msg, NULL);
+
+ dht_fini();
+
+ sink_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_validation:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+ sink_clear();
+ dht_unreg(dht.id.data);
+ fail_reg:
+ dht_fini();
+ fail_init:
+ sink_fini();
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_query_local(void)
+{
+ struct timespec now;
+ buffer_t test_addr;
+
+ TEST_START();
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ if (addr_to_buf(1234321, &test_addr) < 0) {
+ printf("Failed to convert test address to buffer.\n");
+ goto fail_buf;
+ }
+
+ if (dht_init(&test_dht_config) < 0) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (dht_reg(dht.id.data) < 0) {
+ printf("Failed to register own id.\n");
+ goto fail_reg;
+ }
+
+ if (dht_query(dht.id.data) == dht.addr) {
+ printf("Succeeded to query own id.\n");
+ goto fail_get;
+ }
+
+ if (dht_kv_store(dht.id.data, test_addr, now.tv_sec + 5) < 0) {
+ printf("Failed to publish value.\n");
+ goto fail_get;
+ }
+
+ if (dht_query(dht.id.data) != 1234321) {
+ printf("Failed to return remote addr.\n");
+ goto fail_get;
+ }
+
+ if (dht_unreg(dht.id.data) < 0) {
+ printf("Failed to unregister own id.\n");
+ goto fail_get;
+ }
+
+ freebuf(test_addr);
+
+ dht_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_get:
+ dht_unreg(dht.id.data);
+ fail_reg:
+ dht_fini();
+ fail_init:
+ freebuf(test_addr);
+ fail_buf:
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_query(void)
+{
+ uint8_t * key;
+ struct dir_dht_config cfg;
+
+ TEST_START();
+
+ sink_init();
+
+ cfg = test_dht_config;
+ cfg.peer = generate_cookie();
+
+ if (dht_init(&cfg)) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ key = generate_id();
+ if (key == NULL) {
+ printf("Failed to generate key.\n");
+ goto fail_key;
+ }
+
+ if (dht_query(key) != INVALID_ADDR) {
+ printf("Succeeded to get address without contacts.\n");
+ goto fail_get;
+ }
+
+ if (sink.len != 0) {
+ printf("Packet sent without contacts!");
+ goto fail_test;
+ }
+
+ free(key);
+
+ dht_fini();
+
+ sink_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+
+ fail_test:
+ sink_clear();
+ fail_get:
+ free(key);
+ fail_key:
+ dht_fini();
+ fail_init:
+ sink_fini();
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+static int test_dht_query_contacts(void)
+{
+ dht_msg_t * msg;
+ uint8_t * key;
+ struct dir_dht_config cfg;
+
+ TEST_START();
+
+ sink_init();
+
+ cfg = test_dht_config;
+ cfg.peer = generate_cookie();
+
+ if (dht_init(&cfg)) {
+ printf("Failed to create dht.\n");
+ goto fail_init;
+ }
+
+ if (fill_dht_with_contacts(10) < 0) {
+ printf("Failed to fill with contacts!");
+ goto fail_contacts;
+ }
+
+ key = generate_id();
+ if (key == NULL) {
+ printf("Failed to generate key.");
+ goto fail_contacts;
+ }
+
+ if (dht_query(key) != INVALID_ADDR) {
+ printf("Succeeded to get address for random id.\n");
+ goto fail_query;
+ }
+
+ msg = sink_read();
+ if (msg == NULL) {
+ printf("Failed to read message.!\n");
+ goto fail_read;
+ }
+
+ if (dht_kv_validate_msg(msg) < 0) {
+ printf("Failed to validate dht message.\n");
+ goto fail_msg;
+ }
+
+ if (msg->code != DHT_FIND_VALUE_REQ) {
+ printf("Failed to validate dht message.\n");
+ goto fail_msg;
+ }
+
+ dht_msg__free_unpacked(msg, NULL);
+
+ free(key);
+
+ sink_clear();
+
+ dht_fini();
+
+ sink_fini();
+
+ TEST_SUCCESS();
+
+ return TEST_RC_SUCCESS;
+ fail_msg:
+ dht_msg__free_unpacked(msg, NULL);
+ fail_read:
+ sink_clear();
+ fail_query:
+ free(key);
+ fail_contacts:
+ dht_fini();
+ fail_init:
+ sink_fini();
+ TEST_FAIL();
+ return TEST_RC_FAIL;
+}
+
+int dht_test(int argc,
+ char ** argv)
+{
+ int rc = 0;
+
+ (void) argc;
+ (void) argv;
+
+ rc |= test_dht_init_fini();
+ rc |= test_dht_start_stop();
+ rc |= test_val_entry_create_destroy();
+ rc |= test_dht_entry_create_destroy();
+ rc |= test_dht_entry_update_get_val();
+ rc |= test_dht_entry_update_get_lval();
+ rc |= test_dht_kv_contact_create_destroy();
+ rc |= test_dht_kv_contact_list();
+ rc |= test_dht_kv_update_bucket();
+ rc |= test_dht_kv_get_values();
+ rc |= test_dht_kv_find_node_req_msg();
+ rc |= test_dht_kv_find_node_rsp_msg();
+ rc |= test_dht_kv_find_node_rsp_msg_contacts();
+ rc |= test_dht_kv_query_contacts_req_rsp();
+ rc |= test_dht_kv_find_value_req_msg();
+ rc |= test_dht_kv_find_value_rsp_msg();
+ rc |= test_dht_kv_find_value_rsp_msg_contacts();
+ rc |= test_dht_kv_find_value_rsp_msg_values();
+ rc |= test_dht_kv_store_msg();
+ rc |= test_dht_req_create_destroy();
+ rc |= test_dht_reg_unreg();
+ rc |= test_dht_reg_unreg_contacts();
+ rc |= test_dht_reg_query_local();
+ rc |= test_dht_query();
+ rc |= test_dht_query_contacts();
+
+ return rc;
+}
diff --git a/src/ipcpd/unicast/dt.c b/src/ipcpd/unicast/dt.c
index d6a3ddc9..e2679ffe 100644
--- a/src/ipcpd/unicast/dt.c
+++ b/src/ipcpd/unicast/dt.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Data Transfer Component
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -41,13 +41,15 @@
#include <ouroboros/fccntl.h>
#endif
-#include "connmgr.h"
+#include "addr-auth.h"
+#include "common/comp.h"
+#include "common/connmgr.h"
+#include "ca.h"
#include "ipcp.h"
#include "dt.h"
#include "pff.h"
#include "routing.h"
#include "psched.h"
-#include "comp.h"
#include "fa.h"
#include <stdlib.h>
@@ -57,8 +59,9 @@
#include <inttypes.h>
#include <assert.h>
-#define QOS_BLOCK_LEN 672
-#define STAT_FILE_LEN (189 + QOS_BLOCK_LEN * QOS_CUBE_MAX)
+#define QOS_BLOCK_LEN 672
+#define RIB_FILE_STRLEN (169 + RIB_TM_STRLEN + QOS_BLOCK_LEN * QOS_CUBE_MAX)
+#define RIB_NAME_STRLEN 256
#ifndef CLOCK_REALTIME_COARSE
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
@@ -80,7 +83,7 @@ struct dt_pci {
qoscube_t qc;
uint8_t ttl;
uint8_t ecn;
- uint32_t eid;
+ uint64_t eid;
};
struct {
@@ -98,39 +101,29 @@ struct {
uint8_t max_ttl;
} dt_pci_info;
-static int dt_pci_ser(struct shm_du_buff * sdb,
- struct dt_pci * dt_pci)
+static void dt_pci_ser(uint8_t * head,
+ struct dt_pci * dt_pci)
{
- uint8_t * head;
- uint8_t ttl = dt_pci_info.max_ttl;
+ uint8_t ttl = dt_pci_info.max_ttl;
- assert(sdb);
+ assert(head);
assert(dt_pci);
- head = shm_du_buff_head_alloc(sdb, dt_pci_info.head_size);
- if (head == NULL)
- return -EPERM;
-
/* FIXME: Add check and operations for Big Endian machines. */
memcpy(head, &dt_pci->dst_addr, dt_pci_info.addr_size);
memcpy(head + dt_pci_info.qc_o, &dt_pci->qc, QOS_LEN);
memcpy(head + dt_pci_info.ttl_o, &ttl, TTL_LEN);
- memcpy(head + dt_pci_info.ecn_o, &dt_pci->ecn, ECN_LEN);
+ memcpy(head + dt_pci_info.ecn_o, &dt_pci->ecn, ECN_LEN);
memcpy(head + dt_pci_info.eid_o, &dt_pci->eid, dt_pci_info.eid_size);
- return 0;
}
-static void dt_pci_des(struct shm_du_buff * sdb,
- struct dt_pci * dt_pci)
+static void dt_pci_des(uint8_t * head,
+ struct dt_pci * dt_pci)
{
- uint8_t * head;
-
- assert(sdb);
+ assert(head);
assert(dt_pci);
- head = shm_du_buff_head(sdb);
-
/* Decrease TTL */
--*(head + dt_pci_info.ttl_o);
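The (de)serializers above treat the PCI as a flat byte array: destination address, QoS cube, TTL, ECN and endpoint id are laid out back to back, with the offsets computed once from the configured field sizes (see dt_init() further down in this diff). A sketch of that layout arithmetic, with illustrative field sizes:

    #include <stddef.h>

    #define QOS_LEN 1   /* illustrative sizes; dt.c defines the real ones */
    #define TTL_LEN 1
    #define ECN_LEN 1

    struct pci_info {
            size_t addr_size, eid_size;
            size_t qc_o, ttl_o, ecn_o, eid_o, head_size;
    };

    static void pci_layout(struct pci_info * i,
                           size_t            addr_size,
                           size_t            eid_size)
    {
            i->addr_size = addr_size;
            i->eid_size  = eid_size;           /* 8: 64-bit EIDs only */
            i->qc_o      = i->addr_size;       /* QoS cube follows the address */
            i->ttl_o     = i->qc_o + QOS_LEN;  /* then TTL */
            i->ecn_o     = i->ttl_o + TTL_LEN; /* then ECN */
            i->eid_o     = i->ecn_o + ECN_LEN; /* then the endpoint id */
            i->head_size = i->eid_o + i->eid_size;
    }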
@@ -152,6 +145,8 @@ static void dt_pci_shrink(struct shm_du_buff * sdb)
struct {
struct psched * psched;
+ uint64_t addr;
+
struct pff * pff[QOS_CUBE_MAX];
struct routing_i * routing[QOS_CUBE_MAX];
#ifdef IPCP_FLOW_STATS
@@ -184,24 +179,28 @@ struct {
pthread_t listener;
} dt;
-static int dt_stat_read(const char * path,
- char * buf,
- size_t len)
+static int dt_rib_read(const char * path,
+ char * buf,
+ size_t len)
{
#ifdef IPCP_FLOW_STATS
int fd;
int i;
char str[QOS_BLOCK_LEN + 1];
char addrstr[20];
- char tmstr[20];
+ char * entry;
+ char tmstr[RIB_TM_STRLEN];
size_t rxqlen = 0;
size_t txqlen = 0;
struct tm * tm;
/* NOTE: we may need stronger checks. */
- fd = atoi(path);
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+ assert(entry);
- if (len < STAT_FILE_LEN)
+ fd = atoi(entry);
+
+ if (len < RIB_FILE_STRLEN)
return 0;
buf[0] = '\0';
@@ -213,13 +212,13 @@ static int dt_stat_read(const char * path,
return 0;
}
- if (dt.stat[fd].addr == ipcpi.dt_addr)
+ if (dt.stat[fd].addr == dt.addr)
sprintf(addrstr, "%s", dt.comps[fd].name);
else
- sprintf(addrstr, "%" PRIu64, dt.stat[fd].addr);
+ sprintf(addrstr, ADDR_FMT32, ADDR_VAL32(&dt.stat[fd].addr));
- tm = localtime(&dt.stat[fd].stamp);
- strftime(tmstr, sizeof(tmstr), "%F %T", tm);
+ tm = gmtime(&dt.stat[fd].stamp);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
if (fd >= PROG_RES_FDS) {
fccntl(fd, FLOWGRXQLEN, &rxqlen);
@@ -227,12 +226,11 @@ static int dt_stat_read(const char * path,
}
sprintf(buf,
- "Flow established at: %20s\n"
+ "Flow established at: %.*s\n"
"Endpoint address: %20s\n"
"Queued packets (rx): %20zu\n"
"Queued packets (tx): %20zu\n\n",
- tmstr, addrstr, rxqlen, txqlen);
-
+ RIB_TM_STRLEN - 1, tmstr, addrstr, rxqlen, txqlen);
for (i = 0; i < QOS_CUBE_MAX; ++i) {
sprintf(str,
"Qos cube %3d:\n"
@@ -271,7 +269,7 @@ static int dt_stat_read(const char * path,
pthread_mutex_unlock(&dt.stat[fd].lock);
- return STAT_FILE_LEN;
+ return RIB_FILE_STRLEN;
#else
(void) path;
(void) buf;
@@ -280,7 +278,7 @@ static int dt_stat_read(const char * path,
#endif
}
-static int dt_stat_readdir(char *** buf)
+static int dt_rib_readdir(char *** buf)
{
#ifdef IPCP_FLOW_STATS
char entry[RIB_PATH_LEN + 1];
@@ -290,94 +288,88 @@ static int dt_stat_readdir(char *** buf)
pthread_rwlock_rdlock(&dt.lock);
if (dt.n_flows < 1) {
- pthread_rwlock_unlock(&dt.lock);
- return 0;
+ *buf = NULL;
+ goto no_flows;
}
*buf = malloc(sizeof(**buf) * dt.n_flows);
- if (*buf == NULL) {
- pthread_rwlock_unlock(&dt.lock);
- return -ENOMEM;
- }
+ if (*buf == NULL)
+ goto fail_entries;
for (i = 0; i < PROG_MAX_FLOWS; ++i) {
pthread_mutex_lock(&dt.stat[i].lock);
if (dt.stat[i].stamp == 0) {
pthread_mutex_unlock(&dt.stat[i].lock);
- /* Optimization: skip unused res_fds. */
- if (i < PROG_RES_FDS)
- i = PROG_RES_FDS;
- continue;
+ break;
}
+ pthread_mutex_unlock(&dt.stat[i].lock);
+
sprintf(entry, "%zu", i);
(*buf)[idx] = malloc(strlen(entry) + 1);
- if ((*buf)[idx] == NULL) {
- while (idx-- > 0)
- free((*buf)[idx]);
- free(buf);
- pthread_mutex_unlock(&dt.stat[i].lock);
- pthread_rwlock_unlock(&dt.lock);
- return -ENOMEM;
- }
+ if ((*buf)[idx] == NULL)
+ goto fail_entry;
strcpy((*buf)[idx++], entry);
- pthread_mutex_unlock(&dt.stat[i].lock);
}
-
+ no_flows:
pthread_rwlock_unlock(&dt.lock);
- assert((size_t) idx == dt.n_flows);
-
return idx;
+
+ fail_entry:
+ while (idx-- > 0)
+ free((*buf)[idx]);
+ free(*buf);
+ fail_entries:
+ pthread_rwlock_unlock(&dt.lock);
+ return -ENOMEM;
#else
(void) buf;
return 0;
#endif
}
-static int dt_stat_getattr(const char * path,
- struct stat * st)
+static int dt_rib_getattr(const char * path,
+ struct rib_attr * attr)
{
#ifdef IPCP_FLOW_STATS
- int fd;
+ int fd;
+ char * entry;
- fd = atoi(path);
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+ assert(entry);
- st->st_mode = S_IFREG | 0755;
- st->st_nlink = 1;
- st->st_uid = getuid();
- st->st_gid = getgid();
+ fd = atoi(entry);
pthread_mutex_lock(&dt.stat[fd].lock);
if (dt.stat[fd].stamp != -1) {
- st->st_size = STAT_FILE_LEN;
- st->st_mtime = dt.stat[fd].stamp;
+ attr->size = RIB_FILE_STRLEN;
+ attr->mtime = dt.stat[fd].stamp;
} else {
- st->st_size = 0;
- st->st_mtime = 0;
+ attr->size = 0;
+ attr->mtime = 0;
}
pthread_mutex_unlock(&dt.stat[fd].lock);
#else
(void) path;
- (void) st;
+ (void) attr;
#endif
return 0;
}
static struct rib_ops r_ops = {
- .read = dt_stat_read,
- .readdir = dt_stat_readdir,
- .getattr = dt_stat_getattr
+ .read = dt_rib_read,
+ .readdir = dt_rib_readdir,
+ .getattr = dt_rib_getattr
};
#ifdef IPCP_FLOW_STATS
-
static void stat_used(int fd,
uint64_t addr)
{
@@ -407,6 +399,7 @@ static void handle_event(void * self,
const void * o)
{
struct conn * c;
+ int fd;
(void) self;
@@ -414,19 +407,20 @@ static void handle_event(void * self,
switch (event) {
case NOTIFY_DT_CONN_ADD:
+ fd = c->flow_info.fd;
#ifdef IPCP_FLOW_STATS
- stat_used(c->flow_info.fd, c->conn_info.addr);
+ stat_used(fd, c->conn_info.addr);
#endif
- psched_add(dt.psched, c->flow_info.fd);
- log_dbg("Added fd %d to packet scheduler.", c->flow_info.fd);
+ psched_add(dt.psched, fd);
+ log_dbg("Added fd %d to packet scheduler.", fd);
break;
case NOTIFY_DT_CONN_DEL:
+ fd = c->flow_info.fd;
#ifdef IPCP_FLOW_STATS
- stat_used(c->flow_info.fd, INVALID_ADDR);
+ stat_used(fd, INVALID_ADDR);
#endif
- psched_del(dt.psched, c->flow_info.fd);
- log_dbg("Removed fd %d from "
- "packet scheduler.", c->flow_info.fd);
+ psched_del(dt.psched, fd);
+ log_dbg("Removed fd %d from packet scheduler.", fd);
break;
default:
break;
@@ -440,26 +434,33 @@ static void packet_handler(int fd,
struct dt_pci dt_pci;
int ret;
int ofd;
-#ifdef IPCP_FLOW_STATS
+ uint8_t * head;
size_t len;
-#else
+
+ len = shm_du_buff_len(sdb);
+
+#ifndef IPCP_FLOW_STATS
(void) fd;
-#endif
+#else
+ pthread_mutex_lock(&dt.stat[fd].lock);
-#ifdef IPCP_FLOW_STATS
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+ ++dt.stat[fd].rcv_pkt[qc];
+ dt.stat[fd].rcv_bytes[qc] += len;
+
+ pthread_mutex_unlock(&dt.stat[fd].lock);
#endif
memset(&dt_pci, 0, sizeof(dt_pci));
- dt_pci_des(sdb, &dt_pci);
- if (dt_pci.dst_addr != ipcpi.dt_addr) {
+
+ head = shm_du_buff_head(sdb);
+
+ dt_pci_des(head, &dt_pci);
+ if (dt_pci.dst_addr != dt.addr) {
if (dt_pci.ttl == 0) {
log_dbg("TTL was zero.");
ipcp_sdb_release(sdb);
#ifdef IPCP_FLOW_STATS
pthread_mutex_lock(&dt.stat[fd].lock);
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
++dt.stat[fd].r_drp_pkt[qc];
dt.stat[fd].r_drp_bytes[qc] += len;
@@ -471,13 +472,12 @@ static void packet_handler(int fd,
/* FIXME: Use qoscube from PCI instead of incoming flow. */
ofd = pff_nhop(dt.pff[qc], dt_pci.dst_addr);
if (ofd < 0) {
- log_dbg("No next hop for %" PRIu64, dt_pci.dst_addr);
+ log_dbg("No next hop for %" PRIu64 ".",
+ dt_pci.dst_addr);
ipcp_sdb_release(sdb);
#ifdef IPCP_FLOW_STATS
pthread_mutex_lock(&dt.stat[fd].lock);
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
++dt.stat[fd].f_nhp_pkt[qc];
dt.stat[fd].f_nhp_bytes[qc] += len;
@@ -486,6 +486,8 @@ static void packet_handler(int fd,
return;
}
+ (void) ca_calc_ecn(ofd, head + dt_pci_info.ecn_o, qc, len);
+
ret = ipcp_flow_write(ofd, sdb);
if (ret < 0) {
log_dbg("Failed to write packet to fd %d.", ofd);
@@ -493,12 +495,6 @@ static void packet_handler(int fd,
notifier_event(NOTIFY_DT_FLOW_DOWN, &ofd);
ipcp_sdb_release(sdb);
#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[fd].lock);
-
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[fd].lock);
pthread_mutex_lock(&dt.stat[ofd].lock);
++dt.stat[ofd].w_drp_pkt[qc];
@@ -509,12 +505,6 @@ static void packet_handler(int fd,
return;
}
#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[fd].lock);
-
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[fd].lock);
pthread_mutex_lock(&dt.stat[ofd].lock);
++dt.stat[ofd].snd_pkt[qc];
@@ -525,65 +515,20 @@ static void packet_handler(int fd,
} else {
dt_pci_shrink(sdb);
if (dt_pci.eid >= PROG_RES_FDS) {
- if (ipcp_flow_write(dt_pci.eid, sdb)) {
- ipcp_sdb_release(sdb);
-#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[fd].lock);
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
- pthread_mutex_unlock(&dt.stat[fd].lock);
-
- pthread_mutex_lock(&dt.stat[dt_pci.eid].lock);
- ++dt.stat[dt_pci.eid].w_drp_pkt[qc];
- dt.stat[dt_pci.eid].w_drp_bytes[qc] += len;
- pthread_mutex_unlock(&dt.stat[dt_pci.eid].lock);
-#endif
-
- }
-#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[fd].lock);
-
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[fd].lock);
- pthread_mutex_lock(&dt.stat[dt_pci.eid].lock);
-
- ++dt.stat[dt_pci.eid].rcv_pkt[qc];
- dt.stat[dt_pci.eid].rcv_bytes[qc] += len;
- ++dt.stat[dt_pci.eid].lcl_r_pkt[qc];
- dt.stat[dt_pci.eid].lcl_r_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[dt_pci.eid].lock);
-#endif
+ uint8_t ecn = *(head + dt_pci_info.ecn_o);
+ fa_np1_rcv(dt_pci.eid, ecn, sdb);
return;
}
if (dt.comps[dt_pci.eid].post_packet == NULL) {
- log_err("No registered component on eid %d.",
+ log_err("No registered component on eid %" PRIu64 ".",
dt_pci.eid);
ipcp_sdb_release(sdb);
-#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[fd].lock);
-
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[fd].lock);
- pthread_mutex_lock(&dt.stat[dt_pci.eid].lock);
-
- ++dt.stat[dt_pci.eid].w_drp_pkt[qc];
- dt.stat[dt_pci.eid].w_drp_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[dt_pci.eid].lock);
-#endif
return;
}
#ifdef IPCP_FLOW_STATS
pthread_mutex_lock(&dt.stat[fd].lock);
- ++dt.stat[fd].rcv_pkt[qc];
- dt.stat[fd].rcv_bytes[qc] += len;
++dt.stat[fd].lcl_r_pkt[qc];
dt.stat[fd].lcl_r_bytes[qc] += len;
@@ -620,28 +565,36 @@ static void * dt_conn_handle(void * o)
return 0;
}
-int dt_init(enum pol_routing pr,
- uint8_t addr_size,
- uint8_t eid_size,
- uint8_t max_ttl)
+int dt_init(struct dt_config cfg)
{
int i;
int j;
- char dtstr[256];
- int pp;
+ char dtstr[RIB_NAME_STRLEN + 1];
+ enum pol_pff pp;
struct conn_info info;
memset(&info, 0, sizeof(info));
+ dt.addr = addr_auth_address();
+ if (dt.addr == INVALID_ADDR) {
+ log_err("Failed to get address");
+ return -1;
+ }
+
strcpy(info.comp_name, DT_COMP);
strcpy(info.protocol, DT_PROTO);
info.pref_version = 1;
info.pref_syntax = PROTO_FIXED;
- info.addr = ipcpi.dt_addr;
+ info.addr = dt.addr;
+
+ if (cfg.eid_size != 8) { /* only support 64 bits from now on */
+ log_warn("Invalid EID size. Only 64-bit EIDs are supported.");
+ cfg.eid_size = 8;
+ }
- dt_pci_info.addr_size = addr_size;
- dt_pci_info.eid_size = eid_size;
- dt_pci_info.max_ttl = max_ttl;
+ dt_pci_info.addr_size = cfg.addr_size;
+ dt_pci_info.eid_size = cfg.eid_size;
+ dt_pci_info.max_ttl = cfg.max_ttl;
dt_pci_info.qc_o = dt_pci_info.addr_size;
dt_pci_info.ttl_o = dt_pci_info.qc_o + QOS_LEN;
@@ -649,18 +602,12 @@ int dt_init(enum pol_routing pr,
dt_pci_info.eid_o = dt_pci_info.ecn_o + ECN_LEN;
dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size;
- if (notifier_reg(handle_event, NULL)) {
- log_err("Failed to register with notifier.");
- goto fail_notifier_reg;
- }
-
if (connmgr_comp_init(COMPID_DT, &info)) {
log_err("Failed to register with connmgr.");
goto fail_connmgr_comp_init;
}
- pp = routing_init(pr);
- if (pp < 0) {
+ if (routing_init(&cfg.routing, &pp) < 0) {
log_err("Failed to init routing.");
goto fail_routing;
}
@@ -697,6 +644,7 @@ int dt_init(enum pol_routing pr,
for (i = 0; i < PROG_MAX_FLOWS; ++i)
if (pthread_mutex_init(&dt.stat[i].lock, NULL)) {
+ log_err("Failed to init mutex for flow %d.", i);
for (j = 0; j < i; ++j)
pthread_mutex_destroy(&dt.stat[j].lock);
goto fail_stat_lock;
@@ -704,9 +652,11 @@ int dt_init(enum pol_routing pr,
dt.n_flows = 0;
#endif
- sprintf(dtstr, "%s.%" PRIu64, DT, ipcpi.dt_addr);
- if (rib_reg(dtstr, &r_ops))
+ sprintf(dtstr, "%s." ADDR_FMT32, DT, ADDR_VAL32(&dt.addr));
+ if (rib_reg(dtstr, &r_ops)) {
+ log_err("Failed to register RIB.");
goto fail_rib_reg;
+ }
return 0;
@@ -730,16 +680,16 @@ int dt_init(enum pol_routing pr,
fail_routing:
connmgr_comp_fini(COMPID_DT);
fail_connmgr_comp_init:
- notifier_unreg(&handle_event);
- fail_notifier_reg:
return -1;
}
void dt_fini(void)
{
+ char dtstr[RIB_NAME_STRLEN + 1];
int i;
- rib_unreg(DT);
+ sprintf(dtstr, "%s." ADDR_FMT32, DT, ADDR_VAL32(&dt.addr));
+ rib_unreg(dtstr);
#ifdef IPCP_FLOW_STATS
for (i = 0; i < PROG_MAX_FLOWS; ++i)
pthread_mutex_destroy(&dt.stat[i].lock);
@@ -757,31 +707,53 @@ void dt_fini(void)
routing_fini();
connmgr_comp_fini(COMPID_DT);
-
- notifier_unreg(&handle_event);
}
int dt_start(void)
{
- dt.psched = psched_create(packet_handler);
+ dt.psched = psched_create(packet_handler, ipcp_flow_read);
if (dt.psched == NULL) {
log_err("Failed to create N-1 packet scheduler.");
- return -1;
+ goto fail_psched;
+ }
+
+ if (notifier_reg(handle_event, NULL)) {
+ log_err("Failed to register with notifier.");
+ goto fail_notifier_reg;
}
if (pthread_create(&dt.listener, NULL, dt_conn_handle, NULL)) {
log_err("Failed to create listener thread.");
- psched_destroy(dt.psched);
- return -1;
+ goto fail_listener;
+ }
+
+ if (routing_start() < 0) {
+ log_err("Failed to start routing.");
+ goto fail_routing;
}
return 0;
+
+ fail_routing:
+ pthread_cancel(dt.listener);
+ pthread_join(dt.listener, NULL);
+ fail_listener:
+ notifier_unreg(&handle_event);
+ fail_notifier_reg:
+ psched_destroy(dt.psched);
+ fail_psched:
+ return -1;
}
void dt_stop(void)
{
+ routing_stop();
+
pthread_cancel(dt.listener);
pthread_join(dt.listener, NULL);
+
+ notifier_unreg(&handle_event);
+
psched_destroy(dt.psched);
}
@@ -789,81 +761,111 @@ int dt_reg_comp(void * comp,
void (* func)(void * func, struct shm_du_buff *),
char * name)
{
- int res_fd;
+ int eid;
- assert(func);
+ assert(func != NULL);
pthread_rwlock_wrlock(&dt.lock);
- res_fd = bmp_allocate(dt.res_fds);
- if (!bmp_is_id_valid(dt.res_fds, res_fd)) {
- log_warn("Reserved fds depleted.");
+ eid = bmp_allocate(dt.res_fds);
+ if (!bmp_is_id_valid(dt.res_fds, eid)) {
+ log_err("Cannot allocate EID.");
pthread_rwlock_unlock(&dt.lock);
return -EBADF;
}
- assert(dt.comps[res_fd].post_packet == NULL);
- assert(dt.comps[res_fd].comp == NULL);
- assert(dt.comps[res_fd].name == NULL);
+ assert(dt.comps[eid].post_packet == NULL);
+ assert(dt.comps[eid].comp == NULL);
+ assert(dt.comps[eid].name == NULL);
- dt.comps[res_fd].post_packet = func;
- dt.comps[res_fd].comp = comp;
- dt.comps[res_fd].name = name;
+ dt.comps[eid].post_packet = func;
+ dt.comps[eid].comp = comp;
+ dt.comps[eid].name = name;
pthread_rwlock_unlock(&dt.lock);
#ifdef IPCP_FLOW_STATS
- stat_used(res_fd, ipcpi.dt_addr);
+ stat_used(eid, dt.addr);
#endif
- return res_fd;
+ return eid;
+}
+
+void dt_unreg_comp(int eid)
+{
+ assert(eid >= 0 && eid < PROG_RES_FDS);
+
+ pthread_rwlock_wrlock(&dt.lock);
+
+ assert(dt.comps[eid].post_packet != NULL);
+
+ dt.comps[eid].post_packet = NULL;
+ dt.comps[eid].comp = NULL;
+ dt.comps[eid].name = NULL;
+
+ pthread_rwlock_unlock(&dt.lock);
+
+ return;
}
int dt_write_packet(uint64_t dst_addr,
qoscube_t qc,
- int np1_fd,
+ uint64_t eid,
struct shm_du_buff * sdb)
{
- int fd;
struct dt_pci dt_pci;
+ int fd;
int ret;
-#ifdef IPCP_FLOW_STATS
+ uint8_t * head;
size_t len;
-#endif
+
assert(sdb);
- assert(dst_addr != ipcpi.dt_addr);
+ assert(dst_addr != dt.addr);
+
+ len = shm_du_buff_len(sdb);
+#ifdef IPCP_FLOW_STATS
+ if (eid < PROG_RES_FDS) {
+ pthread_mutex_lock(&dt.stat[eid].lock);
+
+ ++dt.stat[eid].lcl_r_pkt[qc];
+ dt.stat[eid].lcl_r_bytes[qc] += len;
+
+ pthread_mutex_unlock(&dt.stat[eid].lock);
+ }
+#endif
fd = pff_nhop(dt.pff[qc], dst_addr);
if (fd < 0) {
- log_dbg("Could not get nhop for addr %" PRIu64 ".", dst_addr);
+ log_dbg("Could not get nhop for " ADDR_FMT32 ".",
+ ADDR_VAL32(&dst_addr));
#ifdef IPCP_FLOW_STATS
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+ if (eid < PROG_RES_FDS) {
+ pthread_mutex_lock(&dt.stat[eid].lock);
- pthread_mutex_lock(&dt.stat[np1_fd].lock);
+ ++dt.stat[eid].lcl_r_pkt[qc];
+ dt.stat[eid].lcl_r_bytes[qc] += len;
- ++dt.stat[np1_fd].lcl_r_pkt[qc];
- dt.stat[np1_fd].lcl_r_bytes[qc] += len;
- ++dt.stat[np1_fd].f_nhp_pkt[qc];
- dt.stat[np1_fd].f_nhp_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[np1_fd].lock);
+ pthread_mutex_unlock(&dt.stat[eid].lock);
+ }
#endif
- return -1;
+ return -EPERM;
}
+ head = shm_du_buff_head_alloc(sdb, dt_pci_info.head_size);
+ if (head == NULL) {
+ log_dbg("Failed to allocate DT header.");
+ goto fail_write;
+ }
+
+ len = shm_du_buff_len(sdb);
+
dt_pci.dst_addr = dst_addr;
dt_pci.qc = qc;
- dt_pci.eid = np1_fd;
+ dt_pci.eid = eid;
dt_pci.ecn = 0;
- if (dt_pci_ser(sdb, &dt_pci)) {
- log_dbg("Failed to serialize PDU.");
-#ifdef IPCP_FLOW_STATS
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
-#endif
- goto fail_write;
- }
-#ifdef IPCP_FLOW_STATS
- len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
-#endif
+ (void) ca_calc_ecn(fd, &dt_pci.ecn, qc, len);
+
+ dt_pci_ser(head, &dt_pci);
+
ret = ipcp_flow_write(fd, sdb);
if (ret < 0) {
log_dbg("Failed to write packet to fd %d.", fd);
@@ -872,12 +874,6 @@ int dt_write_packet(uint64_t dst_addr,
goto fail_write;
}
#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[np1_fd].lock);
-
- ++dt.stat[np1_fd].lcl_r_pkt[qc];
- dt.stat[np1_fd].lcl_r_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[np1_fd].lock);
pthread_mutex_lock(&dt.stat[fd].lock);
if (dt_pci.eid < PROG_RES_FDS) {
@@ -893,15 +889,9 @@ int dt_write_packet(uint64_t dst_addr,
fail_write:
#ifdef IPCP_FLOW_STATS
- pthread_mutex_lock(&dt.stat[np1_fd].lock);
-
- ++dt.stat[np1_fd].lcl_w_pkt[qc];
- dt.stat[np1_fd].lcl_w_bytes[qc] += len;
-
- pthread_mutex_unlock(&dt.stat[np1_fd].lock);
pthread_mutex_lock(&dt.stat[fd].lock);
- if (dt_pci.eid < PROG_RES_FDS) {
+ if (eid < PROG_RES_FDS) {
++dt.stat[fd].lcl_w_pkt[qc];
dt.stat[fd].lcl_w_bytes[qc] += len;
}
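For orientation, the DT header that dt_pci_ser() now writes at the head pointer is laid out from offsets computed once in dt_init(). A minimal sketch of that layout arithmetic, assuming one-octet QOS_LEN and ECN_LEN fields and a one-octet TTL (the TTL offset line itself falls between the hunks above):

#include <stddef.h>
#include <stdint.h>

#define QOS_LEN 1 /* assumption: one octet for the QoS cube */
#define ECN_LEN 1 /* assumption: one octet for the ECN mark */

struct pci_info {                            /* mirrors dt_pci_info */
        uint8_t addr_size;
        uint8_t eid_size;
        size_t  qc_o, ttl_o, ecn_o, eid_o, head_size;
};

static void pci_layout(struct pci_info * p, uint8_t addr_size)
{
        p->addr_size = addr_size;            /* destination address */
        p->eid_size  = 8;                    /* 64-bit EIDs only    */
        p->qc_o      = p->addr_size;         /* QoS cube            */
        p->ttl_o     = p->qc_o + QOS_LEN;    /* TTL                 */
        p->ecn_o     = p->ttl_o + 1;         /* ECN mark            */
        p->eid_o     = p->ecn_o + ECN_LEN;   /* endpoint id         */
        p->head_size = p->eid_o + p->eid_size;
}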
diff --git a/src/ipcpd/unicast/dt.h b/src/ipcpd/unicast/dt.h
index 73b71a92..2c5b7978 100644
--- a/src/ipcpd/unicast/dt.h
+++ b/src/ipcpd/unicast/dt.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Data Transfer component
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,11 +31,7 @@
#define DT_PROTO "dtp"
#define INVALID_ADDR 0
-int dt_init(enum pol_routing pr,
- uint8_t addr_size,
- uint8_t eid_size,
- uint8_t max_ttl
-);
+int dt_init(struct dt_config cfg);
void dt_fini(void);
@@ -43,13 +39,15 @@ int dt_start(void);
void dt_stop(void);
-int dt_reg_comp(void * comp,
+int dt_reg_comp(void * comp,
void (* func)(void * comp, struct shm_du_buff * sdb),
- char * name);
+ char * name);
+
+void dt_unreg_comp(int eid);
int dt_write_packet(uint64_t dst_addr,
qoscube_t qc,
- int res_fd,
+ uint64_t eid,
struct shm_du_buff * sdb);
#endif /* OUROBOROS_IPCPD_UNICAST_DT_H */
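A hedged usage sketch of the reworked dt.h API: a component registers a post_packet callback to claim a reserved EID, and releases it again with the new dt_unreg_comp(). Everything except the dt_* calls and ipcp_sdb_release() is illustrative:

/* Illustrative component on top of dt.h; my_comp and post() are
 * assumptions, the dt_reg_comp()/dt_unreg_comp() calls are not. */
struct my_comp {
        int eid;
};

static void post(void * comp, struct shm_du_buff * sdb)
{
        (void) comp;
        ipcp_sdb_release(sdb);           /* consume the packet */
}

static int my_comp_start(struct my_comp * c)
{
        c->eid = dt_reg_comp(c, post, "my-comp");
        return c->eid < 0 ? -1 : 0;
}

static void my_comp_stop(struct my_comp * c)
{
        dt_unreg_comp(c->eid);           /* new in this change */
}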
diff --git a/src/ipcpd/unicast/enroll.c b/src/ipcpd/unicast/enroll.c
deleted file mode 100644
index 3b4a5a89..00000000
--- a/src/ipcpd/unicast/enroll.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Enrollment Task
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#if defined(__linux__) || defined(__CYGWIN__)
-#define _DEFAULT_SOURCE
-#else
-#define _POSIX_C_SOURCE 199309L
-#endif
-
-#define OUROBOROS_PREFIX "enrollment"
-
-#include <ouroboros/endian.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/dev.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/sockets.h>
-
-#include "connmgr.h"
-#include "enroll.h"
-#include "ipcp.h"
-
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <pthread.h>
-
-#include "ipcp_config.pb-c.h"
-typedef EnrollMsg enroll_msg_t;
-
-#define ENROLL_COMP "Enrollment"
-#define ENROLL_PROTO "OEP" /* Ouroboros enrollment protocol */
-#define ENROLL_WARN_TIME_OFFSET 20
-#define ENROLL_BUF_LEN 1024
-
-enum enroll_state {
- ENROLL_NULL = 0,
- ENROLL_INIT,
- ENROLL_RUNNING
-};
-
-struct {
- struct ipcp_config conf;
- enum enroll_state state;
- pthread_t listener;
-} enroll;
-
-static int send_rcv_enroll_msg(int fd)
-{
- enroll_msg_t req = ENROLL_MSG__INIT;
- enroll_msg_t * reply;
- uint8_t buf[ENROLL_BUF_LEN];
- ssize_t len;
- ssize_t delta_t;
- struct timespec t0;
- struct timespec rtt;
-
- req.code = ENROLL_CODE__ENROLL_REQ;
-
- len = enroll_msg__get_packed_size(&req);
- if (len < 0) {
- log_dbg("Failed pack request message.");
- return -1;
- }
-
- enroll_msg__pack(&req, buf);
-
- clock_gettime(CLOCK_REALTIME, &t0);
-
- if (flow_write(fd, buf, len) < 0) {
- log_dbg("Failed to send request message.");
- return -1;
- }
-
- len = flow_read(fd, buf, ENROLL_BUF_LEN);
- if (len < 0) {
- log_dbg("No enrollment reply received.");
- return -1;
- }
-
- log_dbg("Received enrollment info (%zd bytes).", len);
-
- reply = enroll_msg__unpack(NULL, len, buf);
- if (reply == NULL) {
- log_dbg("No enrollment response.");
- return -1;
- }
-
- if (reply->code != ENROLL_CODE__ENROLL_BOOT) {
- log_dbg("Failed to unpack enrollment response.");
- enroll_msg__free_unpacked(reply, NULL);
- return -1;
- }
-
- if (!(reply->has_t_sec && reply->has_t_nsec)) {
- log_dbg("No time in response message.");
- enroll_msg__free_unpacked(reply, NULL);
- return -1;
- }
-
- clock_gettime(CLOCK_REALTIME, &rtt);
-
- delta_t = ts_diff_ms(&t0, &rtt);
-
- rtt.tv_sec = reply->t_sec;
- rtt.tv_nsec = reply->t_nsec;
-
- if (labs(ts_diff_ms(&t0, &rtt)) - delta_t > ENROLL_WARN_TIME_OFFSET)
- log_warn("Clock offset above threshold.");
-
- strcpy(enroll.conf.layer_info.layer_name,
- reply->conf->layer_info->layer_name);
- enroll.conf.type = reply->conf->ipcp_type;
- enroll.conf.addr_size = reply->conf->addr_size;
- enroll.conf.eid_size = reply->conf->eid_size;
- enroll.conf.max_ttl = reply->conf->max_ttl;
- enroll.conf.addr_auth_type = reply->conf->addr_auth_type;
- enroll.conf.routing_type = reply->conf->routing_type;
- enroll.conf.layer_info.dir_hash_algo
- = reply->conf->layer_info->dir_hash_algo;
-
- enroll_msg__free_unpacked(reply, NULL);
-
- return 0;
-}
-
-static ssize_t enroll_pack(uint8_t ** buf)
-{
- enroll_msg_t msg = ENROLL_MSG__INIT;
- ipcp_config_msg_t config = IPCP_CONFIG_MSG__INIT;
- layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT;
- struct timespec now;
- ssize_t len;
-
- clock_gettime(CLOCK_REALTIME, &now);
-
- msg.code = ENROLL_CODE__ENROLL_BOOT;
- msg.has_t_sec = true;
- msg.t_sec = now.tv_sec;
- msg.has_t_nsec = true;
- msg.t_nsec = now.tv_nsec;
- msg.conf = &config;
-
- config.ipcp_type = enroll.conf.type;
- config.has_addr_size = true;
- config.addr_size = enroll.conf.addr_size;
- config.has_eid_size = true;
- config.eid_size = enroll.conf.eid_size;
- config.has_max_ttl = true;
- config.max_ttl = enroll.conf.max_ttl;
- config.has_addr_auth_type = true;
- config.addr_auth_type = enroll.conf.addr_auth_type;
- config.has_routing_type = true;
- config.routing_type = enroll.conf.routing_type;
- config.layer_info = &layer_info;
-
- layer_info.layer_name = (char *) enroll.conf.layer_info.layer_name;
- layer_info.dir_hash_algo = enroll.conf.layer_info.dir_hash_algo;
-
- len = enroll_msg__get_packed_size(&msg);
-
- *buf = malloc(len);
- if (*buf == NULL)
- return -1;
-
- enroll_msg__pack(&msg, *buf);
-
- return len;
-}
-
-static void * enroll_handle(void * o)
-{
- struct conn conn;
- uint8_t buf[ENROLL_BUF_LEN];
- uint8_t * reply;
- ssize_t len;
- enroll_msg_t * msg;
-
- (void) o;
-
- while (true) {
- if (connmgr_wait(COMPID_ENROLL, &conn)) {
- log_err("Failed to get next connection.");
- continue;
- }
-
- len = flow_read(conn.flow_info.fd, buf, ENROLL_BUF_LEN);
- if (len < 0) {
- log_err("Failed to read from flow.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- msg = enroll_msg__unpack(NULL, len, buf);
- if (msg == NULL) {
- log_err("Failed to unpack message.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- if (msg->code != ENROLL_CODE__ENROLL_REQ) {
- log_err("Wrong message type.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- enroll_msg__free_unpacked(msg, NULL);
- continue;
- }
-
- log_dbg("Enrolling a new neighbor.");
-
- enroll_msg__free_unpacked(msg, NULL);
-
- len = enroll_pack(&reply);
- if (reply == NULL) {
- log_err("Failed to pack enrollment message.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- log_dbg("Sending enrollment info (%zd bytes).", len);
-
- if (flow_write(conn.flow_info.fd, reply, len) < 0) {
- log_err("Failed respond to enrollment request.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- free(reply);
- continue;
- }
-
- free(reply);
-
- len = flow_read(conn.flow_info.fd, buf, ENROLL_BUF_LEN);
- if (len < 0) {
- log_err("Failed to read from flow.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- msg = enroll_msg__unpack(NULL, len, buf);
- if (msg == NULL) {
- log_err("Failed to unpack message.");
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- if (msg->code != ENROLL_CODE__ENROLL_DONE || !msg->has_result) {
- log_err("Wrong message type.");
- enroll_msg__free_unpacked(msg, NULL);
- connmgr_dealloc(COMPID_ENROLL, &conn);
- continue;
- }
-
- if (msg->result == 0)
- log_dbg("Neighbor enrollment successful.");
- else
- log_dbg("Neigbor reported failed enrollment.");
-
- enroll_msg__free_unpacked(msg, NULL);
-
- connmgr_dealloc(COMPID_ENROLL, &conn);
- }
-
- return 0;
-}
-
-int enroll_boot(struct conn * conn)
-{
- log_dbg("Getting boot information.");
-
- if (send_rcv_enroll_msg(conn->flow_info.fd)) {
- log_err("Failed to enroll.");
- return -1;
- }
-
- return 0;
-}
-
-int enroll_done(struct conn * conn,
- int result)
-{
- enroll_msg_t msg = ENROLL_MSG__INIT;
- uint8_t buf[ENROLL_BUF_LEN];
- ssize_t len;
-
- msg.code = ENROLL_CODE__ENROLL_DONE;
- msg.has_result = true;
- msg.result = result;
-
- len = enroll_msg__get_packed_size(&msg);
- if (len < 0) {
- log_dbg("Failed pack request message.");
- return -1;
- }
-
- enroll_msg__pack(&msg, buf);
-
- if (flow_write(conn->flow_info.fd, buf, len) < 0) {
- log_dbg("Failed to send acknowledgment.");
- return -1;
- }
-
- return 0;
-}
-
-void enroll_bootstrap(const struct ipcp_config * conf)
-{
- assert(conf);
-
- memcpy(&enroll.conf, conf, sizeof(enroll.conf));
-}
-
-struct ipcp_config * enroll_get_conf(void)
-{
- return &enroll.conf;
-}
-
-int enroll_init(void)
-{
- struct conn_info info;
-
- memset(&info, 0, sizeof(info));
-
- strcpy(info.comp_name, ENROLL_COMP);
- strcpy(info.protocol, ENROLL_PROTO);
- info.pref_version = 1;
- info.pref_syntax = PROTO_GPB;
- info.addr = 0;
-
- if (connmgr_comp_init(COMPID_ENROLL, &info)) {
- log_err("Failed to register with connmgr.");
- return -1;
- }
-
- enroll.state = ENROLL_INIT;
-
- return 0;
-}
-
-void enroll_fini(void)
-{
- if (enroll.state == ENROLL_RUNNING)
- pthread_join(enroll.listener, NULL);
-
- connmgr_comp_fini(COMPID_ENROLL);
-}
-
-int enroll_start(void)
-{
- if (pthread_create(&enroll.listener, NULL, enroll_handle, NULL))
- return -1;
-
- enroll.state = ENROLL_RUNNING;
-
- return 0;
-}
-
-void enroll_stop(void)
-{
- if (enroll.state == ENROLL_RUNNING)
- pthread_cancel(enroll.listener);
-}
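The deleted file still documents the three-step exchange that its replacement keeps: the joiner sends ENROLL_REQ, the listener answers ENROLL_BOOT with the layer configuration plus a timestamp (a clock offset beyond roughly 20 ms only draws a warning), and the joiner closes with ENROLL_DONE carrying a result code. A compilable sketch with stubs in place of the flow I/O and protobuf packing:

enum oep_code { OEP_REQ, OEP_BOOT, OEP_DONE };        /* stand-ins */

/* Stubs standing in for flow_write()/flow_read() + message packing. */
static int send_code(int fd, enum oep_code c) { (void) fd; (void) c; return 0; }
static enum oep_code recv_code(int fd)        { (void) fd; return OEP_BOOT; }
static void warn_if_clock_offset(void)        { /* > ~20 ms: warn only */ }

static int enroll_as_joiner(int fd)
{
        if (send_code(fd, OEP_REQ) < 0)       /* step 1: request   */
                return -1;

        if (recv_code(fd) != OEP_BOOT)        /* step 2: boot info */
                return -1;

        warn_if_clock_offset();

        return send_code(fd, OEP_DONE);       /* step 3: confirm   */
}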
diff --git a/src/ipcpd/unicast/fa.c b/src/ipcpd/unicast/fa.c
index e0727e85..23676c23 100644
--- a/src/ipcpd/unicast/fa.c
+++ b/src/ipcpd/unicast/fa.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Flow allocator of the IPC Process
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,43 +31,58 @@
#define FA "flow-allocator"
#define OUROBOROS_PREFIX FA
+#include <ouroboros/endian.h>
#include <ouroboros/logs.h>
#include <ouroboros/fqueue.h>
#include <ouroboros/errno.h>
#include <ouroboros/dev.h>
#include <ouroboros/ipcp-dev.h>
+#include <ouroboros/rib.h>
+#include <ouroboros/random.h>
+#include <ouroboros/pthread.h>
+#include "addr-auth.h"
#include "dir.h"
#include "fa.h"
#include "psched.h"
#include "ipcp.h"
#include "dt.h"
+#include "ca.h"
-#include <pthread.h>
+#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
-#define TIMEOUT 10000 /* nanoseconds */
+#if defined (IPCP_FLOW_STATS) && !defined(CLOCK_REALTIME_COARSE)
+#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
+#endif
+
+#define TIMEOUT 10 * MILLION /* nanoseconds */
+
+#define FLOW_REQ 0
+#define FLOW_REPLY 1
+#define FLOW_UPDATE 2
+#define MSGBUFSZ 2048
-#define FLOW_REQ 0
-#define FLOW_REPLY 1
-#define MSGBUFSZ 2048
+#define STAT_FILE_LEN 0
struct fa_msg {
uint64_t s_addr;
- uint32_t r_eid;
- uint32_t s_eid;
+ uint64_t r_eid;
+ uint64_t s_eid;
uint8_t code;
int8_t response;
+ uint16_t ece;
/* QoS parameters from spec, aligned */
- uint8_t availability;
- uint8_t in_order;
uint32_t delay;
uint64_t bandwidth;
uint32_t loss;
uint32_t ber;
uint32_t max_gap;
+ uint32_t timeout;
uint16_t cypher_s;
+ uint8_t availability;
+ uint8_t in_order;
} __attribute__((packed));
struct cmd {
@@ -75,11 +90,33 @@ struct cmd {
struct shm_du_buff * sdb;
};
+struct fa_flow {
+#ifdef IPCP_FLOW_STATS
+ time_t stamp; /* Flow creation */
+ size_t p_snd; /* Packets sent */
+ size_t p_snd_f; /* Packets sent fail */
+ size_t b_snd; /* Bytes sent */
+ size_t b_snd_f; /* Bytes sent fail */
+ size_t p_rcv; /* Packets received */
+ size_t p_rcv_f; /* Packets received fail */
+ size_t b_rcv; /* Bytes received */
+ size_t b_rcv_f; /* Bytes received fail */
+ size_t u_snd; /* Flow updates sent */
+ size_t u_rcv; /* Flow updates received */
+#endif
+ uint64_t s_eid; /* Local endpoint id */
+ uint64_t r_eid; /* Remote endpoint id */
+ uint64_t r_addr; /* Remote address */
+ void * ctx; /* Congestion avoidance context */
+};
+
struct {
pthread_rwlock_t flows_lock;
- int r_eid[PROG_MAX_FLOWS];
- uint64_t r_addr[PROG_MAX_FLOWS];
- int fd;
+ struct fa_flow flows[PROG_MAX_FLOWS];
+#ifdef IPCP_FLOW_STATS
+ size_t n_flows;
+#endif
+ uint32_t eid;
struct list_head cmds;
pthread_cond_t cond;
@@ -89,26 +126,290 @@ struct {
struct psched * psched;
} fa;
+static int fa_rib_read(const char * path,
+ char * buf,
+ size_t len)
+{
+#ifdef IPCP_FLOW_STATS
+ struct fa_flow * flow;
+ int fd;
+ char r_addrstr[21];
+ char s_eidstr[21];
+ char r_eidstr[21];
+ char tmstr[RIB_TM_STRLEN];
+ char castr[1024];
+ char * entry;
+ struct tm * tm;
+
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+ assert(entry);
+
+ fd = atoi(entry);
+
+ if (fd < 0 || fd >= PROG_MAX_FLOWS)
+ return -1;
+
+ if (len < 1536)
+ return 0;
+
+ flow = &fa.flows[fd];
+
+ buf[0] = '\0';
+
+ pthread_rwlock_rdlock(&fa.flows_lock);
+
+ if (flow->stamp == 0) {
+ pthread_rwlock_unlock(&fa.flows_lock);
+ return 0;
+ }
+
+ sprintf(r_addrstr, "%" PRIu64, flow->r_addr);
+ sprintf(s_eidstr, "%" PRIu64, flow->s_eid);
+ sprintf(r_eidstr, "%" PRIu64, flow->r_eid);
+
+ tm = gmtime(&flow->stamp);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+ ca_print_stats(flow->ctx, castr, 1024);
+
+ sprintf(buf,
+ "Flow established at: %20s\n"
+ "Remote address: %20s\n"
+ "Local endpoint ID: %20s\n"
+ "Remote endpoint ID: %20s\n"
+ "Sent (packets): %20zu\n"
+ "Sent (bytes): %20zu\n"
+ "Send failed (packets): %20zu\n"
+ "Send failed (bytes): %20zu\n"
+ "Received (packets): %20zu\n"
+ "Received (bytes): %20zu\n"
+ "Receive failed (packets): %20zu\n"
+ "Receive failed (bytes): %20zu\n"
+ "Sent flow updates (packets): %20zu\n"
+ "Received flow updates (packets): %20zu\n"
+ "%s",
+ tmstr, r_addrstr,
+ s_eidstr, r_eidstr,
+ flow->p_snd, flow->b_snd,
+ flow->p_snd_f, flow->b_snd_f,
+ flow->p_rcv, flow->b_rcv,
+ flow->p_rcv_f, flow->b_rcv_f,
+ flow->u_snd, flow->u_rcv,
+ castr);
+
+ pthread_rwlock_unlock(&fa.flows_lock);
+
+ return strlen(buf);
+#else
+ (void) path;
+ (void) buf;
+ (void) len;
+ return 0;
+#endif
+}
+
+static int fa_rib_readdir(char *** buf)
+{
+#ifdef IPCP_FLOW_STATS
+ char entry[RIB_PATH_LEN + 1];
+ size_t i;
+ int idx = 0;
+
+ pthread_rwlock_rdlock(&fa.flows_lock);
+
+ if (fa.n_flows < 1) {
+ *buf = NULL;
+ goto no_flows;
+ }
+
+ *buf = malloc(sizeof(**buf) * fa.n_flows);
+ if (*buf == NULL)
+ goto fail_entries;
+
+ for (i = 0; i < PROG_MAX_FLOWS; ++i) {
+ struct fa_flow * flow;
+
+ flow = &fa.flows[i];
+ if (flow->stamp == 0)
+ continue;
+
+ sprintf(entry, "%zu", i);
+
+ (*buf)[idx] = malloc(strlen(entry) + 1);
+ if ((*buf)[idx] == NULL)
+ goto fail_entry;
+
+ strcpy((*buf)[idx++], entry);
+ }
+
+ assert((size_t) idx == fa.n_flows);
+ no_flows:
+ pthread_rwlock_unlock(&fa.flows_lock);
+
+ return idx;
+
+ fail_entry:
+ while (idx-- > 0)
+ free((*buf)[idx]);
+ free(*buf);
+ fail_entries:
+ pthread_rwlock_unlock(&fa.flows_lock);
+ return -ENOMEM;
+#else
+ (void) buf;
+ return 0;
+#endif
+}
+
+static int fa_rib_getattr(const char * path,
+ struct rib_attr * attr)
+{
+#ifdef IPCP_FLOW_STATS
+ int fd;
+ char * entry;
+ struct fa_flow * flow;
+
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+ assert(entry);
+
+ fd = atoi(entry);
+
+ flow = &fa.flows[fd];
+
+ pthread_rwlock_rdlock(&fa.flows_lock);
+
+ if (flow->stamp != 0) {
+ attr->size = 1536;
+ attr->mtime = flow->stamp;
+ } else {
+ attr->size = 0;
+ attr->mtime = 0;
+ }
+
+ pthread_rwlock_unlock(&fa.flows_lock);
+#else
+ (void) path;
+ (void) attr;
+#endif
+ return 0;
+}
+
+static struct rib_ops r_ops = {
+ .read = fa_rib_read,
+ .readdir = fa_rib_readdir,
+ .getattr = fa_rib_getattr
+};
+
+static int eid_to_fd(uint64_t eid)
+{
+ struct fa_flow * flow;
+ int fd;
+
+ fd = eid & 0xFFFFFFFF;
+
+ if (fd < 0 || fd >= PROG_MAX_FLOWS)
+ return -1;
+
+ flow = &fa.flows[fd];
+
+ if (flow->s_eid == eid)
+ return fd;
+
+ return -1;
+}
+
+static uint64_t gen_eid(int fd)
+{
+ uint32_t rnd;
+
+ if (random_buffer(&rnd, sizeof(rnd)) < 0)
+ return fa.eid; /* INVALID */
+
+ fd &= 0xFFFFFFFF;
+
+ return ((uint64_t) rnd << 32) + fd;
+}
+
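The new EIDs put the local fd in the low 32 bits and a random nonce in the high 32 bits, so eid_to_fd() can recover the fd with a mask and reject packets whose nonce no longer matches flow->s_eid. A standalone round-trip check, with rand() standing in for random_buffer():

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t gen_eid_sketch(int fd)
{
        uint32_t rnd = (uint32_t) rand();   /* random_buffer() in-tree */

        return ((uint64_t) rnd << 32) | (uint32_t) fd;
}

int main(void)
{
        uint64_t eid = gen_eid_sketch(42);

        assert((int) (eid & 0xFFFFFFFF) == 42); /* fd recoverable */

        return 0;
}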
static void packet_handler(int fd,
qoscube_t qc,
struct shm_du_buff * sdb)
{
- pthread_rwlock_rdlock(&fa.flows_lock);
+ struct fa_flow * flow;
+ uint64_t r_addr;
+ uint64_t r_eid;
+ ca_wnd_t wnd;
+ size_t len;
- if (dt_write_packet(fa.r_addr[fd], qc, fa.r_eid[fd], sdb)) {
- pthread_rwlock_unlock(&fa.flows_lock);
+ flow = &fa.flows[fd];
+
+ pthread_rwlock_wrlock(&fa.flows_lock);
+
+ len = shm_du_buff_len(sdb);
+
+#ifdef IPCP_FLOW_STATS
+ ++flow->p_snd;
+ flow->b_snd += len;
+#endif
+ wnd = ca_ctx_update_snd(flow->ctx, len);
+
+ r_addr = flow->r_addr;
+ r_eid = flow->r_eid;
+
+ pthread_rwlock_unlock(&fa.flows_lock);
+
+ ca_wnd_wait(wnd);
+
+ if (dt_write_packet(r_addr, qc, r_eid, sdb)) {
ipcp_sdb_release(sdb);
- log_warn("Failed to forward packet.");
+ log_dbg("Failed to forward packet.");
+#ifdef IPCP_FLOW_STATS
+ pthread_rwlock_wrlock(&fa.flows_lock);
+ ++flow->p_snd_f;
+ flow->b_snd_f += len;
+ pthread_rwlock_unlock(&fa.flows_lock);
+#endif
return;
}
+}
- pthread_rwlock_unlock(&fa.flows_lock);
+static int fa_flow_init(struct fa_flow * flow)
+{
+#ifdef IPCP_FLOW_STATS
+ struct timespec now;
+#endif
+ memset(flow, 0, sizeof(*flow));
+
+ flow->r_eid = -1;
+ flow->s_eid = -1;
+ flow->r_addr = INVALID_ADDR;
+
+ flow->ctx = ca_ctx_create();
+ if (flow->ctx == NULL)
+ return -1;
+
+#ifdef IPCP_FLOW_STATS
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+ flow->stamp = now.tv_sec;
+
+ ++fa.n_flows;
+#endif
+ return 0;
}
-static void destroy_conn(int fd)
+static void fa_flow_fini(struct fa_flow * flow)
{
- fa.r_eid[fd] = -1;
- fa.r_addr[fd] = INVALID_ADDR;
+ ca_ctx_destroy(flow->ctx);
+
+ memset(flow, 0, sizeof(*flow));
+
+ flow->r_eid = -1;
+ flow->s_eid = -1;
+ flow->r_addr = INVALID_ADDR;
+
+#ifdef IPCP_FLOW_STATS
+ --fa.n_flows;
+#endif
}
static void fa_post_packet(void * comp,
@@ -138,146 +439,200 @@ static void fa_post_packet(void * comp,
pthread_mutex_unlock(&fa.mtx);
}
-static void * fa_handle_packet(void * o)
+static size_t fa_wait_for_fa_msg(struct fa_msg * msg)
{
- struct timespec ts = {0, TIMEOUT * 1000};
+ struct cmd * cmd;
+ size_t len;
- (void) o;
+ pthread_mutex_lock(&fa.mtx);
- while (true) {
- struct timespec abstime;
- int fd;
- uint8_t buf[MSGBUFSZ];
- struct fa_msg * msg;
- qosspec_t qs;
- struct cmd * cmd;
- size_t len;
- size_t msg_len;
+ pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx);
- pthread_mutex_lock(&fa.mtx);
+ while (list_is_empty(&fa.cmds))
+ pthread_cond_wait(&fa.cond, &fa.mtx);
- pthread_cleanup_push((void (*)(void *)) pthread_mutex_unlock,
- &fa.mtx);
+ cmd = list_last_entry(&fa.cmds, struct cmd, next);
+ list_del(&cmd->next);
- while (list_is_empty(&fa.cmds))
- pthread_cond_wait(&fa.cond, &fa.mtx);
+ pthread_cleanup_pop(true);
- cmd = list_last_entry(&fa.cmds, struct cmd, next);
- list_del(&cmd->next);
+ len = shm_du_buff_len(cmd->sdb);
+ if (len > MSGBUFSZ || len < sizeof(*msg)) {
+ log_warn("Invalid flow allocation message (len: %zd).", len);
+ free(cmd);
+ return 0; /* No valid message */
+ }
- pthread_cleanup_pop(true);
+ memcpy(msg, shm_du_buff_head(cmd->sdb), len);
- len = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb);
+ ipcp_sdb_release(cmd->sdb);
- if (len > MSGBUFSZ) {
- log_err("Message over buffer size.");
- free(cmd);
- continue;
- }
+ free(cmd);
- msg = (struct fa_msg *) buf;
+ return len;
+}
- /* Depending on the message call the function in ipcp-dev.h */
+static int fa_handle_flow_req(struct fa_msg * msg,
+ size_t len)
+{
+ size_t msg_len;
+ int fd;
+ qosspec_t qs;
+ struct fa_flow * flow;
+ uint8_t * dst;
+ buffer_t data; /* Piggybacked data on flow alloc request. */
+
+ msg_len = sizeof(*msg) + ipcp_dir_hash_len();
+ if (len < msg_len) {
+ log_err("Invalid flow allocation request");
+ return -EPERM;
+ }
- memcpy(msg, shm_du_buff_head(cmd->sdb), len);
+ dst = (uint8_t *)(msg + 1);
+ data.data = (uint8_t *) msg + msg_len;
+ data.len = len - msg_len;
- ipcp_sdb_release(cmd->sdb);
+ qs.delay = ntoh32(msg->delay);
+ qs.bandwidth = ntoh64(msg->bandwidth);
+ qs.availability = msg->availability;
+ qs.loss = ntoh32(msg->loss);
+ qs.ber = ntoh32(msg->ber);
+ qs.in_order = msg->in_order;
+ qs.max_gap = ntoh32(msg->max_gap);
+ qs.cypher_s = ntoh16(msg->cypher_s);
+ qs.timeout = ntoh32(msg->timeout);
- free(cmd);
+ fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_UNICAST_MPL, &data);
+ if (fd < 0)
+ return fd;
- switch (msg->code) {
- case FLOW_REQ:
- msg_len = sizeof(*msg) + ipcp_dir_hash_len();
+ flow = &fa.flows[fd];
+
+ pthread_rwlock_wrlock(&fa.flows_lock);
+
+ fa_flow_init(flow);
+
+ flow->s_eid = gen_eid(fd);
+ flow->r_eid = ntoh64(msg->s_eid);
+ flow->r_addr = ntoh64(msg->s_addr);
+
+ pthread_rwlock_unlock(&fa.flows_lock);
+
+ return fd;
+}
+
+static int fa_handle_flow_reply(struct fa_msg * msg,
+ size_t len)
+{
+ int fd;
+ struct fa_flow * flow;
+ buffer_t data; /* Piggybacked data on flow alloc reply. */
+ time_t mpl = IPCP_UNICAST_MPL;
- assert(len >= msg_len);
+ assert(len >= sizeof(*msg));
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+ data.data = (uint8_t *) msg + sizeof(*msg);
+ data.len = len - sizeof(*msg);
- pthread_mutex_lock(&ipcpi.alloc_lock);
+ pthread_rwlock_wrlock(&fa.flows_lock);
- while (ipcpi.alloc_id != -1 &&
- ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
- }
+ fd = eid_to_fd(ntoh64(msg->r_eid));
+ if (fd < 0) {
+ pthread_rwlock_unlock(&fa.flows_lock);
+ log_err("Flow reply for unknown EID %" PRIu64 ".",
+ ntoh64(msg->r_eid));
+ return -ENOTALLOC;
+ }
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- log_dbg("Won't allocate over non-operational"
- "IPCP.");
- continue;
- }
+ flow = &fa.flows[fd];
- assert(ipcpi.alloc_id == -1);
+ flow->r_eid = ntoh64(msg->s_eid);
- qs.delay = ntoh32(msg->delay);
- qs.bandwidth = ntoh64(msg->bandwidth);
- qs.availability = msg->availability;
- qs.loss = ntoh32(msg->loss);
- qs.ber = ntoh32(msg->ber);
- qs.in_order = msg->in_order;
- qs.max_gap = ntoh32(msg->max_gap);
- qs.cypher_s = ntoh16(msg->cypher_s);
+ if (msg->response < 0)
+ fa_flow_fini(flow);
+ else
+ psched_add(fa.psched, fd);
- fd = ipcp_flow_req_arr((uint8_t *) (msg + 1),
- ipcp_dir_hash_len(),
- qs,
- buf + msg_len,
- len - msg_len);
- if (fd < 0) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- log_err("Failed to get fd for flow.");
- continue;
- }
+ pthread_rwlock_unlock(&fa.flows_lock);
- pthread_rwlock_wrlock(&fa.flows_lock);
+ if (ipcp_flow_alloc_reply(fd, msg->response, mpl, &data) < 0) {
+ log_err("Failed to reply for flow allocation on fd %d.", fd);
+ return -EIRMD;
+ }
- fa.r_eid[fd] = ntoh32(msg->s_eid);
- fa.r_addr[fd] = ntoh64(msg->s_addr);
+ return 0;
+}
- pthread_rwlock_unlock(&fa.flows_lock);
+static int fa_handle_flow_update(struct fa_msg * msg,
+ size_t len)
+{
+ struct fa_flow * flow;
+ int fd;
- ipcpi.alloc_id = fd;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
+ (void) len;
+ assert(len >= sizeof(*msg));
- pthread_mutex_unlock(&ipcpi.alloc_lock);
+ pthread_rwlock_wrlock(&fa.flows_lock);
- break;
- case FLOW_REPLY:
- assert(len >= sizeof(*msg));
+ fd = eid_to_fd(ntoh64(msg->r_eid));
+ if (fd < 0) {
+ pthread_rwlock_unlock(&fa.flows_lock);
+ log_err("Flow update for unknown EID %" PRIu64 ".",
+ ntoh64(msg->r_eid));
+ return -EPERM;
+ }
- pthread_rwlock_wrlock(&fa.flows_lock);
+ flow = &fa.flows[fd];
+#ifdef IPCP_FLOW_STATS
+ flow->u_rcv++;
+#endif
+ ca_ctx_update_ece(flow->ctx, ntoh16(msg->ece));
- fa.r_eid[ntoh32(msg->r_eid)] = ntoh32(msg->s_eid);
+ pthread_rwlock_unlock(&fa.flows_lock);
- ipcp_flow_alloc_reply(ntoh32(msg->r_eid),
- msg->response,
- buf + sizeof(*msg),
- len - sizeof(*msg));
+ return 0;
+}
- if (msg->response < 0)
- destroy_conn(ntoh32(msg->r_eid));
- else
- psched_add(fa.psched, ntoh32(msg->r_eid));
+static void * fa_handle_packet(void * o)
+{
+ (void) o;
- pthread_rwlock_unlock(&fa.flows_lock);
+ while (true) {
+ uint8_t buf[MSGBUFSZ];
+ struct fa_msg * msg;
+ size_t len;
+ msg = (struct fa_msg *) buf;
+
+ len = fa_wait_for_fa_msg(msg);
+ if (len == 0)
+ continue;
+
+ switch (msg->code) {
+ case FLOW_REQ:
+ if (fa_handle_flow_req(msg, len) < 0)
+ log_err("Error handling flow alloc request.");
+ break;
+ case FLOW_REPLY:
+ if (fa_handle_flow_reply(msg, len) < 0)
+ log_err("Error handling flow reply.");
+ break;
+ case FLOW_UPDATE:
+ if (fa_handle_flow_update(msg, len) < 0)
+ log_err("Error handling flow update.");
break;
default:
- log_err("Got an unknown flow allocation message.");
+ log_warn("Recieved unknown flow allocation message.");
break;
}
}
+
+ return (void *) 0;
}
int fa_init(void)
{
- int i;
-
- for (i = 0; i < PROG_MAX_FLOWS; ++i)
- destroy_conn(i);
+ pthread_condattr_t cattr;
if (pthread_rwlock_init(&fa.flows_lock, NULL))
goto fail_rwlock;
@@ -285,26 +640,47 @@ int fa_init(void)
if (pthread_mutex_init(&fa.mtx, NULL))
goto fail_mtx;
- if (pthread_cond_init(&fa.cond, NULL))
+ if (pthread_condattr_init(&cattr))
+ goto fail_cattr;
+
+#ifndef __APPLE__
+ pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
+#endif
+ if (pthread_cond_init(&fa.cond, &cattr))
goto fail_cond;
+ if (rib_reg(FA, &r_ops))
+ goto fail_rib_reg;
+
+ fa.eid = dt_reg_comp(&fa, &fa_post_packet, FA);
+ if ((int) fa.eid < 0)
+ goto fail_dt_reg;
+
list_head_init(&fa.cmds);
- fa.fd = dt_reg_comp(&fa, &fa_post_packet, FA);
+ pthread_condattr_destroy(&cattr);
return 0;
+ fail_dt_reg:
+ rib_unreg(FA);
+ fail_rib_reg:
+ pthread_cond_destroy(&fa.cond);
fail_cond:
+ pthread_condattr_destroy(&cattr);
+ fail_cattr:
pthread_mutex_destroy(&fa.mtx);
fail_mtx:
pthread_rwlock_destroy(&fa.flows_lock);
fail_rwlock:
- log_err("Failed to initialize flow allocator.");
+
return -1;
}
void fa_fini(void)
{
+ rib_unreg(FA);
+
pthread_cond_destroy(&fa.cond);;
pthread_mutex_destroy(&fa.mtx);
pthread_rwlock_destroy(&fa.flows_lock);
@@ -316,7 +692,7 @@ int fa_start(void)
int pol;
int max;
- fa.psched = psched_create(packet_handler);
+ fa.psched = psched_create(packet_handler, np1_flow_read);
if (fa.psched == NULL) {
log_err("Failed to start packet scheduler.");
goto fail_psched;
@@ -353,7 +729,6 @@ int fa_start(void)
fail_thread:
psched_destroy(fa.psched);
fail_psched:
- log_err("Failed to start flow allocator.");
return -1;
}
@@ -365,17 +740,18 @@ void fa_stop(void)
psched_destroy(fa.psched);
}
-int fa_alloc(int fd,
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t dlen)
+int fa_alloc(int fd,
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data)
{
struct fa_msg * msg;
- uint64_t addr;
struct shm_du_buff * sdb;
- qoscube_t qc;
+ struct fa_flow * flow;
+ uint64_t addr;
+ qoscube_t qc = QOS_CUBE_BE;
size_t len;
+ uint64_t eid;
addr = dir_query(dst);
if (addr == 0)
@@ -383,13 +759,17 @@ int fa_alloc(int fd,
len = sizeof(*msg) + ipcp_dir_hash_len();
- if (ipcp_sdb_reserve(&sdb, len + dlen))
+ if (ipcp_sdb_reserve(&sdb, len + data->len))
return -1;
- msg = (struct fa_msg *) shm_du_buff_head(sdb);
+ msg = (struct fa_msg *) shm_du_buff_head(sdb);
+ memset(msg, 0, sizeof(*msg));
+
+ eid = gen_eid(fd);
+
msg->code = FLOW_REQ;
- msg->s_eid = hton32(fd);
- msg->s_addr = hton64(ipcpi.dt_addr);
+ msg->s_eid = hton64(eid);
+ msg->s_addr = hton64(addr_auth_address());
msg->delay = hton32(qs.delay);
msg->bandwidth = hton64(qs.bandwidth);
msg->availability = qs.availability;
@@ -398,111 +778,196 @@ int fa_alloc(int fd,
msg->in_order = qs.in_order;
msg->max_gap = hton32(qs.max_gap);
msg->cypher_s = hton16(qs.cypher_s);
+ msg->timeout = hton32(qs.timeout);
memcpy(msg + 1, dst, ipcp_dir_hash_len());
- memcpy(shm_du_buff_head(sdb) + len, data, dlen);
-
- qc = qos_spec_to_cube(qs);
+ if (data->len > 0)
+ memcpy(shm_du_buff_head(sdb) + len, data->data, data->len);
- if (dt_write_packet(addr, qc, fa.fd, sdb)) {
+ if (dt_write_packet(addr, qc, fa.eid, sdb)) {
+ log_err("Failed to send flow allocation request packet.");
ipcp_sdb_release(sdb);
return -1;
}
+ flow = &fa.flows[fd];
+
pthread_rwlock_wrlock(&fa.flows_lock);
- assert(fa.r_eid[fd] == -1);
- fa.r_addr[fd] = addr;
+ fa_flow_init(flow);
+ flow->r_addr = addr;
+ flow->s_eid = eid;
pthread_rwlock_unlock(&fa.flows_lock);
return 0;
}
-int fa_alloc_resp(int fd,
- int response,
- const void * data,
- size_t len)
+int fa_alloc_resp(int fd,
+ int response,
+ const buffer_t * data)
{
- struct timespec ts = {0, TIMEOUT * 1000};
- struct timespec abstime;
struct fa_msg * msg;
struct shm_du_buff * sdb;
- qoscube_t qc;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+ struct fa_flow * flow;
+ qoscube_t qc = QOS_CUBE_BE;
- pthread_mutex_lock(&ipcpi.alloc_lock);
+ flow = &fa.flows[fd];
- while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
- ts_add(&abstime, &ts, &abstime);
- pthread_cond_timedwait(&ipcpi.alloc_cond,
- &ipcpi.alloc_lock,
- &abstime);
+ if (ipcp_wait_flow_resp(fd) < 0) {
+ log_err("Failed to wait for flow response.");
+ goto fail_alloc_resp;
}
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- pthread_mutex_unlock(&ipcpi.alloc_lock);
- return -1;
+ if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + data->len)) {
+ log_err("Failed to reserve sdb (%zu bytes).",
+ sizeof(*msg) + data->len);
+ goto fail_reserve;
}
- ipcpi.alloc_id = -1;
- pthread_cond_broadcast(&ipcpi.alloc_cond);
+ msg = (struct fa_msg *) shm_du_buff_head(sdb);
+ memset(msg, 0, sizeof(*msg));
- pthread_mutex_unlock(&ipcpi.alloc_lock);
+ msg->code = FLOW_REPLY;
+ msg->response = response;
+ if (data->len > 0)
+ memcpy(msg + 1, data->data, data->len);
- if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + len)) {
- destroy_conn(fd);
- return -1;
- }
+ pthread_rwlock_rdlock(&fa.flows_lock);
- pthread_rwlock_wrlock(&fa.flows_lock);
+ msg->r_eid = hton64(flow->r_eid);
+ msg->s_eid = hton64(flow->s_eid);
- msg = (struct fa_msg *) shm_du_buff_head(sdb);
- msg->code = FLOW_REPLY;
- msg->r_eid = hton32(fa.r_eid[fd]);
- msg->s_eid = hton32(fd);
- msg->response = response;
+ pthread_rwlock_unlock(&fa.flows_lock);
- memcpy(msg + 1, data, len);
+ if (dt_write_packet(flow->r_addr, qc, fa.eid, sdb)) {
+ log_err("Failed to send flow allocation response packet.");
+ goto fail_packet;
+ }
if (response < 0) {
- destroy_conn(fd);
- ipcp_sdb_release(sdb);
+ pthread_rwlock_rdlock(&fa.flows_lock);
+ fa_flow_fini(flow);
+ pthread_rwlock_unlock(&fa.flows_lock);
} else {
psched_add(fa.psched, fd);
}
- ipcp_flow_get_qoscube(fd, &qc);
+ return 0;
- assert(qc >= 0 && qc < QOS_CUBE_MAX);
+ fail_packet:
+ ipcp_sdb_release(sdb);
+ fail_reserve:
+ pthread_rwlock_wrlock(&fa.flows_lock);
+ fa_flow_fini(flow);
+ pthread_rwlock_unlock(&fa.flows_lock);
+ fail_alloc_resp:
+ return -1;
+}
- if (dt_write_packet(fa.r_addr[fd], qc, fa.fd, sdb)) {
- destroy_conn(fd);
- pthread_rwlock_unlock(&fa.flows_lock);
- ipcp_sdb_release(sdb);
+int fa_dealloc(int fd)
+{
+ if (ipcp_flow_fini(fd) < 0)
+ return 0;
+
+ psched_del(fa.psched, fd);
+
+ pthread_rwlock_wrlock(&fa.flows_lock);
+
+ fa_flow_fini(&fa.flows[fd]);
+
+ pthread_rwlock_unlock(&fa.flows_lock);
+
+ ipcp_flow_dealloc(fd);
+
+ return 0;
+}
+
+static int fa_update_remote(int fd,
+ uint16_t ece)
+{
+ struct fa_msg * msg;
+ struct shm_du_buff * sdb;
+ qoscube_t qc = QOS_CUBE_BE;
+ struct fa_flow * flow;
+ uint64_t r_addr;
+
+ if (ipcp_sdb_reserve(&sdb, sizeof(*msg))) {
+ log_err("Failed to reserve sdb (%zu bytes).", sizeof(*msg));
return -1;
}
+ msg = (struct fa_msg *) shm_du_buff_head(sdb);
+
+ memset(msg, 0, sizeof(*msg));
+
+ flow = &fa.flows[fd];
+
+ pthread_rwlock_wrlock(&fa.flows_lock);
+
+ msg->code = FLOW_UPDATE;
+ msg->r_eid = hton64(flow->r_eid);
+ msg->ece = hton16(ece);
+
+ r_addr = flow->r_addr;
+#ifdef IPCP_FLOW_STATS
+ flow->u_snd++;
+#endif
pthread_rwlock_unlock(&fa.flows_lock);
+
+ if (dt_write_packet(r_addr, qc, fa.eid, sdb)) {
+ log_err("Failed to send flow update packet.");
+ ipcp_sdb_release(sdb);
+ return -1;
+ }
+
return 0;
}
-int fa_dealloc(int fd)
+void fa_np1_rcv(uint64_t eid,
+ uint8_t ecn,
+ struct shm_du_buff * sdb)
{
- if (ipcp_flow_fini(fd) < 0)
- return 0;
+ struct fa_flow * flow;
+ bool update;
+ uint16_t ece;
+ int fd;
+ size_t len;
+
+ len = shm_du_buff_len(sdb);
pthread_rwlock_wrlock(&fa.flows_lock);
- psched_del(fa.psched, fd);
+ fd = eid_to_fd(eid);
+ if (fd < 0) {
+ pthread_rwlock_unlock(&fa.flows_lock);
+ log_dbg("Received packet for unknown EID %" PRIu64 ".", eid);
+ ipcp_sdb_release(sdb);
+ return;
+ }
+
+ flow = &fa.flows[fd];
- destroy_conn(fd);
+#ifdef IPCP_FLOW_STATS
+ ++flow->p_rcv;
+ flow->b_rcv += len;
+#endif
+ update = ca_ctx_update_rcv(flow->ctx, len, ecn, &ece);
pthread_rwlock_unlock(&fa.flows_lock);
- flow_dealloc(fd);
+ if (ipcp_flow_write(fd, sdb) < 0) {
+ log_dbg("Failed to write to flow %d.", fd);
+ ipcp_sdb_release(sdb);
+#ifdef IPCP_FLOW_STATS
+ pthread_rwlock_wrlock(&fa.flows_lock);
+ ++flow->p_rcv_f;
+ flow->b_rcv_f += len;
+ pthread_rwlock_unlock(&fa.flows_lock);
+#endif
+ }
- return 0;
+ if (update)
+ fa_update_remote(fd, ece);
}
diff --git a/src/ipcpd/unicast/fa.h b/src/ipcpd/unicast/fa.h
index 12a10a0c..1e716966 100644
--- a/src/ipcpd/unicast/fa.h
+++ b/src/ipcpd/unicast/fa.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Flow allocator of the IPC Process
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -34,17 +34,19 @@ int fa_start(void);
void fa_stop(void);
-int fa_alloc(int fd,
- const uint8_t * dst,
- qosspec_t qs,
- const void * data,
- size_t len);
+int fa_alloc(int fd,
+ const uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data);
-int fa_alloc_resp(int fd,
- int response,
- const void * data,
- size_t len);
+int fa_alloc_resp(int fd,
+ int response,
+ const buffer_t * data);
int fa_dealloc(int fd);
+void fa_np1_rcv(uint64_t eid,
+ uint8_t ecn,
+ struct shm_du_buff * sdb);
+
#endif /* OUROBOROS_IPCPD_UNICAST_FA_H */
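The FLOW_UPDATE message added above closes the congestion loop: fa_np1_rcv() feeds packet length and ECN mark into ca_ctx_update_rcv(), and when that returns true the ECE value is echoed to the sender, whose ca_ctx_update_ece() reacts. The in-tree mb-ecn policy is more elaborate; purely as a toy illustration of the reaction, an AIMD window driven by the echoed ECE:

#include <stdint.h>

/* Toy AIMD reaction to an echoed ECE; not the mb-ecn policy. */
static uint64_t cwnd = 64 * 1024;

static void ctx_update_ece_sketch(uint16_t ece)
{
        if (ece > 0)
                cwnd /= 2;                /* multiplicative decrease */
        else if (cwnd < UINT64_MAX - 1024)
                cwnd += 1024;             /* additive increase       */
}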
diff --git a/src/ipcpd/unicast/kademlia.proto b/src/ipcpd/unicast/kademlia.proto
deleted file mode 100644
index 4f807fd3..00000000
--- a/src/ipcpd/unicast/kademlia.proto
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * KAD protocol
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-syntax = "proto2";
-
-message kad_contact_msg {
- required bytes id = 1;
- required uint64 addr = 2;
-};
-
-message kad_msg {
- required uint32 code = 1;
- required uint32 cookie = 2;
- required uint64 s_addr = 3;
- optional bytes s_id = 4;
- optional bytes key = 5;
- repeated uint64 addrs = 6;
- repeated kad_contact_msg contacts = 7;
- // enrolment parameters
- optional uint32 alpha = 8;
- optional uint32 b = 9;
- optional uint32 k = 10;
- optional uint32 t_expire = 11;
- optional uint32 t_refresh = 12;
- optional uint32 t_replicate = 13;
-}; \ No newline at end of file
diff --git a/src/ipcpd/unicast/main.c b/src/ipcpd/unicast/main.c
index 43052209..c2348242 100644
--- a/src/ipcpd/unicast/main.c
+++ b/src/ipcpd/unicast/main.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Unicast IPC Process
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -29,20 +29,22 @@
#include "config.h"
#define OUROBOROS_PREFIX "unicast-ipcp"
+#define THIS_TYPE IPCP_UNICAST
#include <ouroboros/errno.h>
-#include <ouroboros/hash.h>
#include <ouroboros/ipcp-dev.h>
#include <ouroboros/logs.h>
#include <ouroboros/notifier.h>
+#include <ouroboros/random.h>
#include <ouroboros/rib.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>
-#include "addr_auth.h"
-#include "connmgr.h"
+#include "common/connmgr.h"
+#include "common/enroll.h"
+#include "addr-auth.h"
+#include "ca.h"
#include "dir.h"
#include "dt.h"
-#include "enroll.h"
#include "fa.h"
#include "ipcp.h"
@@ -53,117 +55,114 @@
#include <assert.h>
#include <inttypes.h>
-#define THIS_TYPE IPCP_UNICAST
-
-static int initialize_components(const struct ipcp_config * conf)
+static int initialize_components(struct ipcp_config * conf)
{
- ipcpi.layer_name = strdup(conf->layer_info.layer_name);
- if (ipcpi.layer_name == NULL) {
- log_err("Failed to set layer name.");
- goto fail_layer_name;
- }
-
- ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo;
-
assert(ipcp_dir_hash_len() != 0);
- if (addr_auth_init(conf->addr_auth_type,
- &conf->addr_size)) {
+ if (addr_auth_init(conf->unicast.addr_auth_type,
+ &conf->unicast.dt.addr_size)) {
log_err("Failed to init address authority.");
goto fail_addr_auth;
}
- ipcpi.dt_addr = addr_auth_address();
- if (ipcpi.dt_addr == 0) {
- log_err("Failed to get a valid address.");
- goto fail_addr_auth;
- }
+ log_info("IPCP got address %" PRIu64 ".", addr_auth_address());
- log_dbg("IPCP got address %" PRIu64 ".", ipcpi.dt_addr);
+ if (ca_init(conf->unicast.cong_avoid)) {
+ log_err("Failed to initialize congestion avoidance.");
+ goto fail_ca;
+ }
- if (dt_init(conf->routing_type,
- conf->addr_size,
- conf->eid_size,
- conf->max_ttl)) {
+ if (dt_init(conf->unicast.dt)) {
log_err("Failed to initialize data transfer component.");
goto fail_dt;
}
- if (fa_init()) {
- log_err("Failed to initialize flow allocator component.");
- goto fail_fa;
- }
+ ipcp_set_dir_hash_algo((enum hash_algo) conf->layer_info.dir_hash_algo);
- if (dir_init()) {
+ if (dir_init(&conf->unicast.dir)) {
log_err("Failed to initialize directory.");
goto fail_dir;
}
+ if (fa_init()) {
+ log_err("Failed to initialize flow allocator component.");
+ goto fail_fa;
+ }
+
ipcp_set_state(IPCP_INIT);
return 0;
- fail_dir:
- fa_fini();
fail_fa:
+ dir_fini();
+ fail_dir:
dt_fini();
fail_dt:
+ ca_fini();
+ fail_ca:
addr_auth_fini();
fail_addr_auth:
- free(ipcpi.layer_name);
- fail_layer_name:
return -1;
}
static void finalize_components(void)
{
- dir_fini();
-
fa_fini();
+ dir_fini();
+
dt_fini();
- addr_auth_fini();
+ ca_fini();
- free(ipcpi.layer_name);
+ addr_auth_fini();
}
static int start_components(void)
{
- assert(ipcp_get_state() == IPCP_INIT);
-
- ipcp_set_state(IPCP_OPERATIONAL);
+ if (dt_start() < 0) {
+ log_err("Failed to start data transfer.");
+ goto fail_dt_start;
+ }
- if (fa_start()) {
+ if (fa_start() < 0) {
log_err("Failed to start flow allocator.");
goto fail_fa_start;
}
- if (enroll_start()) {
+ if (enroll_start() < 0) {
log_err("Failed to start enrollment.");
goto fail_enroll_start;
}
- if (connmgr_start()) {
+ if (connmgr_start() < 0) {
log_err("Failed to start AP connection manager.");
goto fail_connmgr_start;
}
+ if (dir_start() < 0) {
+ log_err("Failed to start directory.");
+ goto fail_dir_start;
+ }
+
return 0;
+ fail_dir_start:
+ connmgr_stop();
fail_connmgr_start:
enroll_stop();
fail_enroll_start:
fa_stop();
fail_fa_start:
+ dt_stop();
+ fail_dt_start:
ipcp_set_state(IPCP_INIT);
return -1;
}
static void stop_components(void)
{
- assert(ipcp_get_state() == IPCP_OPERATIONAL ||
- ipcp_get_state() == IPCP_SHUTDOWN);
+ dir_stop();
connmgr_stop();
@@ -171,110 +170,88 @@ static void stop_components(void)
fa_stop();
- ipcp_set_state(IPCP_INIT);
-}
-
-static int bootstrap_components(void)
-{
- if (dir_bootstrap()) {
- log_err("Failed to bootstrap directory.");
- dt_stop();
- return -1;
- }
+ dt_stop();
- return 0;
+ ipcp_set_state(IPCP_INIT);
}
static int unicast_ipcp_enroll(const char * dst,
struct layer_info * info)
{
- struct conn conn;
+ struct ipcp_config * conf;
+ struct conn conn;
+ uint8_t id[ENROLL_ID_LEN];
- if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn)) {
- log_err("Failed to get connection.");
- goto fail_er_flow;
+ if (random_buffer(id, ENROLL_ID_LEN) < 0) {
+ log_err("Failed to generate enrollment ID.");
+ goto fail_id;
}
- /* Get boot state from peer. */
- if (enroll_boot(&conn)) {
- log_err("Failed to get boot information.");
- goto fail_enroll_boot;
+ log_info_id(id, "Requesting enrollment.");
+
+ if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn) < 0) {
+ log_err_id(id, "Failed to get connection.");
+ goto fail_id;
}
- if (initialize_components(enroll_get_conf())) {
- log_err("Failed to initialize IPCP components.");
+ /* Get boot state from peer. */
+ if (enroll_boot(&conn, id) < 0) {
+ log_err_id(id, "Failed to get boot information.");
goto fail_enroll_boot;
}
- if (dt_start()) {
- log_err("Failed to initialize IPCP components.");
- goto fail_dt_start;
+ conf = enroll_get_conf();
+
+ *info = conf->layer_info;
+
+ if (initialize_components(conf) < 0) {
+ log_err_id(id, "Failed to initialize components.");
+ goto fail_enroll_boot;
}
- if (start_components()) {
- log_err("Failed to start components.");
+ if (start_components() < 0) {
+ log_err_id(id, "Failed to start components.");
goto fail_start_comp;
}
- if (enroll_done(&conn, 0))
- log_warn("Failed to confirm enrollment with peer.");
-
- if (connmgr_dealloc(COMPID_ENROLL, &conn))
- log_warn("Failed to deallocate enrollment flow.");
+ if (enroll_ack(&conn, id, 0) < 0)
+ log_err_id(id, "Failed to confirm enrollment.");
- log_info("Enrolled with %s.", dst);
+ if (connmgr_dealloc(COMPID_ENROLL, &conn) < 0)
+ log_warn_id(id, "Failed to dealloc enrollment flow.");
- info->dir_hash_algo = ipcpi.dir_hash_algo;
- strcpy(info->layer_name, ipcpi.layer_name);
+ log_info_id(id, "Enrolled with %s.", dst);
return 0;
fail_start_comp:
- dt_stop();
- fail_dt_start:
finalize_components();
fail_enroll_boot:
connmgr_dealloc(COMPID_ENROLL, &conn);
- fail_er_flow:
+ fail_id:
return -1;
}
-static int unicast_ipcp_bootstrap(const struct ipcp_config * conf)
+static int unicast_ipcp_bootstrap(struct ipcp_config * conf)
{
assert(conf);
assert(conf->type == THIS_TYPE);
- enroll_bootstrap(conf);
-
- if (initialize_components(conf)) {
+ if (initialize_components(conf) < 0) {
log_err("Failed to init IPCP components.");
goto fail_init;
}
- if (dt_start()) {
- log_err("Failed to initialize IPCP components.");
- goto fail_dt_start;
- };
+ enroll_bootstrap(conf);
- if (start_components()) {
+ if (start_components() < 0) {
log_err("Failed to init IPCP components.");
goto fail_start;
}
- if (bootstrap_components()) {
- log_err("Failed to bootstrap IPCP components.");
- goto fail_bootstrap;
- }
-
- log_dbg("Bootstrapped in layer %s.", conf->layer_info.layer_name);
-
return 0;
- fail_bootstrap:
- stop_components();
fail_start:
- dt_stop();
- fail_dt_start:
finalize_components();
fail_init:
return -1;
@@ -302,76 +279,58 @@ static struct ipcp_ops unicast_ops = {
int main(int argc,
char * argv[])
{
- if (ipcp_init(argc, argv, &unicast_ops) < 0) {
+ if (ipcp_init(argc, argv, &unicast_ops, THIS_TYPE) < 0) {
log_err("Failed to init IPCP.");
goto fail_init;
}
- /* These components must be init at creation. */
- if (rib_init(ipcpi.name)) {
- log_err("Failed to initialize RIB.");
- goto fail_rib_init;
- }
-
- if (notifier_init()) {
+ if (notifier_init() < 0) {
log_err("Failed to initialize notifier component.");
goto fail_notifier_init;
}
- if (connmgr_init()) {
+ if (connmgr_init() < 0) {
log_err("Failed to initialize connection manager.");
goto fail_connmgr_init;
}
- if (enroll_init()) {
+ if (enroll_init() < 0) {
log_err("Failed to initialize enrollment component.");
goto fail_enroll_init;
}
- if (ipcp_boot() < 0) {
- log_err("Failed to boot IPCP.");
- goto fail_boot;
- }
-
- if (ipcp_create_r(0)) {
- log_err("Failed to notify IRMd we are initialized.");
- ipcp_set_state(IPCP_NULL);
- goto fail_create_r;
+ if (ipcp_start() < 0) {
+ log_err("Failed to start IPCP.");
+ goto fail_start;
}
- ipcp_shutdown();
+ ipcp_sigwait();
if (ipcp_get_state() == IPCP_SHUTDOWN) {
- dt_stop();
stop_components();
finalize_components();
}
+ ipcp_stop();
+
enroll_fini();
connmgr_fini();
notifier_fini();
- rib_fini();
-
ipcp_fini();
exit(EXIT_SUCCESS);
- fail_create_r:
- ipcp_shutdown();
- fail_boot:
+ fail_start:
enroll_fini();
fail_enroll_init:
connmgr_fini();
fail_connmgr_init:
notifier_fini();
fail_notifier_init:
- rib_fini();
- fail_rib_init:
- ipcp_fini();
+ ipcp_fini();
fail_init:
- ipcp_create_r(-1);
exit(EXIT_FAILURE);
}
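The bootstrap and enroll paths above follow the codebase's goto-ladder convention: every component that initializes successfully gains a matching fail_ label, and the error path unwinds in exact reverse order of setup. Distilled to a standalone pattern with placeholder components a and b:

static int  a_init(void) { return 0; }
static void a_fini(void) { }
static int  b_init(void) { return 0; }
static void b_fini(void) { }

static int components_init(void)
{
        if (a_init() < 0)
                goto fail_a;

        if (b_init() < 0)
                goto fail_b;

        return 0;

 fail_b:
        a_fini();    /* unwind strictly in reverse order */
 fail_a:
        return -1;
}

static void components_fini(void)
{
        b_fini();    /* mirror of init, reversed */
        a_fini();
}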
diff --git a/src/ipcpd/unicast/pff.c b/src/ipcpd/unicast/pff.c
index 19432972..9b2aa2b4 100644
--- a/src/ipcpd/unicast/pff.c
+++ b/src/ipcpd/unicast/pff.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* PDU Forwarding Function
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,14 +26,11 @@
#include <ouroboros/logs.h>
#include "pff.h"
-#include "pol-pff-ops.h"
-#include "pol/alternate_pff.h"
-#include "pol/multipath_pff.h"
-#include "pol/simple_pff.h"
+#include "pff/pol.h"
struct pff {
- struct pol_pff_ops * ops;
- struct pff_i * pff_i;
+ struct pff_ops * ops;
+ struct pff_i * pff_i;
};
struct pff * pff_create(enum pol_pff pol)
@@ -62,8 +59,10 @@ struct pff * pff_create(enum pol_pff pol)
}
pff->pff_i = pff->ops->create();
- if (pff->pff_i == NULL)
+ if (pff->pff_i == NULL) {
+ log_err("Failed to create PFF instance.");
goto err;
+ }
return pff;
err:
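pff_create() dispatches on the policy enum to one of the pff_ops vtables (alternate, multipath, simple) and then instantiates the per-policy state through it. The shape of that dispatch, reduced to a single stub policy; all names here except the pff/pff_ops/pff_i split are hypothetical:

#include <stdlib.h>

struct pff_i { int unused; };                  /* per-policy state   */

struct pff_ops {                               /* shape of pff/ops.h */
        struct pff_i * (* create)(void);
        void           (* destroy)(struct pff_i *);
};

static struct pff_i * stub_create(void)
{
        return calloc(1, sizeof(struct pff_i));
}

static void stub_destroy(struct pff_i * i)
{
        free(i);
}

static struct pff_ops stub_pff_ops = {
        .create  = stub_create,
        .destroy = stub_destroy
};

struct pff_sketch {
        struct pff_ops * ops;
        struct pff_i *   pff_i;
};

struct pff_sketch * pff_create_sketch(void)
{
        struct pff_sketch * pff = malloc(sizeof(*pff));

        if (pff == NULL)
                return NULL;

        pff->ops = &stub_pff_ops;              /* switch (pol) in-tree */

        pff->pff_i = pff->ops->create();
        if (pff->pff_i == NULL) {
                free(pff);
                return NULL;
        }

        return pff;
}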
diff --git a/src/ipcpd/unicast/pff.h b/src/ipcpd/unicast/pff.h
index 962ae594..f44e5531 100644
--- a/src/ipcpd/unicast/pff.h
+++ b/src/ipcpd/unicast/pff.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* PDU Forwarding Function
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/unicast/pol/alternate_pff.c b/src/ipcpd/unicast/pff/alternate.c
index f26bb047..85e85914 100644
--- a/src/ipcpd/unicast/pol/alternate_pff.c
+++ b/src/ipcpd/unicast/pff/alternate.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Policy for PFF with alternate next hops
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -28,7 +28,7 @@
#include <ouroboros/list.h>
#include "pft.h"
-#include "alternate_pff.h"
+#include "alternate.h"
#include <string.h>
#include <assert.h>
@@ -54,7 +54,7 @@ struct pff_i {
pthread_rwlock_t lock;
};
-struct pol_pff_ops alternate_pff_ops = {
+struct pff_ops alternate_pff_ops = {
.create = alternate_pff_create,
.destroy = alternate_pff_destroy,
.lock = alternate_pff_lock,
diff --git a/src/ipcpd/unicast/pol/alternate_pff.h b/src/ipcpd/unicast/pff/alternate.h
index 5e5fca3d..96207e74 100644
--- a/src/ipcpd/unicast/pol/alternate_pff.h
+++ b/src/ipcpd/unicast/pff/alternate.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Policy for PFF with alternate next hops
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,7 +23,7 @@
#ifndef OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H
#define OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H
-#include "pol-pff-ops.h"
+#include "ops.h"
struct pff_i * alternate_pff_create(void);
@@ -56,6 +56,6 @@ int alternate_flow_state_change(struct pff_i * pff_i,
int fd,
bool up);
-struct pol_pff_ops alternate_pff_ops;
+extern struct pff_ops alternate_pff_ops;
#endif /* OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H */
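
The change from `struct pol_pff_ops alternate_pff_ops;` to `extern struct pff_ops alternate_pff_ops;` in this header is a correctness fix, not cosmetics: without extern, every translation unit that includes the header carries a tentative definition of the object. A sketch of the declaration/definition split, with stubs standing in for the real policy functions:

    #include <stddef.h>

    struct pff_i;

    struct pff_ops {
            struct pff_i * (* create)(void);
            void           (* destroy)(struct pff_i * pff_i);
    };

    /* Header side: a declaration only. Without 'extern' every
     * translation unit including the header emits a tentative
     * definition, which fails to link under the -fno-common
     * default of modern compilers (GCC 10+, Clang 11+). */
    extern struct pff_ops alternate_pff_ops;

    /* Source side: exactly one definition, in alternate.c.
     * Stub implementations are used here for illustration. */
    static struct pff_i * create_stub(void) { return NULL; }
    static void destroy_stub(struct pff_i * i) { (void) i; }

    struct pff_ops alternate_pff_ops = {
            .create  = create_stub,
            .destroy = destroy_stub,
    };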
diff --git a/src/ipcpd/unicast/pol/multipath_pff.c b/src/ipcpd/unicast/pff/multipath.c
index 0fe101bc..cbab0f5f 100644
--- a/src/ipcpd/unicast/pol/multipath_pff.c
+++ b/src/ipcpd/unicast/pff/multipath.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Policy for PFF supporting multipath routing
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
* Nick Aerts <nick.aerts@ugent.be>
*
* This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,7 @@
#include <ouroboros/errno.h>
#include "pft.h"
-#include "multipath_pff.h"
+#include "multipath.h"
#include <string.h>
#include <assert.h>
@@ -39,7 +39,7 @@ struct pff_i {
pthread_rwlock_t lock;
};
-struct pol_pff_ops multipath_pff_ops = {
+struct pff_ops multipath_pff_ops = {
.create = multipath_pff_create,
.destroy = multipath_pff_destroy,
.lock = multipath_pff_lock,
@@ -58,21 +58,23 @@ struct pff_i * multipath_pff_create(void)
tmp = malloc(sizeof(*tmp));
if (tmp == NULL)
- return NULL;
+ goto fail_malloc;
- if (pthread_rwlock_init(&tmp->lock, NULL)) {
- free(tmp);
- return NULL;
- }
+ if (pthread_rwlock_init(&tmp->lock, NULL))
+ goto fail_rwlock;
tmp->pft = pft_create(PFT_SIZE, false);
- if (tmp->pft == NULL) {
- pthread_rwlock_destroy(&tmp->lock);
- free(tmp);
- return NULL;
- }
+ if (tmp->pft == NULL)
+ goto fail_pft;
return tmp;
+
+ fail_pft:
+ pthread_rwlock_destroy(&tmp->lock);
+ fail_rwlock:
+ free(tmp);
+ fail_malloc:
+ return NULL;
}
void multipath_pff_destroy(struct pff_i * pff_i)
@@ -80,8 +82,8 @@ void multipath_pff_destroy(struct pff_i * pff_i)
assert(pff_i);
pft_destroy(pff_i->pft);
-
pthread_rwlock_destroy(&pff_i->lock);
+
free(pff_i);
}
@@ -177,7 +179,7 @@ int multipath_pff_nhop(struct pff_i * pff_i,
assert(pff_i);
- pthread_rwlock_rdlock(&pff_i->lock);
+ pthread_rwlock_wrlock(&pff_i->lock);
if (pft_lookup(pff_i->pft, addr, &fds, &len)) {
pthread_rwlock_unlock(&pff_i->lock);
@@ -189,7 +191,7 @@ int multipath_pff_nhop(struct pff_i * pff_i,
assert(len > 0);
/* Rotate fds left. */
- memcpy(fds, fds + 1, (len - 1) * sizeof(*fds));
+ memmove(fds, fds + 1, (len - 1) * sizeof(*fds));
fds[len - 1] = fd;
pthread_rwlock_unlock(&pff_i->lock);
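
Two subtle fixes in multipath_pff_nhop() deserve a note: the lock is promoted from read to write because the lookup now mutates the fd array, and the left rotation copies between overlapping regions, which is undefined behaviour with memcpy and requires memmove. Rotating the just-used fd to the back yields round-robin spreading over equal-cost next hops; the rotation in isolation, as a runnable sketch:

    #include <stdio.h>
    #include <string.h>

    /* Rotate the next-hop fds one position left, moving the fd
     * just used to the back: round-robin over equal-cost paths. */
    static void rotate_left(int *  fds,
                            size_t len)
    {
            int fd = fds[0];

            /* Source and destination overlap: memmove, not memcpy. */
            memmove(fds, fds + 1, (len - 1) * sizeof(*fds));
            fds[len - 1] = fd;
    }

    int main(void)
    {
            int    fds[] = { 4, 7, 9 };
            size_t i;

            rotate_left(fds, 3);        /* -> 7 9 4 */

            for (i = 0; i < 3; ++i)
                    printf("%d ", fds[i]);
            printf("\n");

            return 0;
    }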
diff --git a/src/ipcpd/unicast/pol/multipath_pff.h b/src/ipcpd/unicast/pff/multipath.h
index a8ee088f..0eb03476 100644
--- a/src/ipcpd/unicast/pol/multipath_pff.h
+++ b/src/ipcpd/unicast/pff/multipath.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Policy for PFF supporting multipath routing
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
* Nick Aerts <nick.aerts@ugent.be>
*
* This program is free software; you can redistribute it and/or modify
@@ -24,7 +24,7 @@
#ifndef OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H
#define OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H
-#include "pol-pff-ops.h"
+#include "ops.h"
struct pff_i * multipath_pff_create(void);
@@ -53,6 +53,6 @@ void multipath_pff_flush(struct pff_i * pff_i);
int multipath_pff_nhop(struct pff_i * pff_i,
uint64_t addr);
-struct pol_pff_ops multipath_pff_ops;
+extern struct pff_ops multipath_pff_ops;
#endif /* OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H */
diff --git a/src/ipcpd/unicast/pol-pff-ops.h b/src/ipcpd/unicast/pff/ops.h
index 9e126cb5..16a31273 100644
--- a/src/ipcpd/unicast/pol-pff-ops.h
+++ b/src/ipcpd/unicast/pff/ops.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Pff policy ops
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,14 +20,14 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#ifndef OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H
-#define OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H
+#ifndef OUROBOROS_IPCPD_UNICAST_PFF_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_PFF_OPS_H
#include <stdbool.h>
struct pff_i;
-struct pol_pff_ops {
+struct pff_ops {
struct pff_i * (* create)(void);
void (* destroy)(struct pff_i * pff_i);
@@ -60,4 +60,4 @@ struct pol_pff_ops {
bool up);
};
-#endif /* OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H */
+#endif /* OUROBOROS_IPCPD_UNICAST_PFF_OPS_H */
diff --git a/src/ipcpd/unicast/pol/pft.c b/src/ipcpd/unicast/pff/pft.c
index 53acc08e..8c436113 100644
--- a/src/ipcpd/unicast/pol/pft.c
+++ b/src/ipcpd/unicast/pff/pft.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Packet forwarding table (PFT) with chaining on collisions
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -115,19 +115,11 @@ void pft_flush(struct pft * pft)
static uint64_t hash(uint64_t key)
{
- void * res;
- uint64_t ret;
- uint8_t keys[4];
+ uint64_t res[2];
- memcpy(keys, &key, 4);
+ mem_hash(HASH_MD5, res, (uint8_t *) &key, sizeof(key));
- mem_hash(HASH_MD5, &res, keys, 4);
-
- ret = (* (uint64_t *) res);
-
- free(res);
-
- return ret;
+ return res[0];
}
static uint64_t calc_key(struct pft * pft,
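
The rewritten hash() fixes the old version hashing only the first 4 bytes of an 8-byte key and taking the digest through a heap allocation that had to be freed on every call; the new code hashes the full key into a 16-byte stack buffer (an MD5 digest is 128 bits, hence uint64_t res[2]) and returns the first word. A self-contained sketch of the same shape, with a stand-in mixer in place of mem_hash():

    #include <stdint.h>

    /* Stand-in for mem_hash(HASH_MD5, ...): any digest that fills
     * a 16-byte buffer from the full 8-byte key works the same
     * way. splitmix64 is used here purely for illustration. */
    static uint64_t splitmix64(uint64_t x)
    {
            x += UINT64_C(0x9e3779b97f4a7c15);
            x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
            x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
            return x ^ (x >> 31);
    }

    static uint64_t hash(uint64_t key)
    {
            uint64_t res[2];            /* 128-bit digest, on the stack */

            res[0] = splitmix64(key);
            res[1] = splitmix64(~key);

            return res[0];              /* first 64 bits suffice */
    }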
diff --git a/src/ipcpd/unicast/pol/pft.h b/src/ipcpd/unicast/pff/pft.h
index aed4dba8..711dabcb 100644
--- a/src/ipcpd/unicast/pol/pft.h
+++ b/src/ipcpd/unicast/pff/pft.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Packet forwarding table (PFT) with chaining on collisions
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/unicast/pff/pol.h b/src/ipcpd/unicast/pff/pol.h
new file mode 100644
index 00000000..245b03c4
--- /dev/null
+++ b/src/ipcpd/unicast/pff/pol.h
@@ -0,0 +1,25 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * PDU Forwarding Function policies
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "alternate.h"
+#include "multipath.h"
+#include "simple.h"
diff --git a/src/ipcpd/unicast/pol/simple_pff.c b/src/ipcpd/unicast/pff/simple.c
index 5bd73d8a..5f95e3ce 100644
--- a/src/ipcpd/unicast/pol/simple_pff.c
+++ b/src/ipcpd/unicast/pff/simple.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Simple PDU Forwarding Function
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,7 +27,7 @@
#include <ouroboros/errno.h>
#include "pft.h"
-#include "simple_pff.h"
+#include "simple.h"
#include <assert.h>
#include <pthread.h>
@@ -37,7 +37,7 @@ struct pff_i {
pthread_rwlock_t lock;
};
-struct pol_pff_ops simple_pff_ops = {
+struct pff_ops simple_pff_ops = {
.create = simple_pff_create,
.destroy = simple_pff_destroy,
.lock = simple_pff_lock,
diff --git a/src/ipcpd/unicast/pol/simple_pff.h b/src/ipcpd/unicast/pff/simple.h
index 2dfce45c..0966a186 100644
--- a/src/ipcpd/unicast/pol/simple_pff.h
+++ b/src/ipcpd/unicast/pff/simple.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Simple policy for PFF
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,7 +23,7 @@
#ifndef OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H
#define OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H
-#include "pol-pff-ops.h"
+#include "ops.h"
struct pff_i * simple_pff_create(void);
@@ -52,6 +52,6 @@ void simple_pff_flush(struct pff_i * pff_i);
int simple_pff_nhop(struct pff_i * pff_i,
uint64_t addr);
-struct pol_pff_ops simple_pff_ops;
+extern struct pff_ops simple_pff_ops;
#endif /* OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H */
diff --git a/src/ipcpd/unicast/tests/CMakeLists.txt b/src/ipcpd/unicast/pff/tests/CMakeLists.txt
index 482711d5..65705714 100644
--- a/src/ipcpd/unicast/tests/CMakeLists.txt
+++ b/src/ipcpd/unicast/pff/tests/CMakeLists.txt
@@ -17,19 +17,20 @@ get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)
create_test_sourcelist(${PARENT_DIR}_tests test_suite.c
# Add new tests here
- dht_test.c
+ pft_test.c
)
-protobuf_generate_c(KAD_PROTO_SRCS KAD_PROTO_HDRS ../kademlia.proto)
-
-add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests}
- ${KAD_PROTO_SRCS})
+add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests})
target_link_libraries(${PARENT_DIR}_test ouroboros-common)
add_dependencies(check ${PARENT_DIR}_test)
set(tests_to_run ${${PARENT_DIR}_tests})
-remove(tests_to_run test_suite.c)
+if(CMAKE_VERSION VERSION_LESS "3.29.0")
+ remove(tests_to_run test_suite.c)
+else ()
+ list(POP_FRONT tests_to_run)
+endif()
foreach (test ${tests_to_run})
get_filename_component(test_name ${test} NAME_WE)
diff --git a/src/ipcpd/unicast/pol/tests/pft_test.c b/src/ipcpd/unicast/pff/tests/pft_test.c
index 4e23898b..18287fb8 100644
--- a/src/ipcpd/unicast/pol/tests/pft_test.c
+++ b/src/ipcpd/unicast/pff/tests/pft_test.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Test of the hash table
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/unicast/psched.c b/src/ipcpd/unicast/psched.c
index 1ac3fc12..7e12148b 100644
--- a/src/ipcpd/unicast/psched.c
+++ b/src/ipcpd/unicast/psched.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Packet scheduler component
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,9 +31,9 @@
#include <ouroboros/errno.h>
#include <ouroboros/notifier.h>
+#include "common/connmgr.h"
#include "ipcp.h"
#include "psched.h"
-#include "connmgr.h"
#include <assert.h>
#include <sched.h>
@@ -50,6 +50,7 @@ static int qos_prio [] = {
struct psched {
fset_t * set[QOS_CUBE_MAX];
next_packet_fn_t callback;
+ read_fn_t read;
pthread_t readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
};
@@ -101,7 +102,7 @@ static void * packet_reader(void * o)
notifier_event(NOTIFY_DT_FLOW_UP, &fd);
break;
case FLOW_PKT:
- if (ipcp_flow_read(fd, &sdb))
+ if (sched->read(fd, &sdb) < 0)
continue;
sched->callback(fd, qc, sdb);
@@ -117,7 +118,8 @@ static void * packet_reader(void * o)
return (void *) 0;
}
-struct psched * psched_create(next_packet_fn_t callback)
+struct psched * psched_create(next_packet_fn_t callback,
+ read_fn_t read)
{
struct psched * psched;
struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
@@ -131,6 +133,7 @@ struct psched * psched_create(next_packet_fn_t callback)
goto fail_malloc;
psched->callback = callback;
+ psched->read = read;
for (i = 0; i < QOS_CUBE_MAX; ++i) {
psched->set[i] = fset_create();
@@ -160,7 +163,7 @@ struct psched * psched_create(next_packet_fn_t callback)
for (j = 0; j < i; ++j)
pthread_join(psched->readers[j], NULL);
for (j = i; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
- free(infos[i]);
+ free(infos[j]);
goto fail_infos;
}
}
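
psched no longer hardcodes ipcp_flow_read(): the caller injects a read_fn_t next to the packet callback, so the same scheduler can drive flows whose packets need a different reader. A trimmed sketch of the injection, with qoscube_t simplified to int and struct shm_du_buff left opaque:

    #include <stdlib.h>

    struct shm_du_buff;                  /* opaque packet buffer */

    typedef void (* next_packet_fn_t)(int fd, int qc,
                                      struct shm_du_buff * sdb);
    typedef int  (* read_fn_t)(int fd, struct shm_du_buff ** sdb);

    struct psched {
            next_packet_fn_t callback;
            read_fn_t        read;       /* injected, not hardcoded */
    };

    struct psched * psched_create(next_packet_fn_t callback,
                                  read_fn_t        read)
    {
            struct psched * psched;

            psched = malloc(sizeof(*psched));
            if (psched == NULL)
                    return NULL;

            psched->callback = callback;
            psched->read     = read;

            return psched;
    }

    /* The reader loop then dispatches through the injected function:
     *
     *     if (sched->read(fd, &sdb) < 0)
     *             continue;
     *     sched->callback(fd, qc, sdb);
     */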
diff --git a/src/ipcpd/unicast/psched.h b/src/ipcpd/unicast/psched.h
index 85f32b9a..831f8084 100644
--- a/src/ipcpd/unicast/psched.h
+++ b/src/ipcpd/unicast/psched.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Packet scheduler component
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,7 +30,11 @@ typedef void (* next_packet_fn_t)(int fd,
qoscube_t qc,
struct shm_du_buff * sdb);
-struct psched * psched_create(next_packet_fn_t callback);
+typedef int (* read_fn_t)(int fd,
+ struct shm_du_buff ** sdb);
+
+struct psched * psched_create(next_packet_fn_t callback,
+ read_fn_t read);
void psched_destroy(struct psched * psched);
diff --git a/src/ipcpd/unicast/routing.c b/src/ipcpd/unicast/routing.c
index 0ac43f9f..2ad7b234 100644
--- a/src/ipcpd/unicast/routing.c
+++ b/src/ipcpd/unicast/routing.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Routing component of the IPCP
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,35 +26,30 @@
#include "pff.h"
#include "routing.h"
-#include "pol/link_state.h"
+#include "routing/pol.h"
-struct pol_routing_ops * r_ops;
+struct routing_ops * r_ops;
-int routing_init(enum pol_routing pr)
+int routing_init(struct routing_config * conf,
+ enum pol_pff * pff_type)
{
- enum pol_pff pff_type;
+ void * cfg;
- switch (pr) {
+ switch (conf->pol) {
case ROUTING_LINK_STATE:
- pff_type = PFF_SIMPLE;
- r_ops = &link_state_ops;
- break;
- case ROUTING_LINK_STATE_LFA:
- pff_type = PFF_ALTERNATE;
- r_ops = &link_state_ops;
- break;
- case ROUTING_LINK_STATE_ECMP:
- pff_type=PFF_MULTIPATH;
r_ops = &link_state_ops;
+ cfg = &conf->ls;
break;
default:
return -ENOTSUP;
}
- if (r_ops->init(pr))
- return -1;
+ return r_ops->init(cfg, pff_type);
+}
- return pff_type;
+int routing_start(void)
+{
+ return r_ops->start();
}
struct routing_i * routing_i_create(struct pff * pff)
@@ -67,6 +62,11 @@ void routing_i_destroy(struct routing_i * instance)
return r_ops->routing_i_destroy(instance);
}
+void routing_stop(void)
+{
+ r_ops->stop();
+}
+
void routing_fini(void)
{
r_ops->fini();
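
routing_init() now takes the policy configuration and reports the PFF flavour the policy wants through an out-parameter instead of overloading the return value, and the new start/stop pair separates thread activation from allocation. A hedged usage sketch against the interface shown above (a fragment, not a standalone program; error handling condensed):

    enum pol_pff          pff_type;
    struct routing_config conf;

    conf.pol = ROUTING_LINK_STATE;       /* selects link_state_ops */
    /* conf.ls = ...;                       policy-specific settings */

    if (routing_init(&conf, &pff_type) < 0)
            return -1;                   /* -ENOTSUP for unknown pol */

    /* pff_type now holds PFF_SIMPLE, PFF_ALTERNATE or PFF_MULTIPATH,
     * chosen by the policy itself, ready to hand to pff_create(). */

    if (routing_start() < 0) {
            routing_fini();
            return -1;
    }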
diff --git a/src/ipcpd/unicast/routing.h b/src/ipcpd/unicast/routing.h
index 38e875e7..e14960b5 100644
--- a/src/ipcpd/unicast/routing.h
+++ b/src/ipcpd/unicast/routing.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Routing component of the IPCP
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,10 +30,15 @@
#include <stdint.h>
-int routing_init(enum pol_routing pr);
+int routing_init(struct routing_config * conf,
+ enum pol_pff * pff_type);
void routing_fini(void);
+int routing_start(void);
+
+void routing_stop(void);
+
struct routing_i * routing_i_create(struct pff * pff);
void routing_i_destroy(struct routing_i * instance);
diff --git a/src/ipcpd/unicast/pol/graph.c b/src/ipcpd/unicast/routing/graph.c
index ba2ce553..32442dad 100644
--- a/src/ipcpd/unicast/pol/graph.c
+++ b/src/ipcpd/unicast/routing/graph.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Undirected graph structure
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
* Nick Aerts <nick.aerts@ugent.be>
*
* This program is free software; you can redistribute it and/or modify
@@ -57,8 +57,11 @@ struct edge {
};
struct graph {
- size_t nr_vertices;
- struct list_head vertices;
+ struct {
+ struct list_head list;
+ size_t len;
+ } vertices;
+
pthread_mutex_t lock;
};
@@ -67,7 +70,7 @@ static struct edge * find_edge_by_addr(struct vertex * vertex,
{
struct list_head * p;
- assert(vertex);
+ assert(vertex != NULL);
list_for_each(p, &vertex->edges) {
struct edge * e = list_entry(p, struct edge, next);
@@ -85,7 +88,7 @@ static struct vertex * find_vertex_by_addr(struct graph * graph,
assert(graph);
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
struct vertex * e = list_entry(p, struct vertex, next);
if (e->addr == addr)
return e;
@@ -99,8 +102,8 @@ static struct edge * add_edge(struct vertex * vertex,
{
struct edge * edge;
- assert(vertex);
- assert(nb);
+ assert(vertex != NULL);
+ assert(nb != NULL);
edge = malloc(sizeof(*edge));
if (edge == NULL)
@@ -139,7 +142,7 @@ static struct vertex * add_vertex(struct graph * graph,
vertex->addr = addr;
 /* Keep them ordered by address. */
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
struct vertex * v = list_entry(p, struct vertex, next);
if (v->addr > addr)
break;
@@ -151,13 +154,13 @@ static struct vertex * add_vertex(struct graph * graph,
list_add_tail(&vertex->next, p);
/* Increase the index of the vertices to the right. */
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &vertex->next) {
struct vertex * v = list_entry(p, struct vertex, next);
if (v->addr > addr)
v->index++;
}
- graph->nr_vertices++;
+ ++graph->vertices.len;
return vertex;
}
@@ -168,13 +171,13 @@ static void del_vertex(struct graph * graph,
struct list_head * p;
struct list_head * h;
- assert(graph);
- assert(vertex);
+ assert(graph != NULL);
+ assert(vertex != NULL);
list_del(&vertex->next);
/* Decrease the index of the vertices to the right. */
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
struct vertex * v = list_entry(p, struct vertex, next);
if (v->addr > vertex->addr)
v->index--;
@@ -187,7 +190,7 @@ static void del_vertex(struct graph * graph,
free(vertex);
- graph->nr_vertices--;
+ --graph->vertices.len;
}
struct graph * graph_create(void)
@@ -203,8 +206,8 @@ struct graph * graph_create(void)
return NULL;
}
- graph->nr_vertices = 0;
- list_head_init(&graph->vertices);
+ graph->vertices.len = 0;
+ list_head_init(&graph->vertices.list);
return graph;
}
@@ -218,7 +221,7 @@ void graph_destroy(struct graph * graph)
pthread_mutex_lock(&graph->lock);
- list_for_each_safe(p, n, &graph->vertices) {
+ list_for_each_safe(p, n, &graph->vertices.list) {
struct vertex * e = list_entry(p, struct vertex, next);
del_vertex(graph, e);
}
@@ -227,6 +230,8 @@ void graph_destroy(struct graph * graph)
pthread_mutex_destroy(&graph->lock);
+ assert(graph->vertices.len == 0);
+
free(graph);
}
@@ -240,63 +245,35 @@ int graph_update_edge(struct graph * graph,
struct vertex * nb;
struct edge * nb_e;
- assert(graph);
+ assert(graph != NULL);
pthread_mutex_lock(&graph->lock);
v = find_vertex_by_addr(graph, s_addr);
- if (v == NULL) {
- v = add_vertex(graph, s_addr);
- if (v == NULL) {
- pthread_mutex_unlock(&graph->lock);
- log_err("Failed to add vertex.");
- return -ENOMEM;
- }
+ if (v == NULL && ((v = add_vertex(graph, s_addr)) == NULL)) {
+ log_err("Failed to add src vertex.");
+ goto fail_add_s;
}
nb = find_vertex_by_addr(graph, d_addr);
- if (nb == NULL) {
- nb = add_vertex(graph, d_addr);
- if (nb == NULL) {
- if (list_is_empty(&v->edges))
- del_vertex(graph, v);
- pthread_mutex_unlock(&graph->lock);
- log_err("Failed to add vertex.");
- return -ENOMEM;
- }
+ if (nb == NULL && ((nb = add_vertex(graph, d_addr)) == NULL)) {
+ log_err("Failed to add dst vertex.");
+ goto fail_add_d;
}
e = find_edge_by_addr(v, d_addr);
- if (e == NULL) {
- e = add_edge(v, nb);
- if (e == NULL) {
- if (list_is_empty(&v->edges))
- del_vertex(graph, v);
- if (list_is_empty(&nb->edges))
- del_vertex(graph, nb);
- pthread_mutex_unlock(&graph->lock);
- log_err("Failed to add edge.");
- return -ENOMEM;
- }
+ if (e == NULL && ((e = add_edge(v, nb)) == NULL)) {
+ log_err("Failed to add edge to dst.");
+ goto fail_add_edge_d;
}
e->announced++;
e->qs = qs;
nb_e = find_edge_by_addr(nb, s_addr);
- if (nb_e == NULL) {
- nb_e = add_edge(nb, v);
- if (nb_e == NULL) {
- if (--e->announced == 0)
- del_edge(e);
- if (list_is_empty(&v->edges))
- del_vertex(graph, v);
- if (list_is_empty(&nb->edges))
- del_vertex(graph, nb);
- pthread_mutex_unlock(&graph->lock);
- log_err("Failed to add edge.");
- return -ENOMEM;
- }
+ if (nb_e == NULL && ((nb_e = add_edge(nb, v)) == NULL)) {
+ log_err("Failed to add edge to src.");
+ goto fail_add_edge_s;
}
nb_e->announced++;
@@ -305,6 +282,19 @@ int graph_update_edge(struct graph * graph,
pthread_mutex_unlock(&graph->lock);
return 0;
+ fail_add_edge_s:
+ if (--e->announced == 0)
+ del_edge(e);
+ fail_add_edge_d:
+ if (list_is_empty(&nb->edges))
+ del_vertex(graph, nb);
+ fail_add_d:
+ if (list_is_empty(&v->edges))
+ del_vertex(graph, v);
+ fail_add_s:
+ pthread_mutex_unlock(&graph->lock);
+ return -ENOMEM;
}
int graph_del_edge(struct graph * graph,
@@ -322,30 +312,26 @@ int graph_del_edge(struct graph * graph,
v = find_vertex_by_addr(graph, s_addr);
if (v == NULL) {
- pthread_mutex_unlock(&graph->lock);
- log_err("No such source vertex.");
- return -1;
+ log_err("Failed to find src vertex.");
+ goto fail;
}
nb = find_vertex_by_addr(graph, d_addr);
if (nb == NULL) {
- pthread_mutex_unlock(&graph->lock);
log_err("No such destination vertex.");
- return -1;
+ goto fail;
}
e = find_edge_by_addr(v, d_addr);
if (e == NULL) {
- pthread_mutex_unlock(&graph->lock);
log_err("No such source edge.");
- return -1;
+ goto fail;
}
nb_e = find_edge_by_addr(nb, s_addr);
if (nb_e == NULL) {
- pthread_mutex_unlock(&graph->lock);
log_err("No such destination edge.");
- return -1;
+ goto fail;
}
if (--e->announced == 0)
@@ -362,6 +348,10 @@ int graph_del_edge(struct graph * graph,
pthread_mutex_unlock(&graph->lock);
return 0;
+
+ fail:
+ pthread_mutex_unlock(&graph->lock);
+ return -1;
}
static int get_min_vertex(struct graph * graph,
@@ -381,7 +371,7 @@ static int get_min_vertex(struct graph * graph,
*v = NULL;
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
if (!used[i] && dist[i] < min) {
min = dist[i];
index = i;
@@ -413,24 +403,24 @@ static int dijkstra(struct graph * graph,
assert(nhops);
assert(dist);
- *nhops = malloc(sizeof(**nhops) * graph->nr_vertices);
+ *nhops = malloc(sizeof(**nhops) * graph->vertices.len);
if (*nhops == NULL)
goto fail_pnhops;
- *dist = malloc(sizeof(**dist) * graph->nr_vertices);
+ *dist = malloc(sizeof(**dist) * graph->vertices.len);
if (*dist == NULL)
goto fail_pdist;
- used = malloc(sizeof(*used) * graph->nr_vertices);
+ used = malloc(sizeof(*used) * graph->vertices.len);
if (used == NULL)
goto fail_used;
/* Init the data structures */
- memset(used, 0, sizeof(*used) * graph->nr_vertices);
- memset(*nhops, 0, sizeof(**nhops) * graph->nr_vertices);
- memset(*dist, 0, sizeof(**dist) * graph->nr_vertices);
+ memset(used, 0, sizeof(*used) * graph->vertices.len);
+ memset(*nhops, 0, sizeof(**nhops) * graph->vertices.len);
+ memset(*dist, 0, sizeof(**dist) * graph->vertices.len);
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
v = list_entry(p, struct vertex, next);
(*dist)[i++] = (v->addr == src) ? 0 : INT_MAX;
}
@@ -527,7 +517,7 @@ static int graph_routing_table_simple(struct graph * graph,
assert(dist);
/* We need at least 2 vertices for a table */
- if (graph->nr_vertices < 2)
+ if (graph->vertices.len < 2)
goto fail_vertices;
if (dijkstra(graph, s_addr, &nhops, dist))
@@ -536,7 +526,7 @@ static int graph_routing_table_simple(struct graph * graph,
list_head_init(table);
/* Now construct the routing table from the nhops. */
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
v = list_entry(p, struct vertex, next);
/* This is the src */
@@ -634,7 +624,7 @@ static int graph_routing_table_lfa(struct graph * graph,
addrs[j] = -1;
}
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
v = list_entry(p, struct vertex, next);
if (v->addr != s_addr)
@@ -660,7 +650,7 @@ static int graph_routing_table_lfa(struct graph * graph,
}
 /* Loop through all nodes to see if we have an LFA for them. */
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
v = list_entry(p, struct vertex, next);
if (v->addr == s_addr)
@@ -717,14 +707,14 @@ static int graph_routing_table_ecmp(struct graph * graph,
assert(graph);
assert(dist);
- if (graph-> nr_vertices < 2)
+ if (graph->vertices.len < 2)
goto fail_vertices;
- forwarding = malloc(sizeof(*forwarding) * graph->nr_vertices);
+ forwarding = malloc(sizeof(*forwarding) * graph->vertices.len);
if (forwarding == NULL)
goto fail_vertices;
- for (i = 0; i < graph->nr_vertices; ++i)
+ for (i = 0; i < graph->vertices.len; ++i)
list_head_init(&forwarding[i]);
if (dijkstra(graph, s_addr, &nhops, dist))
@@ -745,7 +735,7 @@ static int graph_routing_table_ecmp(struct graph * graph,
free(nhops);
- list_for_each(h, &graph->vertices) {
+ list_for_each(h, &graph->vertices.list) {
v = list_entry(h, struct vertex, next);
if (tmp_dist[v->index] + 1 == (*dist)[v->index]) {
n = malloc(sizeof(*n));
@@ -763,7 +753,7 @@ static int graph_routing_table_ecmp(struct graph * graph,
list_head_init(table);
i = 0;
- list_for_each(p, &graph->vertices) {
+ list_for_each(p, &graph->vertices.list) {
v = list_entry(p, struct vertex, next);
if (v->addr == s_addr) {
++i;
@@ -834,7 +824,7 @@ int graph_routing_table(struct graph * graph,
break;
default:
log_err("Unsupported algorithm.");
- goto fail_algo;
+ goto fail_table;
}
pthread_mutex_unlock(&graph->lock);
@@ -843,8 +833,6 @@ int graph_routing_table(struct graph * graph,
return 0;
- fail_algo:
- free(s_dist);
fail_table:
pthread_mutex_unlock(&graph->lock);
return -1;
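
Throughout graph.c the vertex list and its length are folded into one anonymous counted-list struct, so dijkstra()'s scratch allocations and the new emptiness assertion in graph_destroy() read off the same object that every mutation updates. A runnable sketch of the pattern:

    #include <assert.h>
    #include <stddef.h>

    struct node {
            struct node * next;
    };

    struct graph {
            struct {
                    struct node * head;
                    size_t        len;  /* kept in step with the list */
            } vertices;
    };

    /* Every mutation touches both members together. */
    static void add_vertex(struct graph * g,
                           struct node *  n)
    {
            n->next = g->vertices.head;
            g->vertices.head = n;
            ++g->vertices.len;
    }

    int main(void)
    {
            struct graph g = { { NULL, 0 } };
            struct node  n;

            add_vertex(&g, &n);
            assert(g.vertices.len == 1);

            return 0;
    }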
diff --git a/src/ipcpd/unicast/pol/graph.h b/src/ipcpd/unicast/routing/graph.h
index 473a5163..8190cc6c 100644
--- a/src/ipcpd/unicast/pol/graph.h
+++ b/src/ipcpd/unicast/routing/graph.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Undirected graph structure
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/unicast/pol/link_state.c b/src/ipcpd/unicast/routing/link-state.c
index d9482876..e5edf539 100644
--- a/src/ipcpd/unicast/pol/link_state.c
+++ b/src/ipcpd/unicast/routing/link-state.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Link state routing policy
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -38,25 +38,23 @@
#include <ouroboros/list.h>
#include <ouroboros/logs.h>
#include <ouroboros/notifier.h>
+#include <ouroboros/pthread.h>
#include <ouroboros/rib.h>
#include <ouroboros/utils.h>
-#include "comp.h"
-#include "connmgr.h"
+#include "addr-auth.h"
+#include "common/comp.h"
+#include "common/connmgr.h"
#include "graph.h"
#include "ipcp.h"
-#include "link_state.h"
+#include "link-state.h"
#include "pff.h"
#include <assert.h>
#include <stdlib.h>
#include <inttypes.h>
#include <string.h>
-#include <pthread.h>
-#define RECALC_TIME 4
-#define LS_UPDATE_TIME 15
-#define LS_TIMEO 60
#define LS_ENTRY_SIZE 104
#define LSDB "lsdb"
@@ -64,6 +62,12 @@
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
#endif
+#define LINK_FMT ADDR_FMT32 "--" ADDR_FMT32
+#define LINK_VAL(src, dst) ADDR_VAL32(&src), ADDR_VAL32(&dst)
+
+#define LSU_FMT "LSU ["ADDR_FMT32 " -- " ADDR_FMT32 " seq: %09" PRIu64 "]"
+#define LSU_VAL(src, dst, seqno) ADDR_VAL32(&src), ADDR_VAL32(&dst), seqno
+
struct lsa {
uint64_t d_addr;
uint64_t s_addr;
@@ -106,30 +110,45 @@ struct nb {
};
struct {
- struct list_head nbs;
- size_t nbs_len;
+ uint64_t addr;
+
+ enum routing_algo routing_algo;
+
+ struct ls_config conf;
+
fset_t * mgmt_set;
- struct list_head db;
- size_t db_len;
+ struct graph * graph;
+
+ struct {
+ struct {
+ struct list_head list;
+ size_t len;
+ } nbs;
- pthread_rwlock_t db_lock;
+ struct {
+ struct list_head list;
+ size_t len;
+ } db;
- struct graph * graph;
+ pthread_rwlock_t lock;
+ };
+
+ struct {
+ struct list_head list;
+ pthread_mutex_t mtx;
+ } instances;
pthread_t lsupdate;
pthread_t lsreader;
pthread_t listener;
-
- struct list_head routing_instances;
- pthread_mutex_t routing_i_lock;
-
- enum routing_algo routing_algo;
} ls;
-struct pol_routing_ops link_state_ops = {
- .init = link_state_init,
+struct routing_ops link_state_ops = {
+ .init = (int (*)(void *, enum pol_pff *)) link_state_init,
.fini = link_state_fini,
+ .start = link_state_start,
+ .stop = link_state_stop,
.routing_i_create = link_state_routing_i_create,
.routing_i_destroy = link_state_routing_i_destroy
};
@@ -138,7 +157,7 @@ static int str_adj(struct adjacency * adj,
char * buf,
size_t len)
{
- char tmbuf[64];
+ char tmstr[RIB_TM_STRLEN];
char srcbuf[64];
char dstbuf[64];
char seqnobuf[64];
@@ -149,15 +168,16 @@ static int str_adj(struct adjacency * adj,
if (len < LS_ENTRY_SIZE)
return -1;
- tm = localtime(&adj->stamp);
- strftime(tmbuf, sizeof(tmbuf), "%F %T", tm); /* 19 chars */
+ tm = gmtime(&adj->stamp);
+ strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
- sprintf(srcbuf, "%" PRIu64, adj->src);
- sprintf(dstbuf, "%" PRIu64, adj->dst);
+ sprintf(srcbuf, ADDR_FMT32, ADDR_VAL32(&adj->src));
+ sprintf(dstbuf, ADDR_FMT32, ADDR_VAL32(&adj->dst));
sprintf(seqnobuf, "%" PRIu64, adj->seqno);
- sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\nupd: %20s\n",
- srcbuf, dstbuf, seqnobuf, tmbuf);
+ sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\n"
+ "upd: %s\n",
+ srcbuf, dstbuf, seqnobuf, tmstr);
return LS_ENTRY_SIZE;
}
@@ -169,9 +189,9 @@ static struct adjacency * get_adj(const char * path)
assert(path);
- list_for_each(p, &ls.db) {
+ list_for_each(p, &ls.db.list) {
struct adjacency * a = list_entry(p, struct adjacency, next);
- sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst);
+ sprintf(entry, LINK_FMT, LINK_VAL(a->src, a->dst));
if (strcmp(entry, path) == 0)
return a;
}
@@ -179,53 +199,56 @@ static struct adjacency * get_adj(const char * path)
return NULL;
}
-static int lsdb_getattr(const char * path,
- struct stat * st)
+static int lsdb_rib_getattr(const char * path,
+ struct rib_attr * attr)
{
struct adjacency * adj;
struct timespec now;
+ char * entry;
assert(path);
- assert(st);
+ assert(attr);
+
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+ assert(entry);
clock_gettime(CLOCK_REALTIME_COARSE, &now);
- pthread_rwlock_rdlock(&ls.db_lock);
+ pthread_rwlock_rdlock(&ls.lock);
- adj = get_adj(path);
+ adj = get_adj(entry);
if (adj != NULL) {
- st->st_mtime = adj->stamp;
- st->st_size = LS_ENTRY_SIZE;
+ attr->mtime = adj->stamp;
+ attr->size = LS_ENTRY_SIZE;
} else {
- st->st_mtime = now.tv_sec;
- st->st_size = 0;
+ attr->mtime = now.tv_sec;
+ attr->size = 0;
}
- st->st_mode = S_IFREG | 0755;
- st->st_nlink = 1;
- st->st_uid = getuid();
- st->st_gid = getgid();
-
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return 0;
}
-static int lsdb_read(const char * path,
- char * buf,
- size_t len)
+static int lsdb_rib_read(const char * path,
+ char * buf,
+ size_t len)
{
struct adjacency * a;
+ char * entry;
int size;
assert(path);
- pthread_rwlock_rdlock(&ls.db_lock);
+ entry = strstr(path, RIB_SEPARATOR) + 1;
+ assert(entry);
- if (ls.db_len + ls.nbs_len == 0)
+ pthread_rwlock_rdlock(&ls.lock);
+
+ if (ls.db.len + ls.nbs.len == 0)
goto fail;
- a = get_adj(path);
+ a = get_adj(entry);
if (a == NULL)
goto fail;
@@ -233,80 +256,72 @@ static int lsdb_read(const char * path,
if (size < 0)
goto fail;
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return size;
fail:
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return -1;
}
-static int lsdb_readdir(char *** buf)
+static int lsdb_rib_readdir(char *** buf)
{
struct list_head * p;
char entry[RIB_PATH_LEN + 1];
ssize_t idx = 0;
- assert(buf);
+ assert(buf != NULL);
- pthread_rwlock_rdlock(&ls.db_lock);
+ pthread_rwlock_rdlock(&ls.lock);
- if (ls.db_len + ls.nbs_len == 0) {
- pthread_rwlock_unlock(&ls.db_lock);
- return 0;
+ if (ls.db.len + ls.nbs.len == 0) {
+ *buf = NULL;
+ goto no_entries;
}
- *buf = malloc(sizeof(**buf) * (ls.db_len + ls.nbs_len));
- if (*buf == NULL) {
- pthread_rwlock_unlock(&ls.db_lock);
- return -ENOMEM;
- }
- list_for_each(p, &ls.nbs) {
+ *buf = malloc(sizeof(**buf) * (ls.db.len + ls.nbs.len));
+ if (*buf == NULL)
+ goto fail_entries;
+
+ list_for_each(p, &ls.nbs.list) {
struct nb * nb = list_entry(p, struct nb, next);
- char * str = (nb->type == NB_DT ? "dt." : "mgmt.");
- sprintf(entry, "%s%" PRIu64, str, nb->addr);
+ char * str = (nb->type == NB_DT ? ".dt " : ".mgmt ");
+ sprintf(entry, "%s" ADDR_FMT32 , str, ADDR_VAL32(&nb->addr));
(*buf)[idx] = malloc(strlen(entry) + 1);
- if ((*buf)[idx] == NULL) {
- while (idx-- > 0)
- free((*buf)[idx]);
- free(buf);
- pthread_rwlock_unlock(&ls.db_lock);
- return -ENOMEM;
- }
+ if ((*buf)[idx] == NULL)
+ goto fail_entry;
- strcpy((*buf)[idx], entry);
-
- idx++;
+ strcpy((*buf)[idx++], entry);
}
- list_for_each(p, &ls.db) {
+ list_for_each(p, &ls.db.list) {
struct adjacency * a = list_entry(p, struct adjacency, next);
- sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst);
+ sprintf(entry, LINK_FMT, LINK_VAL(a->src, a->dst));
(*buf)[idx] = malloc(strlen(entry) + 1);
- if ((*buf)[idx] == NULL) {
- ssize_t j;
- for (j = 0; j < idx; ++j)
- free(*buf[j]);
- free(buf);
- pthread_rwlock_unlock(&ls.db_lock);
- return -ENOMEM;
- }
+ if ((*buf)[idx] == NULL)
+ goto fail_entry;
- strcpy((*buf)[idx], entry);
-
- idx++;
+ strcpy((*buf)[idx++], entry);
}
-
- pthread_rwlock_unlock(&ls.db_lock);
+ no_entries:
+ pthread_rwlock_unlock(&ls.lock);
return idx;
+
+ fail_entry:
+ while (idx-- > 0)
+ free((*buf)[idx]);
+ free(*buf);
+ fail_entries:
+ pthread_rwlock_unlock(&ls.lock);
+ return -ENOMEM;
}
static struct rib_ops r_ops = {
- .read = lsdb_read,
- .readdir = lsdb_readdir,
- .getattr = lsdb_getattr
+ .read = lsdb_rib_read,
+ .readdir = lsdb_rib_readdir,
+ .getattr = lsdb_rib_getattr
};
static int lsdb_add_nb(uint64_t addr,
@@ -316,28 +331,28 @@ static int lsdb_add_nb(uint64_t addr,
struct list_head * p;
struct nb * nb;
- pthread_rwlock_wrlock(&ls.db_lock);
+ pthread_rwlock_wrlock(&ls.lock);
- list_for_each(p, &ls.nbs) {
+ list_for_each(p, &ls.nbs.list) {
struct nb * el = list_entry(p, struct nb, next);
- if (el->addr == addr && el->type == type) {
- log_dbg("Already know %s neighbor %" PRIu64 ".",
- type == NB_DT ? "dt" : "mgmt", addr);
- if (el->fd != fd) {
- log_warn("Existing neighbor assigned new fd.");
- el->fd = fd;
- }
- pthread_rwlock_unlock(&ls.db_lock);
- return -EPERM;
- }
-
if (addr > el->addr)
break;
+ if (el->addr != addr || el->type != type)
+ continue;
+
+ log_dbg("Already know %s neighbor " ADDR_FMT32 ".",
+ type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr));
+ if (el->fd != fd) {
+ log_warn("Existing neighbor assigned new fd.");
+ el->fd = fd;
+ }
+ pthread_rwlock_unlock(&ls.lock);
+ return -EPERM;
}
nb = malloc(sizeof(*nb));
if (nb == NULL) {
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return -ENOMEM;
}
@@ -347,12 +362,12 @@ static int lsdb_add_nb(uint64_t addr,
list_add_tail(&nb->next, p);
- ++ls.nbs_len;
+ ++ls.nbs.len;
- log_dbg("Type %s neighbor %" PRIu64 " added.",
- nb->type == NB_DT ? "dt" : "mgmt", addr);
+ log_dbg("Type %s neighbor " ADDR_FMT32 " added.",
+ nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr));
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return 0;
}
@@ -363,22 +378,23 @@ static int lsdb_del_nb(uint64_t addr,
struct list_head * p;
struct list_head * h;
- pthread_rwlock_wrlock(&ls.db_lock);
+ pthread_rwlock_wrlock(&ls.lock);
- list_for_each_safe(p, h, &ls.nbs) {
+ list_for_each_safe(p, h, &ls.nbs.list) {
struct nb * nb = list_entry(p, struct nb, next);
- if (nb->addr == addr && nb->fd == fd) {
- list_del(&nb->next);
- --ls.nbs_len;
- pthread_rwlock_unlock(&ls.db_lock);
- log_dbg("Type %s neighbor %" PRIu64 " deleted.",
- nb->type == NB_DT ? "dt" : "mgmt", addr);
- free(nb);
- return 0;
- }
+ if (nb->addr != addr || nb->fd != fd)
+ continue;
+
+ list_del(&nb->next);
+ --ls.nbs.len;
+ pthread_rwlock_unlock(&ls.lock);
+ log_dbg("Type %s neighbor " ADDR_FMT32 " deleted.",
+ nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr));
+ free(nb);
+ return 0;
}
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return -EPERM;
}
@@ -388,18 +404,18 @@ static int nbr_to_fd(uint64_t addr)
struct list_head * p;
int fd;
- pthread_rwlock_rdlock(&ls.db_lock);
+ pthread_rwlock_rdlock(&ls.lock);
- list_for_each(p, &ls.nbs) {
+ list_for_each(p, &ls.nbs.list) {
struct nb * nb = list_entry(p, struct nb, next);
if (nb->addr == addr && nb->type == NB_DT) {
fd = nb->fd;
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return fd;
}
}
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return -1;
}
@@ -414,8 +430,7 @@ static void calculate_pff(struct routing_i * instance)
assert(instance);
- if (graph_routing_table(ls.graph, ls.routing_algo,
- ipcpi.dt_addr, &table))
+ if (graph_routing_table(ls.graph, ls.routing_algo, ls.addr, &table))
return;
pff_lock(instance->pff);
@@ -450,8 +465,8 @@ static void set_pff_modified(bool calc)
{
struct list_head * p;
- pthread_mutex_lock(&ls.routing_i_lock);
- list_for_each(p, &ls.routing_instances) {
+ pthread_mutex_lock(&ls.instances.mtx);
+ list_for_each(p, &ls.instances.list) {
struct routing_i * inst =
list_entry(p, struct routing_i, next);
pthread_mutex_lock(&inst->lock);
@@ -460,7 +475,7 @@ static void set_pff_modified(bool calc)
if (calc)
calculate_pff(inst);
}
- pthread_mutex_unlock(&ls.routing_i_lock);
+ pthread_mutex_unlock(&ls.instances.mtx);
}
static int lsdb_add_link(uint64_t src,
@@ -477,9 +492,9 @@ static int lsdb_add_link(uint64_t src,
clock_gettime(CLOCK_REALTIME_COARSE, &now);
- pthread_rwlock_wrlock(&ls.db_lock);
+ pthread_rwlock_wrlock(&ls.lock);
- list_for_each(p, &ls.db) {
+ list_for_each(p, &ls.db.list) {
struct adjacency * a = list_entry(p, struct adjacency, next);
if (a->dst == dst && a->src == src) {
if (a->seqno < seqno) {
@@ -487,7 +502,7 @@ static int lsdb_add_link(uint64_t src,
a->seqno = seqno;
ret = 0;
}
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return ret;
}
@@ -497,7 +512,7 @@ static int lsdb_add_link(uint64_t src,
adj = malloc(sizeof(*adj));
if (adj == NULL) {
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return -ENOMEM;
}
@@ -508,12 +523,12 @@ static int lsdb_add_link(uint64_t src,
list_add_tail(&adj->next, p);
- ls.db_len++;
+ ls.db.len++;
if (graph_update_edge(ls.graph, src, dst, *qs))
log_warn("Failed to add edge to graph.");
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
set_pff_modified(true);
@@ -526,25 +541,25 @@ static int lsdb_del_link(uint64_t src,
struct list_head * p;
struct list_head * h;
- pthread_rwlock_wrlock(&ls.db_lock);
+ pthread_rwlock_wrlock(&ls.lock);
- list_for_each_safe(p, h, &ls.db) {
+ list_for_each_safe(p, h, &ls.db.list) {
struct adjacency * a = list_entry(p, struct adjacency, next);
if (a->dst == dst && a->src == src) {
list_del(&a->next);
if (graph_del_edge(ls.graph, src, dst))
log_warn("Failed to delete edge from graph.");
- ls.db_len--;
+ ls.db.len--;
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
set_pff_modified(false);
free(a);
return 0;
}
}
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
return -EPERM;
}
@@ -567,7 +582,7 @@ static void * periodic_recalc_pff(void * o)
if (modified)
calculate_pff(inst);
- sleep(RECALC_TIME);
+ sleep(ls.conf.t_recalc);
}
return (void *) 0;
@@ -584,10 +599,20 @@ static void send_lsm(uint64_t src,
lsm.s_addr = hton64(src);
lsm.seqno = hton64(seqno);
- list_for_each(p, &ls.nbs) {
+ list_for_each(p, &ls.nbs.list) {
struct nb * nb = list_entry(p, struct nb, next);
- if (nb->type == NB_MGMT)
- flow_write(nb->fd, &lsm, sizeof(lsm));
+ if (nb->type != NB_MGMT)
+ continue;
+
+ if (flow_write(nb->fd, &lsm, sizeof(lsm)) < 0)
+ log_err("Failed to send LSM to " ADDR_FMT32,
+ ADDR_VAL32(&nb->addr));
+#ifdef DEBUG_PROTO_LS
+ else
+ log_proto(LSU_FMT " --> " ADDR_FMT32,
+ LSU_VAL(src, dst, seqno),
+ ADDR_VAL32(&nb->addr));
+#endif
}
}
@@ -601,9 +626,9 @@ static void lsdb_replicate(int fd)
list_head_init(&copy);
/* Lock the lsdb, copy the lsms and send outside of lock. */
- pthread_rwlock_rdlock(&ls.db_lock);
+ pthread_rwlock_rdlock(&ls.lock);
- list_for_each(p, &ls.db) {
+ list_for_each(p, &ls.db.list) {
struct adjacency * adj;
struct adjacency * cpy;
adj = list_entry(p, struct adjacency, next);
@@ -620,7 +645,7 @@ static void lsdb_replicate(int fd)
list_add_tail(&cpy->next, &copy);
}
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
list_for_each_safe(p, h, &copy) {
struct lsa lsm;
@@ -646,18 +671,17 @@ static void * lsupdate(void * o)
while (true) {
clock_gettime(CLOCK_REALTIME_COARSE, &now);
- pthread_rwlock_wrlock(&ls.db_lock);
+ pthread_rwlock_wrlock(&ls.lock);
- pthread_cleanup_push((void (*) (void *)) pthread_rwlock_unlock,
- (void *) &ls.db_lock);
+ pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock);
- list_for_each_safe(p, h, &ls.db) {
+ list_for_each_safe(p, h, &ls.db.list) {
struct adjacency * adj;
adj = list_entry(p, struct adjacency, next);
- if (now.tv_sec - adj->stamp > LS_TIMEO) {
+ if (now.tv_sec > adj->stamp + ls.conf.t_timeo) {
list_del(&adj->next);
- log_dbg("%" PRIu64 " - %" PRIu64" timed out.",
- adj->src, adj->dst);
+ log_dbg(LINK_FMT " timed out.",
+ LINK_VAL(adj->src, adj->dst));
if (graph_del_edge(ls.graph, adj->src,
adj->dst))
log_err("Failed to del edge.");
@@ -665,7 +689,7 @@ static void * lsupdate(void * o)
continue;
}
- if (adj->src == ipcpi.dt_addr) {
+ if (adj->src == ls.addr) {
adj->seqno++;
send_lsm(adj->src, adj->dst, adj->seqno);
adj->stamp = now.tv_sec;
@@ -674,7 +698,7 @@ static void * lsupdate(void * o)
pthread_cleanup_pop(true);
- sleep(LS_UPDATE_TIME);
+ sleep(ls.conf.t_update);
}
return (void *) 0;
@@ -706,30 +730,55 @@ static void forward_lsm(uint8_t * buf,
int in_fd)
{
struct list_head * p;
+#ifdef DEBUG_PROTO_LS
+ struct lsa lsm;
- pthread_rwlock_rdlock(&ls.db_lock);
+ assert(buf);
+ assert(len >= sizeof(struct lsa));
+
+ memcpy(&lsm, buf, sizeof(lsm));
+
+ lsm.s_addr = ntoh64(lsm.s_addr);
+ lsm.d_addr = ntoh64(lsm.d_addr);
+ lsm.seqno = ntoh64(lsm.seqno);
+#endif
+ pthread_rwlock_rdlock(&ls.lock);
- pthread_cleanup_push((void (*))(void *) pthread_rwlock_unlock,
- &ls.db_lock);
+ pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock);
- list_for_each(p, &ls.nbs) {
+ list_for_each(p, &ls.nbs.list) {
struct nb * nb = list_entry(p, struct nb, next);
- if (nb->type == NB_MGMT && nb->fd != in_fd)
- flow_write(nb->fd, buf, len);
+ if (nb->type != NB_MGMT || nb->fd == in_fd)
+ continue;
+
+ if (flow_write(nb->fd, buf, len) < 0)
+ log_err("Failed to forward LSM to " ADDR_FMT32,
+ ADDR_VAL32(&nb->addr));
+#ifdef DEBUG_PROTO_LS
+ else
+ log_proto(LSU_FMT " --> " ADDR_FMT32 " [forwarded]",
+ LSU_VAL(lsm.s_addr, lsm.d_addr, lsm.seqno),
+ ADDR_VAL32(&nb->addr));
+#endif
}
pthread_cleanup_pop(true);
}
+static void cleanup_fqueue(void * fq)
+{
+ fqueue_destroy((fqueue_t *) fq);
+}
+
static void * lsreader(void * o)
{
- fqueue_t * fq;
- int ret;
- uint8_t buf[sizeof(struct lsa)];
- int fd;
- qosspec_t qs;
- struct lsa * msg;
- size_t len;
+ fqueue_t * fq;
+ int ret;
+ uint8_t buf[sizeof(struct lsa)];
+ int fd;
+ qosspec_t qs;
+ struct lsa msg;
+ size_t len;
(void) o;
@@ -739,8 +788,7 @@ static void * lsreader(void * o)
if (fq == NULL)
return (void *) -1;
- pthread_cleanup_push((void (*) (void *)) fqueue_destroy,
- (void *) fq);
+ pthread_cleanup_push(cleanup_fqueue, fq);
while (true) {
ret = fevent(ls.mgmt_set, fq, NULL);
@@ -753,15 +801,22 @@ static void * lsreader(void * o)
if (fqueue_type(fq) != FLOW_PKT)
continue;
- len = flow_read(fd, buf, sizeof(*msg));
- if (len <= 0 || len != sizeof(*msg))
+ len = flow_read(fd, buf, sizeof(msg));
+ if (len <= 0 || len != sizeof(msg))
continue;
- msg = (struct lsa *) buf;
-
- if (lsdb_add_link(ntoh64(msg->s_addr),
- ntoh64(msg->d_addr),
- ntoh64(msg->seqno),
+ memcpy(&msg, buf, sizeof(msg));
+ msg.s_addr = ntoh64(msg.s_addr);
+ msg.d_addr = ntoh64(msg.d_addr);
+ msg.seqno = ntoh64(msg.seqno);
+#ifdef DEBUG_PROTO_LS
+ log_proto(LSU_FMT " <-- " ADDR_FMT32,
+ LSU_VAL(msg.s_addr, msg.d_addr, msg.seqno),
+ ADDR_VAL32(&ls.addr));
+#endif
+ if (lsdb_add_link(msg.s_addr,
+ msg.d_addr,
+ msg.seqno,
&qs))
continue;
@@ -782,14 +837,14 @@ static void flow_event(int fd,
log_dbg("Notifying routing instances of flow event.");
- pthread_mutex_lock(&ls.routing_i_lock);
+ pthread_mutex_lock(&ls.instances.mtx);
- list_for_each(p, &ls.routing_instances) {
+ list_for_each(p, &ls.instances.list) {
struct routing_i * ri = list_entry(p, struct routing_i, next);
pff_flow_state_change(ri->pff, fd, up);
}
- pthread_mutex_unlock(&ls.routing_i_lock);
+ pthread_mutex_unlock(&ls.instances.mtx);
}
static void handle_event(void * self,
@@ -811,14 +866,17 @@ static void handle_event(void * self,
switch (event) {
case NOTIFY_DT_CONN_ADD:
- pthread_rwlock_rdlock(&ls.db_lock);
- send_lsm(ipcpi.dt_addr, c->conn_info.addr, 0);
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_rdlock(&ls.lock);
+
+ pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock);
+
+ send_lsm(ls.addr, c->conn_info.addr, 0);
+ pthread_cleanup_pop(true);
if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT))
log_dbg("Failed to add neighbor to LSDB.");
- if (lsdb_add_link(ipcpi.dt_addr, c->conn_info.addr, 0, &qs))
+ if (lsdb_add_link(ls.addr, c->conn_info.addr, 0, &qs))
log_dbg("Failed to add new adjacency to LSDB.");
break;
case NOTIFY_DT_CONN_DEL:
@@ -827,7 +885,7 @@ static void handle_event(void * self,
if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))
log_dbg("Failed to delete neighbor from LSDB.");
- if (lsdb_del_link(ipcpi.dt_addr, c->conn_info.addr))
+ if (lsdb_del_link(ls.addr, c->conn_info.addr))
log_dbg("Local link was not in LSDB.");
break;
case NOTIFY_DT_CONN_QOS:
@@ -878,11 +936,11 @@ struct routing_i * link_state_routing_i_create(struct pff * pff)
periodic_recalc_pff, tmp))
goto fail_pthread_create_lsupdate;
- pthread_mutex_lock(&ls.routing_i_lock);
+ pthread_mutex_lock(&ls.instances.mtx);
- list_add(&tmp->next, &ls.routing_instances);
+ list_add(&tmp->next, &ls.instances.list);
- pthread_mutex_unlock(&ls.routing_i_lock);
+ pthread_mutex_unlock(&ls.instances.mtx);
return tmp;
@@ -898,11 +956,11 @@ void link_state_routing_i_destroy(struct routing_i * instance)
{
assert(instance);
- pthread_mutex_lock(&ls.routing_i_lock);
+ pthread_mutex_lock(&ls.instances.mtx);
list_del(&instance->next);
- pthread_mutex_unlock(&ls.routing_i_lock);
+ pthread_mutex_unlock(&ls.instances.mtx);
pthread_cancel(instance->calculator);
@@ -913,96 +971,146 @@ void link_state_routing_i_destroy(struct routing_i * instance)
free(instance);
}
-int link_state_init(enum pol_routing pr)
+int link_state_start(void)
+{
+ if (notifier_reg(handle_event, NULL)) {
+ log_err("Failed to register link-state with notifier.");
+ goto fail_notifier_reg;
+ }
+
+ if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) {
+ log_err("Failed to create lsupdate thread.");
+ goto fail_pthread_create_lsupdate;
+ }
+
+ if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) {
+ log_err("Failed to create lsreader thread.");
+ goto fail_pthread_create_lsreader;
+ }
+
+ if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) {
+ log_err("Failed to create listener thread.");
+ goto fail_pthread_create_listener;
+ }
+
+ return 0;
+
+ fail_pthread_create_listener:
+ pthread_cancel(ls.lsreader);
+ pthread_join(ls.lsreader, NULL);
+ fail_pthread_create_lsreader:
+ pthread_cancel(ls.lsupdate);
+ pthread_join(ls.lsupdate, NULL);
+ fail_pthread_create_lsupdate:
+ notifier_unreg(handle_event);
+ fail_notifier_reg:
+ return -1;
+}
+
+void link_state_stop(void)
+{
+ pthread_cancel(ls.listener);
+ pthread_cancel(ls.lsreader);
+ pthread_cancel(ls.lsupdate);
+
+ pthread_join(ls.listener, NULL);
+ pthread_join(ls.lsreader, NULL);
+ pthread_join(ls.lsupdate, NULL);
+
+ notifier_unreg(handle_event);
+}
+
+int link_state_init(struct ls_config * conf,
+ enum pol_pff * pff_type)
{
struct conn_info info;
+ assert(conf != NULL);
+ assert(pff_type != NULL);
+
memset(&info, 0, sizeof(info));
+ ls.addr = addr_auth_address();
+
strcpy(info.comp_name, LS_COMP);
strcpy(info.protocol, LS_PROTO);
info.pref_version = 1;
info.pref_syntax = PROTO_GPB;
- info.addr = ipcpi.dt_addr;
+ info.addr = ls.addr;
- switch (pr) {
- case ROUTING_LINK_STATE:
- log_dbg("Using link state routing policy.");
+ ls.conf = *conf;
+
+ switch (conf->pol) {
+ case LS_SIMPLE:
+ *pff_type = PFF_SIMPLE;
ls.routing_algo = ROUTING_SIMPLE;
+ log_dbg("Using Link State Routing policy.");
break;
- case ROUTING_LINK_STATE_LFA:
- log_dbg("Using Loop-Free Alternates policy.");
+ case LS_LFA:
ls.routing_algo = ROUTING_LFA;
+ *pff_type = PFF_ALTERNATE;
+ log_dbg("Using Loop-Free Alternates policy.");
break;
- case ROUTING_LINK_STATE_ECMP:
- log_dbg("Using Equal-Cost Multipath policy.");
+ case LS_ECMP:
ls.routing_algo = ROUTING_ECMP;
+ *pff_type = PFF_MULTIPATH;
+ log_dbg("Using Equal-Cost Multipath policy.");
break;
default:
goto fail_graph;
}
+ log_dbg("LS update interval: %ld seconds.", ls.conf.t_update);
+ log_dbg("LS link timeout : %ld seconds.", ls.conf.t_timeo);
+ log_dbg("LS recalc interval: %ld seconds.", ls.conf.t_recalc);
+
ls.graph = graph_create();
if (ls.graph == NULL)
goto fail_graph;
- if (notifier_reg(handle_event, NULL))
- goto fail_notifier_reg;
-
- if (pthread_rwlock_init(&ls.db_lock, NULL))
- goto fail_db_lock_init;
+ if (pthread_rwlock_init(&ls.lock, NULL)) {
+ log_err("Failed to init lock.");
+ goto fail_lock_init;
+ }
- if (pthread_mutex_init(&ls.routing_i_lock, NULL))
+ if (pthread_mutex_init(&ls.instances.mtx, NULL)) {
+ log_err("Failed to init instances mutex.");
goto fail_routing_i_lock_init;
+ }
- if (connmgr_comp_init(COMPID_MGMT, &info))
+ if (connmgr_comp_init(COMPID_MGMT, &info)) {
+ log_err("Failed to init connmgr.");
goto fail_connmgr_comp_init;
+ }
ls.mgmt_set = fset_create();
- if (ls.mgmt_set == NULL)
+ if (ls.mgmt_set == NULL) {
+ log_err("Failed to create fset.");
goto fail_fset_create;
+ }
- list_head_init(&ls.db);
- list_head_init(&ls.nbs);
- list_head_init(&ls.routing_instances);
-
- if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL))
- goto fail_pthread_create_lsupdate;
-
- if (pthread_create(&ls.lsreader, NULL, lsreader, NULL))
- goto fail_pthread_create_lsreader;
-
- if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL))
- goto fail_pthread_create_listener;
+ list_head_init(&ls.db.list);
+ list_head_init(&ls.nbs.list);
+ list_head_init(&ls.instances.list);
if (rib_reg(LSDB, &r_ops))
goto fail_rib_reg;
- ls.db_len = 0;
- ls.nbs_len = 0;
+ ls.db.len = 0;
+ ls.nbs.len = 0;
return 0;
fail_rib_reg:
- pthread_cancel(ls.listener);
- pthread_join(ls.listener, NULL);
- fail_pthread_create_listener:
- pthread_cancel(ls.lsreader);
- pthread_join(ls.lsreader, NULL);
- fail_pthread_create_lsreader:
- pthread_cancel(ls.lsupdate);
- pthread_join(ls.lsupdate, NULL);
- fail_pthread_create_lsupdate:
fset_destroy(ls.mgmt_set);
fail_fset_create:
connmgr_comp_fini(COMPID_MGMT);
fail_connmgr_comp_init:
- pthread_mutex_destroy(&ls.routing_i_lock);
+ pthread_mutex_destroy(&ls.instances.mtx);
fail_routing_i_lock_init:
- pthread_rwlock_destroy(&ls.db_lock);
- fail_db_lock_init:
- notifier_unreg(handle_event);
- fail_notifier_reg:
+ pthread_rwlock_destroy(&ls.lock);
+ fail_lock_init:
graph_destroy(ls.graph);
fail_graph:
return -1;
@@ -1015,33 +1123,23 @@ void link_state_fini(void)
rib_unreg(LSDB);
- notifier_unreg(handle_event);
-
- pthread_cancel(ls.listener);
- pthread_cancel(ls.lsreader);
- pthread_cancel(ls.lsupdate);
-
- pthread_join(ls.listener, NULL);
- pthread_join(ls.lsreader, NULL);
- pthread_join(ls.lsupdate, NULL);
-
fset_destroy(ls.mgmt_set);
connmgr_comp_fini(COMPID_MGMT);
graph_destroy(ls.graph);
- pthread_rwlock_wrlock(&ls.db_lock);
+ pthread_rwlock_wrlock(&ls.lock);
- list_for_each_safe(p, h, &ls.db) {
+ list_for_each_safe(p, h, &ls.db.list) {
struct adjacency * a = list_entry(p, struct adjacency, next);
list_del(&a->next);
free(a);
}
- pthread_rwlock_unlock(&ls.db_lock);
+ pthread_rwlock_unlock(&ls.lock);
- pthread_rwlock_destroy(&ls.db_lock);
+ pthread_rwlock_destroy(&ls.lock);
- pthread_mutex_destroy(&ls.routing_i_lock);
+ pthread_mutex_destroy(&ls.instances.mtx);
}
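
The largest structural change in link-state.c is splitting the policy into a passive init/fini pair (graph, locks, fset, RIB registration) and an active start/stop pair (notifier registration plus the lsupdate, lsreader and listener threads), with stop the exact reverse of start. A compact, runnable sketch of that thread lifecycle contract, with stand-in workers:

    #include <pthread.h>

    static void * worker(void * o) { (void) o; return NULL; }

    static pthread_t lsupdate;
    static pthread_t lsreader;

    /* start(): spawn the active parts; unwind in reverse on failure. */
    static int start_sketch(void)
    {
            if (pthread_create(&lsupdate, NULL, worker, NULL))
                    goto fail_lsupdate;
            if (pthread_create(&lsreader, NULL, worker, NULL))
                    goto fail_lsreader;

            return 0;

     fail_lsreader:
            pthread_cancel(lsupdate);
            pthread_join(lsupdate, NULL);
     fail_lsupdate:
            return -1;
    }

    /* stop(): cancel everything first, then join, as in the diff. */
    static void stop_sketch(void)
    {
            pthread_cancel(lsreader);
            pthread_cancel(lsupdate);

            pthread_join(lsreader, NULL);
            pthread_join(lsupdate, NULL);
    }

    int main(void)
    {
            if (start_sketch() < 0)
                    return 1;

            stop_sketch();

            return 0;
    }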
diff --git a/src/ipcpd/unicast/pol/link_state.h b/src/ipcpd/unicast/routing/link-state.h
index 9d4858e1..69eb6781 100644
--- a/src/ipcpd/unicast/pol/link_state.h
+++ b/src/ipcpd/unicast/routing/link-state.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Link state routing policy
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,16 +26,21 @@
#define LS_COMP "Management"
#define LS_PROTO "LSP"
-#include "pol-routing-ops.h"
+#include "ops.h"
-int link_state_init(enum pol_routing pr);
+int link_state_init(struct ls_config * ls,
+ enum pol_pff * pff_type);
void link_state_fini(void);
+int link_state_start(void);
+
+void link_state_stop(void);
+
struct routing_i * link_state_routing_i_create(struct pff * pff);
void link_state_routing_i_destroy(struct routing_i * instance);
-struct pol_routing_ops link_state_ops;
+extern struct routing_ops link_state_ops;
#endif /* OUROBOROS_IPCPD_UNICAST_POL_LINK_STATE_H */
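
The header now describes a four-phase lifecycle: link_state_init() receives the ls_config carrying the timers logged above (t_update, t_timeo, t_recalc) and reports the forwarding-table flavour it wants through the pff_type out-parameter, while start/stop bracket the running phase. A hedged sketch of the caller ordering these prototypes imply (variable names are ours):

    struct ls_config ls_conf;   /* filled in from the IPCP configuration */
    enum pol_pff     pff_type;

    if (link_state_init(&ls_conf, &pff_type) < 0)
            return -1;

    if (link_state_start() < 0) {
            link_state_fini();
            return -1;
    }

    /* ... component is live; routing instances can be created ... */

    link_state_stop();
    link_state_fini();
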
diff --git a/src/ipcpd/unicast/pol-routing-ops.h b/src/ipcpd/unicast/routing/ops.h
index 3fa1d573..4bf75c80 100644
--- a/src/ipcpd/unicast/pol-routing-ops.h
+++ b/src/ipcpd/unicast/routing/ops.h
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Routing policy ops
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,19 +20,24 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#ifndef OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H
-#define OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H
+#ifndef OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H
#include "pff.h"
-struct pol_routing_ops {
- int (* init)(enum pol_routing pr);
+struct routing_ops {
+ int (* init)(void * conf,
+ enum pol_pff * pff_type);
void (* fini)(void);
+ int (* start)(void);
+
+ void (* stop)(void);
+
struct routing_i * (* routing_i_create)(struct pff * pff);
void (* routing_i_destroy)(struct routing_i * instance);
};
-#endif /* OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H */
+#endif /* OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H */
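
The ops table is the seam between the generic routing component and the policies behind it: init now takes an opaque config pointer plus the pff_type out-parameter, and the new start/stop entries mirror the lifecycle split above. A minimal sketch of how a dispatcher might drive the table (the wrapper names are assumptions; link_state_ops is the only table in this tree):

    static struct routing_ops * ops = &link_state_ops;

    int routing_init(void *         conf,
                     enum pol_pff * pff_type)
    {
            return ops->init(conf, pff_type);
    }

    int routing_start(void)
    {
            return ops->start();
    }

    void routing_stop(void)
    {
            ops->stop();
    }

    void routing_fini(void)
    {
            ops->fini();
    }
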
diff --git a/src/ipcpd/unicast/routing/pol.h b/src/ipcpd/unicast/routing/pol.h
new file mode 100644
index 00000000..b6a6f150
--- /dev/null
+++ b/src/ipcpd/unicast/routing/pol.h
@@ -0,0 +1,23 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Routing policies
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "link-state.h"
diff --git a/src/ipcpd/unicast/pol/tests/CMakeLists.txt b/src/ipcpd/unicast/routing/tests/CMakeLists.txt
index 34d80e8d..9d24bf03 100644
--- a/src/ipcpd/unicast/pol/tests/CMakeLists.txt
+++ b/src/ipcpd/unicast/routing/tests/CMakeLists.txt
@@ -18,7 +18,6 @@ get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)
create_test_sourcelist(${PARENT_DIR}_tests test_suite.c
# Add new tests here
graph_test.c
- pft_test.c
)
add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests})
@@ -27,7 +26,11 @@ target_link_libraries(${PARENT_DIR}_test ouroboros-common)
add_dependencies(check ${PARENT_DIR}_test)
set(tests_to_run ${${PARENT_DIR}_tests})
-remove(tests_to_run test_suite.c)
+if(CMAKE_VERSION VERSION_LESS "3.29.0")
+ remove(tests_to_run test_suite.c)
+else ()
+ list(POP_FRONT tests_to_run)
+endif()
foreach (test ${tests_to_run})
get_filename_component(test_name ${test} NAME_WE)
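
The version guard exists because the long-deprecated remove() command stops being usable as of CMake 3.29, the boundary this hunk encodes. Both branches strip the generated driver from the list of tests to run: create_test_sourcelist() puts the driver source (test_suite.c here) at the head of the generated list, so list(POP_FRONT tests_to_run), available since CMake 3.15, drops the same entry that remove() matched by name.
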
diff --git a/src/ipcpd/unicast/pol/tests/graph_test.c b/src/ipcpd/unicast/routing/tests/graph_test.c
index ea4c0e59..d805640c 100644
--- a/src/ipcpd/unicast/pol/tests/graph_test.c
+++ b/src/ipcpd/unicast/routing/tests/graph_test.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Test of the graph structure
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/src/ipcpd/unicast/tests/dht_test.c b/src/ipcpd/unicast/tests/dht_test.c
deleted file mode 100644
index 21ecd564..00000000
--- a/src/ipcpd/unicast/tests/dht_test.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2020
- *
- * Unit tests of the DHT
- *
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#define __DHT_TEST__
-
-#include "dht.c"
-
-#include <pthread.h>
-#include <time.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-#define KEY_LEN 32
-
-#define EXP 86400
-#define CONTACTS 1000
-
-int dht_test(int argc,
- char ** argv)
-{
- struct dht * dht;
- uint64_t addr = 0x0D1F;
- uint8_t key[KEY_LEN];
- size_t i;
-
- (void) argc;
- (void) argv;
-
- dht = dht_create(addr);
- if (dht == NULL) {
- printf("Failed to create dht.\n");
- return -1;
- }
-
- dht_destroy(dht);
-
- dht = dht_create(addr);
- if (dht == NULL) {
- printf("Failed to re-create dht.\n");
- return -1;
- }
-
- if (dht_bootstrap(dht, KEY_LEN, EXP)) {
- printf("Failed to bootstrap dht.\n");
- dht_destroy(dht);
- return -1;
- }
-
- dht_destroy(dht);
-
- dht = dht_create(addr);
- if (dht == NULL) {
- printf("Failed to re-create dht.\n");
- return -1;
- }
-
- if (dht_bootstrap(dht, KEY_LEN, EXP)) {
- printf("Failed to bootstrap dht.\n");
- dht_destroy(dht);
- return -1;
- }
-
- for (i = 0; i < CONTACTS; ++i) {
- uint64_t addr;
- random_buffer(&addr, sizeof(addr));
- random_buffer(key, KEY_LEN);
- pthread_rwlock_wrlock(&dht->lock);
- if (dht_update_bucket(dht, key, addr)) {
- pthread_rwlock_unlock(&dht->lock);
- printf("Failed to update bucket.\n");
- dht_destroy(dht);
- return -1;
- }
- pthread_rwlock_unlock(&dht->lock);
- }
-
- dht_destroy(dht);
-
- return 0;
-}