Diffstat (limited to 'src/ipcpd')
 src/ipcpd/CMakeLists.txt | 10
 src/ipcpd/broadcast/CMakeLists.txt | 5
 src/ipcpd/broadcast/connmgr.c | 2
 src/ipcpd/broadcast/dt.c | 23
 src/ipcpd/broadcast/dt.h | 2
 src/ipcpd/broadcast/main.c | 172
 src/ipcpd/common/comp.h | 4
 src/ipcpd/common/connmgr.c | 202
 src/ipcpd/common/connmgr.h | 4
 src/ipcpd/common/enroll.c | 392
 src/ipcpd/common/enroll.h | 10
 src/ipcpd/config.h.in | 22
 src/ipcpd/eth/CMakeLists.txt | 10
 src/ipcpd/eth/dix.c | 2
 src/ipcpd/eth/eth.c | 777
 src/ipcpd/eth/llc.c | 2
 src/ipcpd/ipcp.c | 1198
 src/ipcpd/ipcp.h | 99
 src/ipcpd/local/CMakeLists.txt | 2
 src/ipcpd/local/main.c | 219
 src/ipcpd/shim-data.c | 78
 src/ipcpd/shim-data.h | 21
 src/ipcpd/udp/CMakeLists.txt | 34
 src/ipcpd/udp/udp.c (renamed from src/ipcpd/udp/main.c) | 681
 src/ipcpd/udp/udp4.c | 42
 src/ipcpd/udp/udp6.c | 42
 src/ipcpd/unicast/CMakeLists.txt | 45
 src/ipcpd/unicast/addr-auth.c (renamed from src/ipcpd/unicast/addr_auth.c) | 9
 src/ipcpd/unicast/addr-auth.h (renamed from src/ipcpd/unicast/addr_auth.h) | 10
 src/ipcpd/unicast/addr-auth/flat.c (renamed from src/ipcpd/unicast/pol/flat.c) | 42
 src/ipcpd/unicast/addr-auth/flat.h (renamed from src/ipcpd/unicast/pol/flat.h) | 6
 src/ipcpd/unicast/addr-auth/ops.h (renamed from src/ipcpd/unicast/pol-addr-auth-ops.h) | 10
 src/ipcpd/unicast/addr-auth/pol.h (renamed from src/ipcpd/broadcast/enroll.c) | 7
 src/ipcpd/unicast/ca.c | 9
 src/ipcpd/unicast/ca.h | 2
 src/ipcpd/unicast/ca/mb-ecn.c (renamed from src/ipcpd/unicast/pol/ca-mb-ecn.c) | 10
 src/ipcpd/unicast/ca/mb-ecn.h (renamed from src/ipcpd/unicast/pol/ca-mb-ecn.h) | 6
 src/ipcpd/unicast/ca/nop.c (renamed from src/ipcpd/unicast/pol/ca-nop.c) | 6
 src/ipcpd/unicast/ca/nop.h (renamed from src/ipcpd/unicast/pol/ca-nop.h) | 6
 src/ipcpd/unicast/ca/ops.h (renamed from src/ipcpd/unicast/pol-ca-ops.h) | 10
 src/ipcpd/unicast/ca/pol.h | 24
 src/ipcpd/unicast/connmgr.c | 6
 src/ipcpd/unicast/dht.c | 2842
 src/ipcpd/unicast/dir.c | 74
 src/ipcpd/unicast/dir.h | 11
 src/ipcpd/unicast/dir/dht.c | 4052
 src/ipcpd/unicast/dir/dht.h (renamed from src/ipcpd/unicast/dht.h) | 31
 src/ipcpd/unicast/dir/dht.proto | 58
 src/ipcpd/unicast/dir/ops.h | 42
 src/ipcpd/unicast/dir/pol.h | 23
 src/ipcpd/unicast/dir/tests/CMakeLists.txt | 40
 src/ipcpd/unicast/dir/tests/dht_test.c | 1925
 src/ipcpd/unicast/dt.c | 190
 src/ipcpd/unicast/dt.h | 14
 src/ipcpd/unicast/enroll.c | 3
 src/ipcpd/unicast/fa.c | 437
 src/ipcpd/unicast/fa.h | 18
 src/ipcpd/unicast/kademlia.proto | 45
 src/ipcpd/unicast/main.c | 217
 src/ipcpd/unicast/pff.c | 15
 src/ipcpd/unicast/pff.h | 2
 src/ipcpd/unicast/pff/alternate.c (renamed from src/ipcpd/unicast/pol/alternate_pff.c) | 6
 src/ipcpd/unicast/pff/alternate.h (renamed from src/ipcpd/unicast/pol/alternate_pff.h) | 6
 src/ipcpd/unicast/pff/multipath.c (renamed from src/ipcpd/unicast/pol/multipath_pff.c) | 34
 src/ipcpd/unicast/pff/multipath.h (renamed from src/ipcpd/unicast/pol/multipath_pff.h) | 6
 src/ipcpd/unicast/pff/ops.h (renamed from src/ipcpd/unicast/pol-pff-ops.h) | 10
 src/ipcpd/unicast/pff/pft.c (renamed from src/ipcpd/unicast/pol/pft.c) | 16
 src/ipcpd/unicast/pff/pft.h (renamed from src/ipcpd/unicast/pol/pft.h) | 2
 src/ipcpd/unicast/pff/pol.h | 25
 src/ipcpd/unicast/pff/simple.c (renamed from src/ipcpd/unicast/pol/simple_pff.c) | 6
 src/ipcpd/unicast/pff/simple.h (renamed from src/ipcpd/unicast/pol/simple_pff.h) | 6
 src/ipcpd/unicast/pff/tests/CMakeLists.txt (renamed from src/ipcpd/unicast/tests/CMakeLists.txt) | 13
 src/ipcpd/unicast/pff/tests/pft_test.c (renamed from src/ipcpd/unicast/pol/tests/pft_test.c) | 2
 src/ipcpd/unicast/psched.c | 9
 src/ipcpd/unicast/psched.h | 8
 src/ipcpd/unicast/routing.c | 36
 src/ipcpd/unicast/routing.h | 9
 src/ipcpd/unicast/routing/graph.c (renamed from src/ipcpd/unicast/pol/graph.c) | 158
 src/ipcpd/unicast/routing/graph.h (renamed from src/ipcpd/unicast/pol/graph.h) | 2
 src/ipcpd/unicast/routing/link-state.c (renamed from src/ipcpd/unicast/pol/link_state.c) | 546
 src/ipcpd/unicast/routing/link-state.h (renamed from src/ipcpd/unicast/pol/link_state.h) | 13
 src/ipcpd/unicast/routing/ops.h (renamed from src/ipcpd/unicast/pol-routing-ops.h) | 17
 src/ipcpd/unicast/routing/pol.h | 23
 src/ipcpd/unicast/routing/tests/CMakeLists.txt (renamed from src/ipcpd/unicast/pol/tests/CMakeLists.txt) | 7
 src/ipcpd/unicast/routing/tests/graph_test.c (renamed from src/ipcpd/unicast/pol/tests/graph_test.c) | 2
 src/ipcpd/unicast/tests/dht_test.c | 99
86 files changed, 9528 insertions, 5835 deletions
diff --git a/src/ipcpd/CMakeLists.txt b/src/ipcpd/CMakeLists.txt
index 1ce1bc0d..b3b049e3 100644
--- a/src/ipcpd/CMakeLists.txt
+++ b/src/ipcpd/CMakeLists.txt
@@ -1,3 +1,7 @@
+set(CONNMGR_RCV_TIMEOUT 1000 CACHE STRING
+  "Timeout for the connection manager to wait for OCEP info (ms).")
+set(IPCP_DEBUG_LOCAL FALSE CACHE BOOL
+  "Use PID as address for local debugging")
 set(IPCP_QOS_CUBE_BE_PRIO 50 CACHE STRING
   "Priority for best effort QoS cube (0-99)")
 set(IPCP_QOS_CUBE_VIDEO_PRIO 90 CACHE STRING
@@ -12,8 +16,6 @@ set(IPCP_SCHED_THR_MUL 2 CACHE STRING
   "Number of scheduler threads per QoS cube")
 set(DISABLE_CORE_LOCK TRUE CACHE BOOL
   "Disable locking performance threads to a core")
-set(IPCP_CONN_WAIT_DIR TRUE CACHE BOOL
-  "Check the running state of the directory when adding a dt connection")
 set(DHT_ENROLL_SLACK 50 CACHE STRING
   "DHT enrollment waiting time (0-999, ms)")
 if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
@@ -44,6 +46,10 @@ set(IPCP_SOURCES
   ${CMAKE_CURRENT_SOURCE_DIR}/shim-data.c
   )

+set (COMMON_SOURCES
+   ${CMAKE_CURRENT_SOURCE_DIR}/common/enroll.c
+  )
+
 add_subdirectory(local)
 add_subdirectory(eth)
 add_subdirectory(udp)
diff --git a/src/ipcpd/broadcast/CMakeLists.txt b/src/ipcpd/broadcast/CMakeLists.txt
index afcc8696..d85f335e 100644
--- a/src/ipcpd/broadcast/CMakeLists.txt
+++ b/src/ipcpd/broadcast/CMakeLists.txt
@@ -13,16 +13,17 @@ include_directories(${CMAKE_SOURCE_DIR}/include)
 include_directories(${CMAKE_BINARY_DIR}/include)

 set(IPCP_BROADCAST_TARGET ipcpd-broadcast CACHE INTERNAL "")
+set(IPCP_BROADCAST_MPL 60 CACHE STRING
+    "Default maximum packet lifetime for the broadcast IPCP, in seconds")

 set(SOURCE_FILES
   # Add source files here
   connmgr.c
   dt.c
-  enroll.c
   main.c
   )

-add_executable(ipcpd-broadcast ${SOURCE_FILES} ${IPCP_SOURCES}
+add_executable(ipcpd-broadcast ${SOURCE_FILES} ${IPCP_SOURCES} ${COMMON_SOURCES}
   ${LAYER_CONFIG_PROTO_SRCS})

 target_link_libraries(ipcpd-broadcast LINK_PUBLIC ouroboros-dev)
diff --git a/src/ipcpd/broadcast/connmgr.c b/src/ipcpd/broadcast/connmgr.c
index b65f48b0..f297175d 100644
--- a/src/ipcpd/broadcast/connmgr.c
+++ b/src/ipcpd/broadcast/connmgr.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Handles connections between components
  *
diff --git a/src/ipcpd/broadcast/dt.c b/src/ipcpd/broadcast/dt.c
index 00476027..938c9085 100644
--- a/src/ipcpd/broadcast/dt.c
+++ b/src/ipcpd/broadcast/dt.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Forward loop for broadcast
  *
@@ -78,15 +78,16 @@ static int dt_add_nb(int fd)
         list_for_each(p, &fwd.nbs) {
                 struct nb * el = list_entry(p, struct nb, next);
                 if (el->fd == fd) {
-                        log_dbg("Already know neighbor.");
                         pthread_rwlock_unlock(&fwd.nbs_lock);
-                        return -EPERM;
+                        log_warn("Already know neighbor on fd %d.", fd);
+                        return 0;
                 }
         }

         nb = malloc(sizeof(*nb));
         if (nb == NULL) {
                 pthread_rwlock_unlock(&fwd.nbs_lock);
+                log_err("Failed to malloc neighbor struct.");
                 return -ENOMEM;
         }

@@ -96,10 +97,10 @@ static int dt_add_nb(int fd)

         ++fwd.nbs_len;

-        log_dbg("Neighbor %d added.", fd);
-
         pthread_rwlock_unlock(&fwd.nbs_lock);

+        log_dbg("Neighbor %d added.", fd);
+
         return 0;
 }

@@ -124,6 +125,8 @@ static int dt_del_nb(int fd)

         pthread_rwlock_unlock(&fwd.nbs_lock);

+        log_err("Neighbor not found on fd %d.", fd);
+
         return -EPERM;
 }

@@ -191,7 +194,7 @@ static void * dt_reader(void * o)
         while (true) {
                 ret = fevent(fwd.set, fq, NULL);
                 if (ret < 0) {
-                        log_warn("Event error: %d.", ret);
+                        log_warn("Event warning: %d.", ret);
                         continue;
                 }

@@ -226,13 +229,13 @@ static void handle_event(void *       self,
         switch (event) {
         case NOTIFY_DT_CONN_ADD:
-                if (dt_add_nb(c->flow_info.fd))
-                        log_dbg("Failed to add neighbor.");
+                if (dt_add_nb(c->flow_info.fd) < 0)
+                        log_err("Failed to add neighbor.");
                 fset_add(fwd.set, c->flow_info.fd);
                 break;
         case NOTIFY_DT_CONN_DEL:
-                if (dt_del_nb(c->flow_info.fd))
-                        log_dbg("Failed to delete neighbor.");
+                if (dt_del_nb(c->flow_info.fd) < 0)
+                        log_err("Failed to delete neighbor.");
                 fset_del(fwd.set, c->flow_info.fd);
                 break;
         default:
diff --git a/src/ipcpd/broadcast/dt.h b/src/ipcpd/broadcast/dt.h
index 4331bd3e..8d3b83f8 100644
--- a/src/ipcpd/broadcast/dt.h
+++ b/src/ipcpd/broadcast/dt.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Forward loop for broadcast
  *
diff --git a/src/ipcpd/broadcast/main.c b/src/ipcpd/broadcast/main.c
index 522d1391..151b38c8 100644
--- a/src/ipcpd/broadcast/main.c
+++ b/src/ipcpd/broadcast/main.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Broadcast IPC Process
  *
@@ -31,14 +31,15 @@
 #define OUROBOROS_PREFIX "broadcast-ipcp"
 #define THIS_TYPE IPCP_BROADCAST

-#include <ouroboros/errno.h>
-#include <ouroboros/hash.h>
 #include <ouroboros/dev.h>
+#include <ouroboros/errno.h>
 #include <ouroboros/ipcp-dev.h>
 #include <ouroboros/logs.h>
 #include <ouroboros/notifier.h>
+#include <ouroboros/np1_flow.h>
+#include <ouroboros/random.h>
 #include <ouroboros/rib.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>

 #include "common/connmgr.h"
 #include "common/enroll.h"
@@ -52,54 +53,33 @@
 #include <assert.h>
 #include <inttypes.h>

-struct ipcp ipcpi;
-
-static int initialize_components(const struct ipcp_config * conf)
+static int initialize_components(void)
 {
-        ipcpi.layer_name = strdup(conf->layer_info.layer_name);
-        if (ipcpi.layer_name == NULL) {
-                log_err("Failed to set layer name.");
-                goto fail_layer_name;
-        }
-
-        ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo;
-
         assert(ipcp_dir_hash_len() != 0);

-        if (dt_init()) {
+        if (dt_init() < 0) {
                 log_err("Failed to initialize forwarding component.");
-                goto fail_dt;
+                return -1;
         }

         ipcp_set_state(IPCP_INIT);

         return 0;
-
- fail_dt:
-        free(ipcpi.layer_name);
- fail_layer_name:
-        return -1;
 }

 static void finalize_components(void)
 {
         dt_fini();
-
-        free(ipcpi.layer_name);
 }

 static int start_components(void)
 {
-        assert(ipcp_get_state() == IPCP_INIT);
-
-        ipcp_set_state(IPCP_OPERATIONAL);
-
-        if (enroll_start()) {
+        if (enroll_start() < 0) {
                 log_err("Failed to start enrollment.");
                 goto fail_enroll_start;
         }

-        if (connmgr_start()) {
+        if (connmgr_start() < 0) {
                 log_err("Failed to start AP connection manager.");
                 goto fail_connmgr_start;
         }
@@ -115,52 +95,56 @@ static int start_components(void)

 static void stop_components(void)
 {
-        assert(ipcp_get_state() == IPCP_OPERATIONAL ||
-               ipcp_get_state() == IPCP_SHUTDOWN);
-
         connmgr_stop();

         enroll_stop();
-
-        ipcp_set_state(IPCP_INIT);
 }

 static int broadcast_ipcp_enroll(const char *        dst,
                                  struct layer_info * info)
 {
+        struct ipcp_config * conf;
         struct conn conn;
+        uint8_t     id[ENROLL_ID_LEN];

-        if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn)) {
-                log_err("Failed to get connection.");
-                goto fail_er_flow;
+        if (random_buffer(id, ENROLL_ID_LEN) < 0) {
+                log_err("Failed to generate enrollment ID.");
+                goto fail_id;
+        }
+
+        log_info_id(id, "Requesting enrollment.");
+
+        if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn) < 0) {
+                log_err_id(id, "Failed to get connection.");
+                goto fail_id;
         }

         /* Get boot state from peer. */
-        if (enroll_boot(&conn)) {
-                log_err("Failed to get boot information.");
+        if (enroll_boot(&conn, id) < 0) {
+                log_err_id(id, "Failed to get boot information.");
                 goto fail_enroll_boot;
         }

-        if (initialize_components(enroll_get_conf())) {
-                log_err("Failed to initialize IPCP components.");
+        conf = enroll_get_conf();
+        *info = conf->layer_info;
+
+        if (initialize_components() < 0) {
+                log_err_id(id, "Failed to initialize components.");
                 goto fail_enroll_boot;
         }

-        if (start_components()) {
-                log_err("Failed to start components.");
+        if (start_components() < 0) {
+                log_err_id(id, "Failed to start components.");
                 goto fail_start_comp;
         }

-        if (enroll_done(&conn, 0))
-                log_warn("Failed to confirm enrollment with peer.");
-
-        if (connmgr_dealloc(COMPID_ENROLL, &conn))
-                log_warn("Failed to deallocate enrollment flow.");
+        if (enroll_ack(&conn, id, 0) < 0)
+                log_err_id(id, "Failed to confirm enrollment.");

-        log_info("Enrolled with %s.", dst);
+        if (connmgr_dealloc(COMPID_ENROLL, &conn) < 0)
+                log_warn_id(id, "Failed to dealloc enrollment flow.");

-        info->dir_hash_algo = ipcpi.dir_hash_algo;
-        strcpy(info->layer_name, ipcpi.layer_name);
+        log_info_id(id, "Enrolled with %s.", dst);

         return 0;

@@ -168,18 +152,19 @@ static int broadcast_ipcp_enroll(const char *        dst,
         finalize_components();
  fail_enroll_boot:
         connmgr_dealloc(COMPID_ENROLL, &conn);
- fail_er_flow:
+ fail_id:
         return -1;
 }

-static int broadcast_ipcp_bootstrap(const struct ipcp_config * conf)
+static int broadcast_ipcp_bootstrap(struct ipcp_config * conf)
 {
         assert(conf);
         assert(conf->type == THIS_TYPE);

+        assert(conf->layer_info.dir_hash_algo == DIR_HASH_SHA3_256);
+
         enroll_bootstrap(conf);

-        if (initialize_components(conf)) {
+        if (initialize_components()) {
                 log_err("Failed to init IPCP components.");
                 goto fail_init;
         }
@@ -189,8 +174,6 @@ static int broadcast_ipcp_bootstrap(const struct ipcp_config * conf)
                 goto fail_start;
         }

-        log_dbg("Bootstrapped in layer %s.", conf->layer_info.layer_name);
-
         return 0;

 fail_start:
@@ -203,40 +186,63 @@ static int name_check(const uint8_t * dst)
 {
         uint8_t * buf;
         size_t    len;
-        int       ret;
+        int       err;
+        char      layer[LAYER_NAME_SIZE + 1];

-        len = hash_len(ipcpi.dir_hash_algo);
+        len = ipcp_dir_hash_len();

         buf =  malloc(len);
-        if (buf == NULL)
-                return -ENOMEM;
+        if (buf == NULL) {
+                log_err("Failed to malloc buffer.");
+                err = -ENOMEM;
+                goto fail_buf;
+        }
+
+        err = ipcp_get_layer_name(layer);
+        if (err < 0) {
+                log_err("Failed to get layer name.");
+                goto fail_layer;
+        }

-        str_hash(ipcpi.dir_hash_algo, buf, ipcpi.layer_name);
+        str_hash(HASH_SHA3_256, buf, layer);

-        ret = memcmp(buf, dst, len);
+        if (memcmp(buf, dst, len) < 0) {
+                log_err("Hash mismatch for layer %s.", layer);
+                err = -ENAME;
+                goto fail_layer;
+        }

         free(buf);

-        return ret;
+        return 0;
+
+ fail_layer:
+        free(buf);
+ fail_buf:
+        return err;
 }

 static int broadcast_ipcp_join(int             fd,
-                               const uint8_t * dst,
-                               qosspec_t       qs)
+                               const uint8_t * dst)
 {
+        int         err;
         struct conn conn;
-
-        (void) qs;
+        time_t      mpl = IPCP_BROADCAST_MPL;
+        buffer_t    data = BUF_INIT;

         memset(&conn, 0, sizeof(conn));

         conn.flow_info.fd = fd;
+        conn.flow_info.qs = qos_np1;

-        if (name_check(dst) != 0)
-                return -1;
+        err = name_check(dst);
+        if (err < 0) {
+                log_err("Failed to check name.");
+                return err;
+        }

         notifier_event(NOTIFY_DT_CONN_ADD, &conn);

-        ipcp_flow_alloc_reply(fd, 0, NULL, 0);
+        ipcp_flow_alloc_reply(fd, 0, mpl, &data);

         return 0;
 }
@@ -251,12 +257,11 @@ int broadcast_ipcp_dealloc(int fd)

         notifier_event(NOTIFY_DT_CONN_DEL, &conn);

-        flow_dealloc(fd);
+        ipcp_flow_dealloc(fd);

         return 0;
 }

-
 static struct ipcp_ops broadcast_ops = {
         .ipcp_bootstrap       = broadcast_ipcp_bootstrap,
         .ipcp_enroll          = broadcast_ipcp_enroll,
@@ -275,7 +280,7 @@ int main(int    argc,
          char * argv[])
 {
         if (ipcp_init(argc, argv, &broadcast_ops, THIS_TYPE) < 0) {
-                log_err("Failed to init IPCP.");
+                log_err("Failed to initialize IPCP.");
                 goto fail_init;
         }

@@ -294,24 +299,20 @@ int main(int    argc,
                 goto fail_enroll_init;
         }

-        if (ipcp_boot() < 0) {
+        if (ipcp_start() < 0) {
                 log_err("Failed to boot IPCP.");
-                goto fail_boot;
-        }
-
-        if (ipcp_create_r(0)) {
-                log_err("Failed to notify IRMd we are initialized.");
-                ipcp_set_state(IPCP_NULL);
-                goto fail_create_r;
+                goto fail_start;
         }

-        ipcp_shutdown();
+        ipcp_sigwait();

         if (ipcp_get_state() == IPCP_SHUTDOWN) {
                 stop_components();
                 finalize_components();
         }

+        ipcp_stop();
+
         enroll_fini();

         connmgr_fini();
@@ -322,9 +323,7 @@ int main(int    argc,

         exit(EXIT_SUCCESS);

- fail_create_r:
-        ipcp_shutdown();
- fail_boot:
+ fail_start:
         enroll_fini();
 fail_enroll_init:
         connmgr_fini();
@@ -333,6 +332,5 @@ int main(int    argc,
 fail_notifier_init:
         ipcp_fini();
 fail_init:
-        ipcp_create_r(-1);
         exit(EXIT_FAILURE);
 }
diff --git a/src/ipcpd/common/comp.h b/src/ipcpd/common/comp.h
index 95e59b24..f3790d9c 100644
--- a/src/ipcpd/common/comp.h
+++ b/src/ipcpd/common/comp.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Components for the unicast/broadcast IPC process
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_COMMON_COMP_H
 #define OUROBOROS_IPCPD_COMMON_COMP_H

-#include <ouroboros/cacep.h>
+#include <ouroboros/cep.h>

 #define DST_MAX_STRLEN 64

diff --git a/src/ipcpd/common/connmgr.c b/src/ipcpd/common/connmgr.c
index 53a66992..1bb8c932 100644
--- a/src/ipcpd/common/connmgr.c
+++ b/src/ipcpd/common/connmgr.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Handles connections between components
  *
@@ -22,9 +22,10 @@

 #define OUROBOROS_PREFIX "connection-manager"

+#include <ouroboros/cep.h>
 #include <ouroboros/dev.h>
-#include <ouroboros/cacep.h>
 #include <ouroboros/errno.h>
+#include <ouroboros/fccntl.h>
 #include <ouroboros/list.h>
 #include <ouroboros/logs.h>
 #include <ouroboros/notifier.h>
@@ -33,15 +34,9 @@
 #include "connmgr.h"
 #include "ipcp.h"

-#include <string.h>
-#include <stdlib.h>
 #include <assert.h>
-
-enum connmgr_state {
-        CONNMGR_NULL = 0,
-        CONNMGR_INIT,
-        CONNMGR_RUNNING
-};
+#include <stdlib.h>
+#include <string.h>

 struct conn_el {
         struct list_head next;
@@ -60,7 +55,6 @@ struct comp {

 struct {
         struct comp        comps[COMPID_MAX];
-        enum connmgr_state state;
         pthread_t          acceptor;
 } connmgr;

@@ -128,10 +122,12 @@ static int add_comp_conn(enum comp_id       id,

 static void * flow_acceptor(void * o)
 {
-        int               fd;
-        qosspec_t         qs;
-        struct conn_info  rcv_info;
-        struct conn_info  fail_info;
+        int              fd;
+        qosspec_t        qs;
+        struct conn_info rcv_info;
+        struct conn_info fail_info;
+        struct timespec  timeo = TIMESPEC_INIT_MS(CONNMGR_RCV_TIMEOUT);
+        int              err;

         (void) o;

@@ -143,38 +139,50 @@ static void * flow_acceptor(void * o)
                 fd = flow_accept(&qs, NULL);
                 if (fd < 0) {
                         if (fd != -EIRMD)
-                                log_warn("Flow accept failed: %d", fd);
+                                log_err("Flow accept failed: %d", fd);
                         continue;
                 }

-                if (cacep_rcv(fd, &rcv_info)) {
-                        log_dbg("Error establishing application connection.");
+                log_info("Handling incoming flow %d.",fd);
+
+                fccntl(fd, FLOWSRCVTIMEO, &timeo);
+
+                err = cep_rcv(fd, &rcv_info);
+                if (err < 0) {
+                        log_err("Error receiving OCEP info: %d.", err);
                         flow_dealloc(fd);
                         continue;
                 }

+                log_info("Request to connect to %s.", rcv_info.comp_name);
+
                 id = get_id_by_name(rcv_info.comp_name);
                 if (id < 0) {
-                        log_dbg("Connection request for unknown component %s.",
+                        log_err("Connection request for unknown component %s.",
                                 rcv_info.comp_name);
-                        cacep_snd(fd, &fail_info);
+                        cep_snd(fd, &fail_info);
                         flow_dealloc(fd);
                         continue;
                 }

-                assert(id < COMPID_MAX);
-
-                if (cacep_snd(fd, &connmgr.comps[id].info)) {
-                        log_dbg("Failed to respond to request.");
+                err = cep_snd(fd, &connmgr.comps[id].info);
+                if (err < 0) {
+                        log_err("Failed responding to OCEP request: %d.", err);
                         flow_dealloc(fd);
                         continue;
                 }

-                if (add_comp_conn(id, fd, qs, &rcv_info)) {
-                        log_dbg("Failed to add new connection.");
+                fccntl(fd, FLOWSRCVTIMEO, NULL);
+
+                err = add_comp_conn(id, fd, qs, &rcv_info);
+                if (err < 0) {
+                        log_err("Failed to add new connection: %d.", err);
                         flow_dealloc(fd);
                         continue;
                 }
+
+                log_info("Finished handling incoming flow %d for %s.",
+                         fd, rcv_info.comp_name);
         }

         return (void *) 0;
@@ -213,10 +221,10 @@ static void handle_event(void *       self,

 int connmgr_init(void)
 {
-        connmgr.state = CONNMGR_INIT;
-
-        if (notifier_reg(handle_event, NULL))
+        if (notifier_reg(handle_event, NULL)) {
+                log_err("Failed to register notifier.");
                 return -1;
+        }

         return 0;
 }
@@ -225,29 +233,26 @@ void connmgr_fini(void)
 {
         int i;

-        notifier_unreg(handle_event);
-
-        if (connmgr.state == CONNMGR_RUNNING)
-                pthread_join(connmgr.acceptor, NULL);
-
         for (i = 0; i < COMPID_MAX; ++i)
                 connmgr_comp_fini(i);
+
+        notifier_unreg(handle_event);
 }

 int connmgr_start(void)
 {
-        if (pthread_create(&connmgr.acceptor, NULL, flow_acceptor, NULL))
+        if (pthread_create(&connmgr.acceptor, NULL, flow_acceptor, NULL)) {
+                log_err("Failed to create pthread: %s.", strerror(errno));
                 return -1;
-
-        connmgr.state = CONNMGR_RUNNING;
+        }

         return 0;
 }

 void connmgr_stop(void)
 {
-        if (connmgr.state == CONNMGR_RUNNING)
-                pthread_cancel(connmgr.acceptor);
+        pthread_cancel(connmgr.acceptor);
+        pthread_join(connmgr.acceptor, NULL);
 }

 int connmgr_comp_init(enum comp_id             id,
@@ -259,12 +264,14 @@ int connmgr_comp_init(enum comp_id             id,

         comp = connmgr.comps + id;

-        if (pthread_mutex_init(&comp->lock, NULL))
-                return -1;
+        if (pthread_mutex_init(&comp->lock, NULL)) {
+                log_err("Failed to initialize mutex: %s.", strerror(errno));
+                goto fail_mutex;
+        }

         if (pthread_cond_init(&comp->cond, NULL)) {
-                pthread_mutex_destroy(&comp->lock);
-                return -1;
+                log_err("Failed to initialize condvar: %s.", strerror(errno));
+                goto fail_cond;
         }

         list_head_init(&comp->conns);
@@ -273,6 +280,11 @@ int connmgr_comp_init(enum comp_id             id,
         memcpy(&connmgr.comps[id].info, info, sizeof(connmgr.comps[id].info));

         return 0;
+
+ fail_cond:
+        pthread_mutex_destroy(&comp->lock);
+ fail_mutex:
+        return -1;
 }

 void connmgr_comp_fini(enum comp_id id)
@@ -316,26 +328,32 @@ int connmgr_ipcp_connect(const char * dst,
 {
         struct conn_el * ce;
         int              id;
+        int              ret;

         assert(dst);
         assert(component);

         ce = malloc(sizeof(*ce));
         if (ce == NULL) {
-                log_dbg("Out of memory.");
-                return -1;
+                log_err("Out of memory.");
+                goto fail_malloc;
         }

         id = get_id_by_name(component);
         if (id < 0) {
-                log_dbg("No such component: %s", component);
-                free(ce);
-                return -1;
+                log_err("No such component: %s", component);
+                goto fail_id;
         }

-        if (connmgr_alloc(id, dst, &qs, &ce->conn)) {
-                free(ce);
-                return -1;
+        pthread_cleanup_push(free, ce);
+
+        ret = connmgr_alloc(id, dst, &qs, &ce->conn);
+
+        pthread_cleanup_pop(false);
+
+        if (ret < 0) {
+                log_err("Failed to allocate flow.");
+                goto fail_id;
         }

         if (strlen(dst) > DST_MAX_STRLEN) {
@@ -353,6 +371,11 @@ int connmgr_ipcp_connect(const char * dst,
         pthread_mutex_unlock(&connmgr.comps[id].lock);

         return 0;
+
+ fail_id:
+        free(ce);
+ fail_malloc:
+        return -1;
 }

 int connmgr_ipcp_disconnect(const char * dst,
@@ -366,8 +389,10 @@ int connmgr_ipcp_disconnect(const char * dst,
         assert(component);

         id = get_id_by_name(component);
-        if (id < 0)
+        if (id < 0) {
+                log_err("No such component: %s.", component);
                 return -1;
+        }

         pthread_mutex_lock(&connmgr.comps[id].lock);

@@ -393,62 +418,63 @@ int connmgr_alloc(enum comp_id  id,
                   qosspec_t *   qs,
                   struct conn * conn)
 {
+        struct comp *   comp;
+        int             fd;
+        struct timespec timeo = TIMESPEC_INIT_MS(CONNMGR_RCV_TIMEOUT);
+
         assert(id >= 0 && id < COMPID_MAX);
         assert(dst);

-        conn->flow_info.fd = flow_alloc(dst, qs, NULL);
-        if (conn->flow_info.fd < 0) {
-                log_dbg("Failed to allocate flow to %s.", dst);
-                return -1;
+        comp = connmgr.comps + id;
+
+        fd = flow_alloc(dst, qs, NULL);
+        if (fd < 0) {
+                log_err("Failed to allocate flow to %s.", dst);
+                goto fail_alloc;
         }

+        conn->flow_info.fd = fd;
+
         if (qs != NULL)
                 conn->flow_info.qs = *qs;
         else
                 memset(&conn->flow_info.qs, 0, sizeof(conn->flow_info.qs));

-        log_dbg("Sending cacep info for protocol %s to fd %d.",
-                connmgr.comps[id].info.protocol, conn->flow_info.fd);
+        log_dbg("Sending OCEP info for protocol %s to fd %d.",
+                comp->info.protocol, conn->flow_info.fd);

-        if (cacep_snd(conn->flow_info.fd, &connmgr.comps[id].info)) {
-                log_dbg("Failed to create application connection.");
-                flow_dealloc(conn->flow_info.fd);
-                return -1;
+        fccntl(fd, FLOWSRCVTIMEO, &timeo);
+
+        if (cep_snd(fd, &comp->info)) {
+                log_err("Failed to send OCEP info.");
+                goto fail_cep;
         }

-        if (cacep_rcv(conn->flow_info.fd, &conn->conn_info)) {
-                log_dbg("Failed to connect to application.");
-                flow_dealloc(conn->flow_info.fd);
-                return -1;
+        if (cep_rcv(fd, &conn->conn_info)) {
+                log_err("Failed to receive OCEP info.");
+                goto fail_cep;
         }

-        if (strcmp(connmgr.comps[id].info.protocol, conn->conn_info.protocol)) {
-                log_dbg("Unknown protocol (requested %s, got %s).",
-                        connmgr.comps[id].info.protocol,
-                        conn->conn_info.protocol);
-                flow_dealloc(conn->flow_info.fd);
-                return -1;
+        if (strcmp(comp->info.protocol, conn->conn_info.protocol)) {
+                log_err("Unknown protocol (requested %s, got %s).",
+                        comp->info.protocol, conn->conn_info.protocol);
+                goto fail_cep;
         }

-        if (connmgr.comps[id].info.pref_version !=
-            conn->conn_info.pref_version) {
-                log_dbg("Unknown protocol version.");
-                flow_dealloc(conn->flow_info.fd);
-                return -1;
+        if (comp->info.pref_version != conn->conn_info.pref_version) {
+                log_err("Unknown protocol version %d.",
+                        conn->conn_info.pref_version);
+                goto fail_cep;
         }

-        if (connmgr.comps[id].info.pref_syntax != conn->conn_info.pref_syntax) {
-                log_dbg("Unknown protocol syntax.");
-                flow_dealloc(conn->flow_info.fd);
-                return -1;
+        if (comp->info.pref_syntax != conn->conn_info.pref_syntax) {
+                log_err("Unknown protocol syntax.");
+                goto fail_cep;
         }

         switch (id) {
         case COMPID_DT:
                 notifier_event(NOTIFY_DT_CONN_ADD, conn);
-#if defined(BUILD_IPCP_UNICAST) && defined(IPCP_CONN_WAIT_DIR)
-                dir_wait_running();
-#endif
                 break;
         case COMPID_MGMT:
                 notifier_event(NOTIFY_MGMT_CONN_ADD, conn);
@@ -458,6 +484,11 @@ int connmgr_alloc(enum comp_id  id,
         }

         return 0;
+
+ fail_cep:
+        flow_dealloc(conn->flow_info.fd);
+ fail_alloc:
+        return -1;
 }

 int connmgr_dealloc(enum comp_id  id,
@@ -467,7 +498,7 @@ int connmgr_dealloc(enum comp_id  id,
         case COMPID_DT:
                 notifier_event(NOTIFY_DT_CONN_DEL, conn);
                 break;
-#if defined(BUILD_IPCP_UNICAST) && defined(IPCP_CONN_WAIT_DIR)
+#if defined(BUILD_IPCP_UNICAST)
         case COMPID_MGMT:
                 notifier_event(NOTIFY_MGMT_CONN_DEL, conn);
                 break;
@@ -503,6 +534,7 @@ int connmgr_wait(enum comp_id  id,
         el = list_first_entry((&comp->pending), struct conn_el, next);
         if (el == NULL) {
                 pthread_mutex_unlock(&comp->lock);
+                log_err("Failed to get connection element.");
                 return -1;
         }

diff --git a/src/ipcpd/common/connmgr.h b/src/ipcpd/common/connmgr.h
index 5f7b557f..0710dbbf 100644
--- a/src/ipcpd/common/connmgr.h
+++ b/src/ipcpd/common/connmgr.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Handles the different AP connections
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_COMMON_CONNMGR_H
 #define OUROBOROS_IPCPD_COMMON_CONNMGR_H

-#include <ouroboros/cacep.h>
+#include <ouroboros/cep.h>
 #include <ouroboros/qos.h>

 #include "comp.h"

diff --git a/src/ipcpd/common/enroll.c b/src/ipcpd/common/enroll.c
index 090067d8..8e5384a5 100644
--- a/src/ipcpd/common/enroll.c
+++ b/src/ipcpd/common/enroll.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Enrollment Task
  *
@@ -28,13 +28,11 @@

 #define OUROBOROS_PREFIX "enrollment"

-#include <ouroboros/endian.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/time_utils.h>
 #include <ouroboros/dev.h>
-#include <ouroboros/logs.h>
 #include <ouroboros/errno.h>
-#include <ouroboros/sockets.h>
+#include <ouroboros/logs.h>
+#include <ouroboros/serdes-oep.h>
+#include <ouroboros/time.h>

 #include "common/connmgr.h"
 #include "common/enroll.h"
@@ -45,281 +43,232 @@
 #include <string.h>
 #include <pthread.h>

-#include "ipcp_config.pb-c.h"
-typedef EnrollMsg enroll_msg_t;
+#ifdef __APPLE__
+#define llabs labs
+#endif

 #define ENROLL_COMP             "Enrollment"
 #define ENROLL_PROTO            "OEP" /* Ouroboros enrollment protocol */
 #define ENROLL_WARN_TIME_OFFSET 20
 #define ENROLL_BUF_LEN          1024

-enum enroll_state {
-        ENROLL_NULL = 0,
-        ENROLL_INIT,
-        ENROLL_RUNNING
-};

 struct {
         struct ipcp_config conf;
-        enum enroll_state  state;
+
         pthread_t          listener;
 } enroll;

-static int send_rcv_enroll_msg(int fd)
+static void * enroll_handle(void * o)
 {
-        enroll_msg_t    req = ENROLL_MSG__INIT;
-        enroll_msg_t *  reply;
-        uint8_t         buf[ENROLL_BUF_LEN];
-        ssize_t         len;
-        ssize_t         delta_t;
-        struct timespec t0;
-        struct timespec rtt;
-
-        req.code = ENROLL_CODE__ENROLL_REQ;
-
-        len = enroll_msg__get_packed_size(&req);
-        if (len < 0) {
-                log_dbg("Failed pack request message.");
-                return -1;
-        }
-
-        enroll_msg__pack(&req, buf);
-
-        clock_gettime(CLOCK_REALTIME, &t0);
+        struct enroll_req  req;
+        struct enroll_resp resp;
+        struct enroll_ack  ack;
+        struct conn        conn;
+        uint8_t            __buf[ENROLL_BUF_LEN];
+        buffer_t           buf;
+        ssize_t            len;

-        if (flow_write(fd, buf, len) < 0) {
-                log_dbg("Failed to send request message.");
-                return -1;
-        }
-
-        len = flow_read(fd, buf, ENROLL_BUF_LEN);
-        if (len < 0) {
-                log_dbg("No enrollment reply received.");
-                return -1;
-        }
-
-        log_dbg("Received enrollment info (%zd bytes).", len);
+        (void) o;

-        reply = enroll_msg__unpack(NULL, len, buf);
-        if (reply == NULL) {
-                log_dbg("No enrollment response.");
-                return -1;
-        }
+        buf.data = __buf;
+        buf.len  = sizeof(__buf);

-        if (reply->code != ENROLL_CODE__ENROLL_BOOT) {
-                log_dbg("Failed to unpack enrollment response.");
-                enroll_msg__free_unpacked(reply, NULL);
-                return -1;
-        }
+        resp.response = 0;
+        resp.conf = enroll.conf;

-        if (!(reply->has_t_sec && reply->has_t_nsec)) {
-                log_dbg("No time in response message.");
-                enroll_msg__free_unpacked(reply, NULL);
-                return -1;
-        }
-
-        clock_gettime(CLOCK_REALTIME, &rtt);
+        while (true) {
+                buffer_t msg;
+                int      fd;

-        delta_t = ts_diff_ms(&t0, &rtt);
+                if (connmgr_wait(COMPID_ENROLL, &conn)) {
+                        log_err("Failed to get next connection.");
+                        continue;
+                }

-        rtt.tv_sec  = reply->t_sec;
-        rtt.tv_nsec = reply->t_nsec;
-
-        if (labs(ts_diff_ms(&t0, &rtt)) - delta_t > ENROLL_WARN_TIME_OFFSET)
-                log_warn("Clock offset above threshold.");
-
-        strcpy(enroll.conf.layer_info.layer_name,
-               reply->conf->layer_info->layer_name);
-        enroll.conf.type           = reply->conf->ipcp_type;
-#ifdef BUILD_IPCP_UNICAST
-        enroll.conf.addr_size      = reply->conf->addr_size;
-        enroll.conf.eid_size       = reply->conf->eid_size;
-        enroll.conf.max_ttl        = reply->conf->max_ttl;
-        enroll.conf.addr_auth_type = reply->conf->addr_auth_type;
-        enroll.conf.routing_type   = reply->conf->routing_type;
-        enroll.conf.cong_avoid     = reply->conf->cong_avoid;
-#endif
-        enroll.conf.layer_info.dir_hash_algo
-                = reply->conf->layer_info->dir_hash_algo;
-
-        enroll_msg__free_unpacked(reply, NULL);
+                fd = conn.flow_info.fd;

-        return 0;
-}
+                log_info("Incoming enrollment connection on flow %d.", fd);

-static ssize_t enroll_pack(uint8_t ** buf)
-{
-        enroll_msg_t      msg        = ENROLL_MSG__INIT;
-        ipcp_config_msg_t config     = IPCP_CONFIG_MSG__INIT;
-        layer_info_msg_t  layer_info = LAYER_INFO_MSG__INIT;
-        struct timespec   now;
-        ssize_t           len;
+                len = flow_read(fd, buf.data, buf.len);
+                if (len < 0) {
+                        log_err("Failed to read from flow %d.", fd);
+                        goto finish_flow;
+                }

-        clock_gettime(CLOCK_REALTIME, &now);
-
-        msg.code       = ENROLL_CODE__ENROLL_BOOT;
-        msg.has_t_sec  = true;
-        msg.t_sec      = now.tv_sec;
-        msg.has_t_nsec = true;
-        msg.t_nsec     = now.tv_nsec;
-        msg.conf       = &config;
-
-        config.ipcp_type          = enroll.conf.type;
-#ifdef BUILD_IPCP_UNICAST
-        config.has_addr_size      = true;
-        config.addr_size          = enroll.conf.addr_size;
-        config.has_eid_size       = true;
-        config.eid_size           = enroll.conf.eid_size;
-        config.has_max_ttl        = true;
-        config.max_ttl            = enroll.conf.max_ttl;
-        config.has_addr_auth_type = true;
-        config.addr_auth_type     = enroll.conf.addr_auth_type;
-        config.has_routing_type   = true;
-        config.routing_type       = enroll.conf.routing_type;
-        config.has_cong_avoid     = true;
-        config.cong_avoid         = enroll.conf.cong_avoid;
-#endif
-        config.layer_info         = &layer_info;
+                msg.data = buf.data;
+                msg.len = (size_t) len;

-        layer_info.layer_name     = (char *) enroll.conf.layer_info.layer_name;
-        layer_info.dir_hash_algo  = enroll.conf.layer_info.dir_hash_algo;
+                if (enroll_req_des(&req, msg) < 0) {
+                        log_err("Failed to unpack request message.");
+                        goto finish_flow;
+                }

-        len = enroll_msg__get_packed_size(&msg);
+                log_info_id(req.id, "Handling incoming enrollment.");

-        *buf = malloc(len);
-        if (*buf == NULL)
-                return -1;
+                ack.result = -100;

-        enroll_msg__pack(&msg, *buf);
+                clock_gettime(CLOCK_REALTIME, &resp.t);

-        return len;
-}
+                memcpy(resp.id, req.id, ENROLL_ID_LEN);

-static void * enroll_handle(void * o)
-{
-        struct conn    conn;
-        uint8_t        buf[ENROLL_BUF_LEN];
-        uint8_t *      reply;
-        ssize_t        len;
-        enroll_msg_t * msg;
+                len = enroll_resp_ser(&resp, buf);
+                if (len < 0) {
+                        log_err_id(req.id, "Failed to pack reply.");
+                        goto finish_enroll;
+                }

-        (void) o;
+                log_dbg_id(req.id, "Sending enrollment info (%zd bytes).", len);

-        while (true) {
-                if (connmgr_wait(COMPID_ENROLL, &conn)) {
-                        log_err("Failed to get next connection.");
-                        continue;
+                if (flow_write(conn.flow_info.fd, buf.data, len) < 0) {
+                        log_err_id(req.id, "Failed te send response.");
+                        goto finish_enroll;
                 }

-                len = flow_read(conn.flow_info.fd, buf, ENROLL_BUF_LEN);
+                len = flow_read(conn.flow_info.fd, buf.data, buf.len);
                 if (len < 0) {
-                        log_err("Failed to read from flow.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        continue;
+                        log_err_id(req.id, "Failed to read from flow.");
+                        goto finish_enroll;
                 }

-                msg = enroll_msg__unpack(NULL, len, buf);
-                if (msg == NULL) {
-                        log_err("Failed to unpack message.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        continue;
-                }
+                msg.data = buf.data;
+                msg.len = (size_t) len;

-                if (msg->code != ENROLL_CODE__ENROLL_REQ) {
-                        log_err("Wrong message type.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        enroll_msg__free_unpacked(msg, NULL);
-                        continue;
+                if (enroll_ack_des(&ack, msg) < 0) {
+                        log_err_id(req.id, "Failed to unpack ack.");
+                        goto finish_enroll;
                 }

-                log_dbg("Enrolling a new neighbor.");
-
-                enroll_msg__free_unpacked(msg, NULL);
-
-                len = enroll_pack(&reply);
-                if (reply == NULL) {
-                        log_err("Failed to pack enrollment message.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        continue;
+                if (memcmp(req.id, ack.id, ENROLL_ID_LEN) != 0)
+                       log_warn_id(req.id, "Enrollment ID mismatch.");
+
+         finish_enroll:
+                switch(ack.result) {
+                case 0:
+                        log_info_id(req.id, "Enrollment completed.");
+                        break;
+                case -100:
+                        log_warn_id(req.id, "Enrollment failed.");
+                        break;
+                default:
+                        log_warn_id(req.id, "Enrollment failed at remote.");
                 }
+         finish_flow:
+                connmgr_dealloc(COMPID_ENROLL, &conn);

-                log_dbg("Sending enrollment info (%zd bytes).", len);
+                log_info("Enrollment flow %d closed.", fd);
+        }

-                if (flow_write(conn.flow_info.fd, reply, len) < 0) {
-                        log_err("Failed respond to enrollment request.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        free(reply);
-                        continue;
-                }
+        return 0;
+}

-                free(reply);
+int enroll_boot(struct conn *   conn,
+                const uint8_t * id)
+{
+        uint8_t            __buf[ENROLL_BUF_LEN];
+        buffer_t           buf;
+        buffer_t           msg;
+        ssize_t            len;
+        ssize_t            delta_t;
+        struct timespec    t0;
+        struct timespec    rtt;
+        int                fd;
+        int                ret;
+        struct enroll_req  req;
+        struct enroll_resp resp;
+
+        fd = conn->flow_info.fd;
+
+        buf.data = __buf;
+        buf.len  = sizeof(__buf);
+
+        memcpy(req.id, id, ENROLL_ID_LEN);
+
+        len = enroll_req_ser(&req, buf);
+        if (len < 0) {
+                log_err_id(id, "Failed to pack request message.");
+                return -1;
+        }

-                len = flow_read(conn.flow_info.fd, buf, ENROLL_BUF_LEN);
-                if (len < 0) {
-                        log_err("Failed to read from flow.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        continue;
-                }
+        clock_gettime(CLOCK_REALTIME, &t0);

-                msg = enroll_msg__unpack(NULL, len, buf);
-                if (msg == NULL) {
-                        log_err("Failed to unpack message.");
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        continue;
-                }
+        if (flow_write(fd, buf.data, len) < 0) {
+                log_err_id(id, "Failed to send request message.");
+                return -1;
+        }

-                if (msg->code != ENROLL_CODE__ENROLL_DONE || !msg->has_result) {
-                        log_err("Wrong message type.");
-                        enroll_msg__free_unpacked(msg, NULL);
-                        connmgr_dealloc(COMPID_ENROLL, &conn);
-                        continue;
-                }
+        len = flow_read(fd, buf.data, buf.len);
+        if (len < 0) {
+                log_err_id(id, "No reply received.");
+                return -1;
+        }

-                if (msg->result == 0)
-                        log_dbg("Neighbor enrollment successful.");
-                else
-                        log_dbg("Neigbor reported failed enrollment.");
+        log_dbg_id(id, "Received configuration info (%zd bytes).", len);

-                enroll_msg__free_unpacked(msg, NULL);
+        msg.data = buf.data;
+        msg.len  = len;

-                connmgr_dealloc(COMPID_ENROLL, &conn);
+        ret = enroll_resp_des(&resp, msg);
+        if (ret < 0) {
+                log_err_id(id, "Failed to unpack response message.");
+                return -1;
         }

-        return 0;
-}
+        if (memcmp(resp.id, id, ENROLL_ID_LEN) != 0) {
+                log_err_id(id, "Enrollment ID mismatch.");
+                return -1;
+        }

-int enroll_boot(struct conn * conn)
-{
-        log_dbg("Getting boot information.");
+        if (resp.response < 0) {
+                log_warn_id(id, "Remote denied request: %d.", resp.response);
+                return -1;
+        }

-        if (send_rcv_enroll_msg(conn->flow_info.fd)) {
-                log_err("Failed to enroll.");
+        if (resp.conf.type != ipcp_get_type()) {
+                log_err_id(id, "Wrong type in enrollment response %d (%d).",
+                           resp.conf.type, ipcp_get_type());
                 return -1;
         }

+        enroll.conf = resp.conf;
+
+        clock_gettime(CLOCK_REALTIME, &rtt);
+
+        delta_t = ts_diff_ms(&t0, &rtt);
+
+        rtt.tv_sec  = resp.t.tv_sec;
+        rtt.tv_nsec = resp.t.tv_nsec;
+
+        if (llabs(ts_diff_ms(&t0, &rtt)) - delta_t > ENROLL_WARN_TIME_OFFSET)
+                log_warn_id(id, "Clock offset above threshold.");
+
         return 0;
 }

-int enroll_done(struct conn * conn,
-                int           result)
+int enroll_ack(struct conn *   conn,
+               const uint8_t * id,
+               const int       result)
 {
-        enroll_msg_t msg = ENROLL_MSG__INIT;
-        uint8_t      buf[ENROLL_BUF_LEN];
-        ssize_t       len;
+        struct enroll_ack ack;
+        uint8_t           __buf[ENROLL_BUF_LEN];
+        buffer_t          buf;
+        ssize_t           len;

-        msg.code       = ENROLL_CODE__ENROLL_DONE;
-        msg.has_result = true;
-        msg.result     = result;
+        buf.data = __buf;
+        buf.len  = sizeof(__buf);

-        len = enroll_msg__get_packed_size(&msg);
+        ack.result = result;
+
+        memcpy(ack.id, id, ENROLL_ID_LEN);
+
+        len = enroll_ack_ser(&ack, buf);
         if (len < 0) {
-                log_dbg("Failed pack request message.");
+                log_err_id(id, "Failed to pack acknowledgement.");
                 return -1;
         }

-        enroll_msg__pack(&msg, buf);
-
-        if (flow_write(conn->flow_info.fd, buf, len) < 0) {
-                log_dbg("Failed to send acknowledgment.");
+        if (flow_write(conn->flow_info.fd, buf.data, len) < 0) {
+                log_err_id(id, "Failed to send acknowledgment.");
                 return -1;
         }

@@ -355,16 +304,11 @@ int enroll_init(void)
                 return -1;
         }

-        enroll.state = ENROLL_INIT;
-
         return 0;
 }

 void enroll_fini(void)
 {
-        if (enroll.state == ENROLL_RUNNING)
-                pthread_join(enroll.listener, NULL);
-
         connmgr_comp_fini(COMPID_ENROLL);
 }

@@ -373,13 +317,11 @@ int enroll_start(void)
         if (pthread_create(&enroll.listener, NULL, enroll_handle, NULL))
                 return -1;

-        enroll.state = ENROLL_RUNNING;
-
         return 0;
 }

 void enroll_stop(void)
 {
-        if (enroll.state == ENROLL_RUNNING)
-                pthread_cancel(enroll.listener);
+        pthread_cancel(enroll.listener);
+        pthread_join(enroll.listener, NULL);
 }
diff --git a/src/ipcpd/common/enroll.h b/src/ipcpd/common/enroll.h
index fb866416..f26c31a3 100644
--- a/src/ipcpd/common/enroll.h
+++ b/src/ipcpd/common/enroll.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Enrollment Task
  *
@@ -37,10 +37,12 @@ void                 enroll_stop(void);

 void                 enroll_bootstrap(const struct ipcp_config * conf);

-int                  enroll_boot(struct conn * conn);
+int                  enroll_boot(struct conn *   conn,
+                                 const uint8_t * id);

-int                  enroll_done(struct conn * conn,
-                                 int           result);
+int                  enroll_ack(struct conn *   conn,
+                                const uint8_t * id,
+                                const int       result);

 struct ipcp_config * enroll_get_conf(void);

diff --git a/src/ipcpd/config.h.in b/src/ipcpd/config.h.in
index 0bf3ad69..d2af6440 100644
--- a/src/ipcpd/config.h.in
+++ b/src/ipcpd/config.h.in
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * IPC process configuration
  *
@@ -48,10 +48,17 @@
 #define IPCP_SCHED_THR_MUL  @IPCP_SCHED_THR_MUL@
 #define PFT_SIZE            @PFT_SIZE@
 #define DHT_ENROLL_SLACK    @DHT_ENROLL_SLACK@
+#define IPCP_UNICAST_MPL    @IPCP_UNICAST_MPL@
+#define CONNMGR_RCV_TIMEOUT @CONNMGR_RCV_TIMEOUT@

-#cmakedefine IPCP_CONN_WAIT_DIR
 #cmakedefine DISABLE_CORE_LOCK
 #cmakedefine IPCP_FLOW_STATS
+#cmakedefine IPCP_DEBUG_LOCAL
+#ifdef CONFIG_OUROBOROS_DEBUG
+#cmakedefine DEBUG_PROTO_DHT
+#cmakedefine DEBUG_PROTO_OEP
+#cmakedefine DEBUG_PROTO_LS
+#endif

 /* udp */
 #cmakedefine HAVE_DDNS
@@ -59,8 +66,9 @@
 #define NSLOOKUP_EXEC       "@NSLOOKUP_EXECUTABLE@"
 #define IPCP_UDP_RD_THR     @IPCP_UDP_RD_THR@
 #define IPCP_UDP_WR_THR     @IPCP_UDP_WR_THR@
+#define IPCP_UDP_MPL        @IPCP_UDP_MPL@

-/* eth-llc */
+/* eth */
 #cmakedefine HAVE_NETMAP
 #cmakedefine HAVE_BPF
 #cmakedefine HAVE_RAW_SOCKETS
@@ -68,3 +76,11 @@
 #define IPCP_ETH_RD_THR     @IPCP_ETH_RD_THR@
 #define IPCP_ETH_WR_THR     @IPCP_ETH_WR_THR@
 #define IPCP_ETH_LO_MTU     @IPCP_ETH_LO_MTU@
+#define IPCP_ETH_MPL        @IPCP_ETH_MPL@
+
+/* local */
+#define IPCP_LOCAL_MPL      @IPCP_LOCAL_MPL@
+
+/* broadcast */
+/* local */
+#define IPCP_BROADCAST_MPL  @IPCP_BROADCAST_MPL@
diff --git a/src/ipcpd/eth/CMakeLists.txt b/src/ipcpd/eth/CMakeLists.txt
index d7105b4f..44299a59 100644
--- a/src/ipcpd/eth/CMakeLists.txt
+++ b/src/ipcpd/eth/CMakeLists.txt
@@ -85,16 +85,18 @@ if (HAVE_ETH)
     "Bypass the Qdisc in the kernel when using raw sockets")
   set(IPCP_ETH_LO_MTU 1500 CACHE STRING
     "Restrict Ethernet MTU over loopback interfaces")
+  set(IPCP_ETH_MPL 100 CACHE STRING
+    "Default maximum packet lifetime for the Ethernet IPCPs, in ms")

   set(ETH_LLC_SOURCES
     # Add source files here
-    ${CMAKE_CURRENT_SOURCE_DIR}/llc.c
-    )
+    llc.c
+  )

   set(ETH_DIX_SOURCES
     # Add source files here
-    ${CMAKE_CURRENT_SOURCE_DIR}/dix.c
-    )
+    dix.c
+  )

   set(IPCP_ETH_LLC_TARGET ipcpd-eth-llc CACHE INTERNAL "")
   set(IPCP_ETH_DIX_TARGET ipcpd-eth-dix CACHE INTERNAL "")
diff --git a/src/ipcpd/eth/dix.c b/src/ipcpd/eth/dix.c
index ac117e37..37b9896d 100644
--- a/src/ipcpd/eth/dix.c
+++ b/src/ipcpd/eth/dix.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * IPC processes over Ethernet - DIX
  *
diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c
index 932034d5..0b6a91fb 100644
--- a/src/ipcpd/eth/eth.c
+++ b/src/ipcpd/eth/eth.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * IPC processes over Ethernet
  *
@@ -37,6 +37,7 @@

 #include "config.h"

+#include <ouroboros/endian.h>
 #include <ouroboros/hash.h>
 #include <ouroboros/errno.h>
 #include <ouroboros/list.h>
@@ -46,7 +47,7 @@
 #include <ouroboros/ipcp-dev.h>
 #include <ouroboros/fqueue.h>
 #include <ouroboros/logs.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>
 #include <ouroboros/fccntl.h>
 #include <ouroboros/pthread.h>

@@ -87,31 +88,31 @@
 #include <sys/mman.h>

 #if defined(HAVE_NETMAP)
-#define NETMAP_WITH_LIBS
-#include <net/netmap_user.h>
+  #define NETMAP_WITH_LIBS
+  #include <net/netmap_user.h>
 #elif defined(HAVE_BPF)
-#define BPF_DEV_MAX          256
-#define BPF_BLEN             sysconf(_SC_PAGESIZE)
-#include <net/bpf.h>
+  #define BPF_DEV_MAX        256
+  #define BPF_BLEN           sysconf(_SC_PAGESIZE)
+  #include <net/bpf.h>
 #endif

-#ifdef __linux__
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_VAL(a)                                         \
+        (uint8_t)(a)[0], (uint8_t)(a)[1], (uint8_t)(a)[2], \
+        (uint8_t)(a)[3], (uint8_t)(a)[4], (uint8_t)(a)[5]
+
+  #ifndef ETH_MAX_MTU        /* In if_ether.h as of Linux 4.10. */
-#define ETH_MAX_MTU          0xFFFFU
+  #define ETH_MAX_MTU          0xFFFFU
 #endif /* ETH_MAX_MTU */
 #ifdef BUILD_ETH_DIX
-#define ETH_MTU              eth_data.mtu
-#define ETH_MTU_MAX          ETH_MAX_MTU
+  #define ETH_MTU              eth_data.mtu
+  #define ETH_MTU_MAX          ETH_MAX_MTU
 #else
-#define ETH_MTU              eth_data.mtu
-#define ETH_MTU_MAX          1500
+  #define ETH_MTU              eth_data.mtu
+  #define ETH_MTU_MAX          1500
 #endif /* BUILD_ETH_DIX */
-#else /* __linux__ */
-#define ETH_MTU              1500
-#define ETH_MTU_MAX          ETH_MTU
-#endif /* __linux__ */

-#define MAC_SIZE             6
 #define ETH_TYPE_LENGTH_SIZE sizeof(uint16_t)
 #define ETH_HEADER_SIZE      (2 * MAC_SIZE + ETH_TYPE_LENGTH_SIZE)
@@ -135,7 +136,6 @@
 #define ETH_FRAME_SIZE       (ETH_HEADER_SIZE + ETH_MTU_MAX)
 #endif

-#define ALLOC_TIMEO          10    /* ms */
 #define NAME_QUERY_TIMEO     2000  /* ms */
 #define MGMT_TIMEO           100   /* ms */
 #define MGMT_FRAME_SIZE      2048
@@ -145,8 +145,6 @@
 #define NAME_QUERY_REQ       2
 #define NAME_QUERY_REPLY     3

-struct ipcp ipcpi;
-
 struct mgmt_msg {
 #if defined(BUILD_ETH_DIX)
         uint16_t seid;
@@ -164,13 +162,13 @@ struct mgmt_msg {
         uint32_t ber;
         uint32_t max_gap;
         uint32_t delay;
-        uint16_t cypher_s;
+        uint32_t timeout;
+        int32_t  response;
         uint8_t  in_order;
 #if defined (BUILD_ETH_DIX)
         uint8_t  code;
         uint8_t  availability;
 #endif
-        int8_t   response;
 } __attribute__((packed));

 struct eth_frame {
@@ -208,8 +206,9 @@ struct mgmt_frame {

 struct {
         struct shim_data * shim_data;
-#ifdef __linux__
+
         int                mtu;
+#ifdef __linux__
         int                if_idx;
 #endif
 #if defined(HAVE_NETMAP)
@@ -454,16 +453,15 @@ static int eth_ipcp_send_frame(const uint8_t * dst_addr,
         return 0;
 }

-static int eth_ipcp_alloc(const uint8_t * dst_addr,
+static int eth_ipcp_alloc(const uint8_t *  dst_addr,
 #if defined(BUILD_ETH_DIX)
-                          uint16_t        eid,
+                          uint16_t         eid,
 #elif defined(BUILD_ETH_LLC)
-                          uint8_t         ssap,
+                          uint8_t          ssap,
 #endif
-                          const uint8_t * hash,
-                          qosspec_t       qs,
-                          const void *    data,
-                          size_t          dlen)
+                          const uint8_t *  hash,
+                          qosspec_t        qs,
+                          const buffer_t * data)
 {
         uint8_t *         buf;
         struct mgmt_msg * msg;
@@ -472,7 +470,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
         len = sizeof(*msg) + ipcp_dir_hash_len();

-        buf = malloc(len + ETH_HEADER_TOT_SIZE + dlen);
+        buf = malloc(len + ETH_HEADER_TOT_SIZE + data->len);
         if (buf == NULL)
                 return -1;

@@ -491,10 +489,11 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
         msg->ber          = hton32(qs.ber);
         msg->in_order     = qs.in_order;
         msg->max_gap      = hton32(qs.max_gap);
-        msg->cypher_s     = hton16(qs.cypher_s);
+        msg->timeout      = hton32(qs.timeout);

         memcpy(msg + 1, hash, ipcp_dir_hash_len());
-        memcpy(buf + len + ETH_HEADER_TOT_SIZE, data, dlen);
+        if (data->len > 0)
+                memcpy(buf + len + ETH_HEADER_TOT_SIZE, data->data, data->len);

         ret = eth_ipcp_send_frame(dst_addr,
 #if defined(BUILD_ETH_DIX)
@@ -503,28 +502,27 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
                                   reverse_bits(MGMT_SAP),
                                   reverse_bits(MGMT_SAP),
 #endif
-                                  buf, len + dlen);
+                                  buf, len + data->len);

         free(buf);

         return ret;
 }

-static int eth_ipcp_alloc_resp(uint8_t *    dst_addr,
+static int eth_ipcp_alloc_resp(uint8_t *        dst_addr,
 #if defined(BUILD_ETH_DIX)
-                               uint16_t     seid,
-                               uint16_t     deid,
+                               uint16_t         seid,
+                               uint16_t         deid,
 #elif defined(BUILD_ETH_LLC)
-                               uint8_t      ssap,
-                               uint8_t      dsap,
+                               uint8_t          ssap,
+                               uint8_t          dsap,
 #endif
-                               int          response,
-                               const void * data,
-                               size_t       len)
+                               int              response,
+                               const buffer_t * data)
 {
         struct mgmt_msg * msg;
         uint8_t *         buf;

-        buf = malloc(sizeof(*msg) + ETH_HEADER_TOT_SIZE + len);
+        buf = malloc(sizeof(*msg) + ETH_HEADER_TOT_SIZE + data->len);
         if (buf == NULL)
                 return -1;

@@ -538,9 +536,10 @@ static int eth_ipcp_alloc_resp(uint8_t *    dst_addr,
         msg->ssap     = ssap;
         msg->dsap     = dsap;
 #endif
-        msg->response = response;
+        msg->response = hton32(response);

-        memcpy(msg + 1, data, len);
+        if (data->len > 0)
+                memcpy(msg + 1, data->data, data->len);

         if (eth_ipcp_send_frame(dst_addr,
 #if defined(BUILD_ETH_DIX)
@@ -549,7 +548,7 @@ static int eth_ipcp_alloc_resp(uint8_t *    dst_addr,
                                 reverse_bits(MGMT_SAP),
                                 reverse_bits(MGMT_SAP),
 #endif
-                                buf, sizeof(*msg) + len)) {
+                                buf, sizeof(*msg) + data->len)) {
                 free(buf);
                 return -1;
         }
@@ -559,42 +558,20 @@ static int eth_ipcp_alloc_resp(uint8_t *    dst_addr,
         return 0;
 }

-static int eth_ipcp_req(uint8_t *       r_addr,
+static int eth_ipcp_req(uint8_t *        r_addr,
 #if defined(BUILD_ETH_DIX)
-                        uint16_t        r_eid,
+                        uint16_t         r_eid,
 #elif defined(BUILD_ETH_LLC)
-                        uint8_t         r_sap,
+                        uint8_t          r_sap,
 #endif
-
                     const uint8_t * dst, -                        qosspec_t       qs, -                        const void *    data, -                        size_t          len) +                        const uint8_t *  dst, +                        qosspec_t        qs, +                        const buffer_t * data)  { -        struct timespec ts = {0, ALLOC_TIMEO * MILLION}; -        struct timespec abstime; -        int             fd; - -        clock_gettime(PTHREAD_COND_CLOCK, &abstime); - -        pthread_mutex_lock(&ipcpi.alloc_lock); +        int fd; -        while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) { -                ts_add(&abstime, &ts, &abstime); -                pthread_cond_timedwait(&ipcpi.alloc_cond, -                                       &ipcpi.alloc_lock, -                                       &abstime); -        } - -        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                log_dbg("Won't allocate over non-operational IPCP."); -                pthread_mutex_unlock(&ipcpi.alloc_lock); -                return -1; -        } - -        /* reply to IRM, called under lock to prevent race */ -        fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len); +        fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_ETH_MPL, data);          if (fd < 0) { -                pthread_mutex_unlock(&ipcpi.alloc_lock);                  log_err("Could not get new flow from IRMd.");                  return -1;          } @@ -609,11 +586,6 @@ static int eth_ipcp_req(uint8_t *       r_addr,          pthread_rwlock_unlock(&eth_data.flows_lock); -        ipcpi.alloc_id = fd; -        pthread_cond_broadcast(&ipcpi.alloc_cond); - -        pthread_mutex_unlock(&ipcpi.alloc_lock); -  #if defined(BUILD_ETH_DIX)          log_dbg("New flow request, fd %d, remote endpoint %d.", fd, r_eid);  #elif defined(BUILD_ETH_LLC) @@ -622,20 +594,20 @@          return 0;  } -static int eth_ipcp_alloc_reply(uint8_t *    r_addr, +static int eth_ipcp_alloc_reply(uint8_t *        r_addr,  #if defined(BUILD_ETH_DIX) -                                uint16_t     seid, -                                uint16_t     deid, +                                uint16_t         seid, +                                uint16_t         deid,  #elif defined(BUILD_ETH_LLC) -                                uint8_t      ssap, -                                int          dsap, +                                uint8_t          ssap, +                                int              dsap,  #endif -                                int          response, -                                const void * data, -                                size_t       len) +                                int              response, +                                const buffer_t * data)  { -        int ret = 0; -        int fd = -1; +        int    ret = 0; +        int    fd  = -1; +        time_t mpl = IPCP_ETH_MPL;          pthread_rwlock_wrlock(&eth_data.flows_lock); @@ -670,11 +642,12 @@  #elif defined(BUILD_ETH_LLC)          log_dbg("Flow reply, fd %d, SSAP %d, DSAP %d.", fd, ssap, dsap);  #endif -        if ((ret = ipcp_flow_alloc_reply(fd, response, data, len)) < 0) +        if ((ret = ipcp_flow_alloc_reply(fd, response, mpl, data)) < 0) { +                log_err("Failed to reply to flow allocation.");                  return -1; +        }          return ret; -  }  static int eth_ipcp_name_query_req(const 
uint8_t * hash, @@ -718,11 +691,11 @@ static int eth_ipcp_name_query_req(const uint8_t * hash,  static int eth_ipcp_name_query_reply(const uint8_t * hash,                                       uint8_t *       r_addr)  { -        uint64_t address = 0; +        struct addr addr; -        memcpy(&address, r_addr, MAC_SIZE); +        memcpy(&addr.mac, r_addr, MAC_SIZE); -        shim_data_dir_add_entry(eth_data.shim_data, hash, address); +        shim_data_dir_add_entry(eth_data.shim_data, hash, addr);          shim_data_dir_query_respond(eth_data.shim_data, hash); @@ -736,6 +709,7 @@          struct mgmt_msg * msg;          size_t            msg_len;          qosspec_t         qs; +        buffer_t          data;          msg = (struct mgmt_msg *) buf; @@ -752,7 +726,10 @@                  qs.ber = ntoh32(msg->ber);                  qs.in_order = msg->in_order;                  qs.max_gap = ntoh32(msg->max_gap); -                qs.cypher_s = ntoh16(msg->cypher_s); +                qs.timeout = ntoh32(msg->timeout); + +                data.data = (uint8_t *) buf + msg_len; +                data.len  = len - msg_len;                  if (shim_data_reg_has(eth_data.shim_data,                                        buf + sizeof(*msg))) { @@ -764,13 +741,15 @@  #endif                                       buf + sizeof(*msg),                                       qs, -                                     buf + msg_len, -                                     len - msg_len); +                                     &data);                  }                  break;          case FLOW_REPLY:                  assert(len >= sizeof(*msg)); +                data.data = (uint8_t *) buf + sizeof(*msg); +                data.len  = len - sizeof(*msg); +                  eth_ipcp_alloc_reply(r_addr,  #if defined(BUILD_ETH_DIX)                                       ntohs(msg->seid), @@ -779,9 +758,8 @@                                       msg->ssap,                                       msg->dsap,  #endif -                                     msg->response, -                                     buf + sizeof(*msg), -                                     len - sizeof(*msg)); +                                     ntoh32(msg->response), +                                     &data);                  break;          case NAME_QUERY_REQ:                  eth_ipcp_name_query_req(buf + sizeof(*msg), r_addr); @@ -799,18 +777,15 @@  static void * eth_ipcp_mgmt_handler(void * o)  { -        int                 ret; -        struct timespec     timeout = {(MGMT_TIMEO / 1000), -                                       (MGMT_TIMEO % 1000) * MILLION}; -        struct timespec     abstime; -        struct mgmt_frame * frame; -          (void) o;          pthread_cleanup_push(__cleanup_mutex_unlock, &eth_data.mgmt_lock);          while (true) { -                ret = 0; +                int                 ret = 0; +                struct timespec     timeout = TIMESPEC_INIT_MS(MGMT_TIMEO); +                struct timespec     abstime; +                struct mgmt_frame * frame = NULL;                  clock_gettime(PTHREAD_COND_CLOCK, &abstime);                  ts_add(&abstime, &timeout, &abstime); @@ -822,23 +797,19 @@             
           ret = -pthread_cond_timedwait(&eth_data.mgmt_cond,                                                        &eth_data.mgmt_lock,                                                        &abstime); +                if (ret != -ETIMEDOUT) +                        frame = list_first_entry((&eth_data.mgmt_frames), +                                                 struct mgmt_frame, next); +                if (frame != NULL) +                        list_del(&frame->next); -                if (ret == -ETIMEDOUT) { -                        pthread_mutex_unlock(&eth_data.mgmt_lock); -                        continue; -                } +                pthread_mutex_unlock(&eth_data.mgmt_lock); -                frame = list_first_entry((&eth_data.mgmt_frames), -                                         struct mgmt_frame, next); -                if (frame == NULL) { -                        pthread_mutex_unlock(&eth_data.mgmt_lock); +                if (frame == NULL)                          continue; -                } - -                list_del(&frame->next); -                pthread_mutex_unlock(&eth_data.mgmt_lock);                  eth_ipcp_mgmt_frame(frame->buf, frame->len, frame->r_addr); +                  free(frame);          } @@ -884,7 +855,7 @@                  buf = nm_nextpkt(eth_data.nmd, &hdr);                  if (buf == NULL) { -                        log_err("Bad read from netmap device."); +                        log_dbg("Bad read from netmap device.");                          continue;                  }  #else @@ -915,6 +886,7 @@                                   ETH_MTU + ETH_HEADER_TOT_SIZE, 0);      #endif                  if (frame_len <= 0) { +                        log_dbg("Failed to receive frame.");                          ipcp_sdb_release(sdb);                          continue;                  } @@ -941,22 +913,14 @@  #endif                  length = ntohs(e_frame->length);  #if defined(BUILD_ETH_DIX) -                if (e_frame->ethertype != eth_data.ethertype) { -#ifndef HAVE_NETMAP -                        ipcp_sdb_release(sdb); -#endif -                        continue; -                } +                if (e_frame->ethertype != eth_data.ethertype) +                        goto fail_frame;                  deid = ntohs(e_frame->eid);                  if (deid == MGMT_EID) {  #elif defined (BUILD_ETH_LLC) -                if (length > 0x05FF) {/* DIX */ -#ifndef HAVE_NETMAP -                        ipcp_sdb_release(sdb); -#endif -                        continue; -                } +                if (length > 0x05FF) /* DIX */ +                        goto fail_frame;                  length -= LLC_HEADER_SIZE; @@ -965,12 +929,12 @@                  if (ssap == MGMT_SAP && dsap == MGMT_SAP) {  #endif +                        ipcp_sdb_release(sdb); /* No need for the N+1 buffer. 
*/ +                          frame = malloc(sizeof(*frame));                          if (frame == NULL) { -#ifndef HAVE_NETMAP -                                ipcp_sdb_release(sdb); -#endif -                                continue; +                                log_err("Failed to allocate frame."); +                                goto fail_frame;                          }                          memcpy(frame->buf, &e_frame->payload, length); @@ -981,10 +945,6 @@                          list_add(&frame->next, &eth_data.mgmt_frames);                          pthread_cond_signal(&eth_data.mgmt_cond);                          pthread_mutex_unlock(&eth_data.mgmt_lock); - -#ifndef HAVE_NETMAP -                        ipcp_sdb_release(sdb); -#endif                  } else {                          pthread_rwlock_rdlock(&eth_data.flows_lock); @@ -995,10 +955,7 @@  #endif                          if (fd < 0) {                                  pthread_rwlock_unlock(&eth_data.flows_lock); -#ifndef HAVE_NETMAP -                                ipcp_sdb_release(sdb); -#endif -                                continue; +                                goto fail_frame;                          }  #ifdef BUILD_ETH_LLC @@ -1006,10 +963,7 @@                              || memcmp(eth_data.fd_to_ef[fd].r_addr,                                        e_frame->src_hwaddr, MAC_SIZE)) {                                  pthread_rwlock_unlock(&eth_data.flows_lock); -#ifndef HAVE_NETMAP -                                ipcp_sdb_release(sdb); -#endif -                                continue; +                                goto fail_frame;                          }  #endif                          pthread_rwlock_unlock(&eth_data.flows_lock); @@ -1017,9 +971,20 @@  #ifndef HAVE_NETMAP                          shm_du_buff_head_release(sdb, ETH_HEADER_TOT_SIZE);                          shm_du_buff_truncate(sdb, length); -                        ipcp_flow_write(fd, sdb);  #else -                        flow_write(fd, &e_frame->payload, length); +                        if (ipcp_sdb_reserve(&sdb, length)) +                                continue; + +                        buf = shm_du_buff_head(sdb); +                        memcpy(buf, &e_frame->payload, length); +#endif +                        if (np1_flow_write(fd, sdb) < 0) +                                ipcp_sdb_release(sdb); + +                        continue; + fail_frame: +#ifndef HAVE_NETMAP +                        ipcp_sdb_release(sdb);  #endif                  }          } @@ -1053,27 +1018,28 @@          (void) o; -        pthread_cleanup_push(cleanup_writer, fq); -          ipcp_lock_to_core(); +        pthread_cleanup_push(cleanup_writer, fq); +          while (true) {                  fevent(eth_data.np1_flows, fq, NULL);                  while ((fd = fqueue_next(fq)) >= 0) {                          if (fqueue_type(fq) != FLOW_PKT)                                  continue; -                        if (ipcp_flow_read(fd, &sdb)) { +                        if (np1_flow_read(fd, &sdb)) {                                  log_dbg("Bad read from fd %d.", fd);                                  continue;                          } -                        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +                        
len = shm_du_buff_len(sdb);                          if (shm_du_buff_head_alloc(sdb, ETH_HEADER_TOT_SIZE)                              == NULL) {                                  log_dbg("Failed to allocate header.");                                  ipcp_sdb_release(sdb); +                                continue;                          }                          pthread_rwlock_rdlock(&eth_data.flows_lock); @@ -1089,14 +1055,15 @@                          pthread_rwlock_unlock(&eth_data.flows_lock); -                        eth_ipcp_send_frame(r_addr, +                        if (eth_ipcp_send_frame(r_addr,  #if defined(BUILD_ETH_DIX)                                              deid,  #elif defined(BUILD_ETH_LLC)                                              dsap, ssap,  #endif                                              shm_du_buff_head(sdb), -                                            len); +                                            len)) +                                log_dbg("Failed to send frame.");                          ipcp_sdb_release(sdb);                  }          } @@ -1246,114 +1213,123 @@ static int open_bpf_device(void)  }  #endif -static int eth_ipcp_bootstrap(const struct ipcp_config * conf) -{ -        int              idx; -        struct ifreq     ifr; -#if defined(HAVE_NETMAP) -        char             ifn[IFNAMSIZ]; -#elif defined(HAVE_BPF) -        int              enable  = 1; -        int              disable = 0; -        int              blen; -#endif /* HAVE_NETMAP */ -  #if defined(__FreeBSD__) || defined(__APPLE__) +static int ifr_hwaddr_from_ifaddrs(struct ifreq * ifr) +{          struct ifaddrs * ifaddr;          struct ifaddrs * ifa; -#elif defined(__linux__) -        int              skfd; -#endif -#ifndef SHM_RDRB_MULTI_BLOCK -        size_t           maxsz; -#endif -#if defined(HAVE_RAW_SOCKETS) -    #if defined(IPCP_ETH_QDISC_BYPASS) -        int              qdisc_bypass = 1; -    #endif /* ENABLE_QDISC_BYPASS */ -        int              flags; -#endif -        assert(conf); -        assert(conf->type == THIS_TYPE); +        int              idx; -        ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo; -        ipcpi.layer_name = strdup(conf->layer_info.layer_name); -        if (ipcpi.layer_name == NULL) { -                log_err("Failed to set layer name"); -                return -ENOMEM; +        if (getifaddrs(&ifaddr) < 0)  { +                log_err("Could not get interfaces."); +                goto fail_ifaddrs;          } -        if (conf->dev == NULL) { -                log_err("Device name is NULL."); -                return -1; +        for (ifa = ifaddr, idx = 0; ifa != NULL; ifa = ifa->ifa_next, ++idx) { +                if (strcmp(ifa->ifa_name, ifr->ifr_name) == 0) +                        break;          } -        if (strlen(conf->dev) >= IFNAMSIZ) { -                log_err("Invalid device name: %s.", conf->dev); -                return -1; +        if (ifa == NULL) { +                log_err("Interface not found."); +                goto fail_ifa;          } -        memset(&ifr, 0, sizeof(ifr)); -        strcpy(ifr.ifr_name, conf->dev); +        memcpy(&ifr->ifr_addr, ifa->ifa_addr, sizeof(*ifa->ifa_addr)); -#ifdef BUILD_ETH_DIX -        if (conf->ethertype < 0x0600 || conf->ethertype == 0xFFFF) { -                log_err("Invalid Ethertype."); -                return -1; -        } -        eth_data.ethertype = htons(conf->ethertype); -#endif +        
log_dbg("Interface %s hwaddr " MAC_FMT ".", ifr->ifr_name, +                MAC_VAL(ifr->ifr_addr.sa_data)); -#if defined(__FreeBSD__) || defined(__APPLE__) -        if (getifaddrs(&ifaddr) < 0)  { -                log_err("Could not get interfaces."); -                return -1; +        freeifaddrs(ifaddr); + +        return 0; + fail_ifa: +         freeifaddrs(ifaddr); + fail_ifaddrs: +        return -1; + +} +#elif defined(__linux__) +static int ifr_hwaddr_from_socket(struct ifreq * ifr) +{ +        int skfd; + +        skfd = socket(AF_UNIX, SOCK_STREAM, 0); +        if (skfd < 0) { +                log_err("Failed to open socket."); +                goto fail_socket;          } -        for (ifa = ifaddr, idx = 0; ifa != NULL; ifa = ifa->ifa_next, ++idx) { -                if (strcmp(ifa->ifa_name, conf->dev)) -                        continue; -                log_dbg("Interface %s found.", conf->dev); - -    #if defined(HAVE_NETMAP) || defined(HAVE_BPF) -                memcpy(eth_data.hw_addr, -                       LLADDR((struct sockaddr_dl *) (ifa)->ifa_addr), -                       MAC_SIZE); -    #elif defined (HAVE_RAW_SOCKETS) -                memcpy(&ifr.ifr_addr, ifa->ifa_addr, sizeof(*ifa->ifa_addr)); -    #endif -                break; +        if (ioctl(skfd, SIOCGIFHWADDR, ifr)) { +                log_err("Failed to get hwaddr."); +                goto fail_ifr;          } -        freeifaddrs(ifaddr); +        log_dbg("Interface %s hwaddr " MAC_FMT ".", ifr->ifr_name, +                MAC_VAL(ifr->ifr_hwaddr.sa_data)); -        if (ifa == NULL) { -                log_err("Interface not found."); -                return -1; -        } +        close(skfd); + +        return 0; + + fail_ifr: +        close(skfd); + fail_socket: +        return -1; +} +#endif +static int eth_ifr_hwaddr(struct ifreq * ifr) +{ +#if defined(__FreeBSD__) || defined(__APPLE__) +        return ifr_hwaddr_from_ifaddrs(ifr);  #elif defined(__linux__) +        return ifr_hwaddr_from_socket(ifr); +#else +        return -1; +#endif +} + +static int eth_ifr_mtu(struct ifreq * ifr) +{ +        int skfd; +          skfd = socket(AF_UNIX, SOCK_STREAM, 0);          if (skfd < 0) {                  log_err("Failed to open socket."); -                return -1; +                goto fail_socket;          } -        if (ioctl(skfd, SIOCGIFMTU, &ifr)) { +        if (ioctl(skfd, SIOCGIFMTU, ifr) < 0) {                  log_err("Failed to get MTU."); -                close(skfd); +                goto fail_mtu; +        } +        close(skfd); + +        return 0; + + fail_mtu: +        close(skfd); + fail_socket: +        return -1; +} + +static int eth_set_mtu(struct ifreq * ifr) +{ +        if (eth_ifr_mtu(ifr) < 0) { +                log_err("Failed to get interface MTU.");                  return -1;          } -        log_dbg("Device MTU is %d.", ifr.ifr_mtu); +        log_dbg("Device MTU is %d.", ifr->ifr_mtu); -        eth_data.mtu = MIN((int) ETH_MTU_MAX, ifr.ifr_mtu); -        if (memcmp(conf->dev, "lo", 2) == 0 && eth_data.mtu > IPCP_ETH_LO_MTU) { +        eth_data.mtu = MIN((int) ETH_MTU_MAX, ifr->ifr_mtu); +        if (memcmp(ifr->ifr_name, "lo", 2) == 0 && +                   eth_data.mtu > IPCP_ETH_LO_MTU) {                  log_dbg("Using loopback interface. 
MTU restricted to %d.",                           IPCP_ETH_LO_MTU);                  eth_data.mtu = IPCP_ETH_LO_MTU;          } -  #ifndef SHM_RDRB_MULTI_BLOCK          maxsz = SHM_RDRB_BLOCK_SIZE - 5 * sizeof(size_t) -                  (DU_BUFF_HEADSPACE + DU_BUFF_TAILSPACE); @@ -1364,30 +1340,18 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)  #endif          log_dbg("Layer MTU is %d.", eth_data.mtu); -        if (ioctl(skfd, SIOCGIFHWADDR, &ifr)) { -                log_err("Failed to get hwaddr."); -                close(skfd); -                return -1; -        } - -        close(skfd); - -        idx = if_nametoindex(conf->dev); -        if (idx == 0) { -                log_err("Failed to retrieve interface index."); -                return -1; -        } -        eth_data.if_idx = idx; -#endif /* __FreeBSD__ */ - +        return 0; +}  #if defined(HAVE_NETMAP) +static int eth_init_nmd(struct ifreq * ifr) +{          strcpy(ifn, "netmap:"); -        strcat(ifn, conf->dev); +        strcat(ifn, ifr->ifr_name);          eth_data.nmd = nm_open(ifn, NULL, 0, NULL);          if (eth_data.nmd == NULL) {                  log_err("Failed to open netmap device."); -                return -1; +                goto fail_nmd;          }          memset(&eth_data.poll_in, 0, sizeof(eth_data.poll_in)); @@ -1399,11 +1363,22 @@          eth_data.poll_out.events = POLLOUT;          log_info("Using netmap device."); -#elif defined(HAVE_BPF) /* !HAVE_NETMAP */ + +        return 0; + fail_nmd: +        return -1; +} +#elif defined (HAVE_BPF) +static int eth_init_bpf(struct ifreq * ifr) +{ +        int enable  = 1; +        int disable = 0; +        int blen; +          eth_data.bpf = open_bpf_device();          if (eth_data.bpf < 0) {                  log_err("Failed to open bpf device."); -                return -1; +                goto fail_bpf;          }          ioctl(eth_data.bpf, BIOCGBLEN, &blen); @@ -1413,7 +1388,7 @@                  goto fail_device;          } -        if (ioctl(eth_data.bpf, BIOCSETIF, &ifr) < 0) { +        if (ioctl(eth_data.bpf, BIOCSETIF, ifr) < 0) {                  log_err("Failed to set interface.");                  goto fail_device;          } @@ -1434,25 +1409,42 @@          }          log_info("Using Berkeley Packet Filter."); + +        return 0; + + fail_device: +        close(eth_data.bpf); + fail_bpf: +        return -1; +}  #elif defined(HAVE_RAW_SOCKETS) +static int eth_init_raw_socket(struct ifreq * ifr) +{ +        int idx; +        int flags; +#if defined(IPCP_ETH_QDISC_BYPASS) +        int              qdisc_bypass = 1; +#endif /* ENABLE_QDISC_BYPASS */ + +        idx = if_nametoindex(ifr->ifr_name); +        if (idx == 0) { +                log_err("Failed to retrieve interface index."); +                return -1; +        }          memset(&(eth_data.device), 0, sizeof(eth_data.device));          eth_data.device.sll_ifindex  = idx;          eth_data.device.sll_family   = AF_PACKET; -        memcpy(eth_data.device.sll_addr, ifr.ifr_hwaddr.sa_data, MAC_SIZE); +        memcpy(eth_data.device.sll_addr, ifr->ifr_hwaddr.sa_data, MAC_SIZE);          eth_data.device.sll_halen    = MAC_SIZE;          eth_data.device.sll_protocol = htons(ETH_P_ALL); - -    #if defined (BUILD_ETH_DIX) +#if defined (BUILD_ETH_DIX)          eth_data.s_fd = socket(AF_PACKET, 
SOCK_RAW, eth_data.ethertype); -    #elif defined (BUILD_ETH_LLC) +#elif defined (BUILD_ETH_LLC)          eth_data.s_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_802_2)); -    #endif - -        log_info("Using raw socket device."); - +#endif          if (eth_data.s_fd < 0) {                  log_err("Failed to create socket."); -                return -1; +                goto fail_socket;          }          flags = fcntl(eth_data.s_fd, F_GETFL, 0); @@ -1466,80 +1458,140 @@                  goto fail_device;          } -    #if defined(IPCP_ETH_QDISC_BYPASS) +#if defined(IPCP_ETH_QDISC_BYPASS)          if (setsockopt(eth_data.s_fd, SOL_PACKET, PACKET_QDISC_BYPASS,                         &qdisc_bypass, sizeof(qdisc_bypass))) {                  log_info("Qdisc bypass not supported.");          } -    #endif +#endif          if (bind(eth_data.s_fd, (struct sockaddr *) &eth_data.device, -                sizeof(eth_data.device))) { +                 sizeof(eth_data.device)) < 0) {                  log_err("Failed to bind socket to interface.");                  goto fail_device;          } +#ifdef __linux__ +        eth_data.if_idx = idx; +#endif +        log_info("Using raw socket device."); + +        return 0; + fail_device: +        close(eth_data.s_fd); + fail_socket: +        return -1; +} +#endif +static int eth_ipcp_bootstrap(struct ipcp_config * conf) +{ +        struct ifreq     ifr; +        int              i; +#if defined(HAVE_NETMAP) +        char             ifn[IFNAMSIZ];  #endif /* HAVE_NETMAP */ -        ipcp_set_state(IPCP_OPERATIONAL); -#if defined(__linux__) -        if (pthread_create(&eth_data.if_monitor, -                           NULL, -                           eth_ipcp_if_monitor, -                           NULL)) { -                ipcp_set_state(IPCP_INIT); -                goto fail_device; +#ifndef SHM_RDRB_MULTI_BLOCK +        size_t           maxsz; +#endif +        assert(conf); +        assert(conf->type == THIS_TYPE); + +        if (strlen(conf->eth.dev) >= IFNAMSIZ) { +                log_err("Invalid device name: %s.", conf->eth.dev); +                return -1; +        } + +        memset(&ifr, 0, sizeof(ifr)); +        strcpy(ifr.ifr_name, conf->eth.dev); +#ifdef BUILD_ETH_DIX +        if (conf->eth.ethertype < 0x0600 || conf->eth.ethertype == 0xFFFF) { +                log_err("Invalid Ethertype: %d.", conf->eth.ethertype); +                return -1;          } +        eth_data.ethertype = htons(conf->eth.ethertype);  #endif +        if (eth_set_mtu(&ifr) < 0) { +                log_err("Failed to set MTU."); +                return -1; +        } -        if (pthread_create(&eth_data.mgmt_handler, -                           NULL, -                           eth_ipcp_mgmt_handler, -                           NULL)) { -                ipcp_set_state(IPCP_INIT); +        if (eth_ifr_hwaddr(&ifr) < 0) { +                log_err("Failed to get hardware addr."); +                return -1; +        } +#if defined(HAVE_NETMAP) || defined(HAVE_BPF) +        memcpy(eth_data.hw_addr, LLADDR((struct sockaddr_dl *) &ifr.ifr_addr),                MAC_SIZE); +#endif +#if defined(HAVE_NETMAP) +        if (eth_init_nmd(&ifr) < 0) { +                log_err("Failed to initialize netmap device."); +                return -1; +        } +#elif defined(HAVE_BPF) /* !HAVE_NETMAP */ +        if (eth_init_bpf(&ifr) < 0) { +                log_err("Failed to initialize BPF device."); +                return -1; +  
     } +#elif defined(HAVE_RAW_SOCKETS) +        if (eth_init_raw_socket(&ifr) < 0) { +                log_err("Failed to initialize raw socket device."); +                return -1; +        } +#endif /* HAVE_NETMAP */ +#if defined(__linux__) +        if (pthread_create(&eth_data.if_monitor, NULL, +                           eth_ipcp_if_monitor, NULL)) { +                log_err("Failed to create monitor thread: %s.", +                        strerror(errno)); +                goto fail_monitor; +        } +#endif +        if (pthread_create(&eth_data.mgmt_handler, NULL, +                           eth_ipcp_mgmt_handler, NULL)) { +                log_err("Failed to create mgmt handler thread: %s.", +                        strerror(errno));                  goto fail_mgmt_handler;          } -        for (idx = 0; idx < IPCP_ETH_RD_THR; ++idx) { -                if (pthread_create(&eth_data.packet_reader[idx], -                                   NULL, -                                   eth_ipcp_packet_reader, -                                   NULL)) { -                        ipcp_set_state(IPCP_INIT); +        for (i = 0; i < IPCP_ETH_RD_THR; i++) { +                if (pthread_create(&eth_data.packet_reader[i], NULL, +                                   eth_ipcp_packet_reader, NULL)) { +                        log_err("Failed to create packet reader thread: %s.", +                                strerror(errno));                          goto fail_packet_reader;                  }          } -        for (idx = 0; idx < IPCP_ETH_WR_THR; ++idx) { -                if (pthread_create(&eth_data.packet_writer[idx], -                                   NULL, -                                   eth_ipcp_packet_writer, -                                   NULL)) { -                        ipcp_set_state(IPCP_INIT); +        for (i = 0; i < IPCP_ETH_WR_THR; i++) { +                if (pthread_create(&eth_data.packet_writer[i], NULL, +                                   eth_ipcp_packet_writer, NULL)) { +                        log_err("Failed to create packet writer thread: %s.", +                                strerror(errno));                          goto fail_packet_writer;                  }          }  #if defined(BUILD_ETH_DIX)          log_dbg("Bootstrapped IPCP over DIX Ethernet with pid %d " -                "and Ethertype 0x%X.", getpid(), conf->ethertype); +                "and Ethertype 0x%X.", getpid(), conf->eth.ethertype);  #elif defined(BUILD_ETH_LLC)          log_dbg("Bootstrapped IPCP over Ethernet with LLC with pid %d.",                  getpid());  #endif -          return 0;   fail_packet_writer: -        while (idx > 0) { -                pthread_cancel(eth_data.packet_writer[--idx]); -                pthread_join(eth_data.packet_writer[idx], NULL); +        while (i-- > 0) { +                pthread_cancel(eth_data.packet_writer[i]); +                pthread_join(eth_data.packet_writer[i], NULL);          } -        idx = IPCP_ETH_RD_THR; +        i = IPCP_ETH_RD_THR;   fail_packet_reader: -        while (idx > 0) { -                pthread_cancel(eth_data.packet_reader[--idx]); -                pthread_join(eth_data.packet_reader[idx], NULL); +        while (i-- > 0) { +                pthread_cancel(eth_data.packet_reader[i]); +                pthread_join(eth_data.packet_reader[i], NULL);          }          pthread_cancel(eth_data.mgmt_handler);          pthread_join(eth_data.mgmt_handler, NULL); @@ -1548,8 +1600,8 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)         
 pthread_cancel(eth_data.if_monitor);          pthread_join(eth_data.if_monitor, NULL);  #endif -#if defined(__linux__) || !defined(HAVE_NETMAP) - fail_device: +#if defined(__linux__) + fail_monitor:  #endif  #if defined(HAVE_NETMAP)          nm_close(eth_data.nmd); @@ -1564,13 +1616,11 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf)  static int eth_ipcp_reg(const uint8_t * hash)  {          if (shim_data_reg_add_entry(eth_data.shim_data, hash)) { -                log_err("Failed to add " HASH_FMT " to local registry.", -                        HASH_VAL(hash)); +                log_err("Failed to add " HASH_FMT32 " to local registry.", +                        HASH_VAL32(hash));                  return -1;          } -        log_dbg("Registered " HASH_FMT ".", HASH_VAL(hash)); -          return 0;  } @@ -1584,8 +1634,7 @@ static int eth_ipcp_unreg(const uint8_t * hash)  static int eth_ipcp_query(const uint8_t * hash)  {          uint8_t            r_addr[MAC_SIZE]; -        struct timespec    timeout = {(NAME_QUERY_TIMEO / 1000), -                                      (NAME_QUERY_TIMEO % 1000) * MILLION}; +        struct timespec    timeout = TIMESPEC_INIT_MS(NAME_QUERY_TIMEO);          struct dir_query * query;          int                ret;          uint8_t *          buf; @@ -1637,42 +1686,41 @@ static int eth_ipcp_query(const uint8_t * hash)          return ret;  } -static int eth_ipcp_flow_alloc(int             fd, -                               const uint8_t * hash, -                               qosspec_t       qs, -                               const void *    data, -                               size_t          len) +static int eth_ipcp_flow_alloc(int              fd, +                               const uint8_t *  hash, +                               qosspec_t        qs, +                               const buffer_t * data)  {  #ifdef BUILD_ETH_LLC          uint8_t  ssap = 0;  #endif          uint8_t  r_addr[MAC_SIZE]; -        uint64_t addr = 0; - -        log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(hash)); +        struct addr addr;          assert(hash);          if (!shim_data_dir_has(eth_data.shim_data, hash)) { -                log_err("Destination unreachable."); +                log_err("Destination " HASH_FMT32 " unreachable.", +                        HASH_VAL32(hash));                  return -1;          } +          addr = shim_data_dir_get_addr(eth_data.shim_data, hash); +        memcpy(r_addr, &addr.mac, MAC_SIZE); -        pthread_rwlock_wrlock(&eth_data.flows_lock);  #ifdef BUILD_ETH_LLC +        pthread_rwlock_wrlock(&eth_data.flows_lock);          ssap = bmp_allocate(eth_data.saps);          if (!bmp_is_id_valid(eth_data.saps, ssap)) {                  pthread_rwlock_unlock(&eth_data.flows_lock); +                log_err("Failed to allocate SSAP.");                  return -1;          }          eth_data.fd_to_ef[fd].sap = ssap;          eth_data.ef_to_fd[ssap]   = fd; -#endif          pthread_rwlock_unlock(&eth_data.flows_lock); - -        memcpy(r_addr, &addr, MAC_SIZE); +#endif          if (eth_ipcp_alloc(r_addr,  #if defined(BUILD_ETH_DIX) @@ -1682,34 +1730,29 @@ static int eth_ipcp_flow_alloc(int             fd,  #endif                             hash,                             qs, -                           data, -                           len) < 0) { +                           data) < 0) {  #ifdef BUILD_ETH_LLC                  pthread_rwlock_wrlock(&eth_data.flows_lock);                  bmp_release(eth_data.saps, 
eth_data.fd_to_ef[fd].sap);                  eth_data.fd_to_ef[fd].sap = -1;                  eth_data.ef_to_fd[ssap]   = -1;                  pthread_rwlock_unlock(&eth_data.flows_lock); +                log_err("Failed to allocate with peer.");  #endif                  return -1;          }          fset_add(eth_data.np1_flows, fd); -#if defined(BUILD_ETH_DIX) -        log_dbg("Pending flow with fd %d.", fd); -#elif defined(BUILD_ETH_LLC) -        log_dbg("Pending flow with fd %d on SAP %d.", fd, ssap); +#if defined(BUILD_ETH_LLC) +        log_dbg("Assigned SAP %d for fd %d.", ssap, fd);  #endif          return 0;  } -static int eth_ipcp_flow_alloc_resp(int          fd, -                                    int          response, -                                    const void * data, -                                    size_t       len) +static int eth_ipcp_flow_alloc_resp(int              fd, +                                    int              response, +                                    const buffer_t * data)  { -        struct timespec ts    = {0, ALLOC_TIMEO * MILLION}; -        struct timespec abstime;  #if defined(BUILD_ETH_DIX)          uint16_t        r_eid;  #elif defined(BUILD_ETH_LLC) @@ -1718,27 +1761,11 @@  #endif          uint8_t         r_addr[MAC_SIZE]; -        clock_gettime(PTHREAD_COND_CLOCK, &abstime); - -        pthread_mutex_lock(&ipcpi.alloc_lock); - -        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) { -                ts_add(&abstime, &ts, &abstime); -                pthread_cond_timedwait(&ipcpi.alloc_cond, -                                       &ipcpi.alloc_lock, -                                       &abstime); -        } - -        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                pthread_mutex_unlock(&ipcpi.alloc_lock); +        if (ipcp_wait_flow_resp(fd) < 0) { +                log_err("Failed to wait for flow response.");                  return -1;          } -        ipcpi.alloc_id = -1; -        pthread_cond_broadcast(&ipcpi.alloc_cond); - -        pthread_mutex_unlock(&ipcpi.alloc_lock); -          pthread_rwlock_wrlock(&eth_data.flows_lock);  #if defined(BUILD_ETH_DIX)          r_eid = eth_data.fd_to_ef[fd].r_eid; @@ -1746,6 +1773,7 @@          ssap = bmp_allocate(eth_data.saps);          if (!bmp_is_id_valid(eth_data.saps, ssap)) {                  pthread_rwlock_unlock(&eth_data.flows_lock); +                log_err("Failed to allocate SSAP.");                  return -1;          } @@ -1764,21 +1792,19 @@                                  ssap, r_sap,  #endif                                  response, -                                data, -                                len) < 0) { +                                data) < 0) {  #ifdef BUILD_ETH_LLC                  pthread_rwlock_wrlock(&eth_data.flows_lock);                  bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap);                  pthread_rwlock_unlock(&eth_data.flows_lock);  #endif +                log_err("Failed to respond to peer.");                  return -1;          }          fset_add(eth_data.np1_flows, fd); -#if defined(BUILD_ETH_DIX) -        log_dbg("Accepted flow, fd %d.", fd); -#elif defined(BUILD_ETH_LLC) -        log_dbg("Accepted flow, fd %d, SAP %d.", fd, (uint8_t)ssap); +#if defined(BUILD_ETH_LLC) +        log_dbg("Assigned SAP %d for fd %d.", ssap, fd);  #endif          
return 0;  } @@ -1807,9 +1833,7 @@ static int eth_ipcp_flow_dealloc(int fd)          pthread_rwlock_unlock(&eth_data.flows_lock); -        flow_dealloc(fd); - -        log_dbg("Flow with fd %d deallocated.", fd); +        ipcp_flow_dealloc(fd);          return 0;  } @@ -1833,9 +1857,6 @@ int main(int    argc,  {          int i; -        if (ipcp_init(argc, argv, &eth_ops, THIS_TYPE) < 0) -                goto fail_init; -          if (eth_data_init() < 0) {  #if defined(BUILD_ETH_DIX)                  log_err("Failed to init eth-llc data."); @@ -1845,18 +1866,17 @@                  goto fail_data_init;          } -        if (ipcp_boot() < 0) { -                log_err("Failed to boot IPCP."); -                goto fail_boot; +        if (ipcp_init(argc, argv, &eth_ops, THIS_TYPE) < 0) { +                log_err("Failed to initialize IPCP."); +                goto fail_init;          } -        if (ipcp_create_r(0)) { -                log_err("Failed to notify IRMd we are initialized."); -                ipcp_set_state(IPCP_NULL); -                goto fail_create_r; +        if (ipcp_start() < 0) { +                log_err("Failed to start IPCP."); +                goto fail_start;          } -        ipcp_shutdown(); +        ipcp_sigwait();          if (ipcp_get_state() == IPCP_SHUTDOWN) {                  for (i = 0; i < IPCP_ETH_WR_THR; ++i) @@ -1879,19 +1899,18 @@  #endif          } -        eth_data_fini(); +        ipcp_stop();          ipcp_fini(); +        eth_data_fini(); +          exit(EXIT_SUCCESS); - fail_create_r: -        ipcp_shutdown(); - fail_boot: -        eth_data_fini(); - fail_data_init: + fail_start:          ipcp_fini();   fail_init: -        ipcp_create_r(-1); +        eth_data_fini(); + fail_data_init:          exit(EXIT_FAILURE);  } diff --git a/src/ipcpd/eth/llc.c b/src/ipcpd/eth/llc.c index d1e8bbdc..c900dcab 100644 --- a/src/ipcpd/eth/llc.c +++ b/src/ipcpd/eth/llc.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * IPC processes over Ethernet - LLC   * diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c index e3e4221a..ebb9b1c5 100644 --- a/src/ipcpd/ipcp.c +++ b/src/ipcpd/ipcp.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * IPC process main loop   * @@ -35,18 +35,21 @@  #define OUROBOROS_PREFIX  "ipcpd/ipcp"  #define IPCP_INFO         "info" +#define ALLOC_TIMEOUT     50 /* ms */ +#include <ouroboros/bitmap.h> +#include <ouroboros/dev.h> +#include <ouroboros/errno.h>  #include <ouroboros/hash.h> +#include <ouroboros/ipcp-dev.h>  #include <ouroboros/logs.h> -#include <ouroboros/time_utils.h> -#include <ouroboros/utils.h> -#include <ouroboros/sockets.h> -#include <ouroboros/errno.h> -#include <ouroboros/dev.h> -#include <ouroboros/bitmap.h>  #include <ouroboros/np1_flow.h> -#include <ouroboros/rib.h> +#include <ouroboros/protobuf.h>  #include <ouroboros/pthread.h> +#include <ouroboros/rib.h> +#include <ouroboros/sockets.h> +#include <ouroboros/time.h> +#include <ouroboros/utils.h>  #include "ipcp.h" @@ -61,13 +64,73 @@  #endif  #endif -char * info[LAYER_NAME_SIZE + 1] = { -        "_state", -        "_type", -        "_layer", -        NULL +#ifndef CLOCK_REALTIME_COARSE +#define CLOCK_REALTIME_COARSE CLOCK_REALTIME +#endif + +static char * ipcp_type_str[] = { +        "local", +        "unicast", +        "broadcast", +        "eth-llc", +        "eth-dix", +        "udp4", +        "udp6" +}; + 
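These lookup tables are indexed directly with the corresponding enum values further down (ipcp_type_str[ipcpd.type], ipcp_state_str[ipcp_get_state()], dir_hash_str[ipcpd.dir_hash_algo]), so their ordering has to track the enum definitions in the public headers. A hedged guard sketch; IPCP_TYPE_MAX is a hypothetical trailing enum member, not something this commit defines:

        /* Hypothetical compile-time guard (C11): trips if enum ipcp_type
         * grows without this table being updated.  IPCP_TYPE_MAX is an
         * assumed count member, not part of the actual headers.         */
        _Static_assert(sizeof(ipcp_type_str) / sizeof(ipcp_type_str[0])
                       == IPCP_TYPE_MAX, "ipcp_type_str out of sync");
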
+static char * dir_hash_str[] = { +        "SHA3-224", +        "SHA3-256", +        "SHA3-384", +        "SHA3-512", +        "CRC32", +        "MD5" +}; + +static char * ipcp_state_str[] = { +        "null", +        "init", +        "boot", +        "bootstrapped", +        "enrolled", +        "operational", +        "shutdown"  }; +struct { +        pid_t              irmd_pid; +        char *             name; + +        enum ipcp_type     type; +        char               layer_name[LAYER_NAME_SIZE + 1]; + +        uint64_t           dt_addr; + +        enum hash_algo     dir_hash_algo; + +        struct ipcp_ops *  ops; +        int                irmd_fd; + +        enum ipcp_state    state; +        pthread_cond_t     state_cond; +        pthread_mutex_t    state_mtx; + +        int                sockfd; +        char *             sock_path; + +        struct list_head   cmds; +        pthread_cond_t     cmd_cond; +        pthread_mutex_t    cmd_lock; + +        int                alloc_id; +        pthread_cond_t     alloc_cond; +        pthread_mutex_t    alloc_lock; + +        struct tpm *       tpm; + +        pthread_t          acceptor; +} ipcpd; +  struct cmd {          struct list_head next; @@ -76,9 +139,38 @@ struct cmd {          int              fd;  }; +enum ipcp_type ipcp_get_type(void) +{ +        return ipcpd.type; +} + +const char * ipcp_get_name(void) +{ +        return ipcpd.name; +} + +void ipcp_set_dir_hash_algo(enum hash_algo algo) +{ +        ipcpd.dir_hash_algo = algo; +} + +size_t ipcp_dir_hash_len(void) +{ +        return hash_len(ipcpd.dir_hash_algo); +} + +int ipcp_get_layer_name(char * layer) +{ +        if (ipcp_get_state() < IPCP_OPERATIONAL) +                return -EIPCPSTATE; + +        strcpy(layer, ipcpd.layer_name); +        return 0; +} +  uint8_t * ipcp_hash_dup(const uint8_t * hash)  { -        uint8_t * dup = malloc(hash_len(ipcpi.dir_hash_algo)); +        uint8_t * dup = malloc(hash_len(ipcpd.dir_hash_algo));          if (dup == NULL)                  return NULL; @@ -102,6 +194,13 @@ void ipcp_hash_str(char *          buf,          buf[2 * i] = '\0';  } +static const char * info[] = { +        "_state", +        "_type", +        "_layer", +        NULL +}; +  static int ipcp_rib_read(const char * path,                           char *       buf,                           size_t       len) @@ -129,18 +228,20 @@ static int ipcp_rib_read(const char * path,          }          if (strcmp(entry, info[1]) == 0) { /* _type */ -                if (ipcpi.type == IPCP_LOCAL) +                if (ipcpd.type == IPCP_LOCAL)                          strcpy(buf, "local\n"); -                else if (ipcpi.type == IPCP_UNICAST) +                else if (ipcpd.type == IPCP_UNICAST)                          strcpy(buf, "unicast\n"); -                else if (ipcpi.type == IPCP_BROADCAST) +                else if (ipcpd.type == IPCP_BROADCAST)                          strcpy(buf, "broadcast\n"); -                else if (ipcpi.type == IPCP_ETH_LLC) +                else if (ipcpd.type == IPCP_ETH_LLC)                          strcpy(buf, "eth-llc\n"); -                else if (ipcpi.type == IPCP_ETH_DIX) +                else if (ipcpd.type == IPCP_ETH_DIX)                          strcpy(buf, "eth-dix\n"); -                else if (ipcpi.type == IPCP_UDP) -                        strcpy(buf, "udp\n"); +                else if (ipcpd.type == IPCP_UDP4) +                        strcpy(buf, "udp4\n"); +                else if (ipcpd.type == IPCP_UDP6) +   
                     strcpy(buf, "udp6\n");                  else                          strcpy(buf, "bug\n");          } @@ -150,7 +251,7 @@ static int ipcp_rib_read(const char * path,                  if (ipcp_get_state() < IPCP_OPERATIONAL)                          strcpy(buf, "(null)");                  else -                        strcpy(buf, ipcpi.layer_name); +                        strcpy(buf, ipcpd.layer_name);                  buf[strlen(buf)] = '\n';          } @@ -162,38 +263,40 @@ static int ipcp_rib_readdir(char *** buf)  {          int  i = 0; -        while (info[i] != NULL) -                i++; +        while (info[i++] != NULL);          *buf = malloc(sizeof(**buf) * i);          if (*buf == NULL) -                goto fail; +                goto fail_entries;          i = 0;          while (info[i] != NULL) {                  (*buf)[i] = strdup(info[i]); -                if (*buf == NULL) +                if ((*buf)[i] == NULL)                          goto fail_dup;                  i++;          }          return i;   fail_dup: -        while (--i > 0) +        while (i-- > 0)                  free((*buf)[i]); - fail:          free(*buf); - -        return -1; + fail_entries: +        return -ENOMEM;  }  static int ipcp_rib_getattr(const char *      path,                              struct rib_attr * attr)  { -        (void) path; +        char buf[LAYER_NAME_SIZE + 2]; +        struct timespec now; + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); -        attr->size = LAYER_NAME_SIZE; +        attr->size = ipcp_rib_read(path, buf, LAYER_NAME_SIZE + 2); +        attr->mtime = now.tv_sec;          return 0;  } @@ -206,24 +309,18 @@ static struct rib_ops r_ops = {  static void * acceptloop(void * o)  { -        int            csockfd; -        struct timeval tv = {(SOCKET_TIMEOUT / 1000), -                             (SOCKET_TIMEOUT % 1000) * 1000}; +        int csockfd;          (void) o;          while (ipcp_get_state() != IPCP_SHUTDOWN && -               ipcp_get_state() != IPCP_NULL) { +               ipcp_get_state() != IPCP_INIT) {                  struct cmd * cmd; -                csockfd = accept(ipcpi.sockfd, 0, 0); +                csockfd = accept(ipcpd.sockfd, 0, 0);                  if (csockfd < 0)                          continue; -                if (setsockopt(csockfd, SOL_SOCKET, SO_RCVTIMEO, -                               (void *) &tv, sizeof(tv))) -                        log_warn("Failed to set timeout on socket."); -                  cmd = malloc(sizeof(*cmd));                  if (cmd == NULL) {                          log_err("Out of memory"); @@ -248,51 +345,462 @@ static void * acceptloop(void * o)                  cmd->fd = csockfd; -                pthread_mutex_lock(&ipcpi.cmd_lock); +                pthread_mutex_lock(&ipcpd.cmd_lock); -                list_add(&cmd->next, &ipcpi.cmds); +                list_add(&cmd->next, &ipcpd.cmds); -                pthread_cond_signal(&ipcpi.cmd_cond); +                pthread_cond_signal(&ipcpd.cmd_cond); -                pthread_mutex_unlock(&ipcpi.cmd_lock); +                pthread_mutex_unlock(&ipcpd.cmd_lock);          }          return (void *) 0;  } +int ipcp_wait_flow_req_arr(const uint8_t *  dst, +                           qosspec_t        qs, +                           time_t           mpl, +                           const buffer_t * data) +{ +        struct timespec ts = TIMESPEC_INIT_MS(ALLOC_TIMEOUT); +        struct timespec abstime; +        int             fd; +     
   buffer_t        hash; + +        hash.data = (uint8_t *) dst; +        hash.len  = ipcp_dir_hash_len(); + +        clock_gettime(PTHREAD_COND_CLOCK, &abstime); + +        pthread_mutex_lock(&ipcpd.alloc_lock); + +        while (ipcpd.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) { +                ts_add(&abstime, &ts, &abstime); +                pthread_cond_timedwait(&ipcpd.alloc_cond, +                                       &ipcpd.alloc_lock, +                                       &abstime); +        } + +        if (ipcp_get_state() != IPCP_OPERATIONAL) { +                pthread_mutex_unlock(&ipcpd.alloc_lock); +                log_err("Won't allocate over non-operational IPCP."); +                return -EIPCPSTATE; +        } + +        assert(ipcpd.alloc_id == -1); + +        fd = ipcp_flow_req_arr(&hash, qs, mpl, data); +        if (fd < 0) { +                pthread_mutex_unlock(&ipcpd.alloc_lock); +                log_err("Failed to get fd for flow."); +                return fd; +        } + +        ipcpd.alloc_id = fd; +        pthread_cond_broadcast(&ipcpd.alloc_cond); + +        pthread_mutex_unlock(&ipcpd.alloc_lock); + +        return fd; + +} + +int ipcp_wait_flow_resp(const int fd) +{ +        struct timespec ts = TIMESPEC_INIT_MS(ALLOC_TIMEOUT); +        struct timespec abstime; + +        clock_gettime(PTHREAD_COND_CLOCK, &abstime); + +        pthread_mutex_lock(&ipcpd.alloc_lock); + +        while (ipcpd.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) { +                ts_add(&abstime, &ts, &abstime); +                pthread_cond_timedwait(&ipcpd.alloc_cond, +                                       &ipcpd.alloc_lock, +                                       &abstime); +        } + +        if (ipcp_get_state() != IPCP_OPERATIONAL) { +                pthread_mutex_unlock(&ipcpd.alloc_lock); +                return -1; +        } + +        assert(ipcpd.alloc_id == fd); + +        ipcpd.alloc_id = -1; +        pthread_cond_broadcast(&ipcpd.alloc_cond); + +        pthread_mutex_unlock(&ipcpd.alloc_lock); + +        return 0; +} +  static void free_msg(void * o)  {          ipcp_msg__free_unpacked((ipcp_msg_t *) o, NULL);  }
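These two helpers absorb the single-outstanding-allocation dance that each shim used to open-code (compare the eth_ipcp_req() change earlier in this diff): ipcp_wait_flow_req_arr() blocks until no other allocation is pending, registers the request with the IRMd and records its fd in alloc_id; ipcp_wait_flow_resp() is the counterpart on the response path, waiting until alloc_id matches before clearing it. A minimal sketch of how a shim pairs the two calls, with names taken from this diff; shim_req() and shim_alloc_resp() are hypothetical, and the bookkeeping between the calls is elided:

        /* Passive side: a flow request arrived in a management frame.  */
        static int shim_req(const uint8_t * dst, qosspec_t qs,
                            const buffer_t * data)
        {
                int fd;

                /* Blocks while another allocation is still in flight.  */
                fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_ETH_MPL, data);
                if (fd < 0)
                        return -1;

                /* ... map fd to the remote endpoint ... */

                return 0;
        }

        /* Later, when the local process answers that request.          */
        static int shim_alloc_resp(int fd, int response,
                                   const buffer_t * data)
        {
                /* Waits until alloc_id == fd, then clears it so the
                 * next pending request can proceed.                    */
                if (ipcp_wait_flow_resp(fd) < 0)
                        return -1;

                /* ... send response and data to the peer ... */
                (void) response;
                (void) data;

                return 0;
        }
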
 -static void * mainloop(void * o) + +static void do_bootstrap(ipcp_config_msg_t * conf_msg, +                         ipcp_msg_t *        ret_msg)  { -        int                 sfd; -        buffer_t            buffer;          struct ipcp_config  conf; -        struct layer_info   info; -        ipcp_config_msg_t * conf_msg; -        ipcp_msg_t *        msg; +        struct layer_info * info; + +        log_info("Bootstrapping..."); + +        if (ipcpd.ops->ipcp_bootstrap == NULL) { +                log_err("Failed to bootstrap: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        if (ipcp_get_state() != IPCP_BOOT) { + +                log_err("Failed to bootstrap: IPCP in state <%s>, need <%s>.", +                        ipcp_state_str[ipcp_get_state()], +                        ipcp_state_str[IPCP_BOOT]); +                ret_msg->result = -EIPCPSTATE; +                return; +        } + +        conf = ipcp_config_msg_to_s(conf_msg); +        switch (conf.type) { /* FIXED algorithms */ +        case IPCP_UDP4: +                /* FALLTHRU */ +        case IPCP_UDP6: +                conf.layer_info.dir_hash_algo = (enum pol_dir_hash) HASH_MD5; +                break; +        case IPCP_BROADCAST: +                conf.layer_info.dir_hash_algo = DIR_HASH_SHA3_256; +                break; +        default: +                break; +        } + +        ret_msg->result = ipcpd.ops->ipcp_bootstrap(&conf); +        if (ret_msg->result < 0) { +                log_err("Failed to bootstrap IPCP."); +                return; +        } + +        info = &conf.layer_info; + +        strcpy(ipcpd.layer_name, info->name); +        ipcpd.dir_hash_algo = (enum hash_algo) info->dir_hash_algo; +        ret_msg->layer_info = layer_info_s_to_msg(info); +        ipcp_set_state(IPCP_OPERATIONAL); + +        log_info("Finished bootstrapping in %s.", info->name); +        log_info("  type: %s", ipcp_type_str[ipcpd.type]); +        log_info("  hash: %s [%zd bytes]", +                dir_hash_str[ipcpd.dir_hash_algo], +                ipcp_dir_hash_len()); +} + +static void do_enroll(const char * dst, +                      ipcp_msg_t * ret_msg) +{ +        struct layer_info info; + +        log_info("Enrolling with %s...", dst); + +        if (ipcpd.ops->ipcp_enroll == NULL) { +                log_err("Failed to enroll: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        if (ipcp_get_state() != IPCP_BOOT) { +                log_err("Failed to enroll: IPCP in state <%s>, need <%s>.", +                        ipcp_state_str[ipcp_get_state()], +                        ipcp_state_str[IPCP_BOOT]); +                ret_msg->result = -EIPCPSTATE; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_enroll(dst, &info); +        if (ret_msg->result < 0) { +                log_err("Failed to enroll IPCP."); +                return; +        } + +        strcpy(ipcpd.layer_name, info.name); +        ipcpd.dir_hash_algo = (enum hash_algo) info.dir_hash_algo; +        ret_msg->layer_info = layer_info_s_to_msg(&info); +        ipcp_set_state(IPCP_OPERATIONAL); + +        log_info("Finished enrolling with %s in layer %s.", dst, info.name); +        log_info("  type: %s", ipcp_type_str[ipcpd.type]); +        log_info("  hash: %s [%zd bytes]", +                dir_hash_str[ipcpd.dir_hash_algo], +                ipcp_dir_hash_len()); +} + +static void do_connect(const char * dst, +                       const char * comp, +                       qosspec_t    qs, +                       ipcp_msg_t * ret_msg) +{ +        log_info("Connecting %s to %s...", comp, dst); + +        if (ipcpd.ops->ipcp_connect == NULL) { +                log_err("Failed to connect: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_connect(dst, comp, qs); + +        log_info("Finished connecting."); +} + +static void do_disconnect(const char * dst, +                          const char * comp, +                          ipcp_msg_t * ret_msg) +{ +        log_info("Disconnecting %s from %s...", comp, dst); + +        if (ipcpd.ops->ipcp_disconnect == NULL) { +                log_err("Failed to disconnect: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_disconnect(dst, comp); + +        log_info("Finished disconnecting %s from %s.", comp, dst); +} + 
operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_reg(hash); + +        log_info("Finished registering " HASH_FMT32 ".", HASH_VAL32(hash)); +} + +static void do_unreg(const uint8_t * hash, +                     ipcp_msg_t *    ret_msg) +{ +        log_info("Unregistering " HASH_FMT32 "...", HASH_VAL32(hash)); + +        if (ipcpd.ops->ipcp_unreg == NULL) { +                log_err("Failed to unregister: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_unreg(hash); + +        log_info("Finished unregistering " HASH_FMT32 ".", HASH_VAL32(hash)); +} + +static void do_query(const uint8_t * hash, +                     ipcp_msg_t *    ret_msg) +{ +        /*  TODO: Log this operation when IRMd has internal caches. */ + +        if (ipcpd.ops->ipcp_query == NULL) { +                log_err("Failed to query: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        if (ipcp_get_state() != IPCP_OPERATIONAL) { +                log_dbg("Failed to query: IPCP in state <%s>, need <%s>.", +                        ipcp_state_str[ipcp_get_state()], +                        ipcp_state_str[IPCP_OPERATIONAL]); +                ret_msg->result = -EIPCPSTATE; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_query(hash); +} + +static void do_flow_alloc(pid_t            pid, +                          int              flow_id, +                          uint8_t *        dst, +                          qosspec_t        qs, +                          const buffer_t * data, +                          ipcp_msg_t *     ret_msg) +{ +        int fd; + +        log_info("Allocating flow %d for %d to " HASH_FMT32 ".", +                 flow_id, pid, HASH_VAL32(dst)); + +        if (ipcpd.ops->ipcp_flow_alloc == NULL) { +                log_err("Flow allocation failed: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        if (ipcp_get_state() != IPCP_OPERATIONAL) { +                log_err("Failed to enroll: IPCP in state <%s>, need <%s>.", +                        ipcp_state_str[ipcp_get_state()], +                        ipcp_state_str[IPCP_OPERATIONAL]); +                ret_msg->result = -EIPCPSTATE; +                return; +        } + +        fd = np1_flow_alloc(pid, flow_id); +        if (fd < 0) { +                log_err("Failed allocating n + 1 fd on flow_id %d: %d", +                        flow_id, fd); +                ret_msg->result = -EFLOWDOWN; +                return; +        } + +        ret_msg->result = ipcpd.ops->ipcp_flow_alloc(fd, dst, qs, data); + +        log_info("Finished allocating flow %d to " HASH_FMT32 ".", +                 flow_id, HASH_VAL32(dst)); +} + + +static void do_flow_join(pid_t           pid, +                         int             flow_id, +                         const uint8_t * dst, +                         ipcp_msg_t *    ret_msg) +{ +        int fd; + +        log_info("Joining layer " HASH_FMT32 ".", HASH_VAL32(dst)); + +        if (ipcpd.ops->ipcp_flow_join == NULL) { +                log_err("Failed to join: operation unsupported."); +                ret_msg->result = -ENOTSUP; +                return; +        } + +        if (ipcp_get_state() != IPCP_OPERATIONAL) { +                log_err("Failed to join: 
+static void do_flow_join(pid_t           pid,
+                         int             flow_id,
+                         const uint8_t * dst,
+                         ipcp_msg_t *    ret_msg)
+{
+        int fd;
+
+        log_info("Joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
+
+        if (ipcpd.ops->ipcp_flow_join == NULL) {
+                log_err("Failed to join: operation unsupported.");
+                ret_msg->result = -ENOTSUP;
+                return;
+        }
+
+        if (ipcp_get_state() != IPCP_OPERATIONAL) {
+                log_err("Failed to join: IPCP in state <%s>, need <%s>.",
+                        ipcp_state_str[ipcp_get_state()],
+                        ipcp_state_str[IPCP_OPERATIONAL]);
+                ret_msg->result = -EIPCPSTATE;
+                return;
+        }
+
+        fd = np1_flow_alloc(pid, flow_id);
+        if (fd < 0) {
+                log_err("Failed allocating n + 1 fd on flow_id %d.", flow_id);
+                ret_msg->result = -1;
+                return;
+        }
+
+        ret_msg->result = ipcpd.ops->ipcp_flow_join(fd, dst);
+
+        log_info("Finished joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
+}
+
+static void do_flow_alloc_resp(int              resp,
+                               int              flow_id,
+                               const buffer_t * data,
+                               ipcp_msg_t *     ret_msg)
+{
+        int fd = -1;
+
+        log_info("Responding %d to alloc on flow_id %d.", resp, flow_id);
+
+        if (ipcpd.ops->ipcp_flow_alloc_resp == NULL) {
+                log_err("Failed to respond on flow %d: operation unsupported.",
+                        flow_id);
+                ret_msg->result = -ENOTSUP;
+                return;
+        }
+
+        if (ipcp_get_state() != IPCP_OPERATIONAL) {
+                log_err("Failed to respond to flow %d: "
+                        "IPCP in state <%s>, need <%s>.",
+                        flow_id,
+                        ipcp_state_str[ipcp_get_state()],
+                        ipcp_state_str[IPCP_OPERATIONAL]);
+                ret_msg->result = -EIPCPSTATE;
+                return;
+        }
+
+        fd = np1_flow_resp(flow_id, resp);
+        if (fd < 0) {
+                log_warn("Flow_id %d is not known.", flow_id);
+                ret_msg->result = -1;
+                return;
+        }
+
+        ret_msg->result = ipcpd.ops->ipcp_flow_alloc_resp(fd, resp, data);
+
+        log_info("Finished responding %d to allocation request.", resp);
+}
+
+static void do_flow_dealloc(int          flow_id,
+                            int          timeo_sec,
+                            ipcp_msg_t * ret_msg)
+{
+        int fd;
+
+        log_info("Deallocating flow %d.", flow_id);
+
+        if (ipcpd.ops->ipcp_flow_dealloc == NULL) {
+                log_err("Failed to dealloc: operation unsupported.");
+                ret_msg->result = -ENOTSUP;
+                return;
+        }
+
+        if (ipcp_get_state() != IPCP_OPERATIONAL) {
+                log_err("Failed to dealloc: IPCP in state <%s>, need <%s>.",
+                        ipcp_state_str[ipcp_get_state()],
+                        ipcp_state_str[IPCP_OPERATIONAL]);
+                ret_msg->result = -EIPCPSTATE;
+                return;
+        }
+
+        fd = np1_flow_dealloc(flow_id, timeo_sec);
+        if (fd < 0) {
+                log_warn("Could not deallocate flow_id %d.", flow_id);
+                ret_msg->result = -1;
+                return;
+        }
+
+        ret_msg->result = ipcpd.ops->ipcp_flow_dealloc(fd);
+
+        log_info("Finished deallocating flow %d.", flow_id);
+}
+
+static void * mainloop(void * o)
+{
+        int          sfd;
+        buffer_t     buffer;
+        ipcp_msg_t * msg;
 
         (void) o;
 
         while (true) {
-                ipcp_msg_t          ret_msg    = IPCP_MSG__INIT;
-                layer_info_msg_t    layer_info = LAYER_INFO_MSG__INIT;
-                int                 fd         = -1;
-                struct cmd *        cmd;
-                qosspec_t
     qs; +                ipcp_msg_t   ret_msg = IPCP_MSG__INIT; +                qosspec_t    qs; +                struct cmd * cmd; +                buffer_t     data;                  ret_msg.code = IPCP_MSG_CODE__IPCP_REPLY; -                pthread_mutex_lock(&ipcpi.cmd_lock); +                pthread_mutex_lock(&ipcpd.cmd_lock); -                pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpi.cmd_lock); +                pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpd.cmd_lock); -                while (list_is_empty(&ipcpi.cmds)) -                        pthread_cond_wait(&ipcpi.cmd_cond, &ipcpi.cmd_lock); +                while (list_is_empty(&ipcpd.cmds)) +                        pthread_cond_wait(&ipcpd.cmd_cond, &ipcpd.cmd_lock); -                cmd = list_last_entry(&ipcpi.cmds, struct cmd, next); +                cmd = list_last_entry(&ipcpd.cmds, struct cmd, next);                  list_del(&cmd->next);                  pthread_cleanup_pop(true); @@ -307,334 +815,72 @@ static void * mainloop(void * o)                          continue;                  } -                tpm_dec(ipcpi.tpm); +                tpm_begin_work(ipcpd.tpm);                  pthread_cleanup_push(__cleanup_close_ptr, &sfd);                  pthread_cleanup_push(free_msg, msg); +                ret_msg.has_result = true; +                  switch (msg->code) {                  case IPCP_MSG_CODE__IPCP_BOOTSTRAP: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_bootstrap == NULL) { -                                log_err("Bootstrap unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } - -                        if (ipcp_get_state() != IPCP_INIT) { -                                log_err("IPCP in wrong state."); -                                ret_msg.result = -EIPCPSTATE; -                                break; -                        } - -                        conf_msg = msg->conf; -                        conf.type = conf_msg->ipcp_type; -                        strcpy(conf.layer_info.layer_name, -                               conf_msg->layer_info->layer_name); - -                        switch(conf_msg->ipcp_type) { -                        case IPCP_LOCAL: -                                break; -                        case IPCP_UNICAST: -                                conf.addr_size      = conf_msg->addr_size; -                                conf.eid_size       = conf_msg->eid_size; -                                conf.max_ttl        = conf_msg->max_ttl; -                                conf.addr_auth_type = conf_msg->addr_auth_type; -                                conf.routing_type   = conf_msg->routing_type; -                                conf.cong_avoid     = conf_msg->cong_avoid; -                                break; -                        case IPCP_ETH_DIX: -                                conf.ethertype = conf_msg->ethertype; -                                /* FALLTHRU */ -                        case IPCP_ETH_LLC: -                                conf.dev = conf_msg->dev; -                                break; -                        case IPCP_UDP: -                                conf.ip_addr  = conf_msg->ip_addr; -                                conf.dns_addr = conf_msg->dns_addr; -                                conf.port     = conf_msg->port; -                                conf.layer_info.dir_hash_algo = HASH_MD5; -       
                         layer_info.dir_hash_algo      = HASH_MD5; -                                break; -                        case IPCP_BROADCAST: -                                conf.layer_info.dir_hash_algo = HASH_SHA3_256; -                                layer_info.dir_hash_algo      = HASH_SHA3_256; -                                break; -                        default: -                                log_err("Unknown IPCP type: %d.", -                                        conf_msg->ipcp_type); -                                ret_msg.result = -EIPCP; -                                goto exit; /* break from outer switch/case */ -                        } - -                        /* UDP and broadcast use fixed hash algorithm. */ -                        if (conf_msg->ipcp_type != IPCP_UDP && -                            conf_msg->ipcp_type != IPCP_BROADCAST) { -                                switch(conf_msg->layer_info->dir_hash_algo) { -                                case DIR_HASH_SHA3_224: -                                        conf.layer_info.dir_hash_algo = -                                                HASH_SHA3_224; -                                        break; -                                case DIR_HASH_SHA3_256: -                                        conf.layer_info.dir_hash_algo = -                                                HASH_SHA3_256; -                                        break; -                                case DIR_HASH_SHA3_384: -                                        conf.layer_info.dir_hash_algo = -                                                HASH_SHA3_384; -                                        break; -                                case DIR_HASH_SHA3_512: -                                        conf.layer_info.dir_hash_algo = -                                                HASH_SHA3_512; -                                        break; -                                default: -                                        assert(false); -                                } - -                                layer_info.dir_hash_algo = -                                        conf.layer_info.dir_hash_algo; -                        } - -                        ret_msg.result = ipcpi.ops->ipcp_bootstrap(&conf); -                        if (ret_msg.result == 0) { -                                ret_msg.layer_info = &layer_info; -                                layer_info.layer_name = -                                        conf.layer_info.layer_name; -                        } +                        do_bootstrap(msg->conf, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_ENROLL: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_enroll == NULL) { -                                log_err("Enroll unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } - -                        if (ipcp_get_state() != IPCP_INIT) { -                                log_err("IPCP in wrong state."); -                                ret_msg.result = -EIPCPSTATE; -                                break; -                        } - -                        ret_msg.result = ipcpi.ops->ipcp_enroll(msg->dst, -                                                                &info); -                        if (ret_msg.result == 0) { -                                ret_msg.layer_info       
= &layer_info; -                                layer_info.dir_hash_algo = info.dir_hash_algo; -                                layer_info.layer_name    = info.layer_name; -                        } +                        do_enroll(msg->dst, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_CONNECT: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_connect == NULL) { -                                log_err("Connect unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } - -                        qs = msg_to_spec(msg->qosspec); -                        ret_msg.result = ipcpi.ops->ipcp_connect(msg->dst, -                                                                 msg->comp, -                                                                 qs); +                        qs = qos_spec_msg_to_s(msg->qosspec); +                        do_connect(msg->dst, msg->comp, qs, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_DISCONNECT: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_disconnect == NULL) { -                                log_err("Disconnect unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } - -                        ret_msg.result = ipcpi.ops->ipcp_disconnect(msg->dst, -                                                                    msg->comp); +                        do_disconnect(msg->dst, msg->comp, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_REG: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_reg == NULL) { -                                log_err("Registration unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } -                          assert(msg->hash.len == ipcp_dir_hash_len()); - -                        ret_msg.result = -                                ipcpi.ops->ipcp_reg(msg->hash.data); +                        do_reg(msg->hash.data, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_UNREG: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_unreg == NULL) { -                                log_err("Unregistration unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } -                          assert(msg->hash.len == ipcp_dir_hash_len()); - -                        ret_msg.result = -                                ipcpi.ops->ipcp_unreg(msg->hash.data); +                        do_unreg(msg->hash.data, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_QUERY: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_query == NULL) { -                                log_err("Directory query unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } -                          assert(msg->hash.len == ipcp_dir_hash_len()); - -                        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                         
       log_err("IPCP in wrong state."); -                                ret_msg.result = -EIPCPSTATE; -                                break; -                        } - -                        ret_msg.result = -                                ipcpi.ops->ipcp_query(msg->hash.data); +                        do_query(msg->hash.data, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_FLOW_ALLOC: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_flow_alloc == NULL) { -                                log_err("Flow allocation unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } -                          assert(msg->hash.len == ipcp_dir_hash_len());                          assert(msg->pk.len > 0 ? msg->pk.data != NULL                                                 : msg->pk.data == NULL); - -                        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                                log_err("IPCP in wrong state."); -                                ret_msg.result = -EIPCPSTATE; -                                break; -                        } - -                        qs = msg_to_spec(msg->qosspec); -                        fd = np1_flow_alloc(msg->pid, -                                            msg->flow_id, -                                            qs); -                        if (fd < 0) { -                                log_err("Failed allocating fd on flow_id %d.", -                                        msg->flow_id); -                                ret_msg.result = -1; -                                break; -                        } - -                        ret_msg.result = -                                ipcpi.ops->ipcp_flow_alloc(fd, -                                                           msg->hash.data, -                                                           qs, -                                                           msg->pk.data, -                                                           msg->pk.len); +                        data.len = msg->pk.len; +                        data.data = msg->pk.data; +                        qs = qos_spec_msg_to_s(msg->qosspec); +                        do_flow_alloc(msg->pid, msg->flow_id, +                                      msg->hash.data, qs, +                                      &data, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_FLOW_JOIN: -                        ret_msg.has_result = true; - -                        if (ipcpi.ops->ipcp_flow_join == NULL) { -                                log_err("Broadcast unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } -                          assert(msg->hash.len == ipcp_dir_hash_len()); - -                        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                                log_err("IPCP in wrong state."); -                                ret_msg.result = -EIPCPSTATE; -                                break; -                        } - -                        qs = msg_to_spec(msg->qosspec); -                        fd = np1_flow_alloc(msg->pid, -                                            msg->flow_id, -                                            qs); -                        if (fd < 0) { -                                log_err("Failed 
allocating fd on flow_id %d.", -                                        msg->flow_id); -                                ret_msg.result = -1; -                                break; -                        } - -                        ret_msg.result = -                                ipcpi.ops->ipcp_flow_join(fd, -                                                          msg->hash.data, -                                                          qs); +                        do_flow_join(msg->pid, msg->flow_id, +                                     msg->hash.data, &ret_msg);                          break;                  case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP: -                        ret_msg.has_result = true; -                        if (ipcpi.ops->ipcp_flow_alloc_resp == NULL) { -                                log_err("Flow_alloc_resp unsupported."); -                                ret_msg.result = -ENOTSUP; -                                break; -                        } - -                        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                                log_err("IPCP in wrong state."); -                                ret_msg.result = -EIPCPSTATE; -                                break; -                        } - -                        if (!msg->response) { -                                fd = np1_flow_resp(msg->flow_id); -                                if (fd < 0) { -                                        log_warn("Port_id %d is not known.", -                                                 msg->flow_id); -                                        ret_msg.result = -1; -                                        break; -                                } -                        } -                          assert(msg->pk.len > 0 ? 
msg->pk.data != NULL
-                               : msg->pk.data == NULL);
-
-                        ret_msg.result =
-                                ipcpi.ops->ipcp_flow_alloc_resp(fd,
-                                                                msg->response,
-                                                                msg->pk.data,
-                                                                msg->pk.len);
+                                               : msg->pk.data == NULL);
+                        data.len = msg->pk.len;
+                        data.data = msg->pk.data;
+                        do_flow_alloc_resp(msg->response, msg->flow_id,
+                                           &data, &ret_msg);
                         break;
                 case IPCP_MSG_CODE__IPCP_FLOW_DEALLOC:
-                        ret_msg.has_result = true;
-                        if (ipcpi.ops->ipcp_flow_dealloc == NULL) {
-                                log_err("Flow deallocation unsupported.");
-                                ret_msg.result = -ENOTSUP;
-                                break;
-                        }
-
-                        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                                log_err("IPCP in wrong state.");
-                                ret_msg.result = -EIPCPSTATE;
-                                break;
-                        }
-
-                        fd = np1_flow_dealloc(msg->flow_id);
-                        if (fd < 0) {
-                                log_warn("Could not deallocate flow_id %d.",
-                                        msg->flow_id);
-                                ret_msg.result = -1;
-                                break;
-                        }
-
-                        ret_msg.result =
-                                ipcpi.ops->ipcp_flow_dealloc(fd);
+                        do_flow_dealloc(msg->flow_id, msg->timeo_sec, &ret_msg);
                         break;
                 default:
-                        ret_msg.has_result = true;
-                        ret_msg.result     = -1;
-                        log_err("Don't know that message code");
+                        ret_msg.result = -1;
+                        log_err("Unknown message code: %d.", msg->code);
                         break;
                 }
 
-        exit:
+
                 pthread_cleanup_pop(true);
                 pthread_cleanup_pop(false);
 
@@ -642,7 +888,7 @@ static void * mainloop(void * o)
                 if (buffer.len == 0) {
                         log_err("Failed to pack reply message");
                         close(sfd);
-                        tpm_inc(ipcpi.tpm);
+                        tpm_end_work(ipcpd.tpm);
                         continue;
                 }
 
@@ -650,21 +896,25 @@ static void * mainloop(void * o)
                 if (buffer.data == NULL) {
                         log_err("Failed to create reply buffer.");
                         close(sfd);
-                        tpm_inc(ipcpi.tpm);
+                        tpm_end_work(ipcpd.tpm);
                         continue;
                 }
 
                 ipcp_msg__pack(&ret_msg, buffer.data);
 
+                if (ret_msg.layer_info != NULL)
+                        layer_info_msg__free_unpacked(ret_msg.layer_info, NULL);
+
+                pthread_cleanup_push(free, buffer.data);
                 pthread_cleanup_push(__cleanup_close_ptr, &sfd);
 
                 if (write(sfd, buffer.data, buffer.len) == -1)
                         log_warn("Failed to send reply message");
 
-                free(buffer.data);
-                pthread_cleanup_pop(true);
+                pthread_cleanup_pop(true); /* close sfd */
+                pthread_cleanup_pop(true); /* free buffer.data */
 
-                tpm_inc(ipcpi.tpm);
+                tpm_end_work(ipcpd.tpm);
         }
 
         return (void *) 0;
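/*
 * The reply path above brackets the blocking write() with two
 * cancellation handlers so that neither the packed buffer nor the
 * socket can leak if the thread is cancelled mid-write; popping with a
 * nonzero argument runs the handlers on the normal path as well. The
 * shape of the idiom in isolation (pthread_cleanup_push()/_pop() are
 * macros and must pair up lexically within the same block):
 */
static void sketch_cleanup_discipline(int sfd, void * buf)
{
        pthread_cleanup_push(free, buf);
        pthread_cleanup_push(__cleanup_close_ptr, &sfd);

        /* ... blocking I/O, a cancellation point ... */

        pthread_cleanup_pop(true); /* close sfd */
        pthread_cleanup_pop(true); /* free buf  */
}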
@@ -683,10 +933,10 @@ static int parse_args(int    argc,
         if (atoi(argv[1]) == 0)
                 return -1;
 
-        ipcpi.irmd_pid = atoi(argv[1]);
+        ipcpd.irmd_pid = atoi(argv[1]);
 
         /* argument 2: IPCP name */
-        ipcpi.name = argv[2];
+        ipcpd.name = argv[2];
 
         /* argument 3: syslog */
         if (argv[3] != NULL)
@@ -702,149 +952,174 @@ int ipcp_init(int               argc,
 {
         bool               log;
         pthread_condattr_t cattr;
-        int                ret = -1;
 
         if (parse_args(argc, argv, &log))
                 return -1;
 
         log_init(log);
 
-        ipcpi.irmd_fd   = -1;
-        ipcpi.state     = IPCP_NULL;
-        ipcpi.type      = type;
+        ipcpd.type  = type;
 
 #if defined (__linux__)
         prctl(PR_SET_TIMERSLACK, IPCP_LINUX_SLACK_NS, 0, 0, 0);
 #endif
 
-        ipcpi.sock_path = ipcp_sock_path(getpid());
-        if (ipcpi.sock_path == NULL)
+        ipcpd.sock_path = sock_path(getpid(), IPCP_SOCK_PATH_PREFIX);
+        if (ipcpd.sock_path == NULL)
                 goto fail_sock_path;
 
-        ipcpi.sockfd = server_socket_open(ipcpi.sock_path);
-        if (ipcpi.sockfd < 0) {
-                log_err("Could not open server socket.");
+        ipcpd.sockfd = server_socket_open(ipcpd.sock_path);
+        if (ipcpd.sockfd < 0) {
+                log_err("Failed to open server socket at %s.",
+                        ipcpd.sock_path);
                 goto fail_serv_sock;
         }
 
-        ipcpi.ops = ops;
+        ipcpd.ops = ops;
 
-        if (pthread_mutex_init(&ipcpi.state_mtx, NULL)) {
-                log_err("Could not create mutex.");
+        if (pthread_mutex_init(&ipcpd.state_mtx, NULL)) {
+                log_err("Failed to create mutex.");
                 goto fail_state_mtx;
         }
 
         if (pthread_condattr_init(&cattr)) {
-                log_err("Could not create condattr.");
+                log_err("Failed to create condattr.");
                 goto fail_cond_attr;
         }
 
 #ifndef __APPLE__
         pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
 #endif
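/*
 * The condattr set up here is shared by all condition variables in this
 * file, so that every pthread_cond_timedwait() measures its deadline
 * against PTHREAD_COND_CLOCK rather than the wall clock (macOS has no
 * pthread_condattr_setclock(), hence the guard). The idiom on its own:
 */
static int sketch_cond_init(pthread_cond_t * cond)
{
        pthread_condattr_t cattr;
        int                ret;

        if (pthread_condattr_init(&cattr))
                return -1;
#ifndef __APPLE__
        pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
#endif
        ret = pthread_cond_init(cond, &cattr) ? -1 : 0;

        pthread_condattr_destroy(&cattr);

        return ret;
}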
-        if (pthread_cond_init(&ipcpi.state_cond, &cattr)) {
-                log_err("Could not init condvar.");
+        if (pthread_cond_init(&ipcpd.state_cond, &cattr)) {
+                log_err("Failed to init condvar.");
                 goto fail_state_cond;
         }
 
-        if (pthread_mutex_init(&ipcpi.alloc_lock, NULL)) {
+        if (pthread_mutex_init(&ipcpd.alloc_lock, NULL)) {
                 log_err("Failed to init mutex.");
                 goto fail_alloc_lock;
         }
 
-        if (pthread_cond_init(&ipcpi.alloc_cond, &cattr)) {
+        if (pthread_cond_init(&ipcpd.alloc_cond, &cattr)) {
                 log_err("Failed to init convar.");
                 goto fail_alloc_cond;
         }
 
-        if (pthread_mutex_init(&ipcpi.cmd_lock, NULL)) {
+        if (pthread_mutex_init(&ipcpd.cmd_lock, NULL)) {
                 log_err("Failed to init mutex.");
                 goto fail_cmd_lock;
         }
 
-        if (pthread_cond_init(&ipcpi.cmd_cond, &cattr)) {
+        if (pthread_cond_init(&ipcpd.cmd_cond, &cattr)) {
                 log_err("Failed to init convar.");
                 goto fail_cmd_cond;
         }
 
-        if (rib_init(ipcpi.name)) {
+        if (rib_init(ipcpd.name)) {
                 log_err("Failed to initialize RIB.");
                 goto fail_rib_init;
         }
 
-        list_head_init(&ipcpi.cmds);
+        if (rib_reg(IPCP_INFO, &r_ops)) {
+                log_err("Failed to register RIB.");
+                goto fail_rib_reg;
+        }
 
-        ipcpi.alloc_id = -1;
+        list_head_init(&ipcpd.cmds);
+
+        ipcpd.tpm = tpm_create(IPCP_MIN_THREADS, IPCP_ADD_THREADS,
+                               mainloop, NULL);
+        if (ipcpd.tpm == NULL) {
+                log_err("Failed to create threadpool manager.");
+                goto fail_tpm_create;
+        }
+
+        ipcpd.alloc_id = -1;
 
         pthread_condattr_destroy(&cattr);
 
+        ipcp_set_state(IPCP_INIT);
+
+        log_info("IPCP %s %d initialized.", ipcp_type_str[ipcpd.type],
+                 getpid());
+
         return 0;
 
+ fail_tpm_create:
+        rib_unreg(IPCP_INFO);
+ fail_rib_reg:
+        rib_fini();
  fail_rib_init:
-        pthread_cond_destroy(&ipcpi.cmd_cond);
+        pthread_cond_destroy(&ipcpd.cmd_cond);
  fail_cmd_cond:
-        pthread_mutex_destroy(&ipcpi.cmd_lock);
+        pthread_mutex_destroy(&ipcpd.cmd_lock);
  fail_cmd_lock:
-        pthread_cond_destroy(&ipcpi.alloc_cond);
+        pthread_cond_destroy(&ipcpd.alloc_cond);
  fail_alloc_cond:
-        pthread_mutex_destroy(&ipcpi.alloc_lock);
+        pthread_mutex_destroy(&ipcpd.alloc_lock);
  fail_alloc_lock:
-        pthread_cond_destroy(&ipcpi.state_cond);
+        pthread_cond_destroy(&ipcpd.state_cond);
  fail_state_cond:
         pthread_condattr_destroy(&cattr);
  fail_cond_attr:
-        pthread_mutex_destroy(&ipcpi.state_mtx);
+        pthread_mutex_destroy(&ipcpd.state_mtx);
  fail_state_mtx:
-        close(ipcpi.sockfd);
+        close(ipcpd.sockfd);
  fail_serv_sock:
-        free(ipcpi.sock_path);
+        free(ipcpd.sock_path);
  fail_sock_path:
-        return ret;
+        return -1;
 }
 
-int ipcp_boot()
+int ipcp_start(void)
 {
-        sigset_t  sigset;
+        sigset_t         sigset;
+        struct ipcp_info info;
+
         sigemptyset(&sigset);
         sigaddset(&sigset, SIGINT);
         sigaddset(&sigset, SIGQUIT);
         sigaddset(&sigset, SIGHUP);
         sigaddset(&sigset, SIGPIPE);
 
-        ipcpi.tpm = tpm_create(IPCP_MIN_THREADS, IPCP_ADD_THREADS,
-                               mainloop, NULL);
-        if (ipcpi.tpm == NULL)
-                goto fail_tpm_create;
-
         pthread_sigmask(SIG_BLOCK, &sigset, NULL);
 
-        if (tpm_start(ipcpi.tpm))
-                goto fail_tpm_start;
+        info.pid  = getpid();
+        info.type = ipcpd.type;
+        strcpy(info.name, ipcpd.name);
+        info.state = IPCP_BOOT;
 
-        ipcp_set_state(IPCP_INIT);
+        ipcp_set_state(IPCP_BOOT);
 
-        if (rib_reg(IPCP_INFO, &r_ops))
-                goto fail_rib_reg;
+        if (tpm_start(ipcpd.tpm)) {
+                log_err("Failed to start threadpool manager.");
+                goto fail_tpm_start;
+        }
 
-        if (pthread_create(&ipcpi.acceptor, NULL, acceptloop, NULL)) {
+        if (pthread_create(&ipcpd.acceptor, NULL, acceptloop, NULL)) {
                 log_err("Failed to create acceptor thread.");
-                ipcp_set_state(IPCP_NULL);
                 goto fail_acceptor;
         }
 
-        return 0;
+        if (ipcp_create_r(&info)) {
+      
  log_err("Failed to notify IRMd we are initialized."); +                goto fail_create_r; +        } +        return 0; + fail_create_r: +        pthread_cancel(ipcpd.acceptor); +        pthread_join(ipcpd.acceptor, NULL);   fail_acceptor: -        rib_unreg(IPCP_INFO); - fail_rib_reg: -        tpm_stop(ipcpi.tpm); +        tpm_stop(ipcpd.tpm);   fail_tpm_start: -        tpm_destroy(ipcpi.tpm); - fail_tpm_create: +        tpm_destroy(ipcpd.tpm); +        ipcp_set_state(IPCP_INIT); +        ipcp_create_r(&info);          return -1;  } -void ipcp_shutdown() +void ipcp_sigwait(void)  {          siginfo_t info; @@ -859,7 +1134,7 @@ void ipcp_shutdown()          sigaddset(&sigset, SIGTERM);          sigaddset(&sigset, SIGPIPE); -        while(ipcp_get_state() != IPCP_NULL && +        while(ipcp_get_state() != IPCP_INIT &&                ipcp_get_state() != IPCP_SHUTDOWN) {  #ifdef __APPLE__                  if (sigwait(&sigset, &sig) < 0) { @@ -873,16 +1148,19 @@ void ipcp_shutdown()  #ifdef __APPLE__                  memset(&info, 0, sizeof(info));                  info.si_signo = sig; -                info.si_pid   = ipcpi.irmd_pid; +                info.si_pid   = ipcpd.irmd_pid;  #endif                  switch(info.si_signo) {                  case SIGINT: +                        /* FALLTHRU */                  case SIGTERM: +                        /* FALLTHRU */                  case SIGHUP: +                        /* FALLTHRU */                  case SIGQUIT: -                        if (info.si_pid == ipcpi.irmd_pid) { -                                if (ipcp_get_state() == IPCP_INIT) -                                        ipcp_set_state(IPCP_NULL); +                        if (info.si_pid == ipcpd.irmd_pid) { +                                if (ipcp_get_state() == IPCP_BOOT) +                                        ipcp_set_state(IPCP_INIT);                                  if (ipcp_get_state() == IPCP_OPERATIONAL)                                          ipcp_set_state(IPCP_SHUTDOWN); @@ -890,97 +1168,77 @@ void ipcp_shutdown()                          break;                  case SIGPIPE:                          log_dbg("Ignored SIGPIPE."); +                        continue;                  default:                          continue;                  }          } +} + +void ipcp_stop(void) +{ +        log_info("IPCP %d shutting down.", getpid()); -        pthread_cancel(ipcpi.acceptor); +        pthread_cancel(ipcpd.acceptor); +        pthread_join(ipcpd.acceptor, NULL); -        pthread_join(ipcpi.acceptor, NULL); -        tpm_stop(ipcpi.tpm); -        tpm_destroy(ipcpi.tpm); +        tpm_stop(ipcpd.tpm); -        log_info("IPCP %d shutting down.", getpid()); +        ipcp_set_state(IPCP_INIT);  } -void ipcp_fini() +void ipcp_fini(void)  { +        tpm_destroy(ipcpd.tpm); + +        rib_unreg(IPCP_INFO); +          rib_fini(); -        close(ipcpi.sockfd); -        if (unlink(ipcpi.sock_path)) -                log_warn("Could not unlink %s.", ipcpi.sock_path); +        close(ipcpd.sockfd); +        if (unlink(ipcpd.sock_path)) +                log_warn("Could not unlink %s.", ipcpd.sock_path); -        free(ipcpi.sock_path); +        free(ipcpd.sock_path); -        pthread_cond_destroy(&ipcpi.state_cond); -        pthread_mutex_destroy(&ipcpi.state_mtx); -        pthread_cond_destroy(&ipcpi.alloc_cond); -        pthread_mutex_destroy(&ipcpi.alloc_lock); -        pthread_cond_destroy(&ipcpi.cmd_cond); -        pthread_mutex_destroy(&ipcpi.cmd_lock); +        
pthread_cond_destroy(&ipcpd.state_cond); +        pthread_mutex_destroy(&ipcpd.state_mtx); +        pthread_cond_destroy(&ipcpd.alloc_cond); +        pthread_mutex_destroy(&ipcpd.alloc_lock); +        pthread_cond_destroy(&ipcpd.cmd_cond); +        pthread_mutex_destroy(&ipcpd.cmd_lock);          log_info("IPCP %d out.", getpid());          log_fini(); + +        ipcpd.state = IPCP_NULL;  }  void ipcp_set_state(enum ipcp_state state)  { -        pthread_mutex_lock(&ipcpi.state_mtx); +        pthread_mutex_lock(&ipcpd.state_mtx); -        ipcpi.state = state; +        ipcpd.state = state; -        pthread_cond_broadcast(&ipcpi.state_cond); -        pthread_mutex_unlock(&ipcpi.state_mtx); +        pthread_cond_broadcast(&ipcpd.state_cond); +        pthread_mutex_unlock(&ipcpd.state_mtx);  } -enum ipcp_state ipcp_get_state() +enum ipcp_state ipcp_get_state(void)  {          enum ipcp_state state; -        pthread_mutex_lock(&ipcpi.state_mtx); +        pthread_mutex_lock(&ipcpd.state_mtx); -        state = ipcpi.state; +        state = ipcpd.state; -        pthread_mutex_unlock(&ipcpi.state_mtx); +        pthread_mutex_unlock(&ipcpd.state_mtx);          return state;  } -int ipcp_wait_state(enum ipcp_state         state, -                    const struct timespec * timeout) -{ -        struct timespec abstime; -        int ret = 0; - -        clock_gettime(PTHREAD_COND_CLOCK, &abstime); -        ts_add(&abstime, timeout, &abstime); - -        pthread_mutex_lock(&ipcpi.state_mtx); - -        pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpi.state_mtx); - -        while (ipcpi.state != state -               && ipcpi.state != IPCP_SHUTDOWN -               && ipcpi.state != IPCP_NULL -               && ret != -ETIMEDOUT) { -                if (timeout == NULL) -                        ret = -pthread_cond_wait(&ipcpi.state_cond, -                                                 &ipcpi.state_mtx); -                else -                        ret = -pthread_cond_timedwait(&ipcpi.state_cond, -                                                      &ipcpi.state_mtx, -                                                      &abstime); -        } - -        pthread_cleanup_pop(true); - -        return ret; -} -  void ipcp_lock_to_core(void)  {  #if defined(__linux__) && !defined(DISABLE_CORE_LOCK) diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h index eff2ae12..e8c31a32 100644 --- a/src/ipcpd/ipcp.h +++ b/src/ipcpd/ipcp.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * IPC process structure   * @@ -26,22 +26,18 @@  #include <ouroboros/hash.h>  #include <ouroboros/ipcp.h>  #include <ouroboros/list.h> +#include <ouroboros/protobuf.h> +#include <ouroboros/qos.h>  #include <ouroboros/sockets.h>  #include <ouroboros/tpm.h>  #include <pthread.h>  #include <time.h> -enum ipcp_state { -        IPCP_NULL = 0, -        IPCP_INIT, -        /* Layer name must be set for states below. 
*/
-        IPCP_OPERATIONAL,
-        IPCP_SHUTDOWN
-};
+#define ipcp_dir_hash_strlen() (ipcp_dir_hash_len() * 2)
 
 struct ipcp_ops {
-        int   (* ipcp_bootstrap)(const struct ipcp_config * conf);
+        int   (* ipcp_bootstrap)(struct ipcp_config * conf);
 
         int   (* ipcp_enroll)(const char *        dst,
                               struct layer_info * info);
 
@@ -59,84 +55,59 @@ struct ipcp_ops {
 
         int   (* ipcp_query)(const uint8_t * hash);
 
-        int   (* ipcp_flow_alloc)(int             fd,
-                                  const uint8_t * dst,
-                                  qosspec_t       qs,
-                                  const void *    data,
-                                  size_t          len);
+        int   (* ipcp_flow_alloc)(int              fd,
+                                  const uint8_t *  dst,
+                                  qosspec_t        qs,
+                                  const buffer_t * data);
 
         int   (* ipcp_flow_join)(int             fd,
-                                 const uint8_t * dst,
-                                 qosspec_t       qs);
+                                 const uint8_t * dst);
 
-        int   (* ipcp_flow_alloc_resp)(int          fd,
-                                       int          response,
-                                       const void * data,
-                                       size_t       len);
+        int   (* ipcp_flow_alloc_resp)(int              fd,
+                                       int              response,
+                                       const buffer_t * data);
 
         int   (* ipcp_flow_dealloc)(int fd);
 };
 
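/*
 * The flow operations now receive the piggybacked allocation data as a
 * single buffer_t instead of a (const void *, size_t) pair. A policy
 * written against the old signature only needs to read the two members;
 * a minimal sketch of the new ipcp_flow_alloc_resp shape (old_resp() is
 * a hypothetical stand-in for the existing policy code):
 */
static int sketch_flow_alloc_resp_adapter(int              fd,
                                          int              response,
                                          const buffer_t * data)
{
        /* data->data may be NULL when data->len is 0. */
        return old_resp(fd, response, data->data, data->len);
}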
-#define ipcp_dir_hash_strlen() (hash_len(ipcpi.dir_hash_algo) * 2)
-#define ipcp_dir_hash_len() (hash_len(ipcpi.dir_hash_algo))
-
-extern struct ipcp {
-        pid_t              irmd_pid;
-        char *             name;
-
-        enum ipcp_type     type;
-        char *             layer_name;
-
-        uint64_t           dt_addr;
-
-        enum hash_algo     dir_hash_algo;
-
-        struct ipcp_ops *  ops;
-        int                irmd_fd;
-
-        enum ipcp_state    state;
-        pthread_rwlock_t   state_lock;
-        pthread_mutex_t    state_mtx;
-        pthread_cond_t     state_cond;
-
-        int                sockfd;
-        char *             sock_path;
-
-        struct list_head   cmds;
-        pthread_cond_t     cmd_cond;
-        pthread_mutex_t    cmd_lock;
-
-        int                alloc_id;
-        pthread_cond_t     alloc_cond;
-        pthread_mutex_t    alloc_lock;
-
-        struct tpm *       tpm;
-
-        pthread_t          acceptor;
-} ipcpi;
-
 int             ipcp_init(int               argc,
                           char **           argv,
                           struct ipcp_ops * ops,
                           enum ipcp_type    type);
 
-int             ipcp_boot(void);
+int             ipcp_start(void);
 
-void            ipcp_shutdown(void);
+void            ipcp_sigwait(void);
+
+void            ipcp_stop(void);
 
 void            ipcp_fini(void);
 
+enum ipcp_type  ipcp_get_type(void);
+
+const char *    ipcp_get_name(void);
+
+/* TODO: Only specify hash algorithm in directory policy */
+void            ipcp_set_dir_hash_algo(enum hash_algo algo);
+
 void            ipcp_set_state(enum ipcp_state state);
 
 enum ipcp_state ipcp_get_state(void);
 
-int             ipcp_wait_state(enum ipcp_state         state,
-                                const struct timespec * timeout);
+/* Helper functions to handle races during flow allocation */
+int             ipcp_wait_flow_req_arr(const uint8_t *  dst,
+                                       qosspec_t        qs,
+                                       time_t           mpl,
+                                       const buffer_t * data);
+
+int             ipcp_wait_flow_resp(const int fd);
 
-int             ipcp_parse_arg(int    argc,
-                               char * argv[]);
 
 /* Helper functions for directory entries, could be moved */
+size_t          ipcp_dir_hash_len(void);
+
+int             ipcp_get_layer_name(char * layer);
+
 uint8_t *       ipcp_hash_dup(const uint8_t * hash);
 
 void            ipcp_hash_str(char            buf[],
diff --git a/src/ipcpd/local/CMakeLists.txt b/src/ipcpd/local/CMakeLists.txt
index a84f4f1b..08abff57 100644
--- a/src/ipcpd/local/CMakeLists.txt
+++ b/src/ipcpd/local/CMakeLists.txt
@@ -13,6 +13,8 @@ include_directories(${CMAKE_SOURCE_DIR}/include)
 include_directories(${CMAKE_BINARY_DIR}/include)
 
 set(IPCP_LOCAL_TARGET ipcpd-local CACHE INTERNAL "")
+set(IPCP_LOCAL_MPL 100 CACHE STRING
+    "Default maximum packet lifetime for the local IPCP, in ms")
 
 set(LOCAL_SOURCES
   # Add source files here
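/*
 * The new MPL knob takes the same route as the other build-time
 * defaults: the cached CMake variable is substituted into
 * src/ipcpd/config.h.in (also touched by this patch) and reaches the
 * sources as a plain macro. A sketch of the plumbing; the exact
 * config.h.in line is an assumption, it is not part of this diff:
 *
 *   CMakeLists.txt : set(IPCP_LOCAL_MPL 100 CACHE STRING "...")
 *   config.h.in    : #cmakedefine IPCP_LOCAL_MPL @IPCP_LOCAL_MPL@
 *   local/main.c   : ipcp_wait_flow_req_arr(dst, qs, IPCP_LOCAL_MPL, data);
 */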
diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c
index 9c62c3cc..ffa6dc5a 100644
--- a/src/ipcpd/local/main.c
+++ b/src/ipcpd/local/main.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Local IPC process
  *
@@ -48,10 +48,7 @@
 #include <sys/wait.h>
 #include <assert.h>
 
-#define THIS_TYPE     IPCP_LOCAL
-#define ALLOC_TIMEOUT 10 /* ms */
-
-struct ipcp ipcpi;
+#define THIS_TYPE IPCP_LOCAL
 
 struct {
         struct shim_data * shim_data;
@@ -72,34 +69,39 @@ static int local_data_init(void)
         local_data.flows = fset_create();
         if (local_data.flows == NULL)
-                return -ENFILE;
+                goto fail_fset;
 
         local_data.fq = fqueue_create();
-        if (local_data.fq == NULL) {
-                fset_destroy(local_data.flows);
-                return -ENOMEM;
-        }
+        if (local_data.fq == NULL)
+                goto fail_fqueue;
 
         local_data.shim_data = shim_data_create();
-        if (local_data.shim_data == NULL) {
-                fqueue_destroy(local_data.fq);
-                fset_destroy(local_data.flows);
-                return -ENOMEM;
-        }
+        if (local_data.shim_data == NULL)
+                goto fail_shim_data;
 
-        pthread_rwlock_init(&local_data.lock, NULL);
+        if (pthread_rwlock_init(&local_data.lock, NULL) < 0)
+                goto fail_rwlock_init;
 
         return 0;
+
+ fail_rwlock_init:
+        shim_data_destroy(local_data.shim_data);
+ fail_shim_data:
+        fqueue_destroy(local_data.fq);
+ fail_fqueue:
+        fset_destroy(local_data.flows);
+ fail_fset:
+        return -ENOMEM;
 }
 
 static void local_data_fini(void){
+        pthread_rwlock_destroy(&local_data.lock);
         shim_data_destroy(local_data.shim_data);
-        fset_destroy(local_data.flows);
         fqueue_destroy(local_data.fq);
-        pthread_rwlock_destroy(&local_data.lock);
+        fset_destroy(local_data.flows);
 }
 
-static void * ipcp_local_packet_loop(void * o)
+static void * local_ipcp_packet_loop(void * o)
 {
         (void) o;
 
@@ -135,54 +137,45 @@ static void * ipcp_local_packet_loop(void * o)
         return (void *) 0;
 }
 
-static int ipcp_local_bootstrap(const struct ipcp_config * conf)
+static int local_ipcp_bootstrap(struct ipcp_config * conf)
 {
         assert(conf);
         assert(conf->type == THIS_TYPE);
 
-        ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo;
-        ipcpi.layer_name = strdup(conf->layer_info.layer_name);
-        if (ipcpi.layer_name == NULL) {
-                log_err("Failed to set layer name");
-                return -ENOMEM;
-        }
-
-        ipcp_set_state(IPCP_OPERATIONAL);
+        (void) conf;
 
         if (pthread_create(&local_data.packet_loop, NULL,
-                           ipcp_local_packet_loop, NULL)) {
+                           local_ipcp_packet_loop, NULL)) {
+                log_err("Failed to create packet loop thread.");
                 ipcp_set_state(IPCP_INIT);
                 return -1;
         }
 
-        log_info("Bootstrapped local IPCP with pid %d.", getpid());
-
         return 0;
 }
 
-static int ipcp_local_reg(const uint8_t * hash)
+static int local_ipcp_reg(const uint8_t * hash)
 {
         if (shim_data_reg_add_entry(local_data.shim_data, hash)) {
-                log_dbg("Failed to add " HASH_FMT " to local registry.",
-                        HASH_VAL(hash));
+                log_err("Failed to add " HASH_FMT32 " to local registry.",
+                        HASH_VAL32(hash));
                 return -1;
         }
 
-        log_info("Registered " HASH_FMT ".", HASH_VAL(hash));
-
         return 0;
 }
 
-static int ipcp_local_unreg(const uint8_t * hash)
+static int local_ipcp_unreg(const uint8_t * hash)
 {
         shim_data_reg_del_entry(local_data.shim_data, hash);
 
-        log_info("Unregistered " HASH_FMT ".",  HASH_VAL(hash));
+        log_info("Unregistered " HASH_FMT32 ".",  HASH_VAL32(hash));
 
         return 0;
 }
 
-static int ipcp_local_query(const uint8_t * hash)
+static int local_ipcp_query(const uint8_t * hash)
 {
         int ret;
 
@@ -191,41 +184,19 @@ static int ipcp_local_query(const uint8_t * hash)
         return ret;
 }
 
-static int ipcp_local_flow_alloc(int             fd,
-                                 const uint8_t * dst,
-                                 qosspec_t       qs,
-                                 const void *    data,
-                                 size_t          len)
+static int local_ipcp_flow_alloc(int              fd,
+                                 const uint8_t *  dst,
+                                 qosspec_t        qs,
+                                 const buffer_t * data)
 {
-        struct timespec ts     = {0, ALLOC_TIMEOUT * MILLION};
-        struct timespec abstime;
-        int             out_fd = -1;
+        int out_fd = -1;
 
-        log_dbg("Allocating flow to " HASH_FMT " on fd %d.", HASH_VAL(dst), fd);
+        log_dbg("Allocating flow to " HASH_FMT32 " on fd %d.",
+                HASH_VAL32(dst), fd);
 
         assert(dst);
 
-        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
-        pthread_mutex_lock(&ipcpi.alloc_lock);
-
-        while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
-                ts_add(&abstime, &ts, &abstime);
-                pthread_cond_timedwait(&ipcpi.alloc_cond,
-                                       &ipcpi.alloc_lock,
-                                       &abstime);
-        }
-
-        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                log_dbg("Won't allocate over non-operational IPCP.");
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
-                return -1;
-        }
-
-        assert(ipcpi.alloc_id == -1);
-
-        out_fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len);
+        out_fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_LOCAL_MPL, data);
         if (out_fd < 0) {
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
                 log_dbg("Flow allocation failed: %d", out_fd);
                 return -1;
         }
 
@@ -237,11 +208,6 @@ static int ipcp_local_flow_alloc(int             fd,
 
         pthread_rwlock_unlock(&local_data.lock);
 
-        ipcpi.alloc_id = out_fd;
-        pthread_cond_broadcast(&ipcpi.alloc_cond);
-
-        pthread_mutex_unlock(&ipcpi.alloc_lock);
-
         fset_add(local_data.flows, fd);
 
         log_info("Pending local allocation request on fd %d.", fd);
 
@@ -249,39 +215,21 @@ static int ipcp_local_flow_alloc(int             fd,
         return 0;
 }
 
-static int ipcp_local_flow_alloc_resp(int          fd,
-                                      int          response,
-                                      const void * data,
-                                      size_t       len)
+static int local_ipcp_flow_alloc_resp(int              fd,
+                                      int              response,
+                                      const buffer_t * data)
 {
-        struct timespec ts     = {0, ALLOC_TIMEOUT * MILLION};
-        struct timespec abstime;
-        int             out_fd = -1;
-
-        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
-        pthread_mutex_lock(&ipcpi.alloc_lock);
+        struct timespec wait = TIMESPEC_INIT_MS(1);
+        time_t          mpl  = IPCP_LOCAL_MPL;
+        int             out_fd;
 
-        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
-                ts_add(&abstime, &ts, &abstime);
-                pthread_cond_timedwait(&ipcpi.alloc_cond,
-                                       &ipcpi.alloc_lock,
-                                       &abstime);
-        }
-
-        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
+        if (ipcp_wait_flow_resp(fd) < 0) {
+                log_err("Failed waiting for IRMd response.");
                 return -1;
         }
 
-        ipcpi.alloc_id = -1;
-        pthread_cond_broadcast(&ipcpi.alloc_cond);
-
-        pthread_mutex_unlock(&ipcpi.alloc_lock);
-
-        pthread_rwlock_wrlock(&local_data.lock);
-
-        if (response) {
+        if (response < 0) {
+                pthread_rwlock_wrlock(&local_data.lock);
                 if (local_data.in_out[fd] != -1)
                         local_data.in_out[local_data.in_out[fd]] = fd;
                 local_data.in_out[fd] = -1;
@@ -289,25 +237,38 @@ static int ipcp_local_flow_alloc_resp(int          fd,
                 return 0;
         }
 
+        pthread_rwlock_rdlock(&local_data.lock);
+
         out_fd = local_data.in_out[fd];
         if (out_fd == -1) {
                 pthread_rwlock_unlock(&local_data.lock);
-                return -1;
+                log_dbg("Potential race detected.");
+                nanosleep(&wait, NULL);
+                pthread_rwlock_rdlock(&local_data.lock);
+                out_fd = local_data.in_out[fd];
         }
 
         pthread_rwlock_unlock(&local_data.lock);
 
+        if (out_fd == -1) {
+                log_err("Invalid out_fd.");
+                return -1;
+        }
+
         fset_add(local_data.flows, fd);
 
-        if (ipcp_flow_alloc_reply(out_fd, response, data, len) < 0)
+        if (ipcp_flow_alloc_reply(out_fd, response, mpl, data) < 0) {
+                log_err("Failed to reply to allocation.");
+                fset_del(local_data.flows, fd);
                 return -1;
+        }
 
         log_info("Flow allocation completed, fds (%d, %d).", out_fd, fd);
 
         return 0;
 }
 
-static int ipcp_local_flow_dealloc(int fd)
+static int local_ipcp_flow_dealloc(int fd)
 {
         assert(!(fd < 0));
 
@@ -321,7 +282,7 @@ static int ipcp_local_flow_dealloc(int fd)
 
         pthread_rwlock_unlock(&local_data.lock);
 
-        flow_dealloc(fd);
+        ipcp_flow_dealloc(fd);
 
         log_info("Flow with fd %d deallocated.", fd);
 
@@ -329,60 +290,54 @@ static int ipcp_local_flow_dealloc(int fd)
 }
 
 static struct ipcp_ops local_ops = {
-        .ipcp_bootstrap       = ipcp_local_bootstrap,
+        .ipcp_bootstrap       = local_ipcp_bootstrap,
         .ipcp_enroll          = NULL,
         .ipcp_connect         = NULL,
         .ipcp_disconnect      = NULL,
-        .ipcp_reg             = ipcp_local_reg,
-        .ipcp_unreg           = ipcp_local_unreg,
-        .ipcp_query           = ipcp_local_query,
-        .ipcp_flow_alloc      = ipcp_local_flow_alloc,
+        .ipcp_reg             = local_ipcp_reg,
+        .ipcp_unreg           = local_ipcp_unreg,
+        .ipcp_query           = local_ipcp_query,
+        .ipcp_flow_alloc      = local_ipcp_flow_alloc,
         .ipcp_flow_join       = NULL,
-        .ipcp_flow_alloc_resp = ipcp_local_flow_alloc_resp,
-        .ipcp_flow_dealloc    = ipcp_local_flow_dealloc
+        .ipcp_flow_alloc_resp = local_ipcp_flow_alloc_resp,
+        .ipcp_flow_dealloc    = local_ipcp_flow_dealloc
 };
 
 int main(int    argc,
          char * argv[])
 {
-        if (ipcp_init(argc, argv, &local_ops, THIS_TYPE) < 0)
-                goto fail_init;
-
         if (local_data_init() < 0) {
                 log_err("Failed to init local data.");
                 goto fail_data_init;
         }
 
-        if (ipcp_boot() < 0) {
-                log_err("Failed to boot IPCP.");
-                goto fail_boot;
-        }
+        if (ipcp_init(argc, argv, &local_ops, THIS_TYPE) < 0)
+                goto fail_init;
 
-        if (ipcp_create_r(0)) {
-                log_err("Failed to notify IRMd we are initialized.");
-                goto fail_create_r;
+        if (ipcp_start() < 0) {
+                log_err("Failed to start IPCP.");
+                goto fail_start;
         }
 
-        ipcp_shutdown();
+        ipcp_sigwait();
 
         if (ipcp_get_state() == IPCP_SHUTDOWN) {
                 pthread_cancel(local_data.packet_loop);
                 pthread_join(local_data.packet_loop, NULL);
         }
 
-        local_data_fini();
+        ipcp_stop();
 
         ipcp_fini();
 
-        exit(EXIT_SUCCESS);
-
- fail_create_r:
-        ipcp_set_state(IPCP_NULL);
-        ipcp_shutdown();
- fail_boot:
         local_data_fini();
- fail_data_init:
+
+        exit(EXIT_SUCCESS);
+
+ fail_start:
         ipcp_fini();
  fail_init:
-        ipcp_create_r(-1);
+        local_data_fini();
+ fail_data_init:
         exit(EXIT_FAILURE);
 }
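/*
 * shim-data (diffed next) backs the registry and directory that the
 * local IPCP uses above. The registry side in isolation, as exercised
 * by local_ipcp_reg()/local_ipcp_unreg():
 */
static int sketch_registry(const uint8_t * hash)
{
        struct shim_data * sd;

        sd = shim_data_create();
        if (sd == NULL)
                return -ENOMEM;

        if (shim_data_reg_add_entry(sd, hash)) {   /* register   */
                shim_data_destroy(sd);
                return -1;
        }

        shim_data_reg_del_entry(sd, hash);         /* unregister */

        shim_data_destroy(sd);

        return 0;
}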
<ouroboros/logs.h> +#include <ouroboros/time.h>  #include "shim-data.h"  #include "ipcp.h" -#include <string.h> -#include <stdlib.h>  #include <assert.h> +#include <stdlib.h> +#include <string.h>  struct reg_entry {          struct list_head list; @@ -51,7 +51,7 @@ struct reg_entry {  struct dir_entry {          struct list_head list;          uint8_t *        hash; -        uint64_t         addr; +        struct addr      addr;  };  static void destroy_dir_query(struct dir_query * query) @@ -108,14 +108,12 @@ static void reg_entry_destroy(struct reg_entry * entry)  {          assert(entry); -        if (entry->hash != NULL) -                free(entry->hash); - +        free(entry->hash);          free(entry);  } -static struct dir_entry * dir_entry_create(uint8_t * hash, -                                           uint64_t  addr) +static struct dir_entry * dir_entry_create(uint8_t *   hash, +                                           struct addr addr)  {          struct dir_entry * entry = malloc(sizeof(*entry));          if (entry == NULL) @@ -133,15 +131,15 @@ static void dir_entry_destroy(struct dir_entry * entry)  {          assert(entry); -        if (entry->hash != NULL) -                free(entry->hash); - +        free(entry->hash);          free(entry);  } -struct shim_data * shim_data_create() +struct shim_data * shim_data_create(void)  { -        struct shim_data * sd = malloc(sizeof(*sd)); +        struct shim_data * sd; + +        sd = malloc(sizeof(*sd));          if (sd == NULL)                  return NULL; @@ -151,11 +149,23 @@ struct shim_data * shim_data_create()          list_head_init(&sd->dir_queries);          /* init the locks */ -        pthread_rwlock_init(&sd->reg_lock, NULL); -        pthread_rwlock_init(&sd->dir_lock, NULL); -        pthread_mutex_init(&sd->dir_queries_lock, NULL); +        if (pthread_rwlock_init(&sd->reg_lock, NULL) < 0) +                goto fail_reg_lock_init; + +        if (pthread_rwlock_init(&sd->dir_lock, NULL) < 0) +                goto fail_dir_lock_init; + +        if (pthread_mutex_init(&sd->dir_queries_lock, NULL) < 0) +                goto fail_mutex_init;          return sd; + + fail_mutex_init: +        pthread_rwlock_destroy(&sd->dir_lock); + fail_dir_lock_init: +        pthread_rwlock_destroy(&sd->reg_lock); + fail_reg_lock_init: +        return NULL;  }  static void clear_registry(struct shim_data * data) @@ -244,13 +254,15 @@ static struct reg_entry * find_reg_entry_by_hash(struct shim_data * data,  static struct dir_entry * find_dir_entry(struct shim_data * data,                                           const uint8_t *    hash, -                                         uint64_t           addr) +                                         struct addr        addr)  {          struct list_head * h;          list_for_each(h, &data->directory) {                  struct dir_entry * e = list_entry(h, struct dir_entry, list); -                if (e->addr == addr && -                    !memcmp(e->hash, hash, ipcp_dir_hash_len())) +                if (memcmp(&e->addr, &addr, sizeof(addr)) != 0) +                        continue; + +                if (memcmp(e->hash, hash, ipcp_dir_hash_len()) == 0)                          return e;          } @@ -283,8 +295,8 @@ int shim_data_reg_add_entry(struct shim_data * data,          if (find_reg_entry_by_hash(data, hash)) {                  pthread_rwlock_unlock(&data->reg_lock); -                log_dbg(HASH_FMT " was already in the directory.", -                        HASH_VAL(hash)); + 
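
shim_data_create() now checks every lock initialization and unwinds the ones that succeeded, in reverse order, before reporting failure. The idiom reduced to a skeleton (struct ctx and create_ctx() are illustrative; unlike the function above, this sketch also frees the allocation on its failure path):

#include <pthread.h>
#include <stdlib.h>

struct ctx {
        pthread_rwlock_t a;
        pthread_rwlock_t b;
        pthread_mutex_t  c;
};

static struct ctx * create_ctx(void)
{
        struct ctx * x = malloc(sizeof(*x));
        if (x == NULL)
                return NULL;

        if (pthread_rwlock_init(&x->a, NULL))
                goto fail_a;
        if (pthread_rwlock_init(&x->b, NULL))
                goto fail_b;
        if (pthread_mutex_init(&x->c, NULL))
                goto fail_c;

        return x;
        /* each label destroys exactly what was set up, in reverse */
 fail_c:
        pthread_rwlock_destroy(&x->b);
 fail_b:
        pthread_rwlock_destroy(&x->a);
 fail_a:
        free(x);
        return NULL;
}
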
                log_dbg(HASH_FMT32 " was already in the directory.",
+                        HASH_VAL32(hash));
                 return 0;
         }
@@ -350,7 +362,7 @@ bool shim_data_reg_has(struct shim_data * data,
 int shim_data_dir_add_entry(struct shim_data * data,
                             const uint8_t *    hash,
-                            uint64_t           addr)
+                            struct addr        addr)
 {
         struct dir_entry * entry;
         uint8_t * entry_hash;
@@ -386,7 +398,7 @@ int shim_data_dir_add_entry(struct shim_data * data,
 int shim_data_dir_del_entry(struct shim_data * data,
                             const uint8_t *    hash,
-                            uint64_t           addr)
+                            struct addr        addr)
 {
         struct dir_entry * e;
         if (data == NULL)
@@ -423,19 +435,19 @@ bool shim_data_dir_has(struct shim_data * data,
         return ret;
 }
-uint64_t shim_data_dir_get_addr(struct shim_data * data,
-                                const uint8_t *    hash)
+struct addr shim_data_dir_get_addr(struct shim_data * data,
+                                   const uint8_t *    hash)
 {
         struct dir_entry * entry;
-        uint64_t           addr;
+        struct addr        addr = {0};
         pthread_rwlock_rdlock(&data->dir_lock);
         entry = find_dir_entry_any(data, hash);
-
         if (entry == NULL) {
                 pthread_rwlock_unlock(&data->dir_lock);
-                return 0; /* undefined behaviour, 0 may be a valid address */
+                log_warn("No address for " HASH_FMT32 ".", HASH_VAL32(hash));
+                return addr; /* undefined behaviour, 0 may be a valid address */
         }
         addr = entry->addr;
diff --git a/src/ipcpd/shim-data.h b/src/ipcpd/shim-data.h
index 12a4b02e..ea4ce413 100644
--- a/src/ipcpd/shim-data.h
+++ b/src/ipcpd/shim-data.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Utilities for building IPC processes
  *
@@ -25,9 +25,12 @@
 #include <ouroboros/list.h>
-#include <sys/types.h>
 #include <pthread.h>
 #include <stdint.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+
+#define MAC_SIZE 6
 enum dir_query_state {
         QUERY_INIT = 0,
@@ -46,6 +49,14 @@ struct dir_query {
         pthread_cond_t       cond;
 };
+struct addr {
+        union {
+                uint8_t         mac[MAC_SIZE];
+                struct in_addr  ip4;
+                struct in6_addr ip6;
+        };
+};
+
 struct shim_data {
         struct list_head registry;
         pthread_rwlock_t reg_lock;
@@ -72,16 +83,16 @@ bool               shim_data_reg_has(struct shim_data * data,
 int                shim_data_dir_add_entry(struct shim_data * data,
                                            const uint8_t *    hash,
-                                           uint64_t           addr);
+                                           struct addr        addr);
 int                shim_data_dir_del_entry(struct shim_data * data,
                                            const uint8_t *    hash,
-                                           uint64_t           addr);
+                                           struct addr        addr);
 bool               shim_data_dir_has(struct shim_data * data,
                                      const uint8_t *    hash);
-uint64_t           shim_data_dir_get_addr(struct shim_data * data,
+struct addr        shim_data_dir_get_addr(struct shim_data * data,
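
With the directory keyed on the struct addr union from shim-data.h instead of a flat uint64_t, entries are compared bytewise with memcmp() in find_dir_entry(), so any unused bytes of the union must be zeroed before a member is set. A small sketch of construction and comparison under that rule (addr_from_ipv4() and addr_eq() are illustrative helpers, not part of the tree):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define MAC_SIZE 6

struct addr {                       /* as declared in shim-data.h */
        union {
                uint8_t         mac[MAC_SIZE];
                struct in_addr  ip4;
                struct in6_addr ip6;
        };
};

static int addr_from_ipv4(struct addr * a, const char * s)
{
        memset(a, 0, sizeof(*a));   /* zero padding and unused bytes */
        return inet_pton(AF_INET, s, &a->ip4) == 1 ? 0 : -1;
}

static bool addr_eq(const struct addr * a, const struct addr * b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}
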
                                const uint8_t *    hash);  struct dir_query * shim_data_dir_query_create(struct shim_data * data, diff --git a/src/ipcpd/udp/CMakeLists.txt b/src/ipcpd/udp/CMakeLists.txt index f1a29ef6..27e32094 100644 --- a/src/ipcpd/udp/CMakeLists.txt +++ b/src/ipcpd/udp/CMakeLists.txt @@ -12,16 +12,25 @@ include_directories(${CURRENT_BINARY_PARENT_DIR})  include_directories(${CMAKE_SOURCE_DIR}/include)  include_directories(${CMAKE_BINARY_DIR}/include) -set(IPCP_UDP_TARGET ipcpd-udp CACHE INTERNAL "") +set(IPCP_UDP4_TARGET ipcpd-udp4 CACHE INTERNAL "") +set(IPCP_UDP6_TARGET ipcpd-udp6 CACHE INTERNAL "") -set(UDP_SOURCES +set(UDP4_SOURCES    # Add source files here -  ${CMAKE_CURRENT_SOURCE_DIR}/main.c -  ) +  udp4.c +) -add_executable(ipcpd-udp ${UDP_SOURCES} ${IPCP_SOURCES}) +set(UDP6_SOURCES +  # Add source files here +  udp6.c +) + +add_executable(ipcpd-udp4 ${UDP4_SOURCES} ${IPCP_SOURCES}) +target_link_libraries(ipcpd-udp4 LINK_PUBLIC ouroboros-dev) + +add_executable(ipcpd-udp6 ${UDP6_SOURCES} ${IPCP_SOURCES}) +target_link_libraries(ipcpd-udp6 LINK_PUBLIC ouroboros-dev) -target_link_libraries(ipcpd-udp LINK_PUBLIC ouroboros-dev)  # Find the nsupdate executable  find_program(NSUPDATE_EXECUTABLE @@ -55,13 +64,18 @@ else ()  endif ()  set(IPCP_UDP_RD_THR 3 CACHE STRING -  "Number of reader threads in UDP IPCP") +  "Number of reader threads in UDP IPCPs")  set(IPCP_UDP_WR_THR 3 CACHE STRING -  "Number of writer threads in UDP IPCP") +  "Number of writer threads in UDP IPCPs") +set(IPCP_UDP_MPL 5000 CACHE STRING +    "Default maximum packet lifetime for the UDP IPCPs, in ms")  include(AddCompileFlags)  if (CMAKE_BUILD_TYPE MATCHES "Debug*") -  add_compile_flags(ipcpd-udp -DCONFIG_OUROBOROS_DEBUG) +  add_compile_flags(ipcpd-udp4 -DCONFIG_OUROBOROS_DEBUG) +  add_compile_flags(ipcpd-udp6 -DCONFIG_OUROBOROS_DEBUG)  endif () -install(TARGETS ipcpd-udp RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}) +install(TARGETS ipcpd-udp4 RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}) +install(TARGETS ipcpd-udp6 RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}) + diff --git a/src/ipcpd/udp/main.c b/src/ipcpd/udp/udp.c index 6a3fb24a..be8069a4 100644 --- a/src/ipcpd/udp/main.c +++ b/src/ipcpd/udp/udp.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * IPC process over UDP   * @@ -20,22 +20,14 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   
*/ -#if defined(__linux__) || defined(__CYGWIN__) -#define _DEFAULT_SOURCE -#else -#define _POSIX_C_SOURCE 200112L -#endif -  #include "config.h" -#define OUROBOROS_PREFIX "ipcpd/udp" -  #include <ouroboros/bitmap.h> +#include <ouroboros/endian.h>  #include <ouroboros/hash.h>  #include <ouroboros/list.h>  #include <ouroboros/utils.h>  #include <ouroboros/dev.h> -#include <ouroboros/ipcp-dev.h>  #include <ouroboros/fqueue.h>  #include <ouroboros/errno.h>  #include <ouroboros/logs.h> @@ -58,14 +50,12 @@  #define FLOW_REQ                 1  #define FLOW_REPLY               2 -#define THIS_TYPE                IPCP_UDP -#define IPCP_UDP_MAX_PACKET_SIZE 8980  #define OUR_HEADER_LEN           sizeof(uint32_t) /* adds eid */ -#define IPCP_UDP_BUF_SIZE        8980 -#define IPCP_UDP_MSG_SIZE        8980 +#define IPCP_UDP_BUF_SIZE        IPCP_UDP_MAX_PACKET_SIZE +#define IPCP_UDP_MSG_SIZE        IPCP_UDP_MAX_PACKET_SIZE +  #define DNS_TTL                  86400 -#define FD_UPDATE_TIMEOUT        100 /* microseconds */  #define SADDR                    ((struct sockaddr *) &udp_data.s_saddr)  #define SADDR_SIZE               (sizeof(udp_data.s_saddr)) @@ -81,45 +71,42 @@  #define SENDTO_FLAGS 0  #endif -struct ipcp ipcpi; -  /* Keep order for alignment. */  struct mgmt_msg {          uint32_t eid;          uint32_t s_eid;          uint32_t d_eid; -        uint8_t  code; -        int8_t   response; -        /* QoS parameters from spec */ -        uint8_t  availability; -        uint8_t  in_order; +        int32_t  response;          uint64_t bandwidth;          uint32_t delay;          uint32_t loss;          uint32_t ber;          uint32_t max_gap; -        uint16_t cypher_s; +        uint32_t timeout; +        uint8_t  code; +        /* QoS parameters from spec */ +        uint8_t  availability; +        uint8_t  in_order;  } __attribute__((packed));  struct mgmt_frame { -        struct list_head   next; -        struct sockaddr_in r_saddr; -        uint8_t            buf[MGMT_FRAME_BUF_SIZE]; -        size_t             len; +        struct list_head  next; +        struct __SOCKADDR r_saddr; +        uint8_t           buf[MGMT_FRAME_BUF_SIZE]; +        size_t            len;  };  /* UDP flow */  struct uf { -        int                d_eid; -        struct sockaddr_in r_saddr; +        int               d_eid; +        struct __SOCKADDR r_saddr;  };  struct {          struct shim_data * shim_data; -        uint32_t           dns_addr; - -        struct sockaddr_in s_saddr; +        struct __ADDR      dns_addr; +        struct __SOCKADDR  s_saddr;          int                s_fd;          fset_t *           np1_flows; @@ -136,14 +123,26 @@ struct {          struct list_head   mgmt_frames;  } udp_data; +static const char * __inet_ntop(const struct __ADDR * addr, +                                char *                buf) +{ +        return inet_ntop(__AF, addr, buf, __ADDRSTRLEN); +} +  static int udp_data_init(void)  { -        int i; +        int                i; +        pthread_condattr_t cattr;          if (pthread_rwlock_init(&udp_data.flows_lock, NULL))                  goto fail_rwlock_init; -        if (pthread_cond_init(&udp_data.mgmt_cond, NULL)) +        if (pthread_condattr_init(&cattr)) +                goto fail_condattr; +#ifndef __APPLE__ +        pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK); +#endif +        if (pthread_cond_init(&udp_data.mgmt_cond, &cattr))                  goto fail_mgmt_cond;          if (pthread_mutex_init(&udp_data.mgmt_lock, NULL)) @@ -160,9 
+159,12 @@ static int udp_data_init(void)          if (udp_data.shim_data == NULL)                  goto fail_data; +        pthread_condattr_destroy(&cattr); +          list_head_init(&udp_data.mgmt_frames);          return 0; +   fail_data:          fset_destroy(udp_data.np1_flows);   fail_fset: @@ -170,6 +172,8 @@ static int udp_data_init(void)   fail_mgmt_lock:          pthread_cond_destroy(&udp_data.mgmt_cond);   fail_mgmt_cond: +        pthread_condattr_destroy(&cattr); + fail_condattr:          pthread_rwlock_destroy(&udp_data.flows_lock);   fail_rwlock_init:          return -1; @@ -186,22 +190,21 @@ static void udp_data_fini(void)          pthread_mutex_destroy(&udp_data.mgmt_lock);  } -static int ipcp_udp_port_alloc(const struct sockaddr_in * r_saddr, -                               uint32_t                   s_eid, -                               const uint8_t *            dst, -                               qosspec_t                  qs, -                               const void *               data, -                               size_t                     dlen) +static int udp_ipcp_port_alloc(const struct __SOCKADDR * r_saddr, +                               uint32_t                  s_eid, +                               const uint8_t *           dst, +                               qosspec_t                 qs, +                               const buffer_t *          data)  {          uint8_t *         buf;          struct mgmt_msg * msg;          size_t            len; -        assert(dlen > 0 ? data != NULL : data == NULL); +        assert(data->len > 0 ? data->data != NULL : data->data == NULL);          len = sizeof(*msg) + ipcp_dir_hash_len(); -        buf = malloc(len + dlen); +        buf = malloc(len + data->len);          if (buf == NULL)                  return -1; @@ -216,14 +219,17 @@ static int ipcp_udp_port_alloc(const struct sockaddr_in * r_saddr,          msg->ber          = hton32(qs.ber);          msg->in_order     = qs.in_order;          msg->max_gap      = hton32(qs.max_gap); -        msg->cypher_s     = hton16(qs.cypher_s); +        msg->timeout      = hton32(qs.timeout);          memcpy(msg + 1, dst, ipcp_dir_hash_len()); -        memcpy(buf + len, data, dlen); +        if (data->len > 0) +                memcpy(buf + len, data->data, data->len); -        if (sendto(udp_data.s_fd, msg, len + dlen, +        if (sendto(udp_data.s_fd, msg, len + data->len,                     SENDTO_FLAGS,                     (const struct sockaddr *) r_saddr, sizeof(*r_saddr)) < 0) { +                log_err("Failed to send flow allocation request: %s.", +                        strerror(errno));                  free(buf);                  return -1;          } @@ -233,16 +239,15 @@ static int ipcp_udp_port_alloc(const struct sockaddr_in * r_saddr,          return 0;  } -static int ipcp_udp_port_alloc_resp(const struct sockaddr_in * r_saddr, -                                    uint32_t                   s_eid, -                                    uint32_t                   d_eid, -                                    int8_t                     response, -                                    const void *               data, -                                    size_t                     len) +static int udp_ipcp_port_alloc_resp(const struct __SOCKADDR * r_saddr, +                                    uint32_t                  s_eid, +                                    uint32_t                  d_eid, +                                    int32_t                   
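
udp_ipcp_port_alloc() above builds the packed mgmt_msg in place and converts every multi-byte field to network order before sendto(). The same pattern in miniature (toy_msg is illustrative, and the standard htonl() stands in for the tree's hton32()):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct toy_msg {
        uint32_t s_eid;
        int32_t  response;
        uint32_t timeout;
        uint8_t  code;              /* one-byte fields need no swap */
} __attribute__((packed));

static size_t toy_msg_pack(uint8_t * buf, uint32_t s_eid,
                           int32_t response, uint32_t timeout)
{
        struct toy_msg m;

        m.s_eid    = htonl(s_eid);
        m.response = (int32_t) htonl((uint32_t) response);
        m.timeout  = htonl(timeout);
        m.code     = 2;             /* FLOW_REPLY in the code above */

        memcpy(buf, &m, sizeof(m)); /* no padding thanks to packed  */
        return sizeof(m);
}
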
response, +                                    const buffer_t *          data)  {          struct mgmt_msg * msg; -        msg = malloc(sizeof(*msg) + len); +        msg = malloc(sizeof(*msg) + data->len);          if (msg == NULL)                  return -1; @@ -250,11 +255,12 @@ static int ipcp_udp_port_alloc_resp(const struct sockaddr_in * r_saddr,          msg->code     = FLOW_REPLY;          msg->s_eid    = hton32(s_eid);          msg->d_eid    = hton32(d_eid); -        msg->response = response; +        msg->response = hton32(response); -        memcpy(msg + 1, data, len); +        if (data->len > 0) +                memcpy(msg + 1, data->data, data->len); -        if (sendto(udp_data.s_fd, msg, sizeof(*msg) + len, +        if (sendto(udp_data.s_fd, msg, sizeof(*msg) + data->len,                     SENDTO_FLAGS,                     (const struct sockaddr *) r_saddr, sizeof(*r_saddr)) < 0 ) {                  free(msg); @@ -266,37 +272,16 @@ static int ipcp_udp_port_alloc_resp(const struct sockaddr_in * r_saddr,          return 0;  } -static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, -                             int                  d_eid, -                             const uint8_t *      dst, -                             qosspec_t            qs, -                             const void *         data, -                             size_t               len) +static int udp_ipcp_port_req(struct __SOCKADDR * c_saddr, +                             int                 d_eid, +                             const uint8_t *     dst, +                             qosspec_t           qs, +                             const buffer_t *    data)  { -        struct timespec    ts        = {0, FD_UPDATE_TIMEOUT * 1000}; -        struct timespec    abstime; -        int                fd; - -        clock_gettime(PTHREAD_COND_CLOCK, &abstime); +        int fd; -        pthread_mutex_lock(&ipcpi.alloc_lock); - -        while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) { -                ts_add(&abstime, &ts, &abstime); -                pthread_cond_timedwait(&ipcpi.alloc_cond, &ipcpi.alloc_lock, -                                       &abstime); -        } - -        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                log_dbg("Won't allocate over non-operational IPCP."); -                pthread_mutex_unlock(&ipcpi.alloc_lock); -                return -1; -        } - -        /* reply to IRM */ -        fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len); +        fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_UDP_MPL, data);          if (fd < 0) { -                pthread_mutex_unlock(&ipcpi.alloc_lock);                  log_err("Could not get new flow from IRMd.");                  return -1;          } @@ -308,30 +293,32 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr,          pthread_rwlock_unlock(&udp_data.flows_lock); -        ipcpi.alloc_id = fd; -        pthread_cond_broadcast(&ipcpi.alloc_cond); - -        pthread_mutex_unlock(&ipcpi.alloc_lock); -          log_dbg("Pending allocation request, fd %d, remote eid %d.",                  fd, d_eid);          return 0;  } -static int ipcp_udp_port_alloc_reply(const struct sockaddr_in * saddr, -                                     uint32_t                   s_eid, -                                     uint32_t                   d_eid, -                                     int8_t                     response, -                                     const void *               data, -        
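
Both allocation paths used to open-code a condition-variable gate around ipcpi.alloc_id so only one flow request was in flight towards the IRMd at a time; that logic now sits behind ipcp_wait_flow_req_arr() and ipcp_wait_flow_resp(). The gate being replaced, reduced to its essentials (all names here are illustrative):

#include <pthread.h>

static pthread_mutex_t lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond     = PTHREAD_COND_INITIALIZER;
static int             alloc_id = -1;   /* fd being processed, or -1 */

static void claim_slot(int fd)
{
        pthread_mutex_lock(&lock);
        while (alloc_id != -1)          /* wait for the slot to free */
                pthread_cond_wait(&cond, &lock);
        alloc_id = fd;
        pthread_mutex_unlock(&lock);
}

static void release_slot(void)
{
        pthread_mutex_lock(&lock);
        alloc_id = -1;
        pthread_cond_broadcast(&cond);  /* wake all waiting claimants */
        pthread_mutex_unlock(&lock);
}
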
                             size_t                     len) +static int udp_ipcp_port_alloc_reply(const struct __SOCKADDR * saddr, +                                     uint32_t                  s_eid, +                                     uint32_t                  d_eid, +                                     int32_t                   response, +                                     const buffer_t *          data)  { +        time_t mpl = IPCP_UDP_MPL; +          pthread_rwlock_wrlock(&udp_data.flows_lock);          if (memcmp(&udp_data.fd_to_uf[s_eid].r_saddr, saddr, sizeof(*saddr))) { +                char ipstr[__ADDRSTRLEN];                  pthread_rwlock_unlock(&udp_data.flows_lock); -                log_warn("Flow allocation reply for %u from wrong source.", -                         s_eid); +                #ifdef BUILD_IPCP_UDP4 +                __inet_ntop(&saddr->sin_addr, ipstr); +                #else +                __inet_ntop(&saddr->sin6_addr, ipstr); +                #endif +                log_err("Flow allocation reply for %u from wrong source %s.", +                        s_eid, ipstr);                  return -1;          } @@ -340,8 +327,8 @@ static int ipcp_udp_port_alloc_reply(const struct sockaddr_in * saddr,          pthread_rwlock_unlock(&udp_data.flows_lock); -        if (ipcp_flow_alloc_reply(s_eid, response, data, len) < 0) { -                log_dbg("Failed to reply to flow allocation."); +        if (ipcp_flow_alloc_reply(s_eid, response, mpl, data) < 0) { +                log_err("Failed to reply to flow allocation.");                  return -1;          } @@ -351,13 +338,14 @@ static int ipcp_udp_port_alloc_reply(const struct sockaddr_in * saddr,          return 0;  } -static int ipcp_udp_mgmt_frame(const uint8_t *    buf, -                               size_t             len, -                               struct sockaddr_in c_saddr) +static int udp_ipcp_mgmt_frame(struct __SOCKADDR c_saddr, +                               const uint8_t *   buf, +                               size_t            len)  {          struct mgmt_msg * msg;          size_t            msg_len;          qosspec_t         qs; +        buffer_t          data;          msg = (struct mgmt_msg *) buf; @@ -367,6 +355,10 @@ static int ipcp_udp_mgmt_frame(const uint8_t *    buf,                  assert(len >= msg_len); +                data.len  = len - msg_len; +                data.data = (uint8_t *) buf + msg_len; + +                  qs.delay        = ntoh32(msg->delay);                  qs.bandwidth    = ntoh64(msg->bandwidth);                  qs.availability = msg->availability; @@ -374,28 +366,29 @@ static int ipcp_udp_mgmt_frame(const uint8_t *    buf,                  qs.ber          = ntoh32(msg->ber);                  qs.in_order     = msg->in_order;                  qs.max_gap      = ntoh32(msg->max_gap); -                qs.cypher_s     = ntoh16(msg->cypher_s); +                qs.timeout      = ntoh32(msg->timeout); -                return ipcp_udp_port_req(&c_saddr, ntoh32(msg->s_eid), +                return udp_ipcp_port_req(&c_saddr, ntoh32(msg->s_eid),                                           (uint8_t *) (msg + 1), qs, -                                         buf + msg_len, -                                         len - msg_len); +                                          &data);          case FLOW_REPLY:                  assert(len >= sizeof(*msg)); -                return ipcp_udp_port_alloc_reply(&c_saddr, +                data.len  = len - sizeof(*msg); +  
              data.data = (uint8_t *) buf + sizeof(*msg); + +                return udp_ipcp_port_alloc_reply(&c_saddr,                                                   ntoh32(msg->s_eid),                                                   ntoh32(msg->d_eid), -                                                 msg->response, -                                                 buf + sizeof(*msg), -                                                 len - sizeof(*msg)); +                                                 ntoh32(msg->response), +                                                 &data);          default:                  log_err("Unknown message received %d.", msg->code);                  return -1;          }  } -static void * ipcp_udp_mgmt_handler(void * o) +static void * udp_ipcp_mgmt_handler(void * o)  {          (void) o; @@ -417,7 +410,7 @@ static void * ipcp_udp_mgmt_handler(void * o)                  pthread_mutex_unlock(&udp_data.mgmt_lock); -                ipcp_udp_mgmt_frame(frame->buf, frame->len, frame->r_saddr); +                udp_ipcp_mgmt_frame(frame->r_saddr, frame->buf, frame->len);                  free(frame);          } @@ -427,7 +420,7 @@ static void * ipcp_udp_mgmt_handler(void * o)          return (void *) 0;  } -static void * ipcp_udp_packet_reader(void * o) +static void * udp_ipcp_packet_reader(void * o)  {          uint8_t    buf[IPCP_UDP_MAX_PACKET_SIZE];          uint8_t *  data; @@ -437,13 +430,17 @@ static void * ipcp_udp_packet_reader(void * o)          (void) o; +        ipcp_lock_to_core(); +          data  = buf + sizeof(uint32_t);          eid_p = (uint32_t *) buf;          while (true) { -                struct mgmt_frame * frame; -                struct sockaddr_in  r_saddr; -                socklen_t           len; +                struct mgmt_frame *  frame; +                struct __SOCKADDR    r_saddr; +                socklen_t            len; +                struct shm_du_buff * sdb; +                uint8_t *            head;                  len = sizeof(r_saddr); @@ -484,10 +481,18 @@ static void * ipcp_udp_packet_reader(void * o)                          continue;                  } -                flow_write(eid, data, n - sizeof(eid)); +                n-= sizeof(eid); + +                if (ipcp_sdb_reserve(&sdb, n)) +                        continue; + +                head = shm_du_buff_head(sdb); +                memcpy(head, data, n); +                if (np1_flow_write(eid, sdb) < 0) +                        ipcp_sdb_release(sdb);          } -        return 0; +        return (void *) 0;  }  static void cleanup_fqueue(void * fq) @@ -500,7 +505,7 @@ static void cleanup_sdb(void * sdb)          ipcp_sdb_release((struct shm_du_buff *) sdb);  } -static void * ipcp_udp_packet_writer(void * o) +static void * udp_ipcp_packet_writer(void * o)  {          fqueue_t * fq; @@ -515,9 +520,9 @@ static void * ipcp_udp_packet_writer(void * o)          pthread_cleanup_push(cleanup_fqueue, fq);          while (true) { -                struct sockaddr_in saddr; -                int                eid; -                int                fd; +                struct __SOCKADDR saddr; +                int               eid; +                int               fd;                  fevent(udp_data.np1_flows, fq, NULL);                  while ((fd = fqueue_next(fq)) >= 0) {                          struct shm_du_buff * sdb; @@ -527,12 +532,12 @@ static void * ipcp_udp_packet_writer(void * o)                          if (fqueue_type(fq) != FLOW_PKT)     
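
udp_ipcp_mgmt_frame() now passes the variable-length tail of each management frame around as a single buffer_t rather than separate data/len arguments. How such a slice is taken after a fixed-size header, with a stand-in definition of buffer_t:

#include <stddef.h>
#include <stdint.h>

typedef struct {                    /* stand-in for the real buffer_t */
        uint8_t * data;
        size_t    len;
} buffer_t;

static buffer_t frame_tail(const uint8_t * frame, size_t len, size_t hdr)
{
        buffer_t b;

        b.len  = len - hdr;               /* caller asserted len >= hdr */
        b.data = (uint8_t *) frame + hdr; /* aliases the frame buffer   */

        return b;
}
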
                             continue; -                        if (ipcp_flow_read(fd, &sdb)) { +                        if (np1_flow_read(fd, &sdb)) {                                  log_dbg("Bad read from fd %d.", fd);                                  continue;                          } -                        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +                        len = shm_du_buff_len(sdb);                          if (len > IPCP_UDP_MAX_PACKET_SIZE) {                                  log_dbg("Packet length exceeds MTU.");                                  ipcp_sdb_release(sdb); @@ -572,103 +577,115 @@ static void * ipcp_udp_packet_writer(void * o)          return (void *) 1;  } -static int ipcp_udp_bootstrap(const struct ipcp_config * conf) +static bool is_addr_specified(const struct __ADDR * addr) +{ +#ifdef BUILD_IPCP_UDP4 +        return addr->s_addr != 0; +#else +        return !IN6_IS_ADDR_UNSPECIFIED(addr); +#endif +} + +static int udp_ipcp_bootstrap(struct ipcp_config * conf)  { -        char ipstr[INET_ADDRSTRLEN]; -        char dnsstr[INET_ADDRSTRLEN]; -        char portstr[128]; /* port is max 64535 = 5 chars */ +        char ipstr[__ADDRSTRLEN]; +        char dnsstr[__ADDRSTRLEN];          int  i = 1; +#ifdef BUILD_IPCP_UDP4 +        struct udp4_config * udp; +        udp = &conf->udp4; +#else +        struct udp6_config * udp; +        udp = &conf->udp6; +#endif -        assert(conf); +        assert(conf != NULL);          assert(conf->type == THIS_TYPE); +        assert(conf->layer_info.dir_hash_algo == (enum pol_dir_hash) HASH_MD5); -        ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo; -        ipcpi.layer_name = strdup(conf->layer_info.layer_name); -        if (ipcpi.layer_name == NULL) { -                log_err("Failed to set layer name"); -                return -ENOMEM; -        } - -        if (inet_ntop(AF_INET, &conf->ip_addr, ipstr, INET_ADDRSTRLEN) -            == NULL) { -                log_err("Failed to convert IP address"); +        if (__inet_ntop(&udp->ip_addr, ipstr) == NULL) { +                log_err("Failed to convert IP address.");                  return -1;          } -        if (conf->dns_addr != 0) { -                if (inet_ntop(AF_INET, &conf->dns_addr, dnsstr, INET_ADDRSTRLEN) -                    == NULL) { -                        log_err("Failed to convert DNS address"); +        if (is_addr_specified(&udp->dns_addr)) { +                if (__inet_ntop(&udp->dns_addr, dnsstr) == NULL) { +                        log_err("Failed to convert DNS address.");                          return -1;                  }  #ifndef HAVE_DDNS -                log_warn("DNS disabled at compile time, address ignored"); +                log_warn("DNS disabled at compile time, address ignored.");  #endif          } else {                  strcpy(dnsstr, "not set");          }          /* UDP listen server */ -        udp_data.s_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); +        udp_data.s_fd = socket(__AF, SOCK_DGRAM, IPPROTO_UDP);          if (udp_data.s_fd < 0) {                  log_err("Can't create socket: %s", strerror(errno));                  goto fail_socket;          }          memset((char *) &udp_data.s_saddr, 0, sizeof(udp_data.s_saddr)); -        udp_data.s_saddr.sin_family      = AF_INET; -        udp_data.s_saddr.sin_addr.s_addr = conf->ip_addr; -        udp_data.s_saddr.sin_port        = htons(conf->port); - +#ifdef BUILD_IPCP_UDP4 +        udp_data.s_saddr.sin_family  = AF_INET; +        
udp_data.s_saddr.sin_addr   = udp->ip_addr; +        udp_data.s_saddr.sin_port   = htons(udp->port); +#else +        udp_data.s_saddr.sin6_family = AF_INET6; +        udp_data.s_saddr.sin6_addr   = udp->ip_addr; +        udp_data.s_saddr.sin6_port   = htons(udp->port); +#endif          if (bind(udp_data.s_fd, SADDR, SADDR_SIZE) < 0) { -                log_err("Couldn't bind to %s.", ipstr); +                log_err("Couldn't bind to %s:%d. %s.", +                        ipstr, udp->port, strerror(errno));                  goto fail_bind;          } -        udp_data.dns_addr = conf->dns_addr; - -        ipcp_set_state(IPCP_OPERATIONAL); +        udp_data.dns_addr = udp->dns_addr;          if (pthread_create(&udp_data.mgmt_handler, NULL, -                           ipcp_udp_mgmt_handler, NULL)) { -                ipcp_set_state(IPCP_INIT); +                           udp_ipcp_mgmt_handler, NULL)) { +                log_err("Failed to create management thread.");                  goto fail_bind;          }          for (i = 0; i < IPCP_UDP_RD_THR; ++i) {                  if (pthread_create(&udp_data.packet_reader[i], NULL, -                                   ipcp_udp_packet_reader, NULL)) { -                        ipcp_set_state(IPCP_INIT); +                                   udp_ipcp_packet_reader, NULL)) { +                        log_err("Failed to create reader thread.");                          goto fail_packet_reader;                  }          }          for (i = 0; i < IPCP_UDP_WR_THR; ++i) {                  if (pthread_create(&udp_data.packet_writer[i], NULL, -                        ipcp_udp_packet_writer, NULL)) { -                        ipcp_set_state(IPCP_INIT); +                        udp_ipcp_packet_writer, NULL)) { +                        log_err("Failed to create writer thread.");                          goto fail_packet_writer;                  }          } -        sprintf(portstr, "%d", conf->port); - -        log_dbg("Bootstrapped IPCP over UDP with pid %d.", getpid()); +        log_dbg("Bootstrapped " TYPE_STR " with pid %d.", getpid());          log_dbg("Bound to IP address %s.", ipstr); -        log_dbg("Using port %u.", conf->port); -        log_dbg("DNS server address is %s.", dnsstr); +        log_dbg("Using port %u.", udp->port); +        if (is_addr_specified(&udp_data.dns_addr)) +                log_dbg("DNS server address is %s.", dnsstr); +        else +                log_dbg("DNS server not in use.");          return 0;   fail_packet_writer: -        while (i > 0) { -                pthread_cancel(udp_data.packet_writer[--i]); +        while (i-- > 0) { +                pthread_cancel(udp_data.packet_writer[i]);                  pthread_join(udp_data.packet_writer[i], NULL);          }          i = IPCP_UDP_RD_THR;   fail_packet_reader: -        while (i > 0) { -                pthread_cancel(udp_data.packet_reader[--i]); +        while (i-- > 0) { +                pthread_cancel(udp_data.packet_reader[i]);                  pthread_join(udp_data.packet_reader[i], NULL);          }          pthread_cancel(udp_data.mgmt_handler); @@ -684,20 +701,22 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf)  /* NOTE: Disgusted with this crap */  static int ddns_send(char * cmd)  { -        pid_t pid     = -1; +        pid_t pid;          int   wstatus;          int   pipe_fd[2];          char * argv[] = {NSUPDATE_EXEC, 0};          char * envp[] = {0};          if (pipe(pipe_fd)) { -                log_err("Failed to create pipe."); +             
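
Bootstrap starts IPCP_UDP_RD_THR reader and IPCP_UDP_WR_THR writer threads; when one pthread_create() fails, the new while (i-- > 0) loops cancel and join only the threads that were actually created. The pattern in isolation (worker() and N are illustrative):

#include <pthread.h>

#define N 3

static void * worker(void * o)
{
        (void) o;
        /* packet loop would go here */
        return NULL;
}

static int start_workers(pthread_t * tid)
{
        int i;

        for (i = 0; i < N; ++i)
                if (pthread_create(&tid[i], NULL, worker, NULL))
                        goto fail;      /* tid[i] was never created */

        return 0;
 fail:
        while (i-- > 0) {               /* unwind those that started */
                pthread_cancel(tid[i]);
                pthread_join(tid[i], NULL);
        }
        return -1;
}
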
   log_err("Failed to create pipe: %s.", strerror(errno));                  return -1;          }          pid = fork();          if (pid == -1) { -                log_err("Failed to fork."); +                log_err("Failed to fork: %s.", strerror(errno)); +                close(pipe_fd[0]); +                close(pipe_fd[1]);                  return -1;          } @@ -705,12 +724,15 @@ static int ddns_send(char * cmd)                  close(pipe_fd[1]);                  dup2(pipe_fd[0], 0);                  execve(argv[0], &argv[0], envp); +                log_err("Failed to execute: %s", strerror(errno)); +                exit(1);          }          close(pipe_fd[0]);          if (write(pipe_fd[1], cmd, strlen(cmd)) == -1) { -                log_err("Failed to communicate with nsupdate."); +                log_err("Failed to communicate with nsupdate: %s.", +                        strerror(errno));                  close(pipe_fd[1]);                  return -1;          } @@ -726,32 +748,34 @@ static int ddns_send(char * cmd)          return 0;  } -static uint32_t ddns_resolve(char *   name, -                             uint32_t dns_addr) +static struct __ADDR ddns_resolve(char *        name, +                                  struct __ADDR dns_addr)  { -        pid_t    pid      = -1; -        int      wstatus; -        int      pipe_fd[2]; -        char     dnsstr[INET_ADDRSTRLEN]; -        char     buf[IPCP_UDP_BUF_SIZE]; -        ssize_t  count    = 0; -        char *   substr   = NULL; -        char *   substr2  = NULL; -        char *   addr_str = "Address:"; -        uint32_t ip_addr  = 0; - -        if (inet_ntop(AF_INET, &dns_addr, dnsstr, INET_ADDRSTRLEN) == NULL) -                return 0; +        pid_t          pid      = -1; +        int            wstatus; +        int            pipe_fd[2]; +        char           dnsstr[__ADDRSTRLEN]; +        char           buf[IPCP_UDP_BUF_SIZE]; +        ssize_t        count    = 0; +        char *         substr   = NULL; +        char *         substr2  = NULL; +        char *         addr_str = "Address:"; +        struct __ADDR  ip_addr  = __ADDR_ANY_INIT; + +        if (__inet_ntop(&dns_addr, dnsstr) == NULL) +                return ip_addr;          if (pipe(pipe_fd)) { -                log_err("Failed to create pipe."); -                return 0; +                log_err("Failed to create pipe: %s.", strerror(errno)); +                return ip_addr;          }          pid = fork();          if (pid == -1) { -                log_err("Failed to fork."); -                return 0; +                log_err("Failed to fork: %s.", strerror(errno)); +                close(pipe_fd[0]); +                close(pipe_fd[1]); +                return ip_addr;          }          if (pid == 0) { @@ -761,22 +785,24 @@ static uint32_t ddns_resolve(char *   name,                  close(pipe_fd[0]);                  dup2(pipe_fd[1], 1);                  execve(argv[0], &argv[0], envp); +                log_err("Failed to execute: %s", strerror(errno)); +                exit(1);          }          close(pipe_fd[1]); -        count = read(pipe_fd[0], buf, IPCP_UDP_BUF_SIZE); +        count = read(pipe_fd[0], buf, IPCP_UDP_BUF_SIZE - 1);          if (count <= 0) {                  log_err("Failed to communicate with nslookup.");                  close(pipe_fd[0]); -                return 0; +                return ip_addr;          }          close(pipe_fd[0]);          waitpid(pid, &wstatus, 0);          if (WIFEXITED(wstatus) && 
WEXITSTATUS(wstatus) == 0 &&
-            count != IPCP_UDP_BUF_SIZE)
+            count != IPCP_UDP_BUF_SIZE - 1)
                 log_dbg("Successfully communicated with nslookup.");
         else
                 log_err("Failed to resolve DNS address.");
@@ -790,40 +816,43 @@
         if (substr2 == NULL || strstr(substr2, addr_str) == NULL) {
                 log_err("Failed to resolve DNS address.");
-                return 0;
+                return ip_addr;
         }
-        if (inet_pton(AF_INET, substr2 + strlen(addr_str) + 1, &ip_addr) != 1) {
+        if (inet_pton(__AF, substr2 + strlen(addr_str) + 1, &ip_addr) != 1) {
                 log_err("Failed to resolve DNS address.");
-                return 0;
+                assert(!is_addr_specified(&ip_addr));
+                return ip_addr;
         }
         return ip_addr;
 }
 #endif
-static int ipcp_udp_reg(const uint8_t * hash)
+static int udp_ipcp_reg(const uint8_t * hash)
 {
 #ifdef HAVE_DDNS
-        char     ipstr[INET_ADDRSTRLEN];
-        char     dnsstr[INET_ADDRSTRLEN];
-        char     cmd[1000];
-        uint32_t dns_addr;
-        uint32_t ip_addr;
+        char          ipstr[__ADDRSTRLEN];
+        char          dnsstr[__ADDRSTRLEN];
+        char          cmd[1000];
+        struct __ADDR dns_addr;
+        struct __ADDR ip_addr;
 #endif
         char *   hashstr;
         hashstr = malloc(ipcp_dir_hash_strlen() + 1);
-        if (hashstr == NULL)
+        if (hashstr == NULL) {
+                log_err("Failed to malloc hashstr.");
                 return -1;
+        }
         assert(hash);
         ipcp_hash_str(hashstr, hash);
         if (shim_data_reg_add_entry(udp_data.shim_data, hash)) {
-                log_err("Failed to add " HASH_FMT " to local registry.",
-                        HASH_VAL(hash));
+                log_err("Failed to add " HASH_FMT32 " to local registry.",
+                        HASH_VAL32(hash));
                 free(hashstr);
                 return -1;
         }
@@ -833,17 +862,20 @@ static int ipcp_udp_reg(const uint8_t * hash)
         dns_addr = udp_data.dns_addr;
-        if (dns_addr != 0) {
-                ip_addr = udp_data.s_saddr.sin_addr.s_addr;
-
-                if (inet_ntop(AF_INET, &ip_addr,
-                              ipstr, INET_ADDRSTRLEN) == NULL) {
+        if (is_addr_specified(&dns_addr)) {
+#ifdef BUILD_IPCP_UDP4
+                ip_addr = udp_data.s_saddr.sin_addr;
+#else
+                ip_addr = udp_data.s_saddr.sin6_addr;
+#endif
+                if (__inet_ntop(&ip_addr, ipstr) == NULL) {
+                        log_err("Failed to convert IP address to string.");
                         free(hashstr);
                         return -1;
                 }
-                if (inet_ntop(AF_INET, &dns_addr,
-                              dnsstr, INET_ADDRSTRLEN) == NULL) {
+                if (__inet_ntop(&dns_addr, dnsstr) == NULL) {
+                        log_err("Failed to convert DNS address to string.");
                         free(hashstr);
                         return -1;
                 }
@@ -852,34 +884,35 @@
                         dnsstr, hashstr, DNS_TTL, ipstr);
                 if (ddns_send(cmd)) {
+                        log_err("Failed to send DDNS message.");
                         shim_data_reg_del_entry(udp_data.shim_data, hash);
                         free(hashstr);
                         return -1;
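
ddns_send() drives nsupdate by forking, wiring a pipe to the child's stdin and exec'ing; the update closes both pipe ends when fork() fails and exits the child explicitly after a failed execve() so it can never fall through into the parent's logic. The skeleton of that pattern (run_with_stdin() is an illustrative helper using execv()):

#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_with_stdin(char * const argv[], const char * cmd)
{
        int   pfd[2];
        int   wstatus;
        pid_t pid;

        if (pipe(pfd))
                return -1;

        pid = fork();
        if (pid == -1) {
                close(pfd[0]);          /* don't leak the pipe */
                close(pfd[1]);
                return -1;
        }

        if (pid == 0) {                 /* child: read cmd on stdin */
                close(pfd[1]);
                dup2(pfd[0], STDIN_FILENO);
                execv(argv[0], argv);
                _exit(1);               /* exec failed; never return */
        }

        close(pfd[0]);
        if (write(pfd[1], cmd, strlen(cmd)) < 0) {
                close(pfd[1]);
                return -1;
        }
        close(pfd[1]);

        waitpid(pid, &wstatus, 0);
        return WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0 ? 0 : -1;
}
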
  }          }  #endif -        log_dbg("Registered " HASH_FMT ".", HASH_VAL(hash)); -          free(hashstr);          return 0;  } -static int ipcp_udp_unreg(const uint8_t * hash) +static int udp_ipcp_unreg(const uint8_t * hash)  {  #ifdef HAVE_DDNS -        char     dnsstr[INET_ADDRSTRLEN]; +        char          dnsstr[__ADDRSTRLEN];          /* max DNS name length + max IP length + max command length */ -        char     cmd[100]; -        uint32_t dns_addr; +        char          cmd[100]; +        struct __ADDR dns_addr;  #endif -        char *   hashstr; +        char * hashstr;          assert(hash);          hashstr = malloc(ipcp_dir_hash_strlen() + 1); -        if (hashstr == NULL) +        if (hashstr == NULL) { +                log_err("Failed to malloc hashstr.");                  return -1; +        }          ipcp_hash_str(hashstr, hash); @@ -888,9 +921,9 @@ static int ipcp_udp_unreg(const uint8_t * hash)          dns_addr = udp_data.dns_addr; -        if (dns_addr != 0) { -                if (inet_ntop(AF_INET, &dns_addr, dnsstr, INET_ADDRSTRLEN) -                    == NULL) { +        if (is_addr_specified(&dns_addr)) { +                if (__inet_ntop(&dns_addr, dnsstr) == NULL) { +                        log_err("Failed to convert DNS address to string.");                          free(hashstr);                          return -1;                  } @@ -903,26 +936,28 @@ static int ipcp_udp_unreg(const uint8_t * hash)          shim_data_reg_del_entry(udp_data.shim_data, hash); -        log_dbg("Unregistered " HASH_FMT ".", HASH_VAL(hash)); -          free(hashstr);          return 0;  } -static int ipcp_udp_query(const uint8_t * hash) +static int udp_ipcp_query(const uint8_t * hash)  { -        uint32_t         ip_addr  = 0; -        char *           hashstr; -        struct hostent * h; +        struct addr        addr = {}; +        char *             hashstr; +        struct addrinfo    hints; +        struct addrinfo  * ai;  #ifdef HAVE_DDNS -        uint32_t         dns_addr = 0; +        struct __ADDR      dns_addr = __ADDR_ANY_INIT; +        struct __ADDR      ip_addr = __ADDR_ANY_INIT;  #endif          assert(hash);          hashstr = malloc(ipcp_dir_hash_strlen() + 1); -        if (hashstr == NULL) +        if (hashstr == NULL) { +                log_err("Failed to malloc hashstr.");                  return -ENOMEM; +        }          ipcp_hash_str(hashstr, hash); @@ -934,28 +969,42 @@ static int ipcp_udp_query(const uint8_t * hash)  #ifdef HAVE_DDNS          dns_addr = udp_data.dns_addr; -        if (dns_addr != 0) { +        if (is_addr_specified(&dns_addr)) {                  ip_addr = ddns_resolve(hashstr, dns_addr); -                if (ip_addr == 0) { -                        log_dbg("Could not resolve %s.", hashstr); +                if (!is_addr_specified(&ip_addr)) { +                        log_err("Could not resolve %s.", hashstr);                          free(hashstr);                          return -1;                  }          } else {  #endif -                h = gethostbyname(hashstr); -                if (h == NULL) { -                        log_dbg("Could not resolve %s.", hashstr); +                memset(&hints, 0, sizeof(hints)); + +                hints.ai_family   = __AF; +                if (getaddrinfo(hashstr, NULL, &hints, &ai) != 0) { +                        log_err("Could not resolve %s: %s.", hashstr, +                                gai_strerror(errno)); +                        free(hashstr); +                        
return -1;
+                }
+
+                if (ai->ai_family != __AF) {
+                        log_err("Wrong address family for %s.", hashstr);
+                        freeaddrinfo(ai);
                         free(hashstr);
                         return -1;
                 }
-                ip_addr = *((uint32_t *) (h->h_addr_list[0]));
+        #ifdef BUILD_IPCP_UDP4
+                addr.ip4 = ((struct sockaddr_in *) (ai->ai_addr))->sin_addr;
+        #else
+                addr.ip6 = ((struct sockaddr_in6 *) (ai->ai_addr))->sin6_addr;
+        #endif
+                freeaddrinfo(ai);
 #ifdef HAVE_DDNS
         }
 #endif
-
-        if (shim_data_dir_add_entry(udp_data.shim_data, hash, ip_addr)) {
+        if (shim_data_dir_add_entry(udp_data.shim_data, hash, addr)) {
                 log_err("Failed to add directory entry.");
                 free(hashstr);
                 return -1;
@@ -966,38 +1015,51 @@
         return 0;
 }
-static int ipcp_udp_flow_alloc(int             fd,
-                               const uint8_t * dst,
-                               qosspec_t       qs,
-                               const void *    data,
-                               size_t          len)
+static int udp_ipcp_flow_alloc(int              fd,
+                               const uint8_t *  dst,
+                               qosspec_t        qs,
+                               const buffer_t * data)
 {
-        struct sockaddr_in r_saddr; /* Server address */
-        uint32_t           ip_addr = 0;
-        char               ipstr[INET_ADDRSTRLEN];
-
-        log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(dst));
+        struct __SOCKADDR r_saddr; /* Server address */
+        struct __ADDR     ip_addr;
+        struct addr       addr;
+        char              ipstr[__ADDRSTRLEN];
         (void) qs;
         assert(dst);
         if (!shim_data_dir_has(udp_data.shim_data, dst)) {
-                log_dbg("Could not resolve destination.");
+                log_err("Could not resolve destination.");
                 return -1;
         }
-        ip_addr = (uint32_t) shim_data_dir_get_addr(udp_data.shim_data, dst);
+        addr = shim_data_dir_get_addr(udp_data.shim_data, dst);
+#ifdef BUILD_IPCP_UDP4
+        ip_addr = addr.ip4;
+#else
+        ip_addr = addr.ip6;
+#endif
+        if (__inet_ntop(&ip_addr, ipstr) == NULL) {
+                log_err("Could not convert IP address.");
+                return -1;
+        }
-        inet_ntop(AF_INET, &ip_addr, ipstr, INET_ADDRSTRLEN);
-        log_dbg("Destination UDP ipcp resolved at %s.", ipstr);
+        log_dbg("Destination " HASH_FMT32 " resolved at IP %s.",
+                HASH_VAL32(dst), ipstr);
         memset((char *) &r_saddr, 0, sizeof(r_saddr));
-        r_saddr.sin_family      = AF_INET;
-        r_saddr.sin_addr.s_addr = ip_addr;
-        r_saddr.sin_port        = udp_data.s_saddr.sin_port;
+#ifdef BUILD_IPCP_UDP4
+        r_saddr.sin_family = AF_INET;
+        r_saddr.sin_addr   = addr.ip4;
+        r_saddr.sin_port   = udp_data.s_saddr.sin_port;
+#else
+        r_saddr.sin6_family = AF_INET6;
+        r_saddr.sin6_addr   = addr.ip6;
+        r_saddr.sin6_port   = udp_data.s_saddr.sin6_port;
+#endif
-        if (ipcp_udp_port_alloc(&r_saddr, fd, dst, qs, data, len) < 0) {
+        if (udp_ipcp_port_alloc(&r_saddr, fd, dst, qs, data) < 0) {
                 log_err("Could not allocate port.");
                 return -1;
         }
@@ -1011,45 +1073,21 
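
udp_ipcp_query() replaces the obsolete gethostbyname() with getaddrinfo() pinned to the IPCP's address family; note that getaddrinfo() reports failure through its return value (the argument meant for gai_strerror()), not through errno. A standalone IPv4 version of that lookup (resolve4() is an illustrative name):

#include <string.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int resolve4(const char * name, struct in_addr * out)
{
        struct addrinfo   hints;
        struct addrinfo * ai;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family   = AF_INET;    /* __AF in the shared source */
        hints.ai_socktype = SOCK_DGRAM;

        if (getaddrinfo(name, NULL, &hints, &ai) != 0)
                return -1;

        *out = ((struct sockaddr_in *) ai->ai_addr)->sin_addr;

        freeaddrinfo(ai);
        return 0;
}
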
@@ static int ipcp_udp_flow_alloc(int             fd,          fset_add(udp_data.np1_flows, fd); -        log_dbg("Flow to %s pending on fd %d.", ipstr, fd); -          return 0;  } -static int ipcp_udp_flow_alloc_resp(int          fd, -                                    int          resp, -                                    const void * data, -                                    size_t       len) +static int udp_ipcp_flow_alloc_resp(int              fd, +                                    int              resp, +                                    const buffer_t * data)  { -        struct timespec    ts  = {0, FD_UPDATE_TIMEOUT * 1000}; -        struct timespec    abstime; -        struct sockaddr_in saddr; -        int                d_eid; - -        if (resp) -                return 0; - -        clock_gettime(PTHREAD_COND_CLOCK, &abstime); +        struct __SOCKADDR saddr; +        int               d_eid; -        pthread_mutex_lock(&ipcpi.alloc_lock); - -        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) { -                ts_add(&abstime, &ts, &abstime); -                pthread_cond_timedwait(&ipcpi.alloc_cond, -                                       &ipcpi.alloc_lock, -                                       &abstime); -        } - -        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                pthread_mutex_unlock(&ipcpi.alloc_lock); +        if (ipcp_wait_flow_resp(fd) < 0) { +                log_err("Failed to wait for flow response.");                  return -1;          } -        ipcpi.alloc_id = -1; -        pthread_cond_broadcast(&ipcpi.alloc_cond); - -        pthread_mutex_unlock(&ipcpi.alloc_lock); -          pthread_rwlock_rdlock(&udp_data.flows_lock);          saddr = udp_data.fd_to_uf[fd].r_saddr; @@ -1057,7 +1095,7 @@ static int ipcp_udp_flow_alloc_resp(int          fd,          pthread_rwlock_unlock(&udp_data.flows_lock); -        if (ipcp_udp_port_alloc_resp(&saddr, d_eid, fd, resp, data, len) < 0) { +        if (udp_ipcp_port_alloc_resp(&saddr, d_eid, fd, resp, data) < 0) {                  fset_del(udp_data.np1_flows, fd);                  log_err("Failed to respond to flow request.");                  return -1; @@ -1065,13 +1103,10 @@ static int ipcp_udp_flow_alloc_resp(int          fd,          fset_add(udp_data.np1_flows, fd); -        log_dbg("Accepted flow, fd %d on eid %d.", -                fd, d_eid); -          return 0;  } -static int ipcp_udp_flow_dealloc(int fd) +static int udp_ipcp_flow_dealloc(int fd)  {          ipcp_flow_fini(fd); @@ -1084,25 +1119,23 @@ static int ipcp_udp_flow_dealloc(int fd)          pthread_rwlock_unlock(&udp_data.flows_lock); -        flow_dealloc(fd); - -        log_dbg("Flow with fd %d deallocated.", fd); +        ipcp_flow_dealloc(fd);          return 0;  }  static struct ipcp_ops udp_ops = { -        .ipcp_bootstrap       = ipcp_udp_bootstrap, +        .ipcp_bootstrap       = udp_ipcp_bootstrap,          .ipcp_enroll          = NULL,          .ipcp_connect         = NULL,          .ipcp_disconnect      = NULL, -        .ipcp_reg             = ipcp_udp_reg, -        .ipcp_unreg           = ipcp_udp_unreg, -        .ipcp_query           = ipcp_udp_query, -        .ipcp_flow_alloc      = ipcp_udp_flow_alloc, +        .ipcp_reg             = udp_ipcp_reg, +        .ipcp_unreg           = udp_ipcp_unreg, +        .ipcp_query           = udp_ipcp_query, +        .ipcp_flow_alloc      = udp_ipcp_flow_alloc,          .ipcp_flow_join       = NULL, -        .ipcp_flow_alloc_resp = 
ipcp_udp_flow_alloc_resp, -        .ipcp_flow_dealloc    = ipcp_udp_flow_dealloc +        .ipcp_flow_alloc_resp = udp_ipcp_flow_alloc_resp, +        .ipcp_flow_dealloc    = udp_ipcp_flow_dealloc  };  int main(int    argc, @@ -1110,53 +1143,51 @@ int main(int    argc,  {          int i; -        if (ipcp_init(argc, argv, &udp_ops, THIS_TYPE) < 0) -                goto fail_init;          if (udp_data_init() < 0) {                  log_err("Failed to init udp data.");                  goto fail_data_init;          } -        if (ipcp_boot() < 0) { -                log_err("Failed to boot IPCP."); -                goto fail_boot; +        if (ipcp_init(argc, argv, &udp_ops, THIS_TYPE) < 0) { +                log_err("Failed to initialize IPCP."); +                goto fail_init;          } -        if (ipcp_create_r(0)) { -                log_err("Failed to notify IRMd we are initialized."); -                goto fail_create_r; +        if (ipcp_start() < 0) { +                log_err("Failed to start IPCP."); +                goto fail_start;          } -        ipcp_shutdown(); +        ipcp_sigwait();          if (ipcp_get_state() == IPCP_SHUTDOWN) { -                for (i = 0; i < IPCP_UDP_RD_THR; ++i) -                        pthread_cancel(udp_data.packet_reader[i]);                  for (i = 0; i < IPCP_UDP_WR_THR; ++i)                          pthread_cancel(udp_data.packet_writer[i]); +                for (i = 0; i < IPCP_UDP_RD_THR; ++i) +                        pthread_cancel(udp_data.packet_reader[i]);                  pthread_cancel(udp_data.mgmt_handler); -                for (i = 0; i < IPCP_UDP_RD_THR; ++i) -                        pthread_join(udp_data.packet_reader[i], NULL);                  for (i = 0; i < IPCP_UDP_WR_THR; ++i)                          pthread_join(udp_data.packet_writer[i], NULL); +                for (i = 0; i < IPCP_UDP_RD_THR; ++i) +                        pthread_join(udp_data.packet_reader[i], NULL);                  pthread_join(udp_data.mgmt_handler, NULL); +                close(udp_data.s_fd);          } -        udp_data_fini(); +        ipcp_stop();          ipcp_fini(); -        exit(EXIT_SUCCESS); - fail_create_r: -        ipcp_set_state(IPCP_NULL); -        ipcp_shutdown(); - fail_boot:          udp_data_fini(); - fail_data_init: + +        exit(EXIT_SUCCESS); + + fail_start:          ipcp_fini();   fail_init: -        ipcp_create_r(-1); +        udp_data_fini(); + fail_data_init:          exit(EXIT_FAILURE);  } diff --git a/src/ipcpd/udp/udp4.c b/src/ipcpd/udp/udp4.c new file mode 100644 index 00000000..07d5f818 --- /dev/null +++ b/src/ipcpd/udp/udp4.c @@ -0,0 +1,42 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * IPC process over UDP/IPv4 + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#endif + +#include <ouroboros/ipcp-dev.h> + +#define BUILD_IPCP_UDP4 +#define THIS_TYPE                IPCP_UDP4 +#define TYPE_STR                 "IPCP over UDP/IPv4" +#define OUROBOROS_PREFIX         "ipcpd/udp4" +#define IPCP_UDP_MAX_PACKET_SIZE 8980 +#define __AF                     AF_INET +#define __ADDRSTRLEN             INET_ADDRSTRLEN +#define __SOCKADDR               sockaddr_in +#define __ADDR                   in_addr +#define __ADDR_ANY_INIT          { .s_addr = INADDR_ANY } + +#include "udp.c" diff --git a/src/ipcpd/udp/udp6.c b/src/ipcpd/udp/udp6.c new file mode 100644 index 00000000..b7924a3f --- /dev/null +++ b/src/ipcpd/udp/udp6.c @@ -0,0 +1,42 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * IPC process over UDP/IPv6 + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#endif + +#include <ouroboros/ipcp-dev.h> + +#define BUILD_IPCP_UDP6 +#define THIS_TYPE               IPCP_UDP6 +#define TYPE_STR                "IPCP over UDP/IPv6" +#define OUROBOROS_PREFIX        "ipcpd/udp6" +#define IPCP_UDP_MAX_PACKET_SIZE 8952 +#define __AF                    AF_INET6 +#define __ADDRSTRLEN            INET6_ADDRSTRLEN +#define __SOCKADDR              sockaddr_in6 +#define __ADDR                  in6_addr +#define __ADDR_ANY_INIT         IN6ADDR_ANY_INIT + +#include "udp.c" diff --git a/src/ipcpd/unicast/CMakeLists.txt b/src/ipcpd/unicast/CMakeLists.txt index 07f12540..ced045e6 100644 --- a/src/ipcpd/unicast/CMakeLists.txt +++ b/src/ipcpd/unicast/CMakeLists.txt @@ -13,8 +13,14 @@ include_directories(${CMAKE_SOURCE_DIR}/include)  include_directories(${CMAKE_BINARY_DIR}/include)  set(IPCP_UNICAST_TARGET ipcpd-unicast CACHE INTERNAL "") +set(IPCP_UNICAST_MPL 10000 CACHE STRING +    "Default maximum packet lifetime for the unicast IPCP, in ms") +set(DEBUG_PROTO_DHT FALSE CACHE BOOL +  "Add DHT protocol message output to debug logging") +set(DEBUG_PROTO_LS FALSE CACHE BOOL +  "Add link state protocol message output to debug logging") -protobuf_generate_c(KAD_PROTO_SRCS KAD_PROTO_HDRS kademlia.proto) +protobuf_generate_c(DHT_PROTO_SRCS DHT_PROTO_HDRS dir/dht.proto)  math(EXPR PFT_EXPR "1 << 12")  set(PFT_SIZE ${PFT_EXPR} CACHE STRING @@ -29,34 +35,33 @@ if (HAVE_FUSE)      endif ()  endif () -set(SOURCE_FILES +set(IPCP_UNICAST_SOURCE_FILES    # Add source files here -  addr_auth.c +  addr-auth.c    ca.c    connmgr.c -  dht.c    dir.c    dt.c -  enroll.c    fa.c    main.c    pff.c    routing.c    psched.c    # Add policies last -  pol/pft.c -  pol/flat.c -  pol/link_state.c -  pol/graph.c -  pol/simple_pff.c -  pol/alternate_pff.c -  pol/multipath_pff.c -  
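
udp4.c and udp6.c are deliberately tiny: each defines the family-specific macros (__AF, __SOCKADDR, __ADDR, ...) and then includes the shared udp.c, so one source file compiles into both the IPv4 and the IPv6 IPCP. The trick in miniature (shared.c and four.c are illustrative file names):

/* shared.c: the family-agnostic body, compiled once per front end. */
#include <stdio.h>

int main(void)
{
        printf("compiled for %s\n", FAMILY_STR);
        return 0;
}

/* four.c: a complete front end, mirroring udp4.c:
 *
 *     #define FAMILY_STR "AF_INET"
 *     #include "shared.c"
 *
 * a six.c front end would define FAMILY_STR as "AF_INET6" instead.
 */
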
pol/ca-mb-ecn.c -  pol/ca-nop.c +  addr-auth/flat.c +  ca/mb-ecn.c +  ca/nop.c +  dir/dht.c +  pff/simple.c +  pff/alternate.c +  pff/multipath.c +  pff/pft.c +  routing/link-state.c +  routing/graph.c    ) -add_executable(ipcpd-unicast ${SOURCE_FILES} ${IPCP_SOURCES} -  ${KAD_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS}) +add_executable(ipcpd-unicast ${IPCP_UNICAST_SOURCE_FILES} ${IPCP_SOURCES} ${COMMON_SOURCES} +  ${DHT_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS})  target_link_libraries(ipcpd-unicast LINK_PUBLIC ouroboros-dev)  include(AddCompileFlags) @@ -66,8 +71,6 @@ endif ()  install(TARGETS ipcpd-unicast RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}) -add_subdirectory(pol/tests) - -if (NOT GNU) -  add_subdirectory(tests) -endif () +add_subdirectory(pff/tests) +add_subdirectory(routing/tests) +add_subdirectory(dir/tests) diff --git a/src/ipcpd/unicast/addr_auth.c b/src/ipcpd/unicast/addr-auth.c index e508d0cb..908a4aa1 100644 --- a/src/ipcpd/unicast/addr_auth.c +++ b/src/ipcpd/unicast/addr-auth.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Address authority   * @@ -24,13 +24,12 @@  #include <ouroboros/logs.h> -#include "addr_auth.h" -#include "pol-addr-auth-ops.h" -#include "pol/flat.h" +#include "addr-auth.h" +#include "addr-auth/pol.h"  #include <stdlib.h> -struct pol_addr_auth_ops * ops; +struct addr_auth_ops * ops;  int addr_auth_init(enum pol_addr_auth type,                     const void *       info) diff --git a/src/ipcpd/unicast/addr_auth.h b/src/ipcpd/unicast/addr-auth.h index d26d3eb7..0d2cd4c0 100644 --- a/src/ipcpd/unicast/addr_auth.h +++ b/src/ipcpd/unicast/addr-auth.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Address authority   * @@ -27,6 +27,14 @@  #include <stdint.h> +#define ADDR_FMT32 "%02x.%02x.%02x.%02x" +#define ADDR_VAL32(a) \ +        ((uint8_t *) a)[0], ((uint8_t *) a)[1], \ +        ((uint8_t *) a)[2], ((uint8_t *) a)[3] + +#define ADDR_FMT64 ADDR_FMT32 "." 
ADDR_FMT32 +#define ADDR_VAL64(a) ADDR_VAL32(a), ADDR_VAL32(a + 4) +  int      addr_auth_init(enum pol_addr_auth type,                          const void *       info); diff --git a/src/ipcpd/unicast/pol/flat.c b/src/ipcpd/unicast/addr-auth/flat.c index f869f761..34ca1cef 100644 --- a/src/ipcpd/unicast/pol/flat.c +++ b/src/ipcpd/unicast/addr-auth/flat.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Policy for flat addresses in a distributed way   * @@ -29,28 +29,21 @@  #define OUROBOROS_PREFIX "flat-addr-auth"  #include <ouroboros/logs.h> -#include <ouroboros/errno.h> -#include <ouroboros/time_utils.h> -#include <ouroboros/utils.h> +#include <ouroboros/random.h> +#include "addr-auth.h"  #include "ipcp.h"  #include "flat.h" -#include <time.h> -#include <stdlib.h> -#include <math.h> -#include <string.h> -#include <assert.h> - -#define NAME_LEN 8 +#define NAME_LEN        8 +#define INVALID_ADDRESS 0  struct { -        uint8_t addr_size; +        uint8_t  addr_size; +        uint32_t addr;  } flat; -#define INVALID_ADDRESS 0 - -struct pol_addr_auth_ops flat_ops = { +struct addr_auth_ops flat_ops = {          .init    = flat_init,          .fini    = flat_fini,          .address = flat_address @@ -65,6 +58,15 @@ int flat_init(const void * info)                  return -1;          } +#if defined (CONFIG_OUROBOROS_DEBUG) && defined (IPCP_DEBUG_LOCAL) +        flat.addr = getpid(); +#else +        while (flat.addr == INVALID_ADDRESS) +                random_buffer(&flat.addr,sizeof(flat.addr)); +#endif +        log_dbg("Flat address initialized to " ADDR_FMT32 ".", +                ADDR_VAL32((uint8_t *) &flat.addr)); +          return 0;  } @@ -75,13 +77,5 @@ int flat_fini(void)  uint64_t flat_address(void)  { -        struct timespec t; -        uint32_t        addr; - -        clock_gettime(CLOCK_REALTIME, &t); -        srand(t.tv_nsec); - -        addr = (rand() % (RAND_MAX - 1) + 1) & 0xFFFFFFFF; - -        return addr; +        return (uint64_t) flat.addr;  } diff --git a/src/ipcpd/unicast/pol/flat.h b/src/ipcpd/unicast/addr-auth/flat.h index 21f7721a..d4b672c7 100644 --- a/src/ipcpd/unicast/pol/flat.h +++ b/src/ipcpd/unicast/addr-auth/flat.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Policy for flat addresses in a distributed way   * @@ -23,7 +23,7 @@  #ifndef OUROBOROS_IPCPD_UNICAST_FLAT_H  #define OUROBOROS_IPCPD_UNICAST_FLAT_H -#include "pol-addr-auth-ops.h" +#include "ops.h"  int      flat_init(const void * info); @@ -31,6 +31,6 @@ int      flat_fini(void);  uint64_t flat_address(void); -extern struct pol_addr_auth_ops flat_ops; +extern struct addr_auth_ops flat_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_FLAT_H */ diff --git a/src/ipcpd/unicast/pol-addr-auth-ops.h b/src/ipcpd/unicast/addr-auth/ops.h index 395a5675..06b24cec 100644 --- a/src/ipcpd/unicast/pol-addr-auth-ops.h +++ b/src/ipcpd/unicast/addr-auth/ops.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Address authority policy ops   * @@ -20,10 +20,10 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   
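The ADDR_FMT32/ADDR_VAL32 helpers introduced above give every policy one way to log addresses; flat.c uses them to print its randomly drawn 32-bit address. A compilable sketch of the helpers in isolation (the macro bodies are copied from addr-auth.h; the bytes print in memory order, so the output is endianness-dependent, just like the log lines):

    #include <stdio.h>
    #include <stdint.h>

    #define ADDR_FMT32 "%02x.%02x.%02x.%02x"
    #define ADDR_VAL32(a) \
            ((uint8_t *) a)[0], ((uint8_t *) a)[1], \
            ((uint8_t *) a)[2], ((uint8_t *) a)[3]

    int main(void)
    {
            uint32_t addr = 0x01020304; /* example value */

            printf("flat address " ADDR_FMT32 "\n",
                   ADDR_VAL32((uint8_t *) &addr));
            return 0;
    }

On a little-endian machine this prints 04.03.02.01. ADDR_VAL64 simply applies ADDR_VAL32 to the low and high halves, which is why it takes a byte pointer (a + 4 advances four bytes).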
*/ -#ifndef OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H -#define OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H +#ifndef OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H +#define OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H -struct pol_addr_auth_ops { +struct addr_auth_ops {          int      (* init)(const void * info);          int      (* fini)(void); @@ -31,4 +31,4 @@ struct pol_addr_auth_ops {          uint64_t (* address)(void);  }; -#endif /* OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H */ diff --git a/src/ipcpd/broadcast/enroll.c b/src/ipcpd/unicast/addr-auth/pol.h index 143f16d5..844308c6 100644 --- a/src/ipcpd/broadcast/enroll.c +++ b/src/ipcpd/unicast/addr-auth/pol.h @@ -1,7 +1,7 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   * - * Enrollment Task + * Address Authority policies   *   *    Dimitri Staessens <dimitri@ouroboros.rocks>   *    Sander Vrijders   <sander@ouroboros.rocks> @@ -19,6 +19,5 @@   * along with this program; if not, write to the Free Software   * Foundation, Inc., http://www.fsf.org/about/contact/.   */ -#define BUILD_IPCP_BROADCAST -#include "common/enroll.c" +#include "flat.h" diff --git a/src/ipcpd/unicast/ca.c b/src/ipcpd/unicast/ca.c index ddeb2849..1fcc9bb2 100644 --- a/src/ipcpd/unicast/ca.c +++ b/src/ipcpd/unicast/ca.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Congestion Avoidance   * @@ -25,12 +25,10 @@  #include <ouroboros/logs.h>  #include "ca.h" -#include "pol-ca-ops.h" -#include "pol/ca-mb-ecn.h" -#include "pol/ca-nop.h" +#include "ca/pol.h"  struct { -        struct pol_ca_ops * ops; +        struct ca_ops * ops;  } ca;  int ca_init(enum pol_cong_avoid pol) @@ -51,7 +49,6 @@ int ca_init(enum pol_cong_avoid pol)          return 0;  } -  void ca_fini(void)  {          ca.ops = NULL; diff --git a/src/ipcpd/unicast/ca.h b/src/ipcpd/unicast/ca.h index 8b221790..ea803e17 100644 --- a/src/ipcpd/unicast/ca.h +++ b/src/ipcpd/unicast/ca.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Congestion avoidance   * diff --git a/src/ipcpd/unicast/pol/ca-mb-ecn.c b/src/ipcpd/unicast/ca/mb-ecn.c index 7a88718f..d9a204b0 100644 --- a/src/ipcpd/unicast/pol/ca-mb-ecn.c +++ b/src/ipcpd/unicast/ca/mb-ecn.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Multi-bit ECN Congestion Avoidance   * @@ -29,9 +29,9 @@  #include "config.h"  #include <ouroboros/ipcp-dev.h> -#include <ouroboros/time_utils.h> +#include <ouroboros/time.h> -#include "ca-mb-ecn.h" +#include "mb-ecn.h"  #include <inttypes.h>  #include <stdlib.h> @@ -65,7 +65,7 @@ struct mb_ecn_ctx {          size_t          tx_slot;  }; -struct pol_ca_ops mb_ecn_ca_ops = { +struct ca_ops mb_ecn_ca_ops = {          .ctx_create     = mb_ecn_ctx_create,          .ctx_destroy    = mb_ecn_ctx_destroy,          .ctx_update_snd = mb_ecn_ctx_update_snd, @@ -187,7 +187,7 @@ ca_wnd_t mb_ecn_ctx_update_snd(void * _ctx,  void mb_ecn_wnd_wait(ca_wnd_t wnd)  {          if (wnd.wait > 0) { -                struct timespec s = {0, 0}; +                struct timespec s = TIMESPEC_INIT_S(0);                  if (wnd.wait > BILLION) /* Don't care throttling < 1s */                          s.tv_sec = 1;                  else diff --git a/src/ipcpd/unicast/pol/ca-mb-ecn.h b/src/ipcpd/unicast/ca/mb-ecn.h index a90ae3e2..9a2c8b49 100644 --- 
a/src/ipcpd/unicast/pol/ca-mb-ecn.h +++ b/src/ipcpd/unicast/ca/mb-ecn.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Multi-bit ECN Congestion Avoidance   * @@ -23,7 +23,7 @@  #ifndef OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H  #define OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H -#include "pol-ca-ops.h" +#include "ops.h"  void *   mb_ecn_ctx_create(void); @@ -51,6 +51,6 @@ ssize_t  mb_ecn_print_stats(void * ctx,                              char * buf,                              size_t len); -extern struct pol_ca_ops mb_ecn_ca_ops; +extern struct ca_ops mb_ecn_ca_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H */ diff --git a/src/ipcpd/unicast/pol/ca-nop.c b/src/ipcpd/unicast/ca/nop.c index db908c5c..617fc15b 100644 --- a/src/ipcpd/unicast/pol/ca-nop.c +++ b/src/ipcpd/unicast/ca/nop.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Dummy Congestion Avoidance   * @@ -20,11 +20,11 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   */ -#include "ca-nop.h" +#include "nop.h"  #include <string.h> -struct pol_ca_ops nop_ca_ops = { +struct ca_ops nop_ca_ops = {          .ctx_create     = nop_ctx_create,          .ctx_destroy    = nop_ctx_destroy,          .ctx_update_snd = nop_ctx_update_snd, diff --git a/src/ipcpd/unicast/pol/ca-nop.h b/src/ipcpd/unicast/ca/nop.h index 7b9d318f..248b198d 100644 --- a/src/ipcpd/unicast/pol/ca-nop.h +++ b/src/ipcpd/unicast/ca/nop.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Dummy Congestion Avoidance   * @@ -23,7 +23,7 @@  #ifndef OUROBOROS_IPCPD_UNICAST_CA_NOP_H  #define OUROBOROS_IPCPD_UNICAST_CA_NOP_H -#include "pol-ca-ops.h" +#include "ops.h"  void *   nop_ctx_create(void); @@ -47,6 +47,6 @@ int      nop_calc_ecn(int       fd,                        qoscube_t qc,                        size_t    len); -extern struct pol_ca_ops nop_ca_ops; +extern struct ca_ops nop_ca_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_CA_NOP_H */ diff --git a/src/ipcpd/unicast/pol-ca-ops.h b/src/ipcpd/unicast/ca/ops.h index 88f6cf61..3a7b7248 100644 --- a/src/ipcpd/unicast/pol-ca-ops.h +++ b/src/ipcpd/unicast/ca/ops.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Congestion avoidance policy ops   * @@ -20,12 +20,12 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   */ -#ifndef OUROBOROS_IPCPD_UNICAST_POL_CA_OPS_H -#define OUROBOROS_IPCPD_UNICAST_POL_CA_OPS_H +#ifndef OUROBOROS_IPCPD_UNICAST_CA_OPS_H +#define OUROBOROS_IPCPD_UNICAST_CA_OPS_H  #include "ca.h" -struct pol_ca_ops { +struct ca_ops {          void *   (* ctx_create)(void);          void     (* ctx_destroy)(void * ctx); @@ -55,4 +55,4 @@ struct pol_ca_ops {  }; -#endif /* OUROBOROS_IPCPD_UNICAST_POL_CA_OPS_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_CA_OPS_H */ diff --git a/src/ipcpd/unicast/ca/pol.h b/src/ipcpd/unicast/ca/pol.h new file mode 100644 index 00000000..db0a1a11 --- /dev/null +++ b/src/ipcpd/unicast/ca/pol.h @@ -0,0 +1,24 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Congestion avoidance policies + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
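The renames above are mechanical, but the shape they converge on is worth spelling out: every congestion-avoidance policy exports one struct ca_ops of function pointers, ca.c keeps a single selected pointer, and all call sites dispatch through it. A minimal sketch of that dispatch, with illustrative names rather than the real ipcpd types:

    #include <stdlib.h>

    /* Each policy exports a vtable; init() selects one by enum. */
    struct ops {
            void * (* ctx_create)(void);
            void   (* ctx_destroy)(void * ctx);
    };

    static void * nop_create(void)        { return malloc(1); }
    static void   nop_destroy(void * ctx) { free(ctx); }

    static struct ops nop_ops = { nop_create, nop_destroy };

    enum pol { POL_NOP = 0 };

    static struct ops * ops; /* the selected policy */

    static int pol_init(enum pol p)
    {
            switch (p) {
            case POL_NOP:
                    ops = &nop_ops;
                    return 0;
            default:
                    return -1; /* unknown policy */
            }
    }

    int main(void)
    {
            void * ctx;

            if (pol_init(POL_NOP) < 0)
                    return EXIT_FAILURE;

            ctx = ops->ctx_create();  /* dispatched through the vtable */
            ops->ctx_destroy(ctx);

            return EXIT_SUCCESS;
    }

The new per-component pol.h headers (ca/pol.h just below, addr-auth/pol.h above) then exist only to pull in every policy a component can select.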
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#include "mb-ecn.h" +#include "nop.h" diff --git a/src/ipcpd/unicast/connmgr.c b/src/ipcpd/unicast/connmgr.c index 904deff8..07568fb5 100644 --- a/src/ipcpd/unicast/connmgr.c +++ b/src/ipcpd/unicast/connmgr.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Handles connections between components   * @@ -32,8 +32,4 @@  #define BUILD_IPCP_UNICAST -#ifdef IPCP_CONN_WAIT_DIR - #include "dir.h" -#endif -  #include "common/connmgr.c" diff --git a/src/ipcpd/unicast/dht.c b/src/ipcpd/unicast/dht.c deleted file mode 100644 index 2b668f9f..00000000 --- a/src/ipcpd/unicast/dht.c +++ /dev/null @@ -1,2842 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2021 - * - * Distributed Hash Table based on Kademlia - * - *    Dimitri Staessens <dimitri@ouroboros.rocks> - *    Sander Vrijders   <sander@ouroboros.rocks> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public License - * version 2.1 as published by the Free Software Foundation. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. - */ - -#if defined(__linux__) || defined(__CYGWIN__) -#define _DEFAULT_SOURCE -#else -#define _POSIX_C_SOURCE 200112L -#endif - -#include "config.h" - -#define DHT              "dht" -#define OUROBOROS_PREFIX DHT - -#include <ouroboros/hash.h> -#include <ouroboros/ipcp-dev.h> -#include <ouroboros/bitmap.h> -#include <ouroboros/errno.h> -#include <ouroboros/logs.h> -#include <ouroboros/list.h> -#include <ouroboros/notifier.h> -#include <ouroboros/random.h> -#include <ouroboros/time_utils.h> -#include <ouroboros/tpm.h> -#include <ouroboros/utils.h> -#include <ouroboros/pthread.h> - -#include "common/connmgr.h" -#include "dht.h" -#include "dt.h" - -#include <stdlib.h> -#include <string.h> -#include <assert.h> -#include <inttypes.h> -#include <limits.h> - -#include "kademlia.pb-c.h" -typedef KadMsg kad_msg_t; -typedef KadContactMsg kad_contact_msg_t; - -#ifndef CLOCK_REALTIME_COARSE -#define CLOCK_REALTIME_COARSE CLOCK_REALTIME -#endif - -#define DHT_MAX_REQS  2048 /* KAD recommends rnd(), bmp can be changed.    */ -#define KAD_ALPHA     3    /* Parallel factor, proven optimal value.       */ -#define KAD_K         8    /* Replication factor, MDHT value.              */ -#define KAD_T_REPL    900  /* Replication time, tied to k. MDHT value.     */ -#define KAD_T_REFR    900  /* Refresh time stale bucket, MDHT value.       */ -#define KAD_T_JOIN    8    /* Response time to wait for a join.            */ -#define KAD_T_RESP    5    /* Response time to wait for a response.        
*/ -#define KAD_R_PING    2    /* Ping retries before declaring peer dead.     */ -#define KAD_QUEER     15   /* Time to declare peer questionable.           */ -#define KAD_BETA      8    /* Bucket split factor, must be 1, 2, 4 or 8.   */ -#define KAD_RESP_RETR 6    /* Number of retries on sending a response.     */ -#define KAD_JOIN_RETR 8    /* Number of retries sending a join.            */ -#define KAD_JOIN_INTV 1    /* Time (seconds) between join retries.         */ -#define HANDLE_TIMEO  1000 /* Timeout for dht_handle_packet tpm check (ms) */ -#define DHT_RETR_ADDR 1    /* Number of addresses to return on retrieve    */ - -enum dht_state { -        DHT_INIT = 0, -        DHT_SHUTDOWN, -        DHT_JOINING, -        DHT_RUNNING, -}; - -enum kad_code { -        KAD_JOIN = 0, -        KAD_FIND_NODE, -        KAD_FIND_VALUE, -        /* Messages without a response below. */ -        KAD_STORE, -        KAD_RESPONSE -}; - -enum kad_req_state { -        REQ_NULL = 0, -        REQ_INIT, -        REQ_PENDING, -        REQ_RESPONSE, -        REQ_DONE, -        REQ_DESTROY -}; - -enum lookup_state { -        LU_NULL = 0, -        LU_INIT, -        LU_PENDING, -        LU_UPDATE, -        LU_COMPLETE, -        LU_DESTROY -}; - -struct kad_req { -        struct list_head   next; - -        uint32_t           cookie; -        enum kad_code      code; -        uint8_t *          key; -        uint64_t           addr; - -        enum kad_req_state state; -        pthread_cond_t     cond; -        pthread_mutex_t    lock; - -        time_t             t_exp; -}; - -struct cookie_el { -        struct list_head next; - -        uint32_t         cookie; -}; - -struct lookup { -        struct list_head  next; - -        struct list_head  cookies; - -        uint8_t *         key; - -        struct list_head  contacts; -        size_t            n_contacts; - -        uint64_t *        addrs; -        size_t            n_addrs; - -        enum lookup_state state; -        pthread_cond_t    cond; -        pthread_mutex_t   lock; -}; - -struct val { -        struct list_head next; - -        uint64_t         addr; - -        time_t           t_exp; -        time_t           t_rep; -}; - -struct ref_entry { -        struct list_head next; - -        uint8_t *        key; - -        time_t           t_rep; -}; - -struct dht_entry { -        struct list_head next; - -        uint8_t *        key; -        size_t           n_vals; -        struct list_head vals; -}; - -struct contact { -        struct list_head next; - -        uint8_t *        id; -        uint64_t         addr; - -        size_t           fails; -        time_t           t_seen; -}; - -struct bucket { -        struct list_head contacts; -        size_t           n_contacts; - -        struct list_head alts; -        size_t           n_alts; - -        time_t           t_refr; - -        size_t           depth; -        uint8_t          mask; - -        struct bucket *  parent; -        struct bucket *  children[1L << KAD_BETA]; -}; - -struct cmd { -        struct list_head     next; - -        struct shm_du_buff * sdb; -}; - -struct dht { -        size_t           alpha; -        size_t           b; -        size_t           k; - -        time_t           t_expire; -        time_t           t_refresh; -        time_t           t_replic; -        time_t           t_repub; - -        uint8_t *        id; -        uint64_t         addr; - -        struct bucket *  buckets; - -        struct list_head entries; - -        struct list_head refs; 
- -        struct list_head lookups; - -        struct list_head requests; -        struct bmp *     cookies; - -        enum dht_state   state; -        struct list_head cmds; -        pthread_cond_t   cond; -        pthread_mutex_t  mtx; - -        pthread_rwlock_t lock; - -        uint64_t         eid; - -        struct tpm *     tpm; - -        pthread_t        worker; -}; - -struct join_info { -        struct dht * dht; -        uint64_t     addr; -}; - -struct packet_info { -        struct dht *         dht; -        struct shm_du_buff * sdb; -}; - -static uint8_t * dht_dup_key(const uint8_t * key, -                             size_t          len) -{ -        uint8_t * dup; - -        dup = malloc(sizeof(*dup) * len); -        if (dup == NULL) -                return NULL; - -        memcpy(dup, key, len); - -        return dup; -} - -static enum dht_state dht_get_state(struct dht * dht) -{ -        enum dht_state state; - -        pthread_mutex_lock(&dht->mtx); - -        state = dht->state; - -        pthread_mutex_unlock(&dht->mtx); - -        return state; -} - -static int dht_set_state(struct dht *   dht, -                         enum dht_state state) -{ -        pthread_mutex_lock(&dht->mtx); - -        if (state == DHT_JOINING && dht->state != DHT_INIT) { -                 pthread_mutex_unlock(&dht->mtx); -                 return -1; -        } - -        dht->state = state; - -        pthread_cond_broadcast(&dht->cond); - -        pthread_mutex_unlock(&dht->mtx); - -        return 0; -} - -int dht_wait_running(struct dht * dht) -{ -        int ret = 0; - -        pthread_mutex_lock(&dht->mtx); - -        pthread_cleanup_push(__cleanup_mutex_unlock, &dht->mtx); - -        while (dht->state == DHT_JOINING) -                pthread_cond_wait(&dht->cond, &dht->mtx); - -        if (dht->state != DHT_RUNNING) -                ret = -1; - -        pthread_cleanup_pop(true); - -        return ret; -} - -static uint8_t * create_id(size_t len) -{ -        uint8_t * id; - -        id = malloc(len); -        if (id == NULL) -                return NULL; - -        if (random_buffer(id, len) < 0) { -                free(id); -                return NULL; -        } - -        return id; -} - -static void kad_req_create(struct dht * dht, -                           kad_msg_t *  msg, -                           uint64_t     addr) -{ -        struct kad_req *   req; -        pthread_condattr_t cattr; -        struct timespec    t; -        size_t             b; - -        req = malloc(sizeof(*req)); -        if (req == NULL) -                return; - -        list_head_init(&req->next); - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        req->t_exp  = t.tv_sec + KAD_T_RESP; -        req->addr   = addr; -        req->state  = REQ_INIT; -        req->cookie = msg->cookie; -        req->code   = msg->code; -        req->key    = NULL; - -        pthread_rwlock_rdlock(&dht->lock); -        b = dht->b; -        pthread_rwlock_unlock(&dht->lock); - -        if (msg->has_key) { -                req->key = dht_dup_key(msg->key.data, b); -                if (req->key == NULL) { -                        free(req); -                        return; -                } -        } - -        if (pthread_mutex_init(&req->lock, NULL)) { -                free(req->key); -                free(req); -                return; -        } - -        pthread_condattr_init(&cattr); -#ifndef __APPLE__ -        pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK); -#endif - -        if 
(pthread_cond_init(&req->cond, &cattr)) { -                pthread_condattr_destroy(&cattr); -                pthread_mutex_destroy(&req->lock); -                free(req->key); -                free(req); -                return; -        } - -        pthread_condattr_destroy(&cattr); - -        pthread_rwlock_wrlock(&dht->lock); - -        list_add(&req->next, &dht->requests); - -        pthread_rwlock_unlock(&dht->lock); -} - -static void cancel_req_destroy(void * o) -{ -        struct kad_req * req = (struct kad_req *) o; - -        pthread_mutex_unlock(&req->lock); - -        pthread_cond_destroy(&req->cond); -        pthread_mutex_destroy(&req->lock); - -        if (req->key != NULL) -                free(req->key); - -        free(req); -} - -static void kad_req_destroy(struct kad_req * req) -{ -        assert(req); - -        pthread_mutex_lock(&req->lock); - -        switch (req->state) { -        case REQ_DESTROY: -                pthread_mutex_unlock(&req->lock); -                return; -        case REQ_PENDING: -                req->state = REQ_DESTROY; -                pthread_cond_signal(&req->cond); -                break; -        case REQ_INIT: -        case REQ_DONE: -                req->state = REQ_NULL; -                break; -        case REQ_RESPONSE: -        case REQ_NULL: -        default: -                break; -        } - -        pthread_cleanup_push(cancel_req_destroy, req); - -        while (req->state != REQ_NULL && req->state != REQ_DONE) -                pthread_cond_wait(&req->cond, &req->lock); - -        pthread_cleanup_pop(true); -} - -static int kad_req_wait(struct kad_req * req, -                        time_t           t) -{ -        struct timespec timeo = {t, 0}; -        struct timespec abs; -        int ret = 0; - -        assert(req); - -        clock_gettime(PTHREAD_COND_CLOCK, &abs); - -        ts_add(&abs, &timeo, &abs); - -        pthread_mutex_lock(&req->lock); - -        req->state = REQ_PENDING; - -        pthread_cleanup_push(__cleanup_mutex_unlock, &req->lock); - -        while (req->state == REQ_PENDING && ret != -ETIMEDOUT) -                ret = -pthread_cond_timedwait(&req->cond, &req->lock, &abs); - -        switch(req->state) { -        case REQ_DESTROY: -                ret = -1; -                req->state = REQ_NULL; -                pthread_cond_signal(&req->cond); -                break; -        case REQ_PENDING: /* ETIMEDOUT */ -        case REQ_RESPONSE: -                req->state = REQ_DONE; -                pthread_cond_broadcast(&req->cond); -                break; -        default: -                break; -        } - -        pthread_cleanup_pop(true); - -        return ret; -} - -static void kad_req_respond(struct kad_req * req) -{ -        pthread_mutex_lock(&req->lock); - -        req->state = REQ_RESPONSE; -        pthread_cond_signal(&req->cond); - -        pthread_mutex_unlock(&req->lock); -} - -static struct contact * contact_create(const uint8_t * id, -                                       size_t          len, -                                       uint64_t        addr) -{ -        struct contact * c; -        struct timespec  t; - -        c = malloc(sizeof(*c)); -        if (c == NULL) -                return NULL; - -        list_head_init(&c->next); - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        c->addr   = addr; -        c->fails  = 0; -        c->t_seen = t.tv_sec; -        c->id     = dht_dup_key(id, len); -        if (c->id == NULL) { -                free(c); -                
return NULL; -        } - -        return c; -} - -static void contact_destroy(struct contact * c) -{ -        if (c != NULL) -                free(c->id); - -        free(c); -} - -static struct bucket * iter_bucket(struct bucket * b, -                                   const uint8_t * id) -{ -        uint8_t byte; -        uint8_t mask; - -        assert(b); - -        if (b->children[0] == NULL) -                return b; - -        byte = id[(b->depth * KAD_BETA) / CHAR_BIT]; - -        mask = ((1L << KAD_BETA) - 1) & 0xFF; - -        byte >>= (CHAR_BIT - KAD_BETA) - -                (((b->depth) * KAD_BETA) & (CHAR_BIT - 1)); - -        return iter_bucket(b->children[(byte & mask)], id); -} - -static struct bucket * dht_get_bucket(struct dht *    dht, -                                      const uint8_t * id) -{ -        assert(dht->buckets); - -        return iter_bucket(dht->buckets, id); -} - -/* - * If someone builds a network where the n (n > k) closest nodes all - * have IDs starting with the same 64 bits: by all means, change this. - */ -static uint64_t dist(const uint8_t * src, -                     const uint8_t * dst) -{ -        return betoh64(*((uint64_t *) src) ^ *((uint64_t *) dst)); -} - -static size_t list_add_sorted(struct list_head * l, -                              struct contact *   c, -                              const uint8_t *    key) -{ -        struct list_head * p; - -        assert(l); -        assert(c); -        assert(key); -        assert(c->id); - -        list_for_each(p, l) { -                struct contact * e = list_entry(p, struct contact, next); -                if (dist(c->id, key) > dist(e->id, key)) -                        break; -        } - -        list_add_tail(&c->next, p); - -        return 1; -} - -static size_t dht_contact_list(struct dht *       dht, -                               struct list_head * l, -                               const uint8_t *    key) -{ -        struct list_head * p; -        struct bucket *    b; -        size_t             len = 0; -        size_t             i; -        struct timespec    t; - -        assert(l); -        assert(dht); -        assert(key); -        assert(list_is_empty(l)); - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        b = dht_get_bucket(dht, key); -        if (b == NULL) -                return 0; - -        b->t_refr = t.tv_sec + KAD_T_REFR; - -        if (b->n_contacts == dht->k || b->parent == NULL) { -                list_for_each(p, &b->contacts) { -                        struct contact * c; -                        c = list_entry(p, struct contact, next); -                        c = contact_create(c->id, dht->b, c->addr); -                        if (list_add_sorted(l, c, key) == 1) -                                if (++len == dht->k) -                                        break; -                } -        } else { -                struct bucket * d = b->parent; -                for (i = 0; i < (1L << KAD_BETA) && len < dht->k; ++i) { -                        list_for_each(p, &d->children[i]->contacts) { -                                struct contact * c; -                                c = list_entry(p, struct contact, next); -                                c = contact_create(c->id, dht->b, c->addr); -                                if (c == NULL) -                                        continue; -                                if (list_add_sorted(l, c, key) == 1) -                                        if (++len == dht->k) -                          
                      break; -                        } -                } -        } - -        assert(len == dht->k || b->parent == NULL); - -        return len; -} - -static struct lookup * lookup_create(struct dht *    dht, -                                     const uint8_t * id) -{ -        struct lookup *    lu; -        pthread_condattr_t cattr; - -        assert(dht); -        assert(id); - -        lu = malloc(sizeof(*lu)); -        if (lu == NULL) -                goto fail_malloc; - -        list_head_init(&lu->contacts); -        list_head_init(&lu->cookies); - -        lu->state   = LU_INIT; -        lu->addrs   = NULL; -        lu->n_addrs = 0; -        lu->key     = dht_dup_key(id, dht->b); -        if (lu->key == NULL) -                goto fail_id; - -        if (pthread_mutex_init(&lu->lock, NULL)) -                goto fail_mutex; - -        pthread_condattr_init(&cattr); -#ifndef __APPLE__ -        pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK); -#endif - -        if (pthread_cond_init(&lu->cond, &cattr)) -                goto fail_cond; - -        pthread_condattr_destroy(&cattr); - -        pthread_rwlock_wrlock(&dht->lock); - -        list_add(&lu->next, &dht->lookups); - -        lu->n_contacts = dht_contact_list(dht, &lu->contacts, id); - -        pthread_rwlock_unlock(&dht->lock); - -        return lu; - - fail_cond: -        pthread_condattr_destroy(&cattr); -        pthread_mutex_destroy(&lu->lock); - fail_mutex: -        free(lu->key); - fail_id: -        free(lu); - fail_malloc: -        return NULL; -} - -static void cancel_lookup_destroy(void * o) -{ -        struct lookup *    lu; -        struct list_head * p; -        struct list_head * h; - -        lu = (struct lookup *) o; - -        if (lu->key != NULL) -                free(lu->key); -        if (lu->addrs != NULL) -                free(lu->addrs); - -        list_for_each_safe(p, h, &lu->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                list_del(&c->next); -                contact_destroy(c); -        } - -        list_for_each_safe(p, h, &lu->cookies) { -                struct cookie_el * c = list_entry(p, struct cookie_el, next); -                list_del(&c->next); -                free(c); -        } - -        pthread_mutex_unlock(&lu->lock); - -        pthread_mutex_destroy(&lu->lock); - -        free(lu); -} - -static void lookup_destroy(struct lookup * lu) -{ -        assert(lu); - -        pthread_mutex_lock(&lu->lock); - -        switch (lu->state) { -        case LU_DESTROY: -                pthread_mutex_unlock(&lu->lock); -                return; -        case LU_PENDING: -                lu->state = LU_DESTROY; -                pthread_cond_broadcast(&lu->cond); -                break; -        case LU_INIT: -        case LU_UPDATE: -        case LU_COMPLETE: -                lu->state = LU_NULL; -                break; -        case LU_NULL: -        default: -                break; -        } - -        pthread_cleanup_push(cancel_lookup_destroy, lu); - -        while (lu->state != LU_NULL) -                pthread_cond_wait(&lu->cond, &lu->lock); - -        pthread_cleanup_pop(true); -} - -static void lookup_update(struct dht *    dht, -                          struct lookup * lu, -                          kad_msg_t *     msg) -{ -        struct list_head * p = NULL; -        struct list_head * h; -        struct contact *   c = NULL; -        size_t             n; -        size_t             pos = 0; -        bool      
         mod = false; - -        assert(lu); -        assert(msg); - -        if (dht_get_state(dht) != DHT_RUNNING) -                return; - -        pthread_mutex_lock(&lu->lock); - -        list_for_each_safe(p, h, &lu->cookies) { -                struct cookie_el * e = list_entry(p, struct cookie_el, next); -                if (e->cookie == msg->cookie) { -                        list_del(&e->next); -                        free(e); -                        break; -                } -        } - -        if (lu->state == LU_COMPLETE) { -                pthread_mutex_unlock(&lu->lock); -                return; -        } - -        if (msg->n_addrs > 0) { -                if (lu->addrs == NULL) { -                        lu->addrs = malloc(sizeof(*lu->addrs) * msg->n_addrs); -                        for (n = 0; n < msg->n_addrs; ++n) -                                lu->addrs[n] = msg->addrs[n]; -                        lu->n_addrs = msg->n_addrs; -                } - -                lu->state = LU_COMPLETE; -                pthread_cond_broadcast(&lu->cond); -                pthread_mutex_unlock(&lu->lock); -                return; -        } - -        pthread_cleanup_push(__cleanup_mutex_unlock, &lu->lock); - -        while (lu->state == LU_INIT) { -                pthread_rwlock_unlock(&dht->lock); -                pthread_cond_wait(&lu->cond, &lu->lock); -                pthread_rwlock_rdlock(&dht->lock); -        } - -        pthread_cleanup_pop(false); - -        for (n = 0; n < msg->n_contacts; ++n) { -                c = contact_create(msg->contacts[n]->id.data, -                                   dht->b, msg->contacts[n]->addr); -                if (c == NULL) -                        continue; - -                pos = 0; - -                list_for_each(p, &lu->contacts) { -                        struct contact * e; -                        e = list_entry(p, struct contact, next); -                        if (!memcmp(e->id, c->id, dht->b)) { -                                contact_destroy(c); -                                c = NULL; -                                break; -                        } - -                        if (dist(c->id, lu->key) > dist(e->id, lu->key)) -                                break; - -                        pos++; -                } - -                if (c == NULL) -                        continue; - -                if (lu->n_contacts < dht->k) { -                        list_add_tail(&c->next, p); -                        ++lu->n_contacts; -                        mod = true; -                } else if (pos == dht->k) { -                        contact_destroy(c); -                } else { -                        struct contact * d; -                        list_add_tail(&c->next, p); -                        d = list_last_entry(&lu->contacts, -                                            struct contact, next); -                        list_del(&d->next); -                        assert(lu->contacts.prv != &d->next); -                        contact_destroy(d); -                        mod = true; -                } -        } - -        if (list_is_empty(&lu->cookies) && !mod) -                lu->state = LU_COMPLETE; -        else -                lu->state = LU_UPDATE; - -        pthread_cond_broadcast(&lu->cond); -        pthread_mutex_unlock(&lu->lock); -        return; -} - -static ssize_t lookup_get_addrs(struct lookup * lu, -                                uint64_t *      addrs) -{ -        ssize_t n; - -        assert(lu); - 
-        pthread_mutex_lock(&lu->lock); - -        for (n = 0; (size_t) n < lu->n_addrs; ++n) -                addrs[n] = lu->addrs[n]; - -        assert((size_t) n == lu->n_addrs); - -        pthread_mutex_unlock(&lu->lock); - -        return n; -} - -static ssize_t lookup_contact_addrs(struct lookup * lu, -                                    uint64_t *      addrs) -{ -        struct list_head * p; -        ssize_t            n = 0; - -        assert(lu); -        assert(addrs); - -        pthread_mutex_lock(&lu->lock); - -        list_for_each(p, &lu->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                addrs[n] = c->addr; -                n++; -        } - -        pthread_mutex_unlock(&lu->lock); - -        return n; -} - -static void lookup_new_addrs(struct lookup * lu, -                             uint64_t *      addrs) -{ -        struct list_head * p; -        size_t             n = 0; - -        assert(lu); -        assert(addrs); - -        pthread_mutex_lock(&lu->lock); - -        /* Uses fails to check if the contact has been contacted. */ -        list_for_each(p, &lu->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                if (c->fails == 0) { -                        c->fails = 1; -                        addrs[n] = c->addr; -                        n++; -                } - -                if (n == KAD_ALPHA) -                        break; -        } - -        assert(n <= KAD_ALPHA); - -        addrs[n] = 0; - -        pthread_mutex_unlock(&lu->lock); -} - -static void lookup_set_state(struct lookup *   lu, -                             enum lookup_state state) -{ -        pthread_mutex_lock(&lu->lock); - -        lu->state = state; -        pthread_cond_broadcast(&lu->cond); - -        pthread_mutex_unlock(&lu->lock); -} - -static void cancel_lookup_wait(void * o) -{ -        struct lookup * lu = (struct lookup *) o; -        lu->state = LU_NULL; -        pthread_mutex_unlock(&lu->lock); -        lookup_destroy(lu); -} - -static enum lookup_state lookup_wait(struct lookup * lu) -{ -        struct timespec   timeo = {KAD_T_RESP, 0}; -        struct timespec   abs; -        enum lookup_state state; -        int               ret = 0; - -        clock_gettime(PTHREAD_COND_CLOCK, &abs); - -        ts_add(&abs, &timeo, &abs); - -        pthread_mutex_lock(&lu->lock); - -        if (lu->state == LU_INIT || lu->state == LU_UPDATE) -                lu->state = LU_PENDING; - -        pthread_cleanup_push(cancel_lookup_wait, lu); - -        while (lu->state == LU_PENDING && ret != -ETIMEDOUT) -                ret = -pthread_cond_timedwait(&lu->cond, &lu->lock, &abs); - -        pthread_cleanup_pop(false); - -        if (ret == -ETIMEDOUT) -                lu->state = LU_COMPLETE; - -        state = lu->state; - -        pthread_mutex_unlock(&lu->lock); - -        return state; -} - -static struct kad_req * dht_find_request(struct dht * dht, -                                         kad_msg_t *  msg) -{ -        struct list_head * p; - -        assert(dht); -        assert(msg); - -        list_for_each(p, &dht->requests) { -                struct kad_req * r = list_entry(p, struct kad_req, next); -                if (r->cookie == msg->cookie) -                        return r; -        } - -        return NULL; -} - -static struct lookup * dht_find_lookup(struct dht *    dht, -                                       uint32_t        cookie) -{ -        struct list_head * p; -        
struct list_head * p2; -        struct list_head * h2; - -        assert(dht); -        assert(cookie > 0); - -        list_for_each(p, &dht->lookups) { -                struct lookup * l = list_entry(p, struct lookup, next); -                pthread_mutex_lock(&l->lock); -                list_for_each_safe(p2, h2, &l->cookies) { -                        struct cookie_el * e; -                        e = list_entry(p2, struct cookie_el, next); -                        if (e->cookie == cookie) { -                                list_del(&e->next); -                                free(e); -                                pthread_mutex_unlock(&l->lock); -                                return l; -                        } -                } -                pthread_mutex_unlock(&l->lock); -        } - -        return NULL; -} - -static struct val * val_create(uint64_t addr, -                               time_t   exp) -{ -        struct val *    v; -        struct timespec t; - -        v = malloc(sizeof(*v)); -        if (v == NULL) -                return NULL; - -        list_head_init(&v->next); -        v->addr = addr; - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        v->t_exp = t.tv_sec + exp; -        v->t_rep = t.tv_sec + KAD_T_REPL; - -        return v; -} - -static void val_destroy(struct val * v) -{ -        assert(v); - -        free(v); -} - -static struct ref_entry * ref_entry_create(struct dht *    dht, -                                           const uint8_t * key) -{ -        struct ref_entry * e; -        struct timespec    t; - -        assert(dht); -        assert(key); - -        e = malloc(sizeof(*e)); -        if (e == NULL) -                return NULL; - -        e->key = dht_dup_key(key, dht->b); -        if (e->key == NULL) { -                free(e); -                return NULL; -        } - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        e->t_rep = t.tv_sec + dht->t_repub; - -        return e; -} - -static void ref_entry_destroy(struct ref_entry * e) -{ -        free(e->key); -        free(e); -} - -static struct dht_entry * dht_entry_create(struct dht *    dht, -                                           const uint8_t * key) -{ -        struct dht_entry * e; - -        assert(dht); -        assert(key); - -        e = malloc(sizeof(*e)); -        if (e == NULL) -                return NULL; - -        list_head_init(&e->next); -        list_head_init(&e->vals); - -        e->n_vals = 0; - -        e->key = dht_dup_key(key, dht->b); -        if (e->key == NULL) { -                free(e); -                return NULL; -        } - -        return e; -} - -static void dht_entry_destroy(struct dht_entry * e) -{ -        struct list_head * p; -        struct list_head * h; - -        assert(e); - -        list_for_each_safe(p, h, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                list_del(&v->next); -                val_destroy(v); -        } - -        free(e->key); - -        free(e); -} - -static int dht_entry_add_addr(struct dht_entry * e, -                              uint64_t           addr, -                              time_t             exp) -{ -        struct list_head * p; -        struct val * val; -        struct timespec t; - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        list_for_each(p, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                if (v->addr == addr) { -                        if (v->t_exp < t.tv_sec + exp) 
{ -                                v->t_exp = t.tv_sec + exp; -                                v->t_rep = t.tv_sec + KAD_T_REPL; -                        } - -                        return 0; -                } -        } - -        val = val_create(addr, exp); -        if (val == NULL) -                return -ENOMEM; - -        list_add(&val->next, &e->vals); -        ++e->n_vals; - -        return 0; -} - - -static void dht_entry_del_addr(struct dht_entry * e, -                               uint64_t           addr) -{ -        struct list_head * p; -        struct list_head * h; - -        assert(e); - -        list_for_each_safe(p, h, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                if (v->addr == addr) { -                        list_del(&v->next); -                        val_destroy(v); -                        --e->n_vals; -                } -        } - -        if (e->n_vals == 0) { -                list_del(&e->next); -                dht_entry_destroy(e); -        } -} - -static uint64_t dht_entry_get_addr(struct dht *       dht, -                                   struct dht_entry * e) -{ -        struct list_head * p; - -        assert(e); -        assert(!list_is_empty(&e->vals)); - -        list_for_each(p, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                if (v->addr != dht->addr) -                        return v->addr; -        } - -        return 0; -} - -/* Forward declaration. */ -static struct lookup * kad_lookup(struct dht *    dht, -                                  const uint8_t * key, -                                  enum kad_code   code); - - -/* Build a refresh list. */ -static void bucket_refresh(struct dht *       dht, -                           struct bucket *    b, -                           time_t             t, -                           struct list_head * r) -{ -        size_t i; - -        if (*b->children != NULL) -                for (i = 0; i < (1L << KAD_BETA); ++i) -                        bucket_refresh(dht, b->children[i], t, r); - -        if (b->n_contacts == 0) -                return; - -        if (t > b->t_refr) { -                struct contact * c; -                struct contact * d; -                c = list_first_entry(&b->contacts, struct contact, next); -                d = contact_create(c->id, dht->b, c->addr); -                if (c != NULL) -                        list_add(&d->next, r); -                return; -        } -} - - -static struct bucket * bucket_create(void) -{ -        struct bucket * b; -        struct timespec t; -        size_t          i; - -        b = malloc(sizeof(*b)); -        if (b == NULL) -                return NULL; - -        list_head_init(&b->contacts); -        b->n_contacts = 0; - -        list_head_init(&b->alts); -        b->n_alts = 0; - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); -        b->t_refr = t.tv_sec + KAD_T_REFR; - -        for (i = 0; i < (1L << KAD_BETA); ++i) -                b->children[i]  = NULL; - -        b->parent = NULL; -        b->depth = 0; - -        return b; -} - -static void bucket_destroy(struct bucket * b) -{ -        struct list_head * p; -        struct list_head * h; -        size_t             i; - -        assert(b); - -        for (i = 0; i < (1L << KAD_BETA); ++i) -                if (b->children[i] != NULL) -                        bucket_destroy(b->children[i]); - -        list_for_each_safe(p, h, &b->contacts) { -                struct contact * c 
= list_entry(p, struct contact, next); -                list_del(&c->next); -                contact_destroy(c); -                --b->n_contacts; -        } - -        list_for_each_safe(p, h, &b->alts) { -                struct contact * c = list_entry(p, struct contact, next); -                list_del(&c->next); -                contact_destroy(c); -                --b->n_contacts; -        } - -        free(b); -} - -static bool bucket_has_id(struct bucket * b, -                          const uint8_t * id) -{ -        uint8_t mask; -        uint8_t byte; - -        if (b->depth == 0) -                return true; - -        byte = id[(b->depth * KAD_BETA) / CHAR_BIT]; - -        mask = ((1L << KAD_BETA) - 1) & 0xFF; - -        byte >>= (CHAR_BIT - KAD_BETA) - -                (((b->depth - 1) * KAD_BETA) & (CHAR_BIT - 1)); - -        return ((byte & mask) == b->mask); -} - -static int split_bucket(struct bucket * b) -{ -        struct list_head * p; -        struct list_head * h; -        uint8_t mask = 0; -        size_t i; -        size_t c; - -        assert(b); -        assert(b->n_alts == 0); -        assert(b->n_contacts); -        assert(b->children[0] == NULL); - -        c = b->n_contacts; - -        for (i = 0; i < (1L << KAD_BETA); ++i) { -                b->children[i] = bucket_create(); -                if (b->children[i] == NULL) { -                        size_t j; -                        for (j = 0; j < i; ++j) -                                bucket_destroy(b->children[j]); -                        return -1; -                } - -                b->children[i]->depth  = b->depth + 1; -                b->children[i]->mask   = mask; -                b->children[i]->parent = b; - -                list_for_each_safe(p, h, &b->contacts) { -                        struct contact * c; -                        c = list_entry(p, struct contact, next); -                        if (bucket_has_id(b->children[i], c->id)) { -                                list_del(&c->next); -                                --b->n_contacts; -                                list_add(&c->next, &b->children[i]->contacts); -                                ++b->children[i]->n_contacts; -                        } -                } - -                mask++; -        } - -        for (i = 0; i < (1L << KAD_BETA); ++i) -                if (b->children[i]->n_contacts == c) -                        split_bucket(b->children[i]); - -        return 0; -} - -/* Locked externally to mandate update as (final) part of join transaction. 
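One detail of the bucket machinery being removed here deserves a gloss: iter_bucket() and bucket_has_id() walk a tree in which every level discriminates on the next KAD_BETA bits of the Kademlia id, so with KAD_BETA == 8 each level consumes exactly one byte, and closeness between ids is the XOR metric from dist() above. A self-contained sketch of the child-index computation (BETA and child_index are illustrative names; the bit arithmetic matches iter_bucket()):

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    #define BETA 8 /* bits consumed per tree level, cf. KAD_BETA */

    /* Which child bucket an id falls into at a given depth. */
    static uint8_t child_index(const uint8_t * id, size_t depth)
    {
            uint8_t byte = id[(depth * BETA) / CHAR_BIT];
            uint8_t mask = ((1U << BETA) - 1) & 0xFF;

            byte >>= (CHAR_BIT - BETA) - ((depth * BETA) & (CHAR_BIT - 1));

            return byte & mask;
    }

    int main(void)
    {
            uint8_t id[8] = { 0xab, 0xcd, 0, 0, 0, 0, 0, 0 };

            /* With BETA == 8 each level is one byte of the id. */
            printf("depth 0 -> child 0x%02x\n", child_index(id, 0));
            printf("depth 1 -> child 0x%02x\n", child_index(id, 1));

            return 0;
    }

split_bucket() splits a full bucket into 1 << KAD_BETA children by exactly this index, which is why the comment above requires KAD_BETA to be 1, 2, 4 or 8: the per-level bit field must never straddle a byte boundary.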
*/ -static int dht_update_bucket(struct dht *    dht, -                             const uint8_t * id, -                             uint64_t        addr) -{ -        struct list_head * p; -        struct list_head * h; -        struct bucket *    b; -        struct contact *   c; - -        assert(dht); - -        b = dht_get_bucket(dht, id); -        if (b == NULL) -                return -1; - -        c = contact_create(id, dht->b, addr); -        if (c == NULL) -                return -1; - -        list_for_each_safe(p, h, &b->contacts) { -                struct contact * d = list_entry(p, struct contact, next); -                if (d->addr == addr) { -                        list_del(&d->next); -                        contact_destroy(d); -                        --b->n_contacts; -                } -        } - -        if (b->n_contacts == dht->k) { -                if (bucket_has_id(b, dht->id)) { -                        list_add_tail(&c->next, &b->contacts); -                        ++b->n_contacts; -                        if (split_bucket(b)) { -                                list_del(&c->next); -                                contact_destroy(c); -                                --b->n_contacts; -                        } -                } else if (b->n_alts == dht->k) { -                        struct contact * d; -                        d = list_first_entry(&b->alts, struct contact, next); -                        list_del(&d->next); -                        contact_destroy(d); -                        list_add_tail(&c->next, &b->alts); -                } else { -                        list_add_tail(&c->next, &b->alts); -                        ++b->n_alts; -                } -        } else { -                list_add_tail(&c->next, &b->contacts); -                ++b->n_contacts; -        } - -        return 0; -} - -static int send_msg(struct dht * dht, -                    kad_msg_t *  msg, -                    uint64_t     addr) -{ -#ifndef __DHT_TEST__ -        struct shm_du_buff * sdb; -        size_t               len; -#endif -        int                  retr = 0; - -        if (msg->code == KAD_RESPONSE) -                retr = KAD_RESP_RETR; - -        pthread_rwlock_wrlock(&dht->lock); - -        if (dht->id != NULL) { -                msg->has_s_id = true; -                msg->s_id.data = dht->id; -                msg->s_id.len  = dht->b; -        } - -        msg->s_addr = dht->addr; - -        if (msg->code < KAD_STORE) { -                msg->cookie = bmp_allocate(dht->cookies); -                if (!bmp_is_id_valid(dht->cookies, msg->cookie)) { -                        pthread_rwlock_unlock(&dht->lock); -                        goto fail_bmp_alloc; -                } -        } - -        pthread_rwlock_unlock(&dht->lock); - -#ifndef __DHT_TEST__ -        len = kad_msg__get_packed_size(msg); -        if (len == 0) -                goto fail_msg; - -        while (true) { -                if (ipcp_sdb_reserve(&sdb, len)) -                        goto fail_msg; - -                kad_msg__pack(msg, shm_du_buff_head(sdb)); - -                if (dt_write_packet(addr, QOS_CUBE_BE, dht->eid, sdb) == 0) -                        break; - -                ipcp_sdb_release(sdb); - -                sleep(1); - -                if (--retr < 0) -                        goto fail_msg; -        } - -#else -        (void) addr; -        (void) retr; -#endif /* __DHT_TEST__ */ - -        if (msg->code < KAD_STORE && dht_get_state(dht) != DHT_SHUTDOWN) -              
  kad_req_create(dht, msg, addr); - -        return msg->cookie; -#ifndef __DHT_TEST__ - fail_msg: -        pthread_rwlock_wrlock(&dht->lock); -        bmp_release(dht->cookies, msg->cookie); -        pthread_rwlock_unlock(&dht->lock); -#endif /* !__DHT_TEST__ */ - fail_bmp_alloc: -        return -1; -} - -static struct dht_entry * dht_find_entry(struct dht *    dht, -                                         const uint8_t * key) -{ -        struct list_head * p; - -        list_for_each(p, &dht->entries) { -                struct dht_entry * e = list_entry(p, struct dht_entry, next); -                if (!memcmp(key, e->key, dht->b)) -                        return e; -        } - -        return NULL; -} - -static int kad_add(struct dht *              dht, -                   const kad_contact_msg_t * contacts, -                   ssize_t                   n, -                   time_t                    exp) -{ -        struct dht_entry * e; - -        pthread_rwlock_wrlock(&dht->lock); - -        while (n-- > 0) { -                if (contacts[n].id.len != dht->b) -                        log_warn("Bad key length in contact data."); - -                e = dht_find_entry(dht, contacts[n].id.data); -                if (e != NULL) { -                        if (dht_entry_add_addr(e, contacts[n].addr, exp)) -                                goto fail; -                } else { -                        e = dht_entry_create(dht, contacts[n].id.data); -                        if (e == NULL) -                                goto fail; - -                        if (dht_entry_add_addr(e, contacts[n].addr, exp)) { -                                dht_entry_destroy(e); -                                goto fail; -                        } - -                        list_add(&e->next, &dht->entries); -                } -        } - -        pthread_rwlock_unlock(&dht->lock); -        return 0; - - fail: -        pthread_rwlock_unlock(&dht->lock); -        return -ENOMEM; -} - -static int wait_resp(struct dht * dht, -                     kad_msg_t *  msg, -                     time_t       timeo) -{ -        struct kad_req * req; - -        assert(dht); -        assert(msg); - -        pthread_rwlock_rdlock(&dht->lock); - -        req = dht_find_request(dht, msg); -        if (req == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -EPERM; -        } - -        pthread_rwlock_unlock(&dht->lock); - -        return kad_req_wait(req, timeo); -} - -static int kad_store(struct dht *    dht, -                     const uint8_t * key, -                     uint64_t        addr, -                     uint64_t        r_addr, -                     time_t          ttl) -{ -        kad_msg_t msg = KAD_MSG__INIT; -        kad_contact_msg_t cmsg = KAD_CONTACT_MSG__INIT; -        kad_contact_msg_t * cmsgp[1]; - -        cmsg.id.data = (uint8_t *) key; -        cmsg.addr    = addr; - -        pthread_rwlock_rdlock(&dht->lock); - -        cmsg.id.len  = dht->b; - -        pthread_rwlock_unlock(&dht->lock); - -        cmsgp[0] = &cmsg; - -        msg.code         = KAD_STORE; -        msg.has_t_expire = true; -        msg.t_expire     = ttl; -        msg.n_contacts   = 1; -        msg.contacts     = cmsgp; - -        if (send_msg(dht, &msg, r_addr) < 0) -                return -1; - -        return 0; -} - -static ssize_t kad_find(struct dht *     dht, -                        struct lookup *  lu, -                        const uint64_t * addrs, -                        enum kad_code 
   code) -{ -        kad_msg_t msg  = KAD_MSG__INIT; -        ssize_t   sent = 0; - -        assert(dht); -        assert(lu->key); - -        msg.code = code; - -        msg.has_key       = true; -        msg.key.data      = (uint8_t *) lu->key; -        msg.key.len       = dht->b; - -        while (*addrs != 0) { -                struct cookie_el * c; -                int ret; - -                if (*addrs == dht->addr) { -                        ++addrs; -                        continue; -                } - -                ret = send_msg(dht, &msg, *addrs); -                if (ret < 0) -                        break; - -                c = malloc(sizeof(*c)); -                if (c == NULL) -                        break; - -                c->cookie = (uint32_t) ret; - -                pthread_mutex_lock(&lu->lock); - -                list_add_tail(&c->next, &lu->cookies); - -                pthread_mutex_unlock(&lu->lock); - -                ++sent; -                ++addrs; -        } - -        return sent; -} - -static void lookup_detach(struct dht *    dht, -                          struct lookup * lu) -{ -        pthread_rwlock_wrlock(&dht->lock); - -        list_del(&lu->next); - -        pthread_rwlock_unlock(&dht->lock); -} - -static struct lookup * kad_lookup(struct dht *    dht, -                                  const uint8_t * id, -                                  enum kad_code   code) -{ -        uint64_t          addrs[KAD_ALPHA + 1]; -        enum lookup_state state; -        struct lookup *   lu; - -        lu = lookup_create(dht, id); -        if (lu == NULL) -                return NULL; - -        lookup_new_addrs(lu, addrs); - -        if (addrs[0] == 0) { -                lookup_detach(dht, lu); -                lookup_destroy(lu); -                return NULL; -        } - -        if (kad_find(dht, lu, addrs, code) == 0) { -                lookup_detach(dht, lu); -                return lu; -        } - -        while ((state = lookup_wait(lu)) != LU_COMPLETE) { -                switch (state) { -                case LU_UPDATE: -                        lookup_new_addrs(lu, addrs); -                        if (addrs[0] == 0) -                                break; - -                        kad_find(dht, lu, addrs, code); -                        break; -                case LU_DESTROY: -                        lookup_detach(dht, lu); -                        lookup_set_state(lu, LU_NULL); -                        return NULL; -                default: -                        break; -                } -        } - -        assert(state == LU_COMPLETE); - -        lookup_detach(dht, lu); - -        return lu; -} - -static void kad_publish(struct dht *    dht, -                        const uint8_t * key, -                        uint64_t        addr, -                        time_t          exp) -{ -        struct lookup * lu; -        uint64_t      * addrs; -        ssize_t         n; -        size_t          k; -        time_t          t_expire; - - -        assert(dht); -        assert(key); - -        pthread_rwlock_rdlock(&dht->lock); - -        k        = dht->k; -        t_expire = dht->t_expire; - -        pthread_rwlock_unlock(&dht->lock); - -        addrs = malloc(k * sizeof(*addrs)); -        if (addrs == NULL) -                return; - -        lu = kad_lookup(dht, key, KAD_FIND_NODE); -        if (lu == NULL) { -                free(addrs); -                return; -        } - -        n = lookup_contact_addrs(lu, addrs); - -        while 
(n-- > 0) { -                if (addrs[n] == dht->addr) { -                        kad_contact_msg_t msg = KAD_CONTACT_MSG__INIT; -                        msg.id.data = (uint8_t *) key; -                        msg.id.len  = dht->b; -                        msg.addr    = addr; -                        kad_add(dht, &msg, 1, exp); -                } else { -                        if (kad_store(dht, key, addr, addrs[n], t_expire)) -                                log_warn("Failed to send store message."); -                } -        } - -        lookup_destroy(lu); - -        free(addrs); -} - -static int kad_join(struct dht * dht, -                    uint64_t     addr) -{ -        kad_msg_t       msg = KAD_MSG__INIT; - -        msg.code = KAD_JOIN; - -        msg.has_alpha       = true; -        msg.has_b           = true; -        msg.has_k           = true; -        msg.has_t_refresh   = true; -        msg.has_t_replicate = true; -        msg.alpha           = KAD_ALPHA; -        msg.k               = KAD_K; -        msg.t_refresh       = KAD_T_REFR; -        msg.t_replicate     = KAD_T_REPL; - -        pthread_rwlock_rdlock(&dht->lock); - -        msg.b               = dht->b; - -        pthread_rwlock_unlock(&dht->lock); - -        if (send_msg(dht, &msg, addr) < 0) -                return -1; - -        if (wait_resp(dht, &msg, KAD_T_JOIN) < 0) -                return -1; - -        dht->id = create_id(dht->b); -        if (dht->id == NULL) -                return -1; - -        pthread_rwlock_wrlock(&dht->lock); - -        dht_update_bucket(dht, dht->id, dht->addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -static void dht_dead_peer(struct dht * dht, -                          uint8_t *    key, -                          uint64_t     addr) -{ -        struct list_head * p; -        struct list_head * h; -        struct bucket *    b; - -        b = dht_get_bucket(dht, key); - -        list_for_each_safe(p, h, &b->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                if (b->n_contacts + b->n_alts <= dht->k) { -                        ++c->fails; -                        return; -                } - -                if (c->addr == addr) { -                        list_del(&c->next); -                        contact_destroy(c); -                        --b->n_contacts; -                        break; -                } -        } - -        while (b->n_contacts < dht->k && b->n_alts > 0) { -                struct contact * c; -                c = list_first_entry(&b->alts, struct contact, next); -                list_del(&c->next); -                --b->n_alts; -                list_add(&c->next, &b->contacts); -                ++b->n_contacts; -        } -} - -static int dht_del(struct dht *    dht, -                   const uint8_t * key, -                   uint64_t        addr) -{ -        struct dht_entry * e; - -        pthread_rwlock_wrlock(&dht->lock); - -        e = dht_find_entry(dht, key); -        if (e == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -EPERM; -        } - -        dht_entry_del_addr(e, addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -static buffer_t dht_retrieve(struct dht *    dht, -                             const uint8_t * key) -{ -        struct dht_entry * e; -        struct list_head * p; -        buffer_t           buf; -        uint64_t *         pos; -        size_t             addrs = 0; - -        
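
For reference: dht_retrieve(), continued just below, packs up to DHT_RETR_ADDR peer addresses for a key into a buffer_t whose len field counts addresses rather than bytes, and hands ownership of the allocation to the caller (the KAD_FIND_VALUE handler later frees it as resp_msg.addrs). A minimal sketch of the caller-side walk; first_peer_addr() is a hypothetical helper, not part of this codebase:

        /* Hypothetical consumer of a dht_retrieve() result.
         * Note: buf.len counts uint64_t addresses, not bytes. */
        static uint64_t first_peer_addr(struct dht * dht, buffer_t buf)
        {
                uint64_t * addrs = (uint64_t *) buf.data;
                size_t     n;
                uint64_t   found = 0;

                for (n = 0; n < buf.len; ++n) {
                        if (addrs[n] != dht->addr) { /* skip ourselves */
                                found = addrs[n];
                                break;
                        }
                }

                if (buf.len > 0)
                        free(buf.data); /* ownership was transferred */

                return found;
        }
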
pthread_rwlock_rdlock(&dht->lock); - -        e = dht_find_entry(dht, key); -        if (e == NULL) -                goto fail; - -        buf.len = MIN(DHT_RETR_ADDR, e->n_vals); -        if (buf.len == 0) -                goto fail; - -        pos = malloc(sizeof(dht->addr) * buf.len); -        if (pos == NULL) -                goto fail; - -        buf.data = (uint8_t *) pos; - -        list_for_each(p, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                *pos++ = v->addr; -                if (++addrs >= buf.len) -                        break; -        } - -        pthread_rwlock_unlock(&dht->lock); - -        return buf; - - fail: -        pthread_rwlock_unlock(&dht->lock); -        buf.len = 0; - -        return buf; -} - -static ssize_t dht_get_contacts(struct dht *          dht, -                                const uint8_t *       key, -                                kad_contact_msg_t *** msgs) -{ -        struct list_head   l; -        struct list_head * p; -        struct list_head * h; -        size_t             len; -        size_t             i = 0; - -        list_head_init(&l); - -        pthread_rwlock_wrlock(&dht->lock); - -        len = dht_contact_list(dht, &l, key); -        if (len == 0) { -                pthread_rwlock_unlock(&dht->lock); -                *msgs = NULL; -                return 0; -        } - -        *msgs = malloc(len * sizeof(**msgs)); -        if (*msgs == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return 0; -        } - -        list_for_each_safe(p, h, &l) { -                struct contact * c = list_entry(p, struct contact, next); -                (*msgs)[i] = malloc(sizeof(***msgs)); -                if ((*msgs)[i] == NULL) { -                        pthread_rwlock_unlock(&dht->lock); -                        while (i > 0) -                                free((*msgs)[--i]); -                        free(*msgs); -                        *msgs = NULL; -                        return 0; -                } - -                kad_contact_msg__init((*msgs)[i]); - -                (*msgs)[i]->id.data = c->id; -                (*msgs)[i]->id.len  = dht->b; -                (*msgs)[i++]->addr  = c->addr; -                list_del(&c->next); -                free(c); -        } - -        pthread_rwlock_unlock(&dht->lock); - -        return i; -} - -static time_t gcd(time_t a, -                  time_t b) -{ -        if (a == 0) -                return b; - -        return gcd(b % a, a); -} - -static void * work(void * o) -{ -        struct dht *       dht; -        struct timespec    now; -        struct list_head * p; -        struct list_head * h; -        struct list_head   reflist; -        time_t             intv; -        struct lookup *    lu; - -        dht = (struct dht *) o; - -        pthread_rwlock_rdlock(&dht->lock); - -        intv = gcd(dht->t_expire, dht->t_repub); -        intv = gcd(intv, gcd(KAD_T_REPL, KAD_T_REFR)) / 2; - -        pthread_rwlock_unlock(&dht->lock); - -        list_head_init(&reflist); - -        while (true) { -                clock_gettime(CLOCK_REALTIME_COARSE, &now); - -                pthread_rwlock_wrlock(&dht->lock); - -                /* Republish registered hashes. 
*/ -                list_for_each(p, &dht->refs) { -                        struct ref_entry * e; -                        uint8_t *          key; -                        uint64_t           addr; -                        time_t             t_expire; -                        e = list_entry(p, struct ref_entry, next); -                        if (now.tv_sec > e->t_rep) { -                                key = dht_dup_key(e->key, dht->b); -                                if (key == NULL) -                                        continue; -                                addr = dht->addr; -                                t_expire = dht->t_expire; -                                e->t_rep = now.tv_sec + dht->t_repub; - -                                pthread_rwlock_unlock(&dht->lock); -                                kad_publish(dht, key, addr, t_expire); -                                pthread_rwlock_wrlock(&dht->lock); -                                free(key); -                        } -                } - -                /* Remove stale entries and republish if necessary. */ -                list_for_each_safe(p, h, &dht->entries) { -                        struct list_head * p1; -                        struct list_head * h1; -                        struct dht_entry * e; -                        uint8_t *          key; -                        time_t             t_expire; -                        e = list_entry(p, struct dht_entry, next); -                        list_for_each_safe(p1, h1, &e->vals) { -                                struct val * v; -                                uint64_t     addr; -                                v = list_entry(p1, struct val, next); -                                if (now.tv_sec > v->t_exp) { -                                        list_del(&v->next); -                                        val_destroy(v); -                                        continue; -                                } - -                                if (now.tv_sec > v->t_rep) { -                                        key  = dht_dup_key(e->key, dht->b); -                                        addr = v->addr; -                                        t_expire = v->t_exp - now.tv_sec; -                                        v->t_rep = now.tv_sec + dht->t_replic; -                                        pthread_rwlock_unlock(&dht->lock); -                                        kad_publish(dht, key, addr, t_expire); -                                        pthread_rwlock_wrlock(&dht->lock); -                                        free(key); -                                } -                        } -                } - -                /* Check the requests list for unresponsive nodes. */ -                list_for_each_safe(p, h, &dht->requests) { -                        struct kad_req * r; -                        r = list_entry(p, struct kad_req, next); -                        if (now.tv_sec > r->t_exp) { -                                list_del(&r->next); -                                bmp_release(dht->cookies, r->cookie); -                                dht_dead_peer(dht, r->key, r->addr); -                                kad_req_destroy(r); -                        } -                } - -                /* Refresh unaccessed buckets. 
*/ -                bucket_refresh(dht, dht->buckets, now.tv_sec, &reflist); - -                pthread_rwlock_unlock(&dht->lock); - -                list_for_each_safe(p, h, &reflist) { -                        struct contact * c; -                        c = list_entry(p, struct contact, next); -                        lu = kad_lookup(dht, c->id, KAD_FIND_NODE); -                        if (lu != NULL) -                                lookup_destroy(lu); -                        list_del(&c->next); -                        contact_destroy(c); -                } - -                sleep(intv); -        } - -        return (void *) 0; -} - -static int kad_handle_join_resp(struct dht *     dht, -                                struct kad_req * req, -                                kad_msg_t *      msg) -{ -        assert(dht); -        assert(req); -        assert(msg); - -        /* We might send version numbers later to warn of updates if needed. */ -        if (!(msg->has_alpha && msg->has_b && msg->has_k && msg->has_t_expire && -              msg->has_t_refresh && msg->has_t_replicate)) { -                log_warn("Join refused by remote."); -                return -1; -        } - -        if (msg->b < sizeof(uint64_t)) { -                log_err("Hash sizes less than 8 bytes unsupported."); -                return -1; -        } - -        pthread_rwlock_wrlock(&dht->lock); - -        dht->buckets = bucket_create(); -        if (dht->buckets == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -1; -        } - -        /* Likely corrupt packet. The member will refuse, we might here too. */ -        if (msg->alpha != KAD_ALPHA || msg->k != KAD_K) -                log_warn("Different kademlia parameters detected."); - -        if (msg->t_replicate != KAD_T_REPL) -                log_warn("Different kademlia replication time detected."); - -        if (msg->t_refresh != KAD_T_REFR) -                log_warn("Different kademlia refresh time detected."); - -        dht->k        = msg->k; -        dht->b        = msg->b; -        dht->t_expire = msg->t_expire; -        dht->t_repub  = MAX(1, dht->t_expire - 10); - -        if (pthread_create(&dht->worker, NULL, work, dht)) { -                bucket_destroy(dht->buckets); -                pthread_rwlock_unlock(&dht->lock); -                return -1; -        } - -        kad_req_respond(req); - -        dht_update_bucket(dht, msg->s_id.data, msg->s_addr); - -        pthread_rwlock_unlock(&dht->lock); - -        log_dbg("Enrollment of DHT completed."); - -        return 0; -} - -static int kad_handle_find_resp(struct dht *     dht, -                                struct kad_req * req, -                                kad_msg_t *      msg) -{ -        struct lookup * lu; - -        assert(dht); -        assert(req); -        assert(msg); - -        pthread_rwlock_rdlock(&dht->lock); - -        lu = dht_find_lookup(dht, req->cookie); -        if (lu == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -1; -        } - -        lookup_update(dht, lu, msg); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -static void kad_handle_response(struct dht * dht, -                                kad_msg_t *  msg) -{ -        struct kad_req * req; - -        assert(dht); -        assert(msg); - -        pthread_rwlock_wrlock(&dht->lock); - -        req = dht_find_request(dht, msg); -        if (req == NULL) { -                pthread_rwlock_unlock(&dht->lock); -  
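
kad_handle_response(), interrupted here, matches a response to its outstanding request purely by the echoed cookie. dht_find_request() itself is defined earlier in the file, outside this hunk; conceptually it only needs a scan like the sketch below, which assumes nothing beyond the dht->requests list and the kad_req fields already used elsewhere in this diff:

        /* Conceptual sketch only; the real dht_find_request() lives
         * outside this hunk and may differ. */
        static struct kad_req * find_req_by_cookie(struct dht * dht,
                                                   uint32_t     cookie)
        {
                struct list_head * p;

                list_for_each(p, &dht->requests) {
                        struct kad_req * r;

                        r = list_entry(p, struct kad_req, next);
                        if (r->cookie == cookie)
                                return r;
                }

                return NULL;
        }
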
              return; -        } - -        bmp_release(dht->cookies, req->cookie); -        list_del(&req->next); - -        pthread_rwlock_unlock(&dht->lock); - -        switch(req->code) { -        case KAD_JOIN: -                if (kad_handle_join_resp(dht, req, msg)) -                        log_err("Enrollment of DHT failed."); -                break; -        case KAD_FIND_VALUE: -        case KAD_FIND_NODE: -                if (dht_get_state(dht) != DHT_RUNNING) -                        break; -                kad_handle_find_resp(dht, req, msg); -                break; -        default: -                break; -        } - -        kad_req_destroy(req); -} - -int dht_bootstrap(struct dht * dht, -                  size_t       b, -                  time_t       t_expire) -{ -        assert(dht); - -        pthread_rwlock_wrlock(&dht->lock); - -        dht->id = create_id(b); -        if (dht->id == NULL) -                goto fail_id; - -        dht->buckets = bucket_create(); -        if (dht->buckets == NULL) -                goto fail_buckets; - -        dht->buckets->depth = 0; -        dht->buckets->mask  = 0; - -        dht->b        = b / CHAR_BIT; -        dht->t_expire = MAX(2, t_expire); -        dht->t_repub  = MAX(1, t_expire - 10); -        dht->k        = KAD_K; - -        if (pthread_create(&dht->worker, NULL, work, dht)) -                goto fail_pthread_create; - -        dht->state = DHT_RUNNING; - -        dht_update_bucket(dht, dht->id, dht->addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; - - fail_pthread_create: -        bucket_destroy(dht->buckets); -        dht->buckets = NULL; - fail_buckets: -        free(dht->id); -        dht->id = NULL; - fail_id: -        pthread_rwlock_unlock(&dht->lock); -        return -1; -} - -static struct ref_entry * ref_entry_get(struct dht *    dht, -                                        const uint8_t * key) -{ -        struct list_head * p; - -        list_for_each(p, &dht->refs) { -                struct ref_entry * r = list_entry(p, struct ref_entry, next); -                if (!memcmp(key, r->key, dht-> b) ) -                        return r; -        } - -        return NULL; -} - -int dht_reg(struct dht *    dht, -            const uint8_t * key) -{ -        struct ref_entry * e; -        uint64_t           addr; -        time_t             t_expire; - -        assert(dht); -        assert(key); -        assert(dht->addr != 0); - -        if (dht_wait_running(dht)) -                return -1; - -        pthread_rwlock_wrlock(&dht->lock); - -        if (ref_entry_get(dht, key) != NULL) { -                log_dbg("Name already registered."); -                pthread_rwlock_unlock(&dht->lock); -                return 0; -        } - -        e = ref_entry_create(dht, key); -        if (e == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -ENOMEM; -        } - -        list_add(&e->next, &dht->refs); - -        t_expire = dht->t_expire; -        addr = dht->addr; - -        pthread_rwlock_unlock(&dht->lock); - -        kad_publish(dht, key, addr, t_expire); - -        return 0; -} - -int dht_unreg(struct dht *    dht, -              const uint8_t * key) -{ -        struct list_head * p; -        struct list_head * h; - -        assert(dht); -        assert(key); - -        if (dht_get_state(dht) != DHT_RUNNING) -                return -1; - -        pthread_rwlock_wrlock(&dht->lock); - -        list_for_each_safe(p, h, &dht->refs) { -                struct 
ref_entry * r = list_entry(p, struct ref_entry, next); -                if (!memcmp(key, r->key, dht->b)) { -                        list_del(&r->next); -                        ref_entry_destroy(r); -                } -        } - -        pthread_rwlock_unlock(&dht->lock); - -        dht_del(dht, key, dht->addr); - -        return 0; -} - -uint64_t dht_query(struct dht *    dht, -                   const uint8_t * key) -{ -        struct dht_entry * e; -        struct lookup *    lu; -        uint64_t           addrs[KAD_K]; -        size_t             n; - -        addrs[0] = 0; - -        if (dht_wait_running(dht)) -                return 0; - -        pthread_rwlock_rdlock(&dht->lock); - -        e = dht_find_entry(dht, key); -        if (e != NULL) -                addrs[0] = dht_entry_get_addr(dht, e); - -        pthread_rwlock_unlock(&dht->lock); - -        if (addrs[0] != 0) -                return addrs[0]; - -        lu = kad_lookup(dht, key, KAD_FIND_VALUE); -        if (lu == NULL) -                return 0; - -        n = lookup_get_addrs(lu, addrs); -        if (n == 0) { -                lookup_destroy(lu); -                return 0; -        } - -        lookup_destroy(lu); - -        /* Current behaviour is anycast: return the first peer address. */ -        if (addrs[0] != dht->addr) -                return addrs[0]; - -        if (n > 1) -                return addrs[1]; - -        return 0; -} - -static void * dht_handle_packet(void * o) -{ -        struct dht * dht = (struct dht *) o; - -        assert(dht); - -        while (true) { -                kad_msg_t *          msg; -                kad_contact_msg_t ** cmsgs; -                kad_msg_t            resp_msg = KAD_MSG__INIT; -                uint64_t             addr; -                buffer_t             buf; -                size_t               i; -                size_t               b; -                size_t               t_expire; -                struct cmd *         cmd; - -                pthread_mutex_lock(&dht->mtx); - -                pthread_cleanup_push(__cleanup_mutex_unlock, &dht->mtx); - -                while (list_is_empty(&dht->cmds)) -                        pthread_cond_wait(&dht->cond, &dht->mtx); - -                cmd = list_last_entry(&dht->cmds, struct cmd, next); -                list_del(&cmd->next); - -                pthread_cleanup_pop(true); - -                i = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb); - -                msg = kad_msg__unpack(NULL, i, shm_du_buff_head(cmd->sdb)); -#ifndef __DHT_TEST__ -                ipcp_sdb_release(cmd->sdb); -#endif -                free(cmd); - -                if (msg == NULL) { -                        log_err("Failed to unpack message."); -                        continue; -                } - -                if (msg->code != KAD_RESPONSE && dht_wait_running(dht)) { -                        kad_msg__free_unpacked(msg, NULL); -                        log_dbg("Got a request message when not running."); -                        continue; -                } - -                pthread_rwlock_rdlock(&dht->lock); - -                b        = dht->b; -                t_expire = dht->t_expire; - -                pthread_rwlock_unlock(&dht->lock); - -                if (msg->has_key && msg->key.len != b) { -                        kad_msg__free_unpacked(msg, NULL); -                        log_warn("Bad key in message."); -                        continue; -                } - -                if (msg->has_s_id && 
!msg->has_b && msg->s_id.len != b) { -                        kad_msg__free_unpacked(msg, NULL); -                        log_warn("Bad source ID in message of type %d.", -                                 msg->code); -                        continue; -                } - -                tpm_dec(dht->tpm); - -                addr = msg->s_addr; - -                resp_msg.code   = KAD_RESPONSE; -                resp_msg.cookie = msg->cookie; - -                switch(msg->code) { -                case KAD_JOIN: -                        /* Refuse enrollee on check fails. */ -                        if (msg->alpha != KAD_ALPHA || msg->k != KAD_K) { -                                log_warn("Parameter mismatch. " -                                         "DHT enrolment refused."); -                                break; -                        } - -                        if (msg->t_replicate != KAD_T_REPL) { -                                log_warn("Replication time mismatch. " -                                         "DHT enrolment refused."); - -                                break; -                        } - -                        if (msg->t_refresh != KAD_T_REFR) { -                                log_warn("Refresh time mismatch. " -                                         "DHT enrolment refused."); -                                break; -                        } - -                        resp_msg.has_alpha       = true; -                        resp_msg.has_b           = true; -                        resp_msg.has_k           = true; -                        resp_msg.has_t_expire    = true; -                        resp_msg.has_t_refresh   = true; -                        resp_msg.has_t_replicate = true; -                        resp_msg.alpha           = KAD_ALPHA; -                        resp_msg.b               = b; -                        resp_msg.k               = KAD_K; -                        resp_msg.t_expire        = t_expire; -                        resp_msg.t_refresh       = KAD_T_REFR; -                        resp_msg.t_replicate     = KAD_T_REPL; -                        break; -                case KAD_FIND_VALUE: -                        buf = dht_retrieve(dht, msg->key.data); -                        if (buf.len != 0) { -                                resp_msg.n_addrs = buf.len; -                                resp_msg.addrs   = (uint64_t *) buf.data; -                                break; -                        } -                        /* FALLTHRU */ -                case KAD_FIND_NODE: -                        /* Return k closest contacts. 
*/ -                        resp_msg.n_contacts = -                                dht_get_contacts(dht, msg->key.data, &cmsgs); -                        resp_msg.contacts = cmsgs; -                        break; -                case KAD_STORE: -                        if (msg->n_contacts < 1) { -                                log_warn("No contacts in store message."); -                                break; -                        } - -                        if (!msg->has_t_expire) { -                                log_warn("No expiry time in store message."); -                                break; -                        } - -                        kad_add(dht, *msg->contacts, msg->n_contacts, -                                msg->t_expire); -                        break; -                case KAD_RESPONSE: -                        kad_handle_response(dht, msg); -                        break; -                default: -                        assert(false); -                        break; -                } - -                if (msg->code != KAD_JOIN) { -                        pthread_rwlock_wrlock(&dht->lock); -                        if (dht_get_state(dht) == DHT_JOINING && -                            dht->buckets == NULL) { -                                pthread_rwlock_unlock(&dht->lock); -                                goto finish; -                        } - -                        if (dht_update_bucket(dht, msg->s_id.data, addr)) -                                log_warn("Failed to update bucket."); -                        pthread_rwlock_unlock(&dht->lock); -                } - -                if (msg->code < KAD_STORE && send_msg(dht, &resp_msg, addr) < 0) -                                log_warn("Failed to send response."); - - finish: -                kad_msg__free_unpacked(msg, NULL); - -                if (resp_msg.n_addrs > 0) -                        free(resp_msg.addrs); - -                if (resp_msg.n_contacts == 0) { -                        tpm_inc(dht->tpm); -                        continue; -                } - -                for (i = 0; i < resp_msg.n_contacts; ++i) -                        kad_contact_msg__free_unpacked(resp_msg.contacts[i], -                                                       NULL); -                free(resp_msg.contacts); - -                tpm_inc(dht->tpm); -        } - -        return (void *) 0; -} - -static void dht_post_packet(void *               comp, -                            struct shm_du_buff * sdb) -{ -        struct cmd * cmd; -        struct dht * dht = (struct dht *) comp; - -        if (dht_get_state(dht) == DHT_SHUTDOWN) { -#ifndef __DHT_TEST__ -                ipcp_sdb_release(sdb); -#endif -                return; -        } - -        cmd = malloc(sizeof(*cmd)); -        if (cmd == NULL) { -                log_err("Command failed. 
Out of memory."); -                return; -        } - -        cmd->sdb = sdb; - -        pthread_mutex_lock(&dht->mtx); - -        list_add(&cmd->next, &dht->cmds); - -        pthread_cond_signal(&dht->cond); - -        pthread_mutex_unlock(&dht->mtx); -} - -void dht_destroy(struct dht * dht) -{ -        struct list_head * p; -        struct list_head * h; - -        if (dht == NULL) -                return; - -#ifndef __DHT_TEST__ -        tpm_stop(dht->tpm); - -        tpm_destroy(dht->tpm); -#endif -        if (dht_get_state(dht) == DHT_RUNNING) { -                dht_set_state(dht, DHT_SHUTDOWN); -                pthread_cancel(dht->worker); -                pthread_join(dht->worker, NULL); -        } - -        pthread_rwlock_wrlock(&dht->lock); - -        list_for_each_safe(p, h, &dht->cmds) { -                struct cmd * c = list_entry(p, struct cmd, next); -                list_del(&c->next); -#ifndef __DHT_TEST__ -                ipcp_sdb_release(c->sdb); -#endif -                free(c); -        } - -        list_for_each_safe(p, h, &dht->entries) { -                struct dht_entry * e = list_entry(p, struct dht_entry, next); -                list_del(&e->next); -                dht_entry_destroy(e); -        } - -        list_for_each_safe(p, h, &dht->requests) { -                struct kad_req * r = list_entry(p, struct kad_req, next); -                list_del(&r->next); -                kad_req_destroy(r); -        } - -        list_for_each_safe(p, h, &dht->refs) { -                struct ref_entry * e = list_entry(p, struct ref_entry, next); -                list_del(&e->next); -                ref_entry_destroy(e); -        } - -        list_for_each_safe(p, h, &dht->lookups) { -                struct lookup * l = list_entry(p, struct lookup, next); -                list_del(&l->next); -                lookup_destroy(l); -        } - -        pthread_rwlock_unlock(&dht->lock); - -        if (dht->buckets != NULL) -                bucket_destroy(dht->buckets); - -        bmp_destroy(dht->cookies); - -        pthread_mutex_destroy(&dht->mtx); - -        pthread_rwlock_destroy(&dht->lock); - -        free(dht->id); - -        free(dht); -} - -static void * join_thr(void * o) -{ -        struct join_info * info = (struct join_info *) o; -        struct lookup *    lu; -        size_t             retr = 0; - -        assert(info); - -        while (kad_join(info->dht, info->addr)) { -                if (dht_get_state(info->dht) == DHT_SHUTDOWN) { -                        log_dbg("DHT enrollment aborted."); -                        goto finish; -                } - -                if (retr++ == KAD_JOIN_RETR) { -                        dht_set_state(info->dht, DHT_INIT); -                        log_warn("DHT enrollment attempt failed."); -                        goto finish; -                } - -                sleep(KAD_JOIN_INTV); -        } - -        dht_set_state(info->dht, DHT_RUNNING); - -        lu = kad_lookup(info->dht, info->dht->id, KAD_FIND_NODE); -        if (lu != NULL) -                lookup_destroy(lu); - - finish: -        free(info); - -        return (void *) 0; -} - -static void handle_event(void *       self, -                         int          event, -                         const void * o) -{ -        struct dht * dht = (struct dht *) self; - -        if (event == NOTIFY_DT_CONN_ADD) { -                pthread_t          thr; -                struct join_info * inf; -                struct conn *      c     = (struct conn *) o; -             
   struct timespec    slack = {0, DHT_ENROLL_SLACK * MILLION}; - -                /* Give the pff some time to update for the new link. */ -                nanosleep(&slack, NULL); - -                switch(dht_get_state(dht)) { -                case DHT_INIT: -                        inf = malloc(sizeof(*inf)); -                        if (inf == NULL) -                                break; - -                        inf->dht  = dht; -                        inf->addr = c->conn_info.addr; - -                        if (dht_set_state(dht, DHT_JOINING) == 0 || -                            dht_wait_running(dht)) { -                                if (pthread_create(&thr, NULL, join_thr, inf)) { -                                        dht_set_state(dht, DHT_INIT); -                                        free(inf); -                                        return; -                                } -                                pthread_detach(thr); -                        } else { -                                free(inf); -                        } -                        break; -                case DHT_RUNNING: -                        /* -                         * FIXME: this lookup, done for efficiency reasons, -                         * causes a SEGV when stressed with rapid -                         * enrollments. -                         * lu = kad_lookup(dht, dht->id, KAD_FIND_NODE); -                         * if (lu != NULL) -                         *         lookup_destroy(lu); -                         */ -                        break; -                default: -                        break; -                } -        } -} - -struct dht * dht_create(uint64_t addr) -{ -        struct dht * dht; - -        dht = malloc(sizeof(*dht)); -        if (dht == NULL) -                goto fail_malloc; - -        dht->buckets = NULL; - -        list_head_init(&dht->entries); -        list_head_init(&dht->requests); -        list_head_init(&dht->refs); -        list_head_init(&dht->lookups); -        list_head_init(&dht->cmds); - -        if (pthread_rwlock_init(&dht->lock, NULL)) -                goto fail_rwlock; - -        if (pthread_mutex_init(&dht->mtx, NULL)) -                goto fail_mutex; - -        if (pthread_cond_init(&dht->cond, NULL)) -                goto fail_cond; - -        dht->cookies = bmp_create(DHT_MAX_REQS, 1); -        if (dht->cookies == NULL) -                goto fail_bmp; - -        dht->b    = 0; -        dht->addr = addr; -        dht->id   = NULL; -#ifndef __DHT_TEST__ -        dht->tpm = tpm_create(2, 1, dht_handle_packet, dht); -        if (dht->tpm == NULL) -                goto fail_tpm_create; - -        if (tpm_start(dht->tpm)) -                goto fail_tpm_start; - -        dht->eid   = dt_reg_comp(dht, &dht_post_packet, DHT); -        if ((int) dht->eid < 0) -                goto fail_tpm_start; - -        notifier_reg(handle_event, dht); -#else -        (void) handle_event; -        (void) dht_handle_packet; -        (void) dht_post_packet; -#endif -        dht->state = DHT_INIT; - -        return dht; -#ifndef __DHT_TEST__ - fail_tpm_start: -        tpm_destroy(dht->tpm); - fail_tpm_create: -        bmp_destroy(dht->cookies); -#endif - fail_bmp: -        pthread_cond_destroy(&dht->cond); - fail_cond: -        pthread_mutex_destroy(&dht->mtx); - fail_mutex: -        pthread_rwlock_destroy(&dht->lock); - fail_rwlock: -        free(dht); - fail_malloc: -        return NULL; -} diff --git a/src/ipcpd/unicast/dir.c b/src/ipcpd/unicast/dir.c index 
a30908b8..2b305626 100644 --- a/src/ipcpd/unicast/dir.c +++ b/src/ipcpd/unicast/dir.c @@ -1,7 +1,7 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   * - * Directory + * Directory Management   *   *    Dimitri Staessens <dimitri@ouroboros.rocks>   *    Sander Vrijders   <sander@ouroboros.rocks> @@ -34,8 +34,7 @@  #include <ouroboros/utils.h>  #include "dir.h" -#include "dht.h" -#include "ipcp.h" +#include "dir/pol.h"  #include <stdlib.h>  #include <string.h> @@ -43,60 +42,59 @@  #include <inttypes.h>  #include <limits.h> -#define KAD_B (hash_len(ipcpi.dir_hash_algo) * CHAR_BIT) +struct { +        struct dir_ops * ops; +} dir; -struct ipcp icpci; -struct dht * dht; - -int dir_init(void) +int dir_init(struct dir_config * conf)  { -        dht = dht_create(ipcpi.dt_addr); -        if (dht == NULL) -                return -ENOMEM; +        void * cfg; + +        assert(conf != NULL); + +        switch (conf->pol) { +        case DIR_DHT: +                log_info("Using DHT policy."); +                dir.ops = &dht_dir_ops; +                cfg = &conf->dht; +                break; +        default: /* DIR_INVALID */ +                log_err("Invalid directory policy %d.", conf->pol); +                return -EINVAL; +        } -        return 0; +        assert(dir.ops->init != NULL); + +        return dir.ops->init(cfg);  }  void dir_fini(void)  { -        dht_destroy(dht); +        dir.ops->fini(); +        dir.ops = NULL;  } -int dir_bootstrap(void) { -        log_dbg("Bootstrapping directory."); - -        /* TODO: get parameters for bootstrap from IRM tool. */ -        if (dht_bootstrap(dht, KAD_B, 86400)) { -                dht_destroy(dht); -                return -ENOMEM; -        } - -        log_info("Directory bootstrapped."); +int dir_start(void) +{ +        return dir.ops->start(); +} -        return 0; +void dir_stop(void) +{ +        dir.ops->stop();  }  int dir_reg(const uint8_t * hash)  { -        return dht_reg(dht, hash); +        return dir.ops->reg(hash);  }  int dir_unreg(const uint8_t * hash)  { -        return dht_unreg(dht, hash); +        return dir.ops->unreg(hash);  }  uint64_t dir_query(const uint8_t * hash)  { -        return dht_query(dht, hash); -} - -int dir_wait_running(void) -{ -        if (dht_wait_running(dht)) { -                log_warn("Directory did not bootstrap."); -                return -1; -        } - -        return 0; +        return dir.ops->query(hash);  } diff --git a/src/ipcpd/unicast/dir.h b/src/ipcpd/unicast/dir.h index 8aa79638..dbfde19f 100644 --- a/src/ipcpd/unicast/dir.h +++ b/src/ipcpd/unicast/dir.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Directory   * @@ -25,11 +25,14 @@  #include <inttypes.h> -int      dir_init(void); +/* may update the config! 
*/ +int      dir_init(struct dir_config * conf);  void     dir_fini(void); -int      dir_bootstrap(void); +int      dir_start(void); + +void     dir_stop(void);  int      dir_reg(const uint8_t * hash); @@ -37,6 +40,4 @@ int      dir_unreg(const uint8_t * hash);  uint64_t dir_query(const uint8_t * hash); -int      dir_wait_running(void); -  #endif /* OUROBOROS_IPCPD_UNICAST_DIR_H */ diff --git a/src/ipcpd/unicast/dir/dht.c b/src/ipcpd/unicast/dir/dht.c new file mode 100644 index 00000000..6b06def9 --- /dev/null +++ b/src/ipcpd/unicast/dir/dht.c @@ -0,0 +1,4052 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Distributed Hash Table based on Kademlia + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#if !defined (__DHT_TEST__) +  #if defined(__linux__) || defined(__CYGWIN__) +    #define _DEFAULT_SOURCE +  #else +    #define _POSIX_C_SOURCE 200112L +  #endif +#endif + +#include "config.h" + +#define DHT              "dht" +#define OUROBOROS_PREFIX DHT + +#include <ouroboros/endian.h> +#include <ouroboros/hash.h> +#include <ouroboros/ipcp-dev.h> +#include <ouroboros/bitmap.h> +#include <ouroboros/errno.h> +#include <ouroboros/logs.h> +#include <ouroboros/list.h> +#include <ouroboros/random.h> +#include <ouroboros/rib.h> +#include <ouroboros/time.h> +#include <ouroboros/tpm.h> +#include <ouroboros/utils.h> +#include <ouroboros/pthread.h> + +#include "addr-auth.h" +#include "common/connmgr.h" +#include "dht.h" +#include "dt.h" +#include "ipcp.h" +#include "ops.h" + +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include <inttypes.h> +#include <limits.h> + +#include "dht.pb-c.h" +typedef DhtMsg              dht_msg_t; +typedef DhtContactMsg       dht_contact_msg_t; +typedef DhtStoreMsg         dht_store_msg_t; +typedef DhtFindReqMsg       dht_find_req_msg_t; +typedef DhtFindNodeRspMsg   dht_find_node_rsp_msg_t; +typedef DhtFindValueRspMsg  dht_find_value_rsp_msg_t; +typedef ProtobufCBinaryData binary_data_t; + +#ifndef CLOCK_REALTIME_COARSE +#define CLOCK_REALTIME_COARSE CLOCK_REALTIME +#endif + +#define DHT_MAX_REQS  128  /* KAD recommends rnd(), bmp can be changed.    */ +#define DHT_WARN_REQS 100  /* Warn if number of requests exceeds this.     */ +#define DHT_MAX_VALS  8    /* Max number of values to return for a key.    */ +#define DHT_T_CACHE   60   /* Max cache time for values (s)                */ +#define DHT_T_RESP    2    /* Response time to wait for a response (s).    */ +#define DHT_N_REPUB   5    /* Republish if expiry within n replications.   */ +#define DHT_R_PING    2    /* Ping retries before declaring peer dead.     */ +#define DHT_QUEER     15   /* Time to declare peer questionable.           */ +#define DHT_BETA      8    /* Bucket split factor, must be 1, 2, 4 or 8.   */ +#define DHT_RESP_RETR 6    /* Number of retries on sending a response.     
*/ +#define HANDLE_TIMEO  1000 /* Timeout for dht_handle_packet tpm check (ms) */ +#define DHT_INVALID   0    /* Invalid cookie value.                        */ + +#define KEY_FMT "K<" HASH_FMT64 ">" +#define KEY_VAL(key) HASH_VAL64(key) + +#define VAL_FMT "V<" HASH_FMT64 ">" +#define VAL_VAL(val) HASH_VAL64((val).data) + +#define KV_FMT "<" HASH_FMT64 ", " HASH_FMT64 ">" +#define KV_VAL(key, val) HASH_VAL64(key), HASH_VAL64((val).data) + +#define PEER_FMT "[" HASH_FMT64 "|" ADDR_FMT32 "]" +#define PEER_VAL(id, addr) HASH_VAL64(id), ADDR_VAL32(&(addr)) + +#define DHT_CODE(msg) dht_code_str[(msg)->code] + +#define TX_HDR_FMT "%s --> " PEER_FMT +#define TX_HDR_VAL(msg, id, addr) DHT_CODE(msg), PEER_VAL(id, addr) + +#define RX_HDR_FMT "%s <-- " PEER_FMT +#define RX_HDR_VAL(msg) DHT_CODE(msg), \ +        PEER_VAL(msg->src->id.data, msg->src->addr) + +#define CK_FMT "|" HASH_FMT64 "|" +#define CK_VAL(cookie) HASH_VAL64(&(cookie)) + +#define IS_REQUEST(code) \ +        (code == DHT_FIND_NODE_REQ || code == DHT_FIND_VALUE_REQ) + +enum dht_code { +        DHT_STORE, +        DHT_FIND_NODE_REQ, +        DHT_FIND_NODE_RSP, +        DHT_FIND_VALUE_REQ, +        DHT_FIND_VALUE_RSP +}; + +const char * dht_code_str[] = { +        "DHT_STORE", +        "DHT_FIND_NODE_REQ", +        "DHT_FIND_NODE_RSP", +        "DHT_FIND_VALUE_REQ", +        "DHT_FIND_VALUE_RSP" +}; + +enum dht_state { +        DHT_NULL = 0, +        DHT_INIT, +        DHT_RUNNING +}; + +struct val_entry { +        struct list_head next; + +        buffer_t         val; + +        time_t           t_exp;   /* Expiry time           */ +        time_t           t_repl;  /* Last replication time */ +}; + +struct dht_entry { +        struct list_head next; + +        uint8_t *        key; + +        struct { +                struct list_head list; +                size_t           len; +        } vals;  /* We don't own these, only replicate */ + +        struct { +                struct list_head list; +                size_t           len; +        } lvals; /* We own these, must be republished  */ +}; + +struct contact { +        struct list_head next; + +        uint8_t *        id; +        uint64_t         addr; + +        size_t           fails; +        time_t           t_seen; +}; + +struct peer_entry { +        struct list_head next; + +        uint64_t         cookie; +        uint8_t *        id; +        uint64_t         addr; +        enum dht_code    code; + +        time_t           t_sent; +}; + +struct dht_req { +        struct list_head next; + +        uint8_t *        key; +        time_t           t_exp; + +        struct { +                struct list_head list; +                size_t           len; +        } peers; + +        struct { +                struct list_head list; +                size_t           len; +        } cache; +}; + +struct bucket { +        struct { +                struct list_head list; +                size_t           len; +        } contacts; + +        struct { +                struct list_head list; +                size_t           len; +        } alts; + +        time_t           t_refr; + +        size_t           depth; +        uint8_t          mask; + +        struct bucket *  parent; +        struct bucket *  children[1L << DHT_BETA]; +}; + +struct cmd { +        struct list_head next; +        buffer_t         cbuf; +}; + +struct dir_ops dht_dir_ops = { +        .init  = (int (*)(void *)) dht_init, +        .fini  = dht_fini, +        .start = dht_start, +        .stop  = dht_stop, 
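
The dht_dir_ops table being filled in here is the policy hook that the reworked dir.c above dispatches through (see its dir_init() switch). Any alternative directory policy would only need to provide the same seven entry points; a minimal do-nothing sketch, with every nop_* name invented for illustration:

        /* Hypothetical second directory policy; all names invented. */
        static int      nop_init(void * cfg)            { (void) cfg; return 0; }
        static void     nop_fini(void)                  { }
        static int      nop_start(void)                 { return 0; }
        static void     nop_stop(void)                  { }
        static int      nop_reg(const uint8_t * hash)   { (void) hash; return 0; }
        static int      nop_unreg(const uint8_t * hash) { (void) hash; return 0; }
        static uint64_t nop_query(const uint8_t * hash) { (void) hash; return 0; }

        struct dir_ops nop_dir_ops = {
                .init  = nop_init,
                .fini  = nop_fini,
                .start = nop_start,
                .stop  = nop_stop,
                .reg   = nop_reg,
                .unreg = nop_unreg,
                .query = nop_query
        };
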
+        .reg   = dht_reg, +        .unreg = dht_unreg, +        .query = dht_query +}; + +struct { +        struct { /* Kademlia parameters */ +                uint32_t alpha;     /* Number of concurrent requests   */ +                size_t   k;         /* Number of replicas to store     */ +                time_t   t_expire;  /* Expiry time for values (s)      */ +                time_t   t_refresh; /* Refresh time for contacts (s)   */ +                time_t   t_repl;    /* Replication time for values (s) */ +        }; + +        buffer_t       id; + +        time_t         t0;    /* Creation time               */ +        uint64_t       addr;  /* Our own address             */ +        uint64_t       peer;  /* Enrollment peer address     */ +        uint64_t       magic; /* Magic cookie for retransmit */ + +        uint64_t       eid;   /* Entity ID                   */ + +        struct tpm *   tpm; +        pthread_t      worker; + +        enum dht_state state; + +        struct { +                struct { +                        struct bucket * root; +                } contacts; + +                struct { +                        struct list_head list; +                        size_t           len; +                        size_t           vals; +                        size_t           lvals; +                } kv; + +                pthread_rwlock_t lock; +        } db; + +        struct { +                struct list_head list; +                size_t           len; +                pthread_cond_t   cond; +                pthread_mutex_t  mtx; +        } reqs; + +        struct { +                struct list_head list; +                pthread_cond_t   cond; +                pthread_mutex_t  mtx; +        } cmds; +} dht; + + +/* DHT RIB */ + +static const char * dht_dir[] = { +        "database", +        "stats", +        NULL +}; + +const char * dht_stats = \ +        "DHT: " HASH_FMT64 "\n" +        "  Created: %s\n" +        "  Address: " ADDR_FMT32 "\n" +        "  Kademlia parameters:\n" +        "     Number of concurrent requests (alpha): %10zu\n" +        "     Number of replicas (k):                %10zu\n" +        "     Expiry time for values (s):            %10ld\n" +        "     Refresh time for contacts (s):         %10ld\n" +        "     Replication time for values (s):       %10ld\n" +        "  Number of keys:                           %10zu\n" +        "  Number of local values:                   %10zu\n" +        "  Number of non-local values:               %10zu\n"; + +static int dht_rib_statfile(char * buf, +                            size_t len) +{ +        struct tm * tm; +        char        tmstr[RIB_TM_STRLEN]; +        size_t      keys; +        size_t      vals; +        size_t      lvals; + +        assert(buf != NULL); +        assert(len > 0); + +        pthread_rwlock_rdlock(&dht.db.lock); + +        keys  = dht.db.kv.len; +        lvals = dht.db.kv.lvals; +        vals  = dht.db.kv.vals; + +        pthread_rwlock_unlock(&dht.db.lock); + +        tm = gmtime(&dht.t0); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); + +        snprintf(buf, len, dht_stats, +                 HASH_VAL64(dht.id.data), +                 tmstr, +                 ADDR_VAL32(&dht.addr), +                 (size_t) dht.alpha, dht.k, +                 dht.t_expire, dht.t_refresh, dht.t_repl, +                 keys, lvals, vals); + +        return strlen(buf); +} + +static size_t dht_db_file_len(void) +{ +        size_t sz; +        size_t vals; + +        sz = 
18; /* DHT database + 2 * \n */ + +        pthread_rwlock_rdlock(&dht.db.lock); + +        if (dht.db.kv.len == 0) { +                pthread_rwlock_unlock(&dht.db.lock); +                sz += 14; /* No entries */ +                return sz; +        } + +        sz += 39 * 3 + 1; /* tally + extra newline */ +        sz += dht.db.kv.len * (25 + 19 + 23 + 1); + +        vals = dht.db.kv.vals + dht.db.kv.lvals; + +        sz += vals * (48 + 2 * RIB_TM_STRLEN); + +        pthread_rwlock_unlock(&dht.db.lock); + +        return sz; +} + +static int dht_rib_dbfile(char * buf, +                          size_t len) +{ +        struct tm * tm; +        char        tmstr[RIB_TM_STRLEN]; +        char        exstr[RIB_TM_STRLEN]; +        size_t      i = 0; +        struct list_head * p; + +        assert(buf != NULL); +        assert(len > 0); + +        pthread_rwlock_rdlock(&dht.db.lock); + +        if (dht.db.kv.len == 0) { +                i += snprintf(buf, len, "  No entries.\n"); +                pthread_rwlock_unlock(&dht.db.lock); +                return i; +        } + +        i += snprintf(buf + i, len - i, "DHT database:\n\n"); +        i += snprintf(buf + i, len - i, +                      "Number of keys:             %10zu\n" +                      "Number of local values:     %10zu\n" +                      "Number of non-local values: %10zu\n\n", +                      dht.db.kv.len, dht.db.kv.lvals, dht.db.kv.vals); + +        list_for_each(p, &dht.db.kv.list) { +                struct dht_entry * e = list_entry(p, struct dht_entry, next); +                struct list_head * h; + +                i += snprintf(buf + i, len - i, "Key: " KEY_FMT "\n", +                              KEY_VAL(e->key)); +                i += snprintf(buf + i, len - i, "  Local entries:\n"); + +                list_for_each(h, &e->lvals.list) { +                        struct val_entry * v; + +                        v = list_entry(h, struct val_entry, next); + +                        tm = gmtime(&v->t_repl); +                        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); + +                        tm = gmtime(&v->t_exp); +                        strftime(exstr, sizeof(exstr), RIB_TM_FORMAT, tm); + +                        i += snprintf(buf + i, len - i, +                                "    " VAL_FMT +                                ", t_replicated=%.*s, t_expire=%.*s\n", +                                VAL_VAL(v->val), +                                RIB_TM_STRLEN, tmstr, +                                RIB_TM_STRLEN, exstr); +                } + +                i += snprintf(buf + i, len - i, "\n"); + +                i += snprintf(buf + i, len - i, "  Non-local entries:\n"); + +                list_for_each(h, &e->vals.list) { +                        struct val_entry * v; + +                        v = list_entry(h, struct val_entry, next); + +                        tm = gmtime(&v->t_repl); +                        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); + +                        tm = gmtime(&v->t_exp); +                        strftime(exstr, sizeof(exstr), RIB_TM_FORMAT, tm); + +                        i += snprintf(buf + i, len - i, +                                "    " VAL_FMT +                                ", t_replicated=%.*s, t_expire=%.*s\n", +                                VAL_VAL(v->val), +                                RIB_TM_STRLEN, tmstr, +                                RIB_TM_STRLEN, exstr); +                } +        }
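
The generator above relies on the usual i += snprintf(buf + i, len - i, ...) accumulation idiom. One caveat: snprintf() returns the length that would have been written, so the offset can in principle run past len; that is only safe here because the RIB pre-sizes the buffer via dht_db_file_len() in dht_rib_getattr(). A stand-alone illustration of the idiom, with append() being an invented name:

        /* Append s to buf at offset i; snprintf never writes past len. */
        static size_t append(char * buf, size_t len, size_t i, const char * s)
        {
                if (i >= len)
                        return i; /* buffer exhausted, keep offset sane */

                return i + snprintf(buf + i, len - i, "%s", s);
        }
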
 + +        pthread_rwlock_unlock(&dht.db.lock); + +        log_dbg("DHT RIB DB file generated (%zu bytes).", i); + +        return i; +} + +static int dht_rib_read(const char * path, +                        char *       buf, +                        size_t       len) +{ +        char * entry; + +        entry = strstr(path, RIB_SEPARATOR) + 1; + +        if (strcmp(entry, "database") == 0) { +                return dht_rib_dbfile(buf, len); +        } else if (strcmp(entry, "stats") == 0) { +                return dht_rib_statfile(buf, len); +        } + +        return 0; +} + +static int dht_rib_readdir(char *** buf) +{ +        int i = 0; + +        while (dht_dir[i++] != NULL); + +        *buf = malloc(sizeof(**buf) * i); +        if (*buf == NULL) +                goto fail_buf; + +        i = 0; + +        while (dht_dir[i] != NULL) { +                (*buf)[i] = strdup(dht_dir[i]); +                if ((*buf)[i] == NULL) +                        goto fail_dup; +                i++; +        } + +        return i; + fail_dup: +        freepp(char, *buf, i); + fail_buf: +        return -ENOMEM; +} + +static int dht_rib_getattr(const char *      path, +                           struct rib_attr * attr) +{ +        struct timespec now; +        char *          entry; + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        attr->mtime = now.tv_sec; + +        entry = strstr(path, RIB_SEPARATOR) + 1; + +        if (strcmp(entry, "database") == 0) { +                attr->size = dht_db_file_len(); +        } else if (strcmp(entry, "stats") == 0) { +                attr->size = 545; +        } + +        return 0; +} + +static struct rib_ops r_ops = { +        .read    = dht_rib_read, +        .readdir = dht_rib_readdir, +        .getattr = dht_rib_getattr +}; + +/* Helper functions */ + +static uint8_t * generate_id(void) +{ +        uint8_t * id; + +        if (dht.id.len < sizeof(uint64_t)) { +                log_err("DHT ID length is too short (%zu < %zu).", +                        dht.id.len, sizeof(uint64_t)); +                return NULL; +        } + +        id = malloc(dht.id.len); +        if (id == NULL) { +                log_err("Failed to malloc ID."); +                goto fail_id; +        } + +        if (random_buffer(id, dht.id.len) < 0) { +                log_err("Failed to generate random ID."); +                goto fail_rnd; +        } + +        return id; + fail_rnd: +        free(id); + fail_id: +        return NULL; +} + +static uint64_t generate_cookie(void) +{ +        uint64_t cookie = DHT_INVALID; + +        while (cookie == DHT_INVALID) +                random_buffer((uint8_t *) &cookie, sizeof(cookie)); + +        return cookie; +} + +/* + * If someone builds a network where the n (n > k) closest nodes all + * have IDs starting with the same 64 bits: by all means, change this. 
+ */ +static uint64_t dist(const uint8_t * src, +                     const uint8_t * dst) +{ +        assert(dht.id.len >= sizeof(uint64_t)); + +        return betoh64(*((uint64_t *) src) ^ *((uint64_t *) dst)); +} + +#define IS_CLOSER(x, y) (dist((x), dht.id.data) < dist((y), dht.id.data)) + +static int addr_to_buf(const uint64_t addr, +                       buffer_t *     buf) +{ +        size_t len; +        uint64_t _addr; + +        len = sizeof(addr); +        _addr = hton64(addr); + +        assert(buf != NULL); + +        buf->data = malloc(len); +        if (buf->data == NULL) +                goto fail_malloc; + +        buf->len = sizeof(_addr); +        memcpy(buf->data, &_addr, sizeof(_addr)); + +        return 0; + fail_malloc: +        return -ENOMEM; +} + +static int buf_to_addr(const buffer_t buf, +                       uint64_t *     addr) +{ +        assert(addr != NULL); +        assert(buf.data != NULL); + +        if (buf.len != sizeof(*addr)) +                return - EINVAL; + +        *addr = ntoh64(*((uint64_t *) buf.data)); + +        if (*addr == dht.addr) +                *addr = INVALID_ADDR; + +        return 0; +} + +static uint8_t * dht_dup_key(const uint8_t * key) +{ +        uint8_t * dup; + +        assert(key != NULL); +        assert(dht.id.len != 0); + +        dup = malloc(dht.id.len); +        if (dup == NULL) +                return NULL; + +        memcpy(dup, key, dht.id.len); + +        return dup; +} + +/* DHT */ + +static struct val_entry * val_entry_create(const buffer_t val, +                                           time_t         exp) +{ +        struct val_entry * e; +        struct timespec    now; + +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +#ifndef __DHT_TEST_ALLOW_EXPIRED__ +        if (exp < now.tv_sec) +                return NULL; /* Refuse to add expired values */ +#endif +        e = malloc(sizeof(*e)); +        if (e == NULL) +                goto fail_entry; + +        list_head_init(&e->next); + +        e->val.len = val.len; +        e->val.data = malloc(val.len); +        if (e->val.data == NULL) +                goto fail_val; + +        memcpy(e->val.data, val.data, val.len); + +        e->t_repl  = 0; +        e->t_exp   = exp; + +        return e; + + fail_val: +        free(e); + fail_entry: +        return NULL; +} + +static void val_entry_destroy(struct val_entry * v) +{ +        assert(v->val.data != NULL); + +        freebuf(v->val); +        free(v); +} + +static struct dht_entry * dht_entry_create(const uint8_t * key) +{ +        struct dht_entry * e; + +        assert(key != NULL); + +        e = malloc(sizeof(*e)); +        if (e == NULL) +                goto fail_entry; + +        list_head_init(&e->next); +        list_head_init(&e->vals.list); +        list_head_init(&e->lvals.list); + +        e->vals.len = 0; +        e->lvals.len = 0; + +        e->key = dht_dup_key(key); +        if (e->key == NULL) +                goto fail_key; + +        return e; + fail_key: +        free(e); + fail_entry: +        return NULL; +} + +static void dht_entry_destroy(struct dht_entry * e) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(e != NULL); + +        list_for_each_safe(p, h, &e->vals.list) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                list_del(&v->next); +                val_entry_destroy(v); +                --e->vals.len; +                
--dht.db.kv.vals; +        } + +        list_for_each_safe(p, h, &e->lvals.list) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                list_del(&v->next); +                val_entry_destroy(v); +                --e->lvals.len; +                --dht.db.kv.lvals; +        } + +        free(e->key); + +        assert(e->vals.len == 0 && e->lvals.len == 0); + +        free(e); +} + +static struct val_entry * dht_entry_get_lval(const struct dht_entry * e, +                                             const buffer_t           val) +{ +        struct list_head * p; + +        assert(e != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        list_for_each(p, &e->lvals.list) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                if (bufcmp(&v->val, &val) == 0) +                        return v; +        } + +        return NULL; +} + +static struct val_entry * dht_entry_get_val(const struct dht_entry * e, +                                            const buffer_t           val) +{ +        struct list_head * p; + +        assert(e != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        list_for_each(p, &e->vals.list) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                if (bufcmp(&v->val, &val) == 0) +                        return v; + +        } + +        return NULL; +} + +static int dht_entry_update_val(struct dht_entry * e, +                                buffer_t           val, +                                time_t             exp) +{ +        struct val_entry * v; +        struct timespec    now; + +        assert(e != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (exp < now.tv_sec) +                return -EINVAL; /* Refuse to add expired values */ + +        if (dht_entry_get_lval(e, val) != NULL) { +                log_dbg(KV_FMT " Val already in lvals.", KV_VAL(e->key, val)); +                return 0; /* Refuse to add local values */ +        } + +        v = dht_entry_get_val(e, val); +        if (v == NULL) { +                v = val_entry_create(val, exp); +                if (v == NULL) +                        return -ENOMEM; + +                list_add_tail(&v->next, &e->vals.list); +                ++e->vals.len; +                ++dht.db.kv.vals; + +                return 0; +        } + +        if (v->t_exp < exp) +                v->t_exp  = exp; + +        return 0; +} + +static int dht_entry_update_lval(struct dht_entry * e, +                                 buffer_t           val) +{ +        struct val_entry * v; +        struct timespec    now; + +        assert(e != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        v = dht_entry_get_lval(e, val); +        if (v == NULL) { +                log_dbg(KV_FMT " Adding lval.", KV_VAL(e->key, val)); +                v = val_entry_create(val, now.tv_sec + dht.t_expire); +                if (v == NULL) +                        return -ENOMEM; + +                list_add_tail(&v->next, &e->lvals.list); +                ++e->lvals.len; +                ++dht.db.kv.lvals; + +                return 0; +        } + +        return 0; +} + +static int dht_entry_remove_lval(struct dht_entry * e, +                                 buffer_t           val) +{ +        
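/*
+         * Called on unpublish: only locally published values (lvals)
+         * can be removed here; non-local replicas in vals age out via
+         * dht_entry_remove_expired_vals().
+         */
+        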
struct val_entry * v; + +        assert(e != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        v = dht_entry_get_lval(e, val); +        if (v == NULL) +                return -ENOENT; + +        log_dbg(KV_FMT " Removing lval.", KV_VAL(e->key, val)); + +        list_del(&v->next); +        val_entry_destroy(v); +        --e->lvals.len; +        --dht.db.kv.lvals; + +        return 0; +} + +#define IS_EXPIRED(v, now) ((now)->tv_sec > (v)->t_exp) +static void dht_entry_remove_expired_vals(struct dht_entry * e) +{ +        struct list_head * p; +        struct list_head * h; +        struct timespec    now; + +        assert(e != NULL); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        list_for_each_safe(p, h, &e->vals.list) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                if (!IS_EXPIRED(v, &now)) +                        continue; + +                log_dbg(KV_FMT " Value expired." , KV_VAL(e->key, v->val)); +                list_del(&v->next); +                val_entry_destroy(v); +                --e->vals.len; +                --dht.db.kv.vals; +        } +} + +static struct dht_entry * __dht_kv_find_entry(const uint8_t * key) +{ +        struct list_head * p; + +        assert(key != NULL); + +        list_for_each(p, &dht.db.kv.list) { +                struct dht_entry * e = list_entry(p, struct dht_entry, next); +                if (!memcmp(key, e->key, dht.id.len)) +                        return e; +        } + +        return NULL; +} + +static void dht_kv_remove_expired_entries(void) +{ +        struct list_head * p; +        struct list_head * h; +        struct timespec    now; + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        list_for_each_safe(p, h, &dht.db.kv.list) { +                struct dht_entry * e = list_entry(p, struct dht_entry, next); +                dht_entry_remove_expired_vals(e); +                if (e->lvals.len > 0 || e->vals.len > 0) +                        continue; + +                log_dbg(KEY_FMT " Entry removed. 
", KEY_VAL(e->key)); +                list_del(&e->next); +                dht_entry_destroy(e); +                --dht.db.kv.len; +        } + +        pthread_rwlock_unlock(&dht.db.lock); +} + + +static struct contact * contact_create(const uint8_t * id, +                                       uint64_t        addr) +{ +        struct contact * c; +        struct timespec  t; + +        c = malloc(sizeof(*c)); +        if (c == NULL) +                return NULL; + +        list_head_init(&c->next); + +        clock_gettime(CLOCK_REALTIME_COARSE, &t); + +        c->addr   = addr; +        c->fails  = 0; +        c->t_seen = t.tv_sec; +        c->id     = dht_dup_key(id); +        if (c->id == NULL) { +                free(c); +                return NULL; +        } + +        return c; +} + +static void contact_destroy(struct contact * c) +{ +        assert(c != NULL); +        assert(list_is_empty(&c->next)); + +        free(c->id); +        free(c); +} + +static struct dht_req * dht_req_create(const uint8_t * key) +{ +        struct dht_req * req; +        struct timespec  now; + +        assert(key != NULL); + +        clock_gettime(PTHREAD_COND_CLOCK, &now); + +        req = malloc(sizeof(*req)); +        if (req == NULL) +                goto fail_malloc; + +        list_head_init(&req->next); + +        req->t_exp = now.tv_sec + DHT_T_RESP; + +        list_head_init(&req->peers.list); +        req->peers.len = 0; + +        req->key = dht_dup_key(key); +        if (req->key == NULL) +                goto fail_dup_key; + +        list_head_init(&req->cache.list); +        req->cache.len = 0; + +        return req; + + fail_dup_key: +        free(req); + fail_malloc: +        return NULL; +} + +static void dht_req_destroy(struct dht_req * req) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(req); +        assert(req->key); + +        list_for_each_safe(p, h, &req->peers.list) { +                struct peer_entry * e = list_entry(p, struct peer_entry, next); +                list_del(&e->next); +                free(e->id); +                free(e); +                --req->peers.len; +        } + +        list_for_each_safe(p, h, &req->cache.list) { +                struct val_entry * e = list_entry(p, struct val_entry, next); +                list_del(&e->next); +                val_entry_destroy(e); +                --req->cache.len; +        } + +        free(req->key); + +        assert(req->peers.len == 0); + +        free(req); +} + +static struct peer_entry * dht_req_get_peer(struct dht_req *    req, +                                            struct peer_entry * e) +{ +        struct list_head * p; + +        list_for_each(p, &req->peers.list) { +                struct peer_entry * x = list_entry(p, struct peer_entry, next); +                if (x->addr == e->addr) +                        return x; +        } + +        return NULL; +} + +#define IS_MAGIC(peer) ((peer)->cookie == dht.magic) +void dht_req_add_peer(struct dht_req * req, +                      struct peer_entry * e) +{ +        struct peer_entry * x; /* existing */ +        struct list_head *  p; /* iterator */ +        size_t              pos = 0; + +        assert(req   != NULL); +        assert(e     != NULL); +        assert(e->id != NULL); + +        /* +         * Dedupe messages to the same peer, unless +         *   1) The previous request was FIND_NODE and now it's FIND_VALUE +         *   2) We urgently need contacts from emergency peer (magic cookie) +         */ +  
      x = dht_req_get_peer(req, e);
+        if (x != NULL && x->code >= e->code && !IS_MAGIC(e))
+                goto skip;
+
+        /* Find how this contact ranks in XOR distance to the key */
+        list_for_each(p, &req->peers.list) {
+                struct peer_entry * y = list_entry(p, struct peer_entry, next);
+                if (IS_CLOSER(y->id, e->id)) {
+                        pos++;
+                        continue;
+                }
+                break;
+        }
+
+        /* Add the peer if it ranks within alpha; magic peers bypass the cap */
+        if (pos < dht.alpha || IS_MAGIC(e)) {
+                x = malloc(sizeof(*x));
+                if (x == NULL) {
+                        log_err("Failed to malloc peer entry.");
+                        goto skip;
+                }
+
+                x->cookie = e->cookie;
+                x->addr   = e->addr;
+                x->code   = e->code;
+                x->t_sent = e->t_sent;
+                x->id     = dht_dup_key(e->id);
+                if (x->id == NULL) {
+                        log_err("Failed to dup peer ID.");
+                        free(x);
+                        goto skip;
+                }
+
+                if (IS_MAGIC(e))
+                        list_add(&x->next, p);
+                else
+                        list_add_tail(&x->next, p);
+                ++req->peers.len;
+                return;
+        }
+ skip:
+        list_del(&e->next);
+        free(e->id);
+        free(e);
+}
+
+static size_t dht_req_add_peers(struct dht_req *   req,
+                                struct list_head * pl)
+{
+        struct list_head *  p;
+        struct list_head *  h;
+        size_t              n = 0;
+
+        assert(req != NULL);
+        assert(pl  != NULL);
+
+        list_for_each_safe(p, h, pl) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                dht_req_add_peer(req, e);
+                ++n;
+        }
+
+        return n;
+}
+
+static bool dht_req_has_peer(struct dht_req * req,
+                             uint64_t         cookie)
+{
+        struct list_head * p;
+
+        assert(req != NULL);
+
+        list_for_each(p, &req->peers.list) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                if (e->cookie == cookie)
+                        return true;
+        }
+
+        return false;
+}
+
+static void peer_list_destroy(struct list_head * pl)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        assert(pl != NULL);
+
+        list_for_each_safe(p, h, pl) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                list_del(&e->next);
+                free(e->id);
+                free(e);
+        }
+}
+
+static int dht_kv_create_peer_list(struct list_head * cl,
+                                   struct list_head * pl,
+                                   enum dht_code      code)
+{
+        struct list_head *  p;
+        struct list_head *  h;
+        struct timespec     now;
+        size_t              len;
+
+        assert(cl != NULL);
+        assert(pl != NULL);
+        assert(list_is_empty(pl));
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        len = 0;
+
+        list_for_each_safe(p, h, cl) {
+                struct contact * c = list_entry(p, struct contact, next);
+                struct peer_entry * e;
+                if (len++ == dht.alpha)
+                        break;
+
+                e = malloc(sizeof(*e));
+        
        if (e == NULL) +                        return -ENOMEM; + +                e->cookie = generate_cookie(); +                e->code   = code; +                e->addr   = c->addr; +                e->t_sent = now.tv_sec; + +                e->id = c->id; + +                list_add_tail(&e->next, pl); + +                list_del(&c->next); +                c->id = NULL; /* we stole the id */ +                contact_destroy(c); +        } + +        return 0; +} + +static struct dht_req * __dht_kv_req_get_req(const uint8_t * key) +{ +        struct list_head * p; + +        list_for_each(p, &dht.reqs.list) { +                struct dht_req * r = list_entry(p, struct dht_req, next); +                if (memcmp(r->key, key, dht.id.len) == 0) +                        return r; +        } + +        return NULL; +} + +static struct dht_req * __dht_kv_get_req_cache(const uint8_t * key) +{ +        struct dht_req * req; + +        assert(key != NULL); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) +                return NULL; + +        if (req->cache.len == 0) +                return NULL; + +        return req; +} + +static void __dht_kv_req_remove(const uint8_t * key) +{ +        struct dht_req * req; + +        assert(key != NULL); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) +                return; + +        list_del(&req->next); +        --dht.reqs.len; + +        dht_req_destroy(req); +} + +static struct dht_req * __dht_kv_get_req_peer(const uint8_t * key, +                                              uint64_t        cookie) +{ +        struct dht_req * req; + +        assert(key != NULL); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) +                return NULL; + +        if (!dht_req_has_peer(req, cookie)) +                return NULL; + +        return req; +} + +static bool dht_kv_has_req(const uint8_t * key, +                           uint64_t        cookie) +{ +        bool found; + +        pthread_mutex_lock(&dht.reqs.mtx); + +        found = __dht_kv_get_req_peer(key, cookie) != NULL; + +        pthread_mutex_unlock(&dht.reqs.mtx); + +        return found; +} + +/* + * This will filter the peer list for addresses that still need to be + * contacted. 
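+ *
+ * If the request already holds cached values the new peer list is
+ * discarded; otherwise the peers are merged into the outstanding
+ * request and its deadline is pushed back by DHT_T_RESP.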
+ */ +static int dht_kv_update_req(const uint8_t *    key, +                             struct list_head * pl) +{ +        struct dht_req * req; +        struct timespec  now; + +        assert(key != NULL); +        assert(pl != NULL); +        assert(!list_is_empty(pl)); + +        clock_gettime(PTHREAD_COND_CLOCK, &now); + +        pthread_mutex_lock(&dht.reqs.mtx); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) { +                if (dht.reqs.len == DHT_MAX_REQS) { +                        log_err(KEY_FMT " Max reqs reached (%zu).", +                                KEY_VAL(key), dht.reqs.len); +                        peer_list_destroy(pl); +                        goto fail_req; +                } +                req = dht_req_create(key); +                if (req == NULL) { +                        log_err(KEY_FMT "Failed to create req.", KEY_VAL(key)); +                        goto fail_req; +                } +                list_add_tail(&req->next, &dht.reqs.list); +                ++dht.reqs.len; +        } + +        if (req->cache.len > 0) /* Already have values */ +                peer_list_destroy(pl); + +        dht_req_add_peers(req, pl); +        req->t_exp = now.tv_sec + DHT_T_RESP; + +        if (dht.reqs.len > DHT_WARN_REQS) { +                log_warn("Number of outstanding requests (%zu) exceeds %u.", +                         dht.reqs.len, DHT_WARN_REQS); +        } + +        pthread_mutex_unlock(&dht.reqs.mtx); + +        return 0; + fail_req: +        pthread_mutex_unlock(&dht.reqs.mtx); +        return -1; +} + +static int dht_kv_respond_req(uint8_t *       key, +                              binary_data_t * vals, +                              size_t          len) +{ +        struct dht_req * req; +        struct timespec  now; +        size_t i; + +        assert(key != NULL); +        assert(vals != NULL); +        assert(len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_mutex_lock(&dht.reqs.mtx); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) { +                log_dbg(KEY_FMT " Failed to find req.", KEY_VAL(key)); +                goto fail_req; +        } + +        for (i = 0; i < len; ++i) { +                struct val_entry * e; +                buffer_t val; +                val.data = vals[i].data; +                val.len = vals[i].len; +                e = val_entry_create(val, now.tv_sec + DHT_T_CACHE); +                if (e == NULL) { +                        log_err(" Failed to create val_entry."); +                        continue; +                } + +                list_add_tail(&e->next, &req->cache.list); +                ++req->cache.len; +        } + +        pthread_cond_broadcast(&dht.reqs.cond); + +        pthread_mutex_unlock(&dht.reqs.mtx); + +        return 0; + fail_req: +        pthread_mutex_unlock(&dht.reqs.mtx); +        return -1; +} + +static ssize_t dht_kv_wait_req(const uint8_t * key, +                               buffer_t **     vals) +{ +        struct list_head * p; +        struct dht_req *   req; +        struct timespec    t; +#ifdef __DHT_TEST__ +        struct timespec    intv = TIMESPEC_INIT_MS(10); +#else +        struct timespec    intv = TIMESPEC_INIT_S(DHT_T_RESP); +#endif +        size_t             max; +        size_t             i = 0; +        int                ret = 0; + +        assert(key != NULL); +        assert(vals != NULL); + +        clock_gettime(PTHREAD_COND_CLOCK, &t); + +        ts_add(&t, &intv, &t); + +   
     pthread_mutex_lock(&dht.reqs.mtx);
+
+        pthread_cleanup_push(__cleanup_mutex_unlock, &dht.reqs.mtx);
+
+        while ((req = __dht_kv_get_req_cache(key)) == NULL) {
+                ret = pthread_cond_timedwait(&dht.reqs.cond, &dht.reqs.mtx, &t);
+                if (ret == ETIMEDOUT)
+                        break;
+        }
+
+        pthread_cleanup_pop(false);
+
+        if (ret == ETIMEDOUT) {
+                log_warn(KEY_FMT " Req timed out.", KEY_VAL(key));
+                __dht_kv_req_remove(key);
+                goto timedout;
+        }
+
+        max = MIN(req->cache.len, DHT_MAX_VALS);
+        if (max == 0)
+                goto no_vals;
+
+        *vals = malloc(max * sizeof(**vals));
+        if (*vals == NULL) {
+                log_err(KEY_FMT " Failed to malloc val buffer.", KEY_VAL(key));
+                goto fail_vals;
+        }
+
+        memset(*vals, 0, max * sizeof(**vals));
+
+        list_for_each(p, &req->cache.list) {
+                struct val_entry * v;
+                if (i == max)
+                        break; /* We have enough values */
+                v = list_entry(p, struct val_entry, next);
+                (*vals)[i].data = malloc(v->val.len);
+                if ((*vals)[i].data == NULL)
+                        goto fail_val_data;
+
+                (*vals)[i].len = v->val.len;
+                memcpy((*vals)[i++].data, v->val.data, v->val.len);
+        }
+
+        pthread_mutex_unlock(&dht.reqs.mtx);
+
+        return i;
+ no_vals:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        *vals = NULL;
+        return 0;
+ fail_val_data:
+        freebufs(*vals, i);
+ fail_vals:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        return -ENOMEM;
+ timedout:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        return -ETIMEDOUT;
+}
+
+static struct bucket * iter_bucket(struct bucket * b,
+                                   const uint8_t * id)
+{
+        uint8_t byte;
+        uint8_t mask;
+
+        assert(b != NULL);
+
+        if (b->children[0] == NULL)
+                return b;
+
+        byte = id[(b->depth * DHT_BETA) / CHAR_BIT];
+
+        mask = ((1L << DHT_BETA) - 1) & 0xFF;
+
+        byte >>= (CHAR_BIT - DHT_BETA) -
+                (((b->depth) * DHT_BETA) & (CHAR_BIT - 1));
+
+        return iter_bucket(b->children[(byte & mask)], id);
+}
+
+static struct bucket * __dht_kv_get_bucket(const uint8_t * id)
+{
+        assert(dht.db.contacts.root != NULL);
+
+        return iter_bucket(dht.db.contacts.root, id);
+}
+
+static void contact_list_add(struct list_head * l,
+                             struct contact *   c)
+{
+        struct list_head * p;
+
+        assert(l != NULL);
+        assert(c != NULL);
+
+        list_for_each(p, l) {
+                struct contact * e = list_entry(p, struct contact, next);
+                if (IS_CLOSER(e->id, c->id))
+                        continue;
+                break;
+        }
+
+        list_add_tail(&c->next, p);
+}
+
+static ssize_t dht_kv_contact_list(const uint8_t *    key,
+                                   struct list_head * l,
+                                   size_t             max)
+{
+        struct list_head * p;
+        struct bucket *    b;
+        struct timespec    t;
+        size_t             i;
+        size_t             len = 0;
+
+        assert(l   != NULL);
+        assert(key != NULL);
+        assert(list_is_empty(l));
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &t);
+
+        max = MIN(max, dht.k);
+
+        pthread_rwlock_rdlock(&dht.db.lock);
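+
+        /*
+         * Walk the prefix trie to the leaf bucket covering this key:
+         * a full leaf (or the root) is harvested directly; otherwise
+         * all children of its parent are scanned to fill the list.
+         */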
+ +        b = __dht_kv_get_bucket(key); +        if (b == NULL) { +                log_err(KEY_FMT " Failed to get bucket.", KEY_VAL(key)); +                goto fail_bucket; +        } + +        b->t_refr = t.tv_sec + dht.t_refresh; + +        if (b->contacts.len == dht.k || b->parent == NULL) { +                list_for_each(p, &b->contacts.list) { +                        struct contact * c; +                        struct contact * d; +                        c = list_entry(p, struct contact, next); +                        if (c->addr == dht.addr) +                                continue; +                        d = contact_create(c->id, c->addr); +                        if (d == NULL) +                                continue; +                        contact_list_add(l, d); +                        if (++len == max) +                                break; +                } +        } else { +                struct bucket * d = b->parent; +                for (i = 0; i < (1L << DHT_BETA) && len < dht.k; ++i) { +                        list_for_each(p, &d->children[i]->contacts.list) { +                                struct contact * c; +                                struct contact * d; +                                c = list_entry(p, struct contact, next); +                                if (c->addr == dht.addr) +                                        continue; +                                d = contact_create(c->id, c->addr); +                                if (d == NULL) +                                        continue; +                                contact_list_add(l, d); +                                if (++len == max) +                                        break; +                        } +                } +        } + +        pthread_rwlock_unlock(&dht.db.lock); + +        return len; + fail_bucket: +        pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static void contact_list_destroy(struct list_head * l) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(l != NULL); + +        list_for_each_safe(p, h, l) { +                struct contact * c = list_entry(p, struct contact, next); +                list_del(&c->next); +                contact_destroy(c); +        } +} + +static ssize_t dht_kv_get_contacts(const uint8_t *       key, +                                   dht_contact_msg_t *** msgs) +{ +        struct list_head   cl; +        struct list_head * p; +        struct list_head * h; +        size_t             len; +        size_t             i = 0; + +        assert(key != NULL); +        assert(msgs != NULL); + +        list_head_init(&cl); + +        len = dht_kv_contact_list(key, &cl, dht.k); +        if (len == 0) { +                *msgs = NULL; +                return 0; +        } + +        *msgs = malloc(len * sizeof(**msgs)); +        if (*msgs == NULL) +                goto fail_msgs; + +        list_for_each_safe(p, h, &cl) { +                struct contact * c; +                (*msgs)[i] = malloc(sizeof(***msgs)); +                if ((*msgs)[i] == NULL) +                        goto fail_contact; + +                dht_contact_msg__init((*msgs)[i]); +                c = list_entry(p, struct contact, next); +                list_del(&c->next); +                (*msgs)[i]->id.data = c->id; +                (*msgs)[i]->id.len  = dht.id.len; +                (*msgs)[i++]->addr  = c->addr; +                free(c); +        } + +        return i; + fail_contact: +        while (i-- > 
0) +                dht_contact_msg__free_unpacked((*msgs)[i], NULL); +        free(*msgs); +        *msgs = NULL; + fail_msgs: +        contact_list_destroy(&cl); +        return -ENOMEM; +} + +/* Build a refresh list. */ +static void __dht_kv_bucket_refresh_list(struct bucket *    b, +                                         time_t             t, +                                         struct list_head * r) +{ +        struct contact * c; +        struct contact * d; + +        assert(b != NULL); + +        if (t < b->t_refr) +                return; + +        if (*b->children != NULL) { +                size_t i; +                for (i = 0; i < (1L << DHT_BETA); ++i) +                        __dht_kv_bucket_refresh_list(b->children[i], t, r); +        } + +        if (b->contacts.len == 0) +                return; + +        c = list_first_entry(&b->contacts.list, struct contact, next); +        if (t > c->t_seen + dht.t_refresh) { +                d = contact_create(c->id, c->addr); +                if (d != NULL) +                        list_add(&d->next, r); +        } +} + +static struct bucket * bucket_create(void) +{ +        struct bucket * b; +        struct timespec t; +        size_t          i; + +        b = malloc(sizeof(*b)); +        if (b == NULL) +                return NULL; + +        list_head_init(&b->contacts.list); +        b->contacts.len = 0; + +        list_head_init(&b->alts.list); +        b->alts.len = 0; + +        clock_gettime(CLOCK_REALTIME_COARSE, &t); +        b->t_refr = t.tv_sec + dht.t_refresh; + +        for (i = 0; i < (1L << DHT_BETA); ++i) +                b->children[i]  = NULL; + +        b->parent = NULL; +        b->depth = 0; +        b->mask  = 0; + +        return b; +} + +static void bucket_destroy(struct bucket * b) +{ +        struct list_head * p; +        struct list_head * h; +        size_t             i; + +        assert(b != NULL); + +        for (i = 0; i < (1L << DHT_BETA); ++i) +                if (b->children[i] != NULL) +                        bucket_destroy(b->children[i]); + +        list_for_each_safe(p, h, &b->contacts.list) { +                struct contact * c = list_entry(p, struct contact, next); +                list_del(&c->next); +                contact_destroy(c); +                --b->contacts.len; +        } + +        list_for_each_safe(p, h, &b->alts.list) { +                struct contact * c = list_entry(p, struct contact, next); +                list_del(&c->next); +                contact_destroy(c); +                --b->alts.len; +        } + +        free(b); +} + +static bool bucket_has_id(struct bucket * b, +                          const uint8_t * id) +{ +        uint8_t mask; +        uint8_t byte; + +        if (b->depth == 0) +                return true; + +        byte = id[(b->depth * DHT_BETA) / CHAR_BIT]; + +        mask = ((1L << DHT_BETA) - 1) & 0xFF; + +        byte >>= (CHAR_BIT - DHT_BETA) - +                (((b->depth - 1) * DHT_BETA) & (CHAR_BIT - 1)); + +        return ((byte & mask) == b->mask); +} + +static int move_contacts(struct bucket * b, +                         struct bucket * c) +{ +        struct list_head * p; +        struct list_head * h; +        struct contact *   d; + +        assert(b != NULL); +        assert(c != NULL); + +        list_for_each_safe(p, h, &b->contacts.list) { +                d = list_entry(p, struct contact, next); +                if (bucket_has_id(c, d->id)) { +                        list_del(&d->next); +                        
--b->contacts.len; +                        list_add_tail(&d->next, &c->contacts.list); +                        ++c->contacts.len; +                } +        } + +        return 0; +} + +static int split_bucket(struct bucket * b) +{ +        uint8_t mask = 0; +        size_t i; +        size_t b_len; + +        assert(b); +        assert(b->alts.len == 0); +        assert(b->contacts.len != 0); +        assert(b->children[0] == NULL); + +        b_len = b->contacts.len; + +        for (i = 0; i < (1L << DHT_BETA); ++i) { +                b->children[i] = bucket_create(); +                if (b->children[i] == NULL) +                        goto fail_child; + +                b->children[i]->depth  = b->depth + 1; +                b->children[i]->mask   = mask; +                b->children[i]->parent = b; + +                move_contacts(b, b->children[i]); + +                mask++; +        } + +        for (i = 0; i < (1L << DHT_BETA); ++i) +                if (b->children[i]->contacts.len == b_len) +                        split_bucket(b->children[i]); + +        return 0; + fail_child: +        while (i-- > 0) +                bucket_destroy(b->children[i]); +        return -1; +} + +static int dht_kv_update_contacts(const uint8_t * id, +                                  uint64_t        addr) +{ +        struct list_head * p; +        struct list_head * h; +        struct bucket *    b; +        struct contact *   c; + +        assert(id != NULL); +        assert(addr != INVALID_ADDR); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        b = __dht_kv_get_bucket(id); +        if (b == NULL) { +                log_err(PEER_FMT " Failed to get bucket.", PEER_VAL(id, addr)); +                        goto fail_update; +        } + +        c = contact_create(id, addr); +        if (c == NULL) { +                log_err(PEER_FMT " Failed to create contact.", +                        PEER_VAL(id, addr)); +                goto fail_update; +        } + +        list_for_each_safe(p, h, &b->contacts.list) { +                struct contact * d = list_entry(p, struct contact, next); +                if (d->addr == addr) { +                        list_del(&d->next); +                        contact_destroy(d); +                        --b->contacts.len; +                } +        } + +        if (b->contacts.len == dht.k) { +                if (bucket_has_id(b, dht.id.data)) { +                        list_add_tail(&c->next, &b->contacts.list); +                        ++b->contacts.len; +                        if (split_bucket(b)) { +                                list_del(&c->next); +                                contact_destroy(c); +                                --b->contacts.len; +                        } +                } else if (b->alts.len == dht.k) { +                        struct contact * d; +                        d = list_first_entry(&b->alts.list, +                                struct contact, next); +                        list_del(&d->next); +                        contact_destroy(d); +                        list_add_tail(&c->next, &b->alts.list); +                        ++b->alts.len; +                } else { +                        list_add_tail(&c->next, &b->alts.list); +                        ++b->alts.len; +                } +        } else { +                list_add_tail(&c->next, &b->contacts.list); +                ++b->contacts.len; +        } + +        pthread_rwlock_unlock(&dht.db.lock); + +        return 0; + fail_update: +        
pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static time_t gcd(time_t a, +                  time_t b) +{ +        if (a == 0) +                return b; + +        return gcd(b % a, a); +} + +static dht_contact_msg_t * dht_kv_src_contact_msg(void) +{ +        dht_contact_msg_t * src; + +        src = malloc(sizeof(*src)); +        if (src == NULL) +                goto fail_malloc; + +        dht_contact_msg__init(src); + +        src->id.data = dht_dup_key(dht.id.data); +        if (src->id.data == NULL) +                goto fail_id; + +        src->id.len  = dht.id.len; +        src->addr    = dht.addr; + +        return src; + fail_id: +        dht_contact_msg__free_unpacked(src, NULL); + fail_malloc: +        return NULL; +} + +static dht_msg_t * dht_kv_find_req_msg(const uint8_t * key, +                                       enum dht_code   code) +{ +        dht_msg_t * msg; + +        assert(key != NULL); + +        msg = malloc(sizeof(*msg)); +        if (msg == NULL) +                goto fail_malloc; + +        dht_msg__init(msg); +        msg->code = code; + +        msg->src = dht_kv_src_contact_msg(); +        if (msg->src == NULL) +                goto fail_msg; + +        msg->find = malloc(sizeof(*msg->find)); +        if (msg->find == NULL) +                goto fail_msg; + +        dht_find_req_msg__init(msg->find); + +        msg->find->key.data = dht_dup_key(key); +        if (msg->find->key.data == NULL) +                goto fail_msg; + +        msg->find->key.len = dht.id.len; +        msg->find->cookie  = DHT_INVALID; + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_malloc: +        return NULL; +} + +static dht_msg_t * dht_kv_find_node_req_msg(const uint8_t * key) +{ +        return dht_kv_find_req_msg(key, DHT_FIND_NODE_REQ); +} + +static dht_msg_t * dht_kv_find_value_req_msg(const uint8_t * key) +{ +        return dht_kv_find_req_msg(key, DHT_FIND_VALUE_REQ); +} + +static dht_msg_t * dht_kv_find_node_rsp_msg(uint8_t *             key, +                                            uint64_t              cookie, +                                            dht_contact_msg_t *** contacts, +                                            size_t                len) +{ +        dht_msg_t * msg; + +        msg = malloc(sizeof(*msg)); +        if (msg == NULL) +                goto fail_malloc; + +        dht_msg__init(msg); +        msg->code = DHT_FIND_NODE_RSP; + +        msg->src = dht_kv_src_contact_msg(); +        if (msg->src == NULL) +                goto fail_msg; + +        msg->node = malloc(sizeof(*msg->node)); +        if (msg->node == NULL) +                goto fail_msg; + +        dht_find_node_rsp_msg__init(msg->node); + +        msg->node->key.data = dht_dup_key(key); +        if (msg->node->key.data == NULL) +                goto fail_msg; + +        msg->node->cookie     = cookie; +        msg->node->key.len    = dht.id.len; +        msg->node->n_contacts = len; +        if (len != 0) { /* Steal the ptr */ +                msg->node->contacts = *contacts; +                *contacts = NULL; +        } + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_malloc: +        return NULL; +} + +static dht_msg_t * dht_kv_find_value_rsp_msg(uint8_t *             key, +                                             uint64_t              cookie, +                                             dht_contact_msg_t *** contacts, +                                             size_t       
         n_contacts, +                                             buffer_t **           vals, +                                             size_t                n_vals) +{ +        dht_msg_t * msg; + +        msg = dht_kv_find_node_rsp_msg(key, cookie, contacts, n_contacts); +        if (msg == NULL) +                goto fail_node_rsp; + +        msg->code = DHT_FIND_VALUE_RSP; + +        msg->val = malloc(sizeof(*msg->val)); +        if (msg->val == NULL) +                goto fail_msg; + +        dht_find_value_rsp_msg__init(msg->val); + +        msg->val->n_values = n_vals; +        if (n_vals != 0)  /* Steal the ptr */ +                msg->val->values = (binary_data_t *) *vals; + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_node_rsp: +        return NULL; +} + +static dht_msg_t * dht_kv_store_msg(const uint8_t * key, +                                    const buffer_t  val, +                                    time_t          exp) +{ +        dht_msg_t * msg; + +        assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        msg = malloc(sizeof(*msg)); +        if (msg == NULL) +                goto fail_malloc; + +        dht_msg__init(msg); + +        msg->code = DHT_STORE; + +        msg->src = dht_kv_src_contact_msg(); +        if (msg->src == NULL) +                goto fail_msg; + +        msg->store = malloc(sizeof(*msg->store)); +        if (msg->store == NULL) +                goto fail_msg; + +        dht_store_msg__init(msg->store); + +        msg->store->key.data = dht_dup_key(key); +        if (msg->store->key.data == NULL) +                goto fail_msg; + +        msg->store->key.len = dht.id.len; +        msg->store->val.data = malloc(val.len); +        if (msg->store->val.data == NULL) +                goto fail_msg; + +        memcpy(msg->store->val.data, val.data, val.len); + +        msg->store->val.len = val.len; +        msg->store->exp = exp; + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_malloc: +        return NULL; +} + +static ssize_t dht_kv_retrieve(const uint8_t * key, +                               buffer_t **     vals) +{ +        struct dht_entry * e; +        struct list_head * p; +        size_t             n; +        size_t             i; + +        assert(key  != NULL); + +        pthread_rwlock_rdlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) +                goto no_vals; + +        n = MIN(DHT_MAX_VALS, e->vals.len + e->lvals.len); +        if (n == 0) +                goto no_vals; + +        *vals = malloc(n * sizeof(**vals)); +        if (*vals == NULL) +                goto fail_vals; + +        memset(*vals, 0, n * sizeof(**vals)); + +        i = 0; + +        list_for_each(p, &e->vals.list) { +                struct val_entry * v; +                if (i == n) +                        break; /* We have enough values */ +                v = list_entry(p, struct val_entry, next); +                (*vals)[i].data = malloc(v->val.len); +                if ((*vals)[i].data == NULL) +                        goto fail_val_data; + +                (*vals)[i].len = v->val.len; +                memcpy((*vals)[i++].data, v->val.data, v->val.len); +        } + +        list_for_each(p, &e->lvals.list) { +                struct val_entry * v; +                if (i == n) +                        break; /* We have enough values */ +                v = list_entry(p, struct val_entry, next); +  
              (*vals)[i].data = malloc(v->val.len); +                if ((*vals)[i].data == NULL) +                        goto fail_val_data; + +                (*vals)[i].len = v->val.len; +                memcpy((*vals)[i++].data, v->val.data, v->val.len); +        } + +        pthread_rwlock_unlock(&dht.db.lock); + +        return (ssize_t) i; + + fail_val_data: +        pthread_rwlock_unlock(&dht.db.lock); +        freebufs(*vals, i); +        *vals = NULL; +        return -ENOMEM; + fail_vals: +        pthread_rwlock_unlock(&dht.db.lock); +        return -ENOMEM; + no_vals: +        pthread_rwlock_unlock(&dht.db.lock); +        *vals = NULL; +        return 0; +} + +static void __cleanup_dht_msg(void * msg) +{ +        dht_msg__free_unpacked((dht_msg_t *) msg, NULL); +} + +#ifdef DEBUG_PROTO_DHT +static void dht_kv_debug_msg(dht_msg_t * msg) +{ +        struct tm *   tm; +        char          tmstr[RIB_TM_STRLEN]; +        time_t        stamp; +        size_t        i; + +        if (msg == NULL) +                return; + +        pthread_cleanup_push(__cleanup_dht_msg, msg); + +        switch (msg->code) { +        case DHT_STORE: +                log_proto("  key: " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->store->key.data), +                          msg->store->key.len); +                log_proto("  val: " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->store->val.data), +                          msg->store->val.len); +                stamp = msg->store->exp; +                tm = gmtime(&stamp); +                strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); +                log_proto("  exp: %s.", tmstr); +                break; +        case DHT_FIND_NODE_REQ: +                /* FALLTHRU */ +        case DHT_FIND_VALUE_REQ: +                log_proto("  cookie: " HASH_FMT64, +                          HASH_VAL64(&msg->find->cookie)); +                log_proto("  key:    " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->find->key.data), +                          msg->find->key.len); +                break; +        case DHT_FIND_VALUE_RSP: +                log_proto("  cookie: " HASH_FMT64, +                          HASH_VAL64(&msg->node->cookie)); +                log_proto("  key:    " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->node->key.data), +                          msg->node->key.len); +                log_proto("  values: [%zd]", msg->val->n_values); +                for (i = 0; i < msg->val->n_values; i++) +                        log_proto("    " HASH_FMT64 " [%zu bytes]", +                                  HASH_VAL64(msg->val->values[i].data), +                                  msg->val->values[i].len); +                log_proto("  contacts: [%zd]", msg->node->n_contacts); +                for (i = 0; i < msg->node->n_contacts; i++) { +                        dht_contact_msg_t * c = msg->node->contacts[i]; +                        log_proto("    " PEER_FMT, +                                  PEER_VAL(c->id.data, c->addr)); +                } +                break; +        case DHT_FIND_NODE_RSP: +                log_proto("  cookie: " HASH_FMT64, +                        HASH_VAL64(&msg->node->cookie)); +                log_proto("  key:    " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->node->key.data), msg->node->key.len); +                log_proto("  contacts: [%zd]", msg->node->n_contacts); +                for (i = 0; i < 
msg->node->n_contacts; i++) {
+                        dht_contact_msg_t * c = msg->node->contacts[i];
+                        log_proto("    " PEER_FMT,
+                                  PEER_VAL(c->id.data, c->addr));
+                }
+
+                break;
+        default:
+                break;
+        }
+
+        pthread_cleanup_pop(false);
+}
+
+static void dht_kv_debug_msg_snd(dht_msg_t * msg,
+                                 uint8_t *   id,
+                                 uint64_t    addr)
+{
+        if (msg == NULL)
+                return;
+
+        log_proto(TX_HDR_FMT ".", TX_HDR_VAL(msg, id, addr));
+
+        dht_kv_debug_msg(msg);
+}
+
+static void dht_kv_debug_msg_rcv(dht_msg_t * msg)
+{
+        if (msg == NULL)
+                return;
+
+        log_proto(RX_HDR_FMT ".", RX_HDR_VAL(msg));
+
+        dht_kv_debug_msg(msg);
+}
+#endif
+
+#ifndef __DHT_TEST__
+static int dht_send_msg(dht_msg_t * msg,
+                        uint64_t    addr)
+{
+        size_t               len;
+        struct shm_du_buff * sdb;
+
+        if (msg == NULL)
+                return 0;
+
+        assert(addr != INVALID_ADDR && addr != dht.addr);
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                log_warn("%s failed to pack.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        if (ipcp_sdb_reserve(&sdb, len)) {
+                log_warn("%s failed to get sdb.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        dht_msg__pack(msg, shm_du_buff_head(sdb));
+
+        if (dt_write_packet(addr, QOS_CUBE_BE, dht.eid, sdb) < 0) {
+                log_warn("%s write failed.", DHT_CODE(msg));
+                goto fail_send;
+        }
+
+        return 0;
+ fail_send:
+        ipcp_sdb_release(sdb);
+ fail_msg:
+        return -1;
+}
+#else /* function for testing */
+static int dht_send_msg(dht_msg_t * msg,
+                        uint64_t    addr)
+{
+        buffer_t buf;
+
+        assert(msg != NULL);
+        assert(addr != INVALID_ADDR && addr != dht.addr);
+
+        buf.len = dht_msg__get_packed_size(msg);
+        if (buf.len == 0) {
+                log_warn("%s failed to pack.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        buf.data = malloc(buf.len);
+        if (buf.data == NULL) {
+                log_warn("%s failed to malloc buf.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        dht_msg__pack(msg, buf.data);
+
+        if (sink_send_msg(&buf, addr) < 0) {
+                log_warn("%s write failed.", DHT_CODE(msg));
+                goto fail_send;
+        }
+
+        return 0;
+ fail_send:
+        freebuf(buf);
+ fail_msg:
+        return -1;
+}
+#endif /* __DHT_TEST__ */
+
+static void __cleanup_peer_list(void * pl)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        assert(pl != NULL);
+
+        list_for_each_safe(p, h, (struct list_head *) pl) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                list_del(&e->next);
+                free(e->id);
+                free(e);
+        }
+}
+
+static int dht_kv_send_msgs(dht_msg_t *        msg,
+                            struct list_head * pl)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        pthread_cleanup_push(__cleanup_dht_msg, msg);
+        pthread_cleanup_push(__cleanup_peer_list, pl);
+
+        list_for_each_safe(p, h, pl) {
+                struct peer_entry * e = list_entry(p, struct
peer_entry, next); +                if (IS_REQUEST(msg->code)) { +                        msg->find->cookie = e->cookie; +                        assert(msg->find->cookie != DHT_INVALID); +                } +                if (dht_send_msg(msg, e->addr) < 0) +                        continue; + +#ifdef DEBUG_PROTO_DHT +                dht_kv_debug_msg_snd(msg, e->id, e->addr); +#endif +                list_del(&e->next); +                free(e->id); +                free(e); +        } + +        pthread_cleanup_pop(false); +        pthread_cleanup_pop(false); + +        return list_is_empty(pl) ? 0 : -1; +} + +static int dht_kv_get_peer_list_for_msg(dht_msg_t *        msg, +                                        struct list_head * pl) +{ +        struct list_head   cl;  /* contact list       */ +        uint8_t *          key; /* key in the request */ +        size_t             max; + +        assert(msg != NULL); + +        assert(list_is_empty(pl)); + +        max = msg->code == DHT_STORE ? dht.k : dht.alpha; + +        switch (msg->code) { +        case DHT_FIND_NODE_REQ: +                /* FALLTHRU */ +        case DHT_FIND_VALUE_REQ: +                key = msg->find->key.data; +                break; +        case DHT_STORE: +                key = msg->store->key.data; +                break; +        default: +                log_err("Invalid DHT msg code (%d).", msg->code); +                return -1; +        } + +        list_head_init(&cl); + +        if (dht_kv_contact_list(key, &cl, max) < 0) { +                log_err(KEY_FMT " Failed to get contact list.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        if (list_is_empty(&cl)) { +                log_warn(KEY_FMT " No available contacts.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        if (dht_kv_create_peer_list(&cl, pl, msg->code) < 0) { +                log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key)); +                goto fail_peers; +        } + +        contact_list_destroy(&cl); +        return 0; + fail_peers: +        contact_list_destroy(&cl); + fail_contacts: +        return -1; +} + +static int dht_kv_store_remote(const uint8_t * key, +                               const buffer_t  val, +                               time_t          exp) +{ +        dht_msg_t *      msg; +        struct timespec  now; +        struct list_head pl; + +        assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        msg = dht_kv_store_msg(key, val, exp); +        if (msg == NULL) { +                log_err(KV_FMT " Failed to create %s.", +                        KV_VAL(key, val), dht_code_str[DHT_STORE]); +                goto fail_msg; +        } + +        list_head_init(&pl); + +        if (dht_kv_get_peer_list_for_msg(msg, &pl) < 0) { +                log_dbg(KV_FMT " Failed to get peer list.", KV_VAL(key, val)); +                goto fail_peer_list; +        } + +        if (dht_kv_send_msgs(msg, &pl) < 0) { +                log_warn(KV_FMT " Failed to send any %s msg.", +                         KV_VAL(key, val), DHT_CODE(msg)); +                goto fail_msgs; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        return 0; + fail_msgs: +        peer_list_destroy(&pl); + fail_peer_list: +        dht_msg__free_unpacked(msg, NULL); + fail_msg: +        return -1; +} + +/* recursive lookup, start with pl NULL */ +static int dht_kv_query_contacts(const 
uint8_t *    key, +                                 struct list_head * pl) +{ +        struct list_head p; + +        dht_msg_t * msg; + +        assert(key != NULL); + +        msg = dht_kv_find_node_req_msg(key); +        if (msg == NULL) { +                log_err(KEY_FMT " Failed to create %s msg.", +                        KEY_VAL(key), dht_code_str[DHT_FIND_NODE_REQ]); +                goto fail_msg; +        } + +        if (pl == NULL) { +                list_head_init(&p); +                pl = &p; +        } + +        if (list_is_empty(pl) && dht_kv_get_peer_list_for_msg(msg, pl) < 0) { +                log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key)); +                goto fail_peer_list; +        } + +        if (dht_kv_update_req(key, pl) < 0) { +                log_warn(KEY_FMT " Failed to update req.", KEY_VAL(key)); +                goto fail_update; +        } + +        if (dht_kv_send_msgs(msg, pl)) { +                log_warn(KEY_FMT " Failed to send any %s msg.", +                         KEY_VAL(key), DHT_CODE(msg)); +                goto fail_update; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        return 0; + fail_update: +        peer_list_destroy(pl); + fail_peer_list: +        dht_msg__free_unpacked(msg, NULL); + fail_msg: +        return -1; +} + +/* recursive lookup, start with pl NULL */ +static ssize_t dht_kv_query_remote(const uint8_t *    key, +                                   buffer_t **        vals, +                                   struct list_head * pl) +{ +        struct list_head p; +        dht_msg_t *      msg; + +        assert(key != NULL); + +        msg = dht_kv_find_value_req_msg(key); +        if (msg == NULL) { +                log_err(KEY_FMT " Failed to create value req.", KEY_VAL(key)); +                goto fail_msg; +        } + +        if (pl == NULL) { +                list_head_init(&p); +                pl = &p; +        } + +        if (list_is_empty(pl) && dht_kv_get_peer_list_for_msg(msg, pl) < 0) { +                log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key)); +                goto fail_peer_list; +        } + +        if (dht_kv_update_req(key, pl) < 0) { +                log_err(KEY_FMT " Failed to update request.", KEY_VAL(key)); +                goto fail_update; +        } + +        if (dht_kv_send_msgs(msg, pl)) { +                log_warn(KEY_FMT " Failed to send %s msg.", +                         KEY_VAL(key), DHT_CODE(msg)); +                goto fail_update; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        if (vals == NULL) /* recursive lookup, already waiting */ +                return 0; + +        return dht_kv_wait_req(key, vals); + fail_update: +        peer_list_destroy(pl); + fail_peer_list: +        dht_msg__free_unpacked(msg, NULL); + fail_msg: +        return -1; +} + +static void __add_dht_kv_entry(struct dht_entry * e) +{ +        struct list_head * p; + +        assert(e != NULL); + +        list_for_each(p, &dht.db.kv.list) { +                struct dht_entry * d = list_entry(p, struct dht_entry, next); +                if (IS_CLOSER(d->key, e->key)) +                        continue; +                break; +        } + +        list_add_tail(&e->next, p); +        ++dht.db.kv.len; +} + +/* incoming store message */ +static int dht_kv_store(const uint8_t * key, +                        const buffer_t  val, +                        time_t          exp) +{ +        struct dht_entry * e; +        bool               new = false; + +       
 assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) { +                log_dbg(KV_FMT " Adding entry (store).", KV_VAL(key, val)); +                e = dht_entry_create(key); +                if (e == NULL) +                        goto fail; + +                new = true; + +                __add_dht_kv_entry(e); +        } + +        if (dht_entry_update_val(e, val, exp) < 0) +                goto fail_add; + +        pthread_rwlock_unlock(&dht.db.lock); + +        return 0; + fail_add: +        if (new) { +                list_del(&e->next); +                dht_entry_destroy(e); +                --dht.db.kv.len; +        } + fail: +        pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static int dht_kv_publish(const uint8_t * key, +                          const buffer_t  val) +{ +        struct dht_entry * e; +        struct timespec    now; +        bool               new = false; + +        assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) { +                log_dbg(KV_FMT " Adding entry (publish).", KV_VAL(key, val)); +                e = dht_entry_create(key); +                if (e == NULL) +                        goto fail; + +                __add_dht_kv_entry(e); +                new = true; +        } + +        if (dht_entry_update_lval(e, val) < 0) +                goto fail_add; + +        pthread_rwlock_unlock(&dht.db.lock); + +        dht_kv_store_remote(key, val, now.tv_sec + dht.t_expire); + +        return 0; + fail_add: +        if (new) { +                list_del(&e->next); +                dht_entry_destroy(e); +                --dht.db.kv.len; +        } + fail: +        pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static int dht_kv_unpublish(const uint8_t * key, +                            const buffer_t  val) +{ +        struct dht_entry * e; +        int                rc; + +        assert(key != NULL); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) +                goto no_entry; + +        rc = dht_entry_remove_lval(e, val); + +        pthread_rwlock_unlock(&dht.db.lock); + +        return rc; + no_entry: +        pthread_rwlock_unlock(&dht.db.lock); +        return -ENOENT; + +} + +/* message validation */ +static int dht_kv_validate_store_msg(const dht_store_msg_t * store) +{ +        if (store == NULL) { +                log_warn("Store in msg is NULL."); +                return -EINVAL; +        } + +        if (store->key.data == NULL || store->key.len == 0) { +                log_warn("Invalid key in DHT store msg."); +                return -EINVAL; +        } + +        if (store->key.len != dht.id.len) { +                log_warn("Invalid key length in DHT store msg."); +                return -EINVAL; +        } + +        if (store->val.data == NULL || store->val.len == 0) { +                log_warn("Invalid value in DHT store msg."); +                return -EINVAL; +        } + +        return 0; +} + +static int validate_find_req_msg(const dht_find_req_msg_t * req) +{ +        if (req == NULL) { +                log_warn("Request in msg is NULL."); +                return -EINVAL; +        } + + 
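+        /* FIND_NODE and FIND_VALUE requests share this check:
+         * a key must be present and span the full hash length. */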
       if (req->key.data == NULL || req->key.len == 0) {
+                log_warn("Find request without key.");
+                return -EINVAL;
+        }
+
+        if (req->key.len != dht.id.len) {
+                log_warn("Invalid key length in request msg.");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int validate_node_rsp_msg(const dht_find_node_rsp_msg_t * rsp)
+{
+        if (rsp == NULL) {
+                log_warn("Node rsp in msg is NULL.");
+                return -EINVAL;
+        }
+
+        if (rsp->key.data == NULL) {
+                log_warn("Invalid key in DHT response msg.");
+                return -EINVAL;
+        }
+
+        if (rsp->key.len != dht.id.len) {
+                log_warn("Invalid key length in DHT response msg.");
+                return -EINVAL;
+        }
+
+        if (!dht_kv_has_req(rsp->key.data, rsp->cookie)) {
+                log_warn(KEY_FMT " No request " CK_FMT  ".",
+                         KEY_VAL(rsp->key.data), CK_VAL(rsp->cookie));
+
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int validate_value_rsp_msg(const dht_find_value_rsp_msg_t * rsp)
+{
+        if (rsp == NULL) {
+                log_warn("Invalid DHT find value response msg.");
+                return -EINVAL;
+        }
+
+        if (rsp->values == NULL && rsp->n_values > 0) {
+                log_warn("DHT response claims values but sets none.");
+                return -EINVAL;
+        }
+
+        if (rsp->n_values == 0 && rsp->values != NULL)
+                log_dbg("DHT response did not set values NULL.");
+
+        return 0;
+}
+
+static int dht_kv_validate_msg(dht_msg_t * msg)
+{
+        assert(msg != NULL);
+
+        if (msg->src->id.len != dht.id.len) {
+                log_warn("%s Invalid source contact ID.", DHT_CODE(msg));
+                return -EINVAL;
+        }
+
+        if (msg->src->addr == INVALID_ADDR) {
+                log_warn("%s Invalid source address.", DHT_CODE(msg));
+                return -EINVAL;
+        }
+
+        switch (msg->code) {
+        case DHT_FIND_VALUE_REQ:
+                /* FALLTHRU */
+        case DHT_FIND_NODE_REQ:
+                if (validate_find_req_msg(msg->find) < 0)
+                        return -EINVAL;
+                break;
+        case DHT_FIND_VALUE_RSP:
+                if (validate_value_rsp_msg(msg->val) < 0)
+                        return -EINVAL;
+                /* FALLTHRU */
+        case DHT_FIND_NODE_RSP:
+                if (validate_node_rsp_msg(msg->node) < 0)
+                        return -EINVAL;
+                break;
+        case DHT_STORE:
+                if (dht_kv_validate_store_msg(msg->store) < 0)
+                        return -EINVAL;
+                break;
+        default:
+                log_warn("Invalid DHT msg code (%d).", msg->code);
+                return -ENOENT;
+        }
+
+        return 0;
+}
+
+static void do_dht_kv_store(const dht_store_msg_t * store)
+{
+        struct tm * tm;
+        char        tmstr[RIB_TM_STRLEN];
+        buffer_t    val;
+        uint8_t *   key;
+        time_t      exp;
+
+        assert(store != NULL);
+
+        val.data = store->val.data;
+        val.len  = store->val.len;
+        key      = store->key.data;
+        exp      = store->exp;
+
+        if (dht_kv_store(store->key.data, val, store->exp) < 0) {
+                log_err(KV_FMT " Failed to store.", KV_VAL(key, val));
+                return;
+        }
+
+        
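+        /* Render the absolute expiry time for the debug log below. */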
tm = gmtime(&exp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); +        log_dbg(KV_FMT " Stored value until %s.", KV_VAL(key, val), tmstr); +} + +static dht_msg_t * do_dht_kv_find_node_req(const dht_find_req_msg_t * req) +{ +        dht_contact_msg_t ** contacts; +        dht_msg_t *          rsp; +        uint8_t *            key; +        uint64_t             cookie; +        ssize_t              len; + +        assert(req  != NULL); + +        key    = req->key.data; +        cookie = req->cookie; + +        len = dht_kv_get_contacts(key, &contacts); +        if (len < 0) { +                log_warn(KEY_FMT " Failed to get contacts.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        rsp = dht_kv_find_node_rsp_msg(key, cookie, &contacts, len); +        if (rsp == NULL) { +                log_err(KEY_FMT " Failed to create %s.", KEY_VAL(key), +                        dht_code_str[DHT_FIND_NODE_RSP]); +                goto fail_msg; +        } + +        assert(rsp->code == DHT_FIND_NODE_RSP); + +        log_info(KEY_FMT " Responding with %zd contacts", KEY_VAL(key), len); + +        return rsp; + fail_msg: +        while (len-- > 0) +                dht_contact_msg__free_unpacked(contacts[len], NULL); +        free(contacts); + fail_contacts: +        return NULL; +} + +static void dht_kv_process_node_rsp(dht_contact_msg_t ** contacts, +                                    size_t               len, +                                    struct list_head *   pl, +                                    enum dht_code        code) +{ +        struct timespec now; +        size_t          i; + +        assert(contacts != NULL); +        assert(len > 0); +        assert(pl != NULL); +        assert(list_is_empty(pl)); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        for (i = 0; i < len; i++) { +                dht_contact_msg_t * c = contacts[i]; +                struct peer_entry * e; +                if (c->addr == dht.addr) +                        continue; + +                if (dht_kv_update_contacts(c->id.data, c->addr) < 0) +                        log_warn(PEER_FMT " Failed to update contacts.", +                                 PEER_VAL(c->id.data, c->addr)); + +                e = malloc(sizeof(*e)); +                if (e == NULL) { +                        log_err(PEER_FMT " Failed to malloc entry.", +                                PEER_VAL(c->id.data, c->addr)); +                        continue; +                } + +                e->id = dht_dup_key(c->id.data); +                if (e->id == NULL) { +                        log_warn(PEER_FMT " Failed to duplicate id.", +                                 PEER_VAL(c->id.data, c->addr)); +                        free(e); +                        continue; +                } + +                e->cookie = generate_cookie(); +                e->code   = code; +                e->addr   = c->addr; +                e->t_sent = now.tv_sec; + +                list_add_tail(&e->next, pl); +        } +} + +static dht_msg_t * do_dht_kv_find_value_req(const dht_find_req_msg_t * req) +{ +        dht_contact_msg_t ** contacts; +        ssize_t              n_contacts; +        buffer_t *           vals; +        ssize_t              n_vals; +        dht_msg_t *          rsp; +        uint8_t *            key; +        uint64_t             cookie; + +        assert(req != NULL); + +        key    = req->key.data; +        cookie = req->cookie; + +        n_contacts = dht_kv_get_contacts(key, 
&contacts); +        if (n_contacts < 0) { +                log_warn(KEY_FMT " Failed to get contacts.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        assert(n_contacts > 0 || contacts == NULL); + +        n_vals = dht_kv_retrieve(key, &vals); +        if (n_vals < 0) { +                log_dbg(KEY_FMT " Failed to get values.", KEY_VAL(key)); +                goto fail_vals; +        } + +        if (n_vals == 0) +                log_dbg(KEY_FMT " No values found.", KEY_VAL(key)); + +        rsp = dht_kv_find_value_rsp_msg(key, cookie, &contacts, n_contacts, +                                        &vals, n_vals); +        if (rsp == NULL) { +                log_err(KEY_FMT " Failed to create %s msg.", +                        KEY_VAL(key), dht_code_str[DHT_FIND_VALUE_RSP]); +                goto fail_msg; +        } + +        log_info(KEY_FMT " Responding with %zd contacts, %zd values.", +                 KEY_VAL(req->key.data), n_contacts, n_vals); + +        return rsp; + + fail_msg: +        freebufs(vals, n_vals); + fail_vals: +        while (n_contacts-- > 0) +                dht_contact_msg__free_unpacked(contacts[n_contacts], NULL); +        free(contacts); + fail_contacts: +        return NULL; +} + +static void do_dht_kv_find_node_rsp(const dht_find_node_rsp_msg_t * rsp) +{ +        struct list_head pl; + +        assert(rsp != NULL); + +        list_head_init(&pl); + +        dht_kv_process_node_rsp(rsp->contacts, rsp->n_contacts, &pl, +                                DHT_FIND_NODE_REQ); + +        if (list_is_empty(&pl)) +                goto no_contacts; + +        if (dht_kv_update_req(rsp->key.data, &pl) < 0) { +                log_err(KEY_FMT " Failed to update request.", +                        KEY_VAL(rsp->key.data)); +                goto fail_update; +        } + +        dht_kv_query_contacts(rsp->key.data, &pl); + +        return; + + fail_update: +        peer_list_destroy(&pl); + no_contacts: +        return; +} + +static void do_dht_kv_find_value_rsp(const dht_find_node_rsp_msg_t  * node, +                                     const dht_find_value_rsp_msg_t * val) +{ +        struct list_head pl; +        uint8_t *        key; + +        assert(node != NULL); +        assert(val != NULL); + +        list_head_init(&pl); + +        key = node->key.data; + +        dht_kv_process_node_rsp(node->contacts, node->n_contacts, &pl, +                                DHT_FIND_VALUE_REQ); + +        if (val->n_values > 0) { +                log_dbg(KEY_FMT " %zd new values received.", +                        KEY_VAL(key), val->n_values); +                if (dht_kv_respond_req(key, val->values, val->n_values) < 0) +                        log_warn(KEY_FMT " Failed to respond to request.", +                                 KEY_VAL(key)); +                peer_list_destroy(&pl); +                return; /* done! 
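values were handed to the waiting request; no further lookup needed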
*/ +        } + +        if (list_is_empty(&pl)) +                goto no_contacts; + +        if (dht_kv_update_req(key, &pl) < 0) { +                log_err(KEY_FMT " Failed to update request.", KEY_VAL(key)); +                goto fail_update; +        } + +        dht_kv_query_remote(key, NULL, &pl); + +        return; + fail_update: +        peer_list_destroy(&pl); + no_contacts: +        return; +} + +static dht_msg_t * dht_wait_for_dht_msg(void) +{ +        dht_msg_t *  msg; +        struct cmd * cmd; + +        pthread_mutex_lock(&dht.cmds.mtx); + +        pthread_cleanup_push(__cleanup_mutex_unlock, &dht.cmds.mtx); + +        while (list_is_empty(&dht.cmds.list)) +                pthread_cond_wait(&dht.cmds.cond, &dht.cmds.mtx); + +        cmd = list_last_entry(&dht.cmds.list, struct cmd, next); +        list_del(&cmd->next); + +        pthread_cleanup_pop(true); + +        msg = dht_msg__unpack(NULL, cmd->cbuf.len, cmd->cbuf.data); +        if (msg == NULL) +                log_warn("Failed to unpack DHT msg."); + +        freebuf(cmd->cbuf); +        free(cmd); + +        return msg; +} + +static void do_dht_msg(dht_msg_t * msg) +{ +        dht_msg_t * rsp = NULL; +        uint8_t *   id; +        uint64_t    addr; + +#ifdef DEBUG_PROTO_DHT +        dht_kv_debug_msg_rcv(msg); +#endif +        if (dht_kv_validate_msg(msg) == -EINVAL) { +                log_warn("%s Validation failed.", DHT_CODE(msg)); +                dht_msg__free_unpacked(msg, NULL); +                return; +        } + +        id =   msg->src->id.data; +        addr = msg->src->addr; + +        if (dht_kv_update_contacts(id, addr) < 0) +                log_warn(PEER_FMT " Failed to update contact from msg src.", +                         PEER_VAL(id, addr)); + +        pthread_cleanup_push(__cleanup_dht_msg, msg); + +        switch(msg->code) { +        case DHT_FIND_VALUE_REQ: +                rsp = do_dht_kv_find_value_req(msg->find); +                break; +        case DHT_FIND_NODE_REQ: +                rsp = do_dht_kv_find_node_req(msg->find); +                break; +        case DHT_STORE: +                do_dht_kv_store(msg->store); +                break; +        case DHT_FIND_NODE_RSP: +                do_dht_kv_find_node_rsp(msg->node); +                break; +        case DHT_FIND_VALUE_RSP: +                do_dht_kv_find_value_rsp(msg->node, msg->val); +                break; +        default: +                assert(false); /* already validated */ +        } + +        pthread_cleanup_pop(true); + +        if (rsp == NULL) +                return; + +        pthread_cleanup_push(__cleanup_dht_msg, rsp); + +        dht_send_msg(rsp, addr); + +        pthread_cleanup_pop(true); /* free rsp */ +} + +static void * dht_handle_packet(void * o) +{ +        (void) o; + +        while (true) { +                dht_msg_t * msg; + +                msg = dht_wait_for_dht_msg(); +                if (msg == NULL) +                        continue; + +                tpm_begin_work(dht.tpm); + +                do_dht_msg(msg); + +                tpm_end_work(dht.tpm); +        } + +        return (void *) 0; +} +#ifndef __DHT_TEST__ +static void dht_post_packet(void *               comp, +                            struct shm_du_buff * sdb) +{ +        struct cmd * cmd; + +        (void) comp; + +        cmd = malloc(sizeof(*cmd)); +        if (cmd == NULL) { +                log_err("Command malloc failed."); +                goto fail_cmd; +        } + +        cmd->cbuf.data = 
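+        /* copy the payload out of the shared buffer so the sdb
+         * can be released before the worker handles the command */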
malloc(shm_du_buff_len(sdb));
+        if (cmd->cbuf.data == NULL) {
+                log_err("Command buffer malloc failed.");
+                goto fail_buf;
+        }
+
+        cmd->cbuf.len = shm_du_buff_len(sdb);
+
+        memcpy(cmd->cbuf.data, shm_du_buff_head(sdb), cmd->cbuf.len);
+
+        ipcp_sdb_release(sdb);
+
+        pthread_mutex_lock(&dht.cmds.mtx);
+
+        list_add(&cmd->next, &dht.cmds.list);
+
+        pthread_cond_signal(&dht.cmds.cond);
+
+        pthread_mutex_unlock(&dht.cmds.mtx);
+
+        return;
+
+ fail_buf:
+        free(cmd);
+ fail_cmd:
+        ipcp_sdb_release(sdb);
+        return;
+}
+#endif
+
+int dht_reg(const uint8_t * key)
+{
+        buffer_t val;
+
+        if (addr_to_buf(dht.addr, &val) < 0) {
+                log_err("Failed to convert address to buffer.");
+                goto fail_a2b;
+        }
+
+        if (dht_kv_publish(key, val)) {
+                log_err(KV_FMT " Failed to publish.", KV_VAL(key, val));
+                goto fail_publish;
+        }
+
+        freebuf(val);
+
+        return 0;
+ fail_publish:
+        freebuf(val);
+ fail_a2b:
+        return -1;
+}
+
+int dht_unreg(const uint8_t * key)
+{
+        buffer_t val;
+
+        if (addr_to_buf(dht.addr, &val) < 0) {
+                log_err("Failed to convert address to buffer.");
+                goto fail_a2b;
+        }
+
+        if (dht_kv_unpublish(key, val)) {
+                log_err(KV_FMT " Failed to unpublish.", KV_VAL(key, val));
+                goto fail_unpublish;
+        }
+
+        freebuf(val);
+
+        return 0;
+ fail_unpublish:
+        freebuf(val);
+ fail_a2b:
+        return -1;
+}
+
+uint64_t dht_query(const uint8_t * key)
+{
+        buffer_t *       vals;
+        ssize_t          n;
+        uint64_t         addr;
+
+        n = dht_kv_retrieve(key, &vals);
+        if (n < 0) {
+                log_err(KEY_FMT " Failed to query db.", KEY_VAL(key));
+                goto fail_vals;
+        }
+
+        if (n == 0) {
+                assert(vals == NULL);
+
+                log_dbg(KEY_FMT " No local values.", KEY_VAL(key));
+                n = dht_kv_query_remote(key, &vals, NULL);
+                if (n < 0) {
+                        log_warn(KEY_FMT " Failed to query DHT.", KEY_VAL(key));
+                        goto fail_vals;
+                }
+                if (n == 0) {
+                        log_dbg(KEY_FMT " No values.", KEY_VAL(key));
+                        goto no_vals;
+                }
+        }
+
+        if (buf_to_addr(vals[0], &addr) < 0) {
+                log_err(VAL_FMT " Failed addr conversion.", VAL_VAL(vals[0]));
+                goto fail_b2a;
+        }
+
+        if (n > 1 && addr == INVALID_ADDR && buf_to_addr(vals[1], &addr) < 0) {
+                log_err(VAL_FMT " Failed addr conversion.", VAL_VAL(vals[1]));
+                goto fail_b2a;
+        }
+
+        freebufs(vals, n);
+
+        return addr;
+ fail_b2a:
+        freebufs(vals, n);
+        return INVALID_ADDR;
+ no_vals:
+        free(vals);
+ fail_vals:
+        return INVALID_ADDR;
+}
+
+static int emergency_peer(struct list_head * pl)
+{
+        struct peer_entry * e;
+        struct timespec     now;
+
+        assert(pl != NULL);
+        assert(list_is_empty(pl));
+
+        if (dht.peer == INVALID_ADDR)
+                return -1;
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        e = malloc(sizeof(*e));
+        if (e == NULL) {
+                log_err("Failed to malloc emergency peer entry.");
+     
           goto fail_malloc; +        } + +        e->id = dht_dup_key(dht.id.data); +        if (e->id == NULL) { +                log_err("Failed to duplicate DHT ID for emergency peer."); +                goto fail_id; +        } + +        e->addr   = dht.peer; +        e->cookie = dht.magic; +        e->code   = DHT_FIND_NODE_REQ; +        e->t_sent = now.tv_sec; + +        list_add_tail(&e->next, pl); + +        return 0; + fail_id: +        free(e); + fail_malloc: +        return -ENOMEM; +} + +static int dht_kv_seed_bootstrap_peer(void) +{ +        struct list_head pl; + +        list_head_init(&pl); + +        if (dht.peer == INVALID_ADDR) { +                log_dbg("No-one to contact."); +                return 0; +        } + +        if (emergency_peer(&pl) < 0) { +                log_err("Could not create emergency peer."); +                goto fail_peer; +        } + +        log_dbg("Pinging emergency peer " ADDR_FMT32 ".", +                ADDR_VAL32(&dht.peer)); + +        if (dht_kv_query_contacts(dht.id.data, &pl) < 0) { +                log_warn("Failed to bootstrap peer."); +                goto fail_query; +        } + +        peer_list_destroy(&pl); + +        return 0; + fail_query: +        peer_list_destroy(&pl); + fail_peer: +        return -EAGAIN; +} + +static void dht_kv_check_contacts(void) +{ +        struct list_head cl; +        struct list_head pl; + +        list_head_init(&cl); + +        dht_kv_contact_list(dht.id.data, &cl, dht.k); + +        if (!list_is_empty(&cl)) +                goto success; + +        contact_list_destroy(&cl); + +        list_head_init(&pl); + +        if (dht.peer == INVALID_ADDR) { +                log_dbg("No-one to contact."); +                return; +        } + +        if (emergency_peer(&pl) < 0) { +                log_err("Could not create emergency peer."); +                goto fail_peer; +        } + +        log_dbg("No contacts found, using emergency peer " ADDR_FMT32 ".", +                ADDR_VAL32(&dht.peer)); + +        dht_kv_query_contacts(dht.id.data, &pl); + +        peer_list_destroy(&pl); + +        return; + success: +        contact_list_destroy(&cl); +        return; + fail_peer: +        return; +} + +static void dht_kv_remove_expired_reqs(void) +{ +        struct list_head * p; +        struct list_head * h; +        struct timespec    now; + +        clock_gettime(PTHREAD_COND_CLOCK, &now); + +        pthread_mutex_lock(&dht.reqs.mtx); + +        list_for_each_safe(p, h, &dht.reqs.list) { +                struct dht_req * e; +                e = list_entry(p, struct dht_req, next); +                if (IS_EXPIRED(e, &now)) { +                        log_dbg(KEY_FMT " Removing expired request.", +                                KEY_VAL(e->key)); +                        list_del(&e->next); +                        dht_req_destroy(e); +                        --dht.reqs.len; +                } +        } + +        pthread_mutex_unlock(&dht.reqs.mtx); +} + +static void value_list_destroy(struct list_head * vl) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(vl != NULL); + +        list_for_each_safe(p, h, vl) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                list_del(&v->next); +                val_entry_destroy(v); +        } +} + +#define MUST_REPLICATE(v, now) ((now)->tv_sec > (v)->t_repl + dht.t_repl) +#define MUST_REPUBLISH(v, now) /* Close to expiry deadline */ \ +        (((v)->t_exp - (now)->tv_sec) < 
(DHT_N_REPUB * dht.t_repl))
+static void dht_entry_get_repl_lists(const struct dht_entry * e,
+                                     struct list_head *       repl,
+                                     struct list_head *       rebl,
+                                     struct timespec *        now)
+{
+        struct list_head * p;
+        struct val_entry * n;
+
+        list_for_each(p, &e->vals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (MUST_REPLICATE(v, now) && !IS_EXPIRED(v, now)) {
+                        n = val_entry_create(v->val, v->t_exp);
+                        if (n == NULL)
+                                continue;
+
+                        list_add_tail(&n->next, repl);
+                }
+        }
+
+        list_for_each(p, &e->lvals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (MUST_REPLICATE(v, now) && MUST_REPUBLISH(v, now)) {
+                        /* Add expire time here, to allow creating val_entry */
+                        n = val_entry_create(v->val, now->tv_sec + dht.t_expire);
+                        if (n == NULL)
+                                continue;
+
+                        list_add_tail(&n->next, rebl);
+                }
+        }
+}
+
+static int dht_kv_next_values(uint8_t *          key,
+                              struct list_head * repl,
+                              struct list_head * rebl)
+{
+        struct timespec    now;
+        struct list_head * p;
+        struct list_head * h;
+        struct dht_entry * e = NULL;
+
+        assert(key != NULL);
+        assert(repl != NULL);
+        assert(rebl != NULL);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        assert(list_is_empty(repl));
+        assert(list_is_empty(rebl));
+
+        pthread_rwlock_rdlock(&dht.db.lock);
+
+        if (dht.db.kv.len == 0)
+                goto no_entries;
+
+        list_for_each_safe(p, h, &dht.db.kv.list) {
+                struct dht_entry * d;
+                d = list_entry(p, struct dht_entry, next);
+                if (IS_CLOSER(d->key, key) ||
+                    memcmp(d->key, key, dht.id.len) == 0)
+                        continue;  /* Already processed */
+                e = d;
+                break;
+        }
+
+        if (e != NULL) {
+                memcpy(key, e->key, dht.id.len);
+                dht_entry_get_repl_lists(e, repl, rebl, &now);
+        }
+ no_entries:
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        return list_is_empty(repl) && list_is_empty(rebl) ?
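+               /* -ENOENT: no entry left that needs replication */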
-ENOENT : 0; +} + +static void dht_kv_replicate_value(const uint8_t *         key, +                                   struct val_entry *      v, +                                   const struct timespec * now) +{ +        assert(MUST_REPLICATE(v, now)); + +        (void) now; + +        if (dht_kv_store_remote(key, v->val, v->t_exp) == 0) { +                log_dbg(KV_FMT " Replicated.", KV_VAL(key, v->val)); +                return; +        } + +        log_dbg(KV_FMT " Replication failed.", KV_VAL(key, v->val)); + +        list_del(&v->next); +        val_entry_destroy(v); +} + +static void dht_kv_republish_value(const uint8_t *  key, +                            struct val_entry *      v, +                            const struct timespec * now) +{ +        assert(MUST_REPLICATE(v, now)); + +        if (MUST_REPUBLISH(v, now)) +                assert(v->t_exp >= now->tv_sec + dht.t_expire); + +        if (dht_kv_store_remote(key, v->val, v->t_exp) == 0) { +                log_dbg(KV_FMT " Republished.", KV_VAL(key, v->val)); +                return; +        } + +        if (MUST_REPUBLISH(v, now)) +                log_warn(KV_FMT " Republish failed.", KV_VAL(key, v->val)); +        else +                log_dbg(KV_FMT " Replication failed.", KV_VAL(key, v->val)); + +        list_del(&v->next); +        val_entry_destroy(v); +} + +static void dht_kv_update_replication_times(const uint8_t *         key, +                                            struct list_head *      repl, +                                            struct list_head *      rebl, +                                            const struct timespec * now) +{ +        struct dht_entry * e; +        struct list_head * p; +        struct list_head * h; +        struct val_entry * v; + +        assert(key != NULL); +        assert(repl != NULL); +        assert(rebl != NULL); +        assert(now != NULL); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) { +                pthread_rwlock_unlock(&dht.db.lock); +                return; +        } + +        list_for_each_safe(p, h, repl) { +                struct val_entry * x; +                v = list_entry(p, struct val_entry, next); +                x = dht_entry_get_val(e, v->val); +                if (x == NULL) { +                        log_err(KV_FMT " Not in vals.", KV_VAL(key, v->val)); +                        continue; +                } + +                x->t_repl = now->tv_sec; + +                list_del(&v->next); +                val_entry_destroy(v); +        } + +        list_for_each_safe(p, h, rebl) { +                struct val_entry * x; +                v = list_entry(p, struct val_entry, next); +                x = dht_entry_get_lval(e, v->val); +                if (x == NULL) { +                        log_err(KV_FMT " Not in lvals.", KV_VAL(key, v->val)); +                        continue; +                } + +                x->t_repl = now->tv_sec; +                if (v->t_exp > x->t_exp) { +                        x->t_exp = v->t_exp; /* update expiration time */ +                } + +                list_del(&v->next); +                val_entry_destroy(v); +        } + +        pthread_rwlock_unlock(&dht.db.lock); +} + +static void __cleanup_value_list(void * o) +{ +        return value_list_destroy((struct list_head *) o); +} + +static void dht_kv_replicate_values(const uint8_t *    key, +                                    struct list_head * repl, +                           
         struct list_head * rebl) +{ +        struct timespec    now; +        struct list_head * p; +        struct list_head * h; + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_cleanup_push(__cleanup_value_list, repl); +        pthread_cleanup_push(__cleanup_value_list, rebl); + +        list_for_each_safe(p, h, repl) { +                struct val_entry * v; +                v = list_entry(p, struct val_entry, next); +                dht_kv_replicate_value(key, v, &now); +        } + +        list_for_each_safe(p, h, rebl) { +                struct val_entry * v; +                v = list_entry(p, struct val_entry, next); +                dht_kv_republish_value(key, v, &now); +        } + +        pthread_cleanup_pop(false); +        pthread_cleanup_pop(false); + +        /* removes non-replicated items from the list */ +        dht_kv_update_replication_times(key, repl, rebl, &now); + +        if (list_is_empty(repl) && list_is_empty(rebl)) +                return; + +        log_warn(KEY_FMT " Failed to update replication times.", KEY_VAL(key)); +} + +static void dht_kv_replicate(void) +{ +        struct list_head repl; /* list of values to replicate       */ +        struct list_head rebl; /* list of local values to republish */ +        uint8_t *        key; + +        key = dht_dup_key(dht.id.data); /* dist == 0 */ +        if (key == NULL) { +                log_err("Replicate: Failed to duplicate DHT ID."); +                return; +        } + +        list_head_init(&repl); +        list_head_init(&rebl); + +        pthread_cleanup_push(free, key); + +        while (dht_kv_next_values(key, &repl, &rebl) == 0) { +                dht_kv_replicate_values(key, &repl, &rebl); +                if (!list_is_empty(&repl)) { +                        log_warn(KEY_FMT " Replication items left.", +                                 KEY_VAL(key)); +                        value_list_destroy(&repl); +                } + +                if (!list_is_empty(&rebl)) { +                        log_warn(KEY_FMT " Republish items left.", +                                 KEY_VAL(key)); +                        value_list_destroy(&rebl); +                } +        } + +        pthread_cleanup_pop(true); +} + +static void dht_kv_refresh_contacts(void) +{ +        struct list_head * p; +        struct list_head * h; +        struct list_head   rl; /* refresh list */ +        struct timespec    now; + +        list_head_init(&rl); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_rdlock(&dht.db.lock); + +        __dht_kv_bucket_refresh_list(dht.db.contacts.root, now.tv_sec, &rl); + +        pthread_rwlock_unlock(&dht.db.lock); + +        list_for_each_safe(p, h, &rl) { +                struct contact * c; +                c = list_entry(p, struct contact, next); +                log_dbg(PEER_FMT " Refreshing contact.", +                        PEER_VAL(c->id, c->addr)); +                dht_kv_query_contacts(c->id, NULL); +                list_del(&c->next); +                contact_destroy(c); +        } + +        assert(list_is_empty(&rl)); +} + +static void (*tasks[])(void) = { +        dht_kv_check_contacts, +        dht_kv_remove_expired_entries, +        dht_kv_remove_expired_reqs, +        dht_kv_replicate, +        dht_kv_refresh_contacts, +        NULL +}; + +static void * work(void * o) +{ +        struct timespec now = TIMESPEC_INIT_MS(1); +        time_t          intv; +        size_t          n; /* number of tasks */ + +        n = 
sizeof(tasks) / sizeof(tasks[0]) - 1; /* last is NULL */
+
+        (void) o;
+
+        while (dht_kv_seed_bootstrap_peer() == -EAGAIN) {
+                ts_add(&now, &now, &now); /* exponential backoff */
+                if (now.tv_sec >= 1) {    /* cap at 1 second     */
+                        now.tv_sec  = 1;
+                        now.tv_nsec = 0;
+                }
+                nanosleep(&now, NULL);
+        }
+
+        intv = gcd(dht.t_expire, (dht.t_expire - DHT_N_REPUB * dht.t_repl));
+        intv = gcd(intv, gcd(dht.t_repl, dht.t_refresh)) / 2;
+        intv = MAX(1, intv / n);
+
+        log_dbg("DHT worker starting, running %zu tasks every %ld seconds.",
+                n, intv * n);
+
+        while (true) {
+                int i = 0;
+                while (tasks[i] != NULL) {
+                        tasks[i++]();
+                        sleep(intv);
+                }
+        }
+
+        return (void *) 0;
+}
+
+int dht_start(void)
+{
+        dht.state = DHT_RUNNING;
+
+        if (tpm_start(dht.tpm))
+                goto fail_tpm_start;
+
+#ifndef __DHT_TEST__
+        if (pthread_create(&dht.worker, NULL, work, NULL)) {
+                log_err("Failed to create DHT worker thread.");
+                goto fail_worker;
+        }
+
+        dht.eid = dt_reg_comp(&dht, &dht_post_packet, DHT);
+        if ((int) dht.eid < 0) {
+                log_err("Failed to register DHT component.");
+                goto fail_reg;
+        }
+#else
+        (void) work;
+#endif
+        return 0;
+#ifndef __DHT_TEST__
+ fail_reg:
+        pthread_cancel(dht.worker);
+        pthread_join(dht.worker, NULL);
+ fail_worker:
+        tpm_stop(dht.tpm);
+#endif
+ fail_tpm_start:
+        dht.state = DHT_INIT;
+        return -1;
+}
+
+void dht_stop(void)
+{
+        assert(dht.state == DHT_RUNNING);
+
+#ifndef __DHT_TEST__
+        dt_unreg_comp(dht.eid);
+
+        pthread_cancel(dht.worker);
+        pthread_join(dht.worker, NULL);
+#endif
+        tpm_stop(dht.tpm);
+
+        dht.state = DHT_INIT;
+}
+
+int dht_init(struct dir_dht_config * conf)
+{
+        struct timespec now;
+        pthread_condattr_t cattr;
+
+        assert(conf != NULL);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+#ifndef __DHT_TEST__
+        dht.id.len    = ipcp_dir_hash_len();
+        dht.addr      = addr_auth_address();
+#else
+        dht.id.len    = DHT_TEST_KEY_LEN;
+        dht.addr      = DHT_TEST_ADDR;
+#endif
+        dht.t0        = now.tv_sec;
+        dht.alpha     = conf->params.alpha;
+        dht.k         = conf->params.k;
+        dht.t_expire  = conf->params.t_expire;
+        dht.t_refresh = conf->params.t_refresh;
+        dht.t_repl    = conf->params.t_replicate;
+        dht.peer      = conf->peer;
+
+        dht.magic = generate_cookie();
+
+        /* Send my address on enrollment */
+        conf->peer    = dht.addr;
+
+        dht.id.data = generate_id();
+        if (dht.id.data == NULL) {
+                log_err("Failed to create DHT ID.");
+                goto fail_id;
+        }
+
+        list_head_init(&dht.cmds.list);
+
+        if (pthread_mutex_init(&dht.cmds.mtx, NULL)) {
+                log_err("Failed to initialize command mutex.");
+                goto fail_cmds_mutex;
+        }
+
+        if (pthread_cond_init(&dht.cmds.cond, NULL)) {
+                log_err("Failed to initialize command condvar.");
+                goto fail_cmds_cond;
+        }
+
+        list_head_init(&dht.reqs.list);
+        dht.reqs.len = 0;
+
+        if (pthread_mutex_init(&dht.reqs.mtx, NULL)) {
+                log_err("Failed to initialize request 
mutex."); +                goto fail_reqs_mutex; +        } + +        if (pthread_condattr_init(&cattr)) { +                log_err("Failed to initialize request condvar attributes."); +                goto fail_cattr; +        } +#ifndef __APPLE__ +        if (pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK)) { +                log_err("Failed to set request condvar clock."); +                goto fail_cattr; +        } +#endif +        if (pthread_cond_init(&dht.reqs.cond, &cattr)) { +                log_err("Failed to initialize request condvar."); +                goto fail_reqs_cond; +        } + +        list_head_init(&dht.db.kv.list); +        dht.db.kv.len   = 0; +        dht.db.kv.vals  = 0; +        dht.db.kv.lvals = 0; + +        if (pthread_rwlock_init(&dht.db.lock, NULL)) { +                log_err("Failed to initialize store rwlock."); +                goto fail_rwlock; +        } + +        dht.db.contacts.root = bucket_create(); +        if (dht.db.contacts.root == NULL) { +                log_err("Failed to create DHT buckets."); +                goto fail_buckets; +        } + +        if (rib_reg(DHT, &r_ops) < 0) { +                log_err("Failed to register DHT RIB operations."); +                goto fail_rib_reg; +        } + +        dht.tpm = tpm_create(2, 1, dht_handle_packet, NULL); +        if (dht.tpm == NULL) { +                log_err("Failed to create TPM for DHT."); +                goto fail_tpm_create; +        } + +        if (dht_kv_update_contacts(dht.id.data, dht.addr) < 0) +                log_warn("Failed to update contacts with DHT ID."); + +        pthread_condattr_destroy(&cattr); +#ifndef __DHT_TEST__ +        log_info("DHT initialized."); +        log_dbg("  ID: " HASH_FMT64 " [%zu bytes].", +                HASH_VAL64(dht.id.data), dht.id.len); +        log_dbg("  address: " ADDR_FMT32 ".", ADDR_VAL32(&dht.addr)); +        log_dbg("  peer: " ADDR_FMT32 ".", ADDR_VAL32(&dht.peer)); +        log_dbg("  magic cookie: " HASH_FMT64 ".", HASH_VAL64(&dht.magic)); +        log_info("  parameters: alpha=%u, k=%zu, t_expire=%ld, " +                "t_refresh=%ld, t_replicate=%ld.", +                dht.alpha, dht.k, dht.t_expire, dht.t_refresh, dht.t_repl); +#endif +        dht.state = DHT_INIT; + +        return 0; + + fail_tpm_create: +        rib_unreg(DHT); + fail_rib_reg: +        bucket_destroy(dht.db.contacts.root); + fail_buckets: +        pthread_rwlock_destroy(&dht.db.lock); + fail_rwlock: +        pthread_cond_destroy(&dht.reqs.cond); + fail_reqs_cond: +        pthread_condattr_destroy(&cattr); + fail_cattr: +        pthread_mutex_destroy(&dht.reqs.mtx); + fail_reqs_mutex: +        pthread_cond_destroy(&dht.cmds.cond); + fail_cmds_cond: +        pthread_mutex_destroy(&dht.cmds.mtx); + fail_cmds_mutex: +        freebuf(dht.id); + fail_id: +        return -1; +} + +void dht_fini(void) +{ +        struct list_head * p; +        struct list_head * h; + +        rib_unreg(DHT); + +        tpm_destroy(dht.tpm); + +        pthread_mutex_lock(&dht.cmds.mtx); + +        list_for_each_safe(p, h, &dht.cmds.list) { +                struct cmd * c = list_entry(p, struct cmd, next); +                list_del(&c->next); +                freebuf(c->cbuf); +                free(c); +        } + +        pthread_mutex_unlock(&dht.cmds.mtx); + +        pthread_cond_destroy(&dht.cmds.cond); +        pthread_mutex_destroy(&dht.cmds.mtx); + +        pthread_mutex_lock(&dht.reqs.mtx); + +        list_for_each_safe(p, h, &dht.reqs.list) { +                
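+                /* drop any requests still pending at shutdown */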
struct dht_req * r = list_entry(p, struct dht_req, next); +                list_del(&r->next); +                dht_req_destroy(r); +                dht.reqs.len--; +        } + +        pthread_mutex_unlock(&dht.reqs.mtx); + +        pthread_cond_destroy(&dht.reqs.cond); +        pthread_mutex_destroy(&dht.reqs.mtx); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        list_for_each_safe(p, h, &dht.db.kv.list) { +                struct dht_entry * e = list_entry(p, struct dht_entry, next); +                list_del(&e->next); +                dht_entry_destroy(e); +                dht.db.kv.len--; +        } + +        if (dht.db.contacts.root != NULL) +                bucket_destroy(dht.db.contacts.root); + +        pthread_rwlock_unlock(&dht.db.lock); + +        pthread_rwlock_destroy(&dht.db.lock); + +        assert(dht.db.kv.len == 0); +        assert(dht.db.kv.vals == 0); +        assert(dht.db.kv.lvals == 0); +        assert(dht.reqs.len == 0); + +        freebuf(dht.id); +} diff --git a/src/ipcpd/unicast/dht.h b/src/ipcpd/unicast/dir/dht.h index df394714..852a5130 100644 --- a/src/ipcpd/unicast/dht.h +++ b/src/ipcpd/unicast/dir/dht.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Distributed Hash Table based on Kademlia   * @@ -20,33 +20,30 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   */ -#ifndef OUROBOROS_IPCPD_UNICAST_DHT_H -#define OUROBOROS_IPCPD_UNICAST_DHT_H +#ifndef OUROBOROS_IPCPD_UNICAST_DIR_DHT_H +#define OUROBOROS_IPCPD_UNICAST_DIR_DHT_H  #include <ouroboros/ipcp-dev.h> +#include "ops.h" +  #include <stdint.h>  #include <sys/types.h> -struct dht; +int      dht_init(struct dir_dht_config * conf); -struct dht * dht_create(uint64_t addr); +void     dht_fini(void); -int          dht_bootstrap(struct dht * dht, -                           size_t       b, -                           time_t       t_expire); +int      dht_start(void); -void         dht_destroy(struct dht * dht); +void     dht_stop(void); -int          dht_reg(struct dht *    dht, -                     const uint8_t * key); +int      dht_reg(const uint8_t * key); -int          dht_unreg(struct dht *    dht, -                       const uint8_t * key); +int      dht_unreg(const uint8_t * key); -uint64_t     dht_query(struct dht *    dht, -                       const uint8_t * key); +uint64_t dht_query(const uint8_t * key); -int          dht_wait_running(struct dht * dht); +extern struct dir_ops dht_dir_ops; -#endif /* OUROBOROS_IPCPD_UNICAST_DHT_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_DIR_DHT_H */ diff --git a/src/ipcpd/unicast/dir/dht.proto b/src/ipcpd/unicast/dir/dht.proto new file mode 100644 index 00000000..ea74805f --- /dev/null +++ b/src/ipcpd/unicast/dir/dht.proto @@ -0,0 +1,58 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * DHT protocol, based on Kademlia + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +syntax = "proto2"; + +message dht_contact_msg { +        required bytes  id   = 1; +        required uint64 addr = 2; +} + +message dht_find_req_msg { +        required uint64 cookie = 1; +        required bytes key     = 2; +} + +message dht_find_node_rsp_msg { +        required uint64          cookie   = 1; +        required bytes           key      = 2; +        repeated dht_contact_msg contacts = 3; +} + +message dht_find_value_rsp_msg { +        repeated bytes values = 1; +} + +message dht_store_msg { +        required bytes  key = 1; +        required bytes  val = 2; +        required uint32 exp = 3; +} + +message dht_msg { +        required uint32                 code  = 1; +        required dht_contact_msg        src   = 2; +        optional dht_store_msg          store = 3; +        optional dht_find_req_msg       find  = 4; +        optional dht_find_node_rsp_msg  node  = 5; +        optional dht_find_value_rsp_msg val   = 6; +} diff --git a/src/ipcpd/unicast/dir/ops.h b/src/ipcpd/unicast/dir/ops.h new file mode 100644 index 00000000..8c6e5eb5 --- /dev/null +++ b/src/ipcpd/unicast/dir/ops.h @@ -0,0 +1,42 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Directory policy ops + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#ifndef OUROBOROS_IPCPD_UNICAST_DIR_OPS_H +#define OUROBOROS_IPCPD_UNICAST_DIR_OPS_H + +struct dir_ops { +        int      (* init)(void * config); + +        void     (* fini)(void); + +        int      (* start)(void); + +        void     (* stop)(void); + +        int      (* reg)(const uint8_t * hash); + +        int      (* unreg)(const uint8_t * hash); + +        uint64_t (* query)(const uint8_t * hash); +}; + +#endif /* OUROBOROS_IPCPD_UNICAST_DIR_OPS_H */ diff --git a/src/ipcpd/unicast/dir/pol.h b/src/ipcpd/unicast/dir/pol.h new file mode 100644 index 00000000..eae4b2e7 --- /dev/null +++ b/src/ipcpd/unicast/dir/pol.h @@ -0,0 +1,23 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Directory policies + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#include "dht.h" diff --git a/src/ipcpd/unicast/dir/tests/CMakeLists.txt b/src/ipcpd/unicast/dir/tests/CMakeLists.txt new file mode 100644 index 00000000..f62ed993 --- /dev/null +++ b/src/ipcpd/unicast/dir/tests/CMakeLists.txt @@ -0,0 +1,40 @@ +get_filename_component(CURRENT_SOURCE_PARENT_DIR +  ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(CURRENT_BINARY_PARENT_DIR +  ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +include_directories(${CURRENT_SOURCE_PARENT_DIR}) +include_directories(${CURRENT_BINARY_PARENT_DIR}) + +include_directories(${CMAKE_SOURCE_DIR}/include) +include_directories(${CMAKE_BINARY_DIR}/include) + +get_filename_component(PARENT_PATH ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(PARENT_DIR ${PARENT_PATH} NAME) + +create_test_sourcelist(${PARENT_DIR}_tests test_suite.c +  # Add new tests here +  dht_test.c +  ) + +protobuf_generate_c(DHT_PROTO_SRCS KAD_PROTO_HDRS ../dht.proto) +add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests} +  ${DHT_PROTO_SRCS}) +target_link_libraries(${PARENT_DIR}_test ouroboros-common) + +add_dependencies(check ${PARENT_DIR}_test) + +set(tests_to_run ${${PARENT_DIR}_tests}) +if(CMAKE_VERSION VERSION_LESS "3.29.0") +  remove(tests_to_run test_suite.c) +else () +  list(POP_FRONT tests_to_run) +endif() + +foreach (test ${tests_to_run}) +  get_filename_component(test_name ${test} NAME_WE) +  add_test(${test_name} ${C_TEST_PATH}/${PARENT_DIR}_test ${test_name}) +endforeach (test) diff --git a/src/ipcpd/unicast/dir/tests/dht_test.c b/src/ipcpd/unicast/dir/tests/dht_test.c new file mode 100644 index 00000000..cb6b0f9f --- /dev/null +++ b/src/ipcpd/unicast/dir/tests/dht_test.c @@ -0,0 +1,1925 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Unit tests of the DHT + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#define __DHT_TEST__ + +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#endif + +#include <ouroboros/test.h> +#include <ouroboros/list.h> +#include <ouroboros/utils.h> + +#include "dht.pb-c.h" + +#include <assert.h> +#include <inttypes.h> +#include <time.h> +#include <stdlib.h> +#include <stdio.h> + +#define DHT_MAX_RAND_SIZE 64 +#define DHT_TEST_KEY_LEN  32 +#define DHT_TEST_ADDR     0x1234567890abcdefULL + +/* forward declare for use in the dht code */ +/* Packet sink for DHT tests */ +struct { +        bool   enabled; + +        struct list_head list; +        size_t len; +} sink; + +struct message { +        struct   list_head next; +        void *   msg; +        uint64_t dst; +}; + +static int sink_send_msg(buffer_t * pkt, +                         uint64_t  addr) +{ +        struct message *   m; + +        assert(pkt  != NULL); +        assert(addr != 0); + +        assert(!list_is_empty(&sink.list) || sink.len == 0); + +        if (!sink.enabled) +                goto finish; + +        m = malloc(sizeof(*m)); +        if (m == NULL) { +                printf("Failed to malloc message."); +                goto fail_malloc; +        } + +        m->msg = dht_msg__unpack(NULL, pkt->len, pkt->data); +        if (m->msg == NULL) +                goto fail_unpack; + +        m->dst = addr; + +        list_add_tail(&m->next, &sink.list); + +        ++sink.len; + finish: +        freebuf(*pkt); + +        return 0; + fail_unpack: +        free(m); + fail_malloc: +        freebuf(*pkt); +        return -1; +} + +#include "dht.c" + +/* Test helpers */ + +static void sink_init(void) +{ +        list_head_init(&sink.list); +        sink.len = 0; +        sink.enabled = true; +} + +static void sink_clear(void) +{ +        struct list_head * p; +        struct list_head * h; + +        list_for_each_safe(p, h, &sink.list) { +                struct message * m = list_entry(p, struct message, next); +                list_del(&m->next); +                dht_msg__free_unpacked((dht_msg_t *) m->msg, NULL); +                free(m); +                --sink.len; +        } + +        assert(list_is_empty(&sink.list)); +} + +static void sink_fini(void) +{ +        sink_clear(); + +        assert(list_is_empty(&sink.list) || sink.len != 0); +} + +static dht_msg_t * sink_read(void) +{ +        struct message * m; +        dht_msg_t *      msg; + +        assert(!list_is_empty(&sink.list) || sink.len == 0); + +        if (list_is_empty(&sink.list)) +                return NULL; + +        m = list_first_entry(&sink.list, struct message, next); + +        --sink.len; + +        list_del(&m->next); + +        msg = m->msg; + +        free(m); + +        return (dht_msg_t *) msg; +} + +static const buffer_t test_val = { +        .data = (uint8_t *) "test_value", +        .len = 10 +}; + +static const buffer_t test_val2 = { +        .data = (uint8_t *) "test_value_2", +        .len = 12 +}; + +static int random_value_len(buffer_t * b) +{ +        assert(b != NULL); +        assert(b->len > 0 && b->len <= DHT_MAX_RAND_SIZE); + +        b->data = malloc(b->len); +        if (b->data == NULL) +                goto fail_malloc; + +        random_buffer(b->data, b->len); + +        return 0; + + fail_malloc: +        return -ENOMEM; +} + +static int random_value(buffer_t * b) +{ +        assert(b != NULL); + +        b->len = rand() % DHT_MAX_RAND_SIZE + 1; + +        return random_value_len(b); +} + +static int fill_dht_with_contacts(size_t 
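+                                  /* number of random contacts to add */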
n)
+{
+        size_t    i;
+        uint8_t * id;
+
+        for (i = 0; i < n; i++) {
+                uint64_t addr = generate_cookie();
+                id = generate_id();
+                if (id == NULL)
+                        goto fail_id;
+
+                if (dht_kv_update_contacts(id, addr) < 0)
+                        goto fail_update;
+                free(id);
+        }
+
+        return 0;
+
+ fail_update:
+        free(id);
+ fail_id:
+        return -1;
+}
+
+static int fill_store_with_random_values(const uint8_t * key,
+                                         size_t          len,
+                                         size_t          n_values)
+{
+        buffer_t        val;
+        struct timespec now;
+        size_t          i;
+        uint8_t *       _key;
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        for (i = 0; i < n_values; ++i) {
+                if (key != NULL)
+                        _key = (uint8_t *) key;
+                else {
+                        _key = generate_id();
+                        if (_key == NULL)
+                                goto fail_key;
+                }
+
+                if (len == 0)
+                        val.len = rand() % DHT_MAX_RAND_SIZE + 1;
+                else
+                        val.len = len;
+
+                if (random_value_len(&val) < 0)
+                        goto fail_value;
+
+                if (dht_kv_store(_key, val, now.tv_sec + 10) < 0)
+                        goto fail_store;
+
+                freebuf(val);
+                if (key == NULL)
+                        free(_key);
+        }
+
+        return 0;
+
+ fail_store:
+        freebuf(val);
+ fail_value:
+        if (key == NULL) /* only free generated keys */
+                free(_key);
+ fail_key:
+        return -1;
+}
+
+static int random_contact_list(dht_contact_msg_t *** contacts,
+                               size_t                max)
+{
+        size_t i;
+
+        assert(contacts != NULL);
+        assert(max > 0);
+
+        *contacts = malloc(max * sizeof(**contacts));
+        if (*contacts == NULL)
+                goto fail_malloc;
+
+        for (i = 0; i < max; i++) {
+                (*contacts)[i] = malloc(sizeof(*(*contacts)[i]));
+                if ((*contacts)[i] == NULL)
+                        goto fail_contacts;
+
+                dht_contact_msg__init((*contacts)[i]);
+
+                (*contacts)[i]->id.data = generate_id();
+                if ((*contacts)[i]->id.data == NULL)
+                        goto fail_contact;
+
+                (*contacts)[i]->id.len = dht.id.len;
+                (*contacts)[i]->addr = generate_cookie();
+        }
+
+        return 0;
+
+ fail_contact:
+        dht_contact_msg__free_unpacked((*contacts)[i], NULL);
+ fail_contacts:
+        while (i-- > 0)
+                dht_contact_msg__free_unpacked((*contacts)[i], NULL);
+        free(*contacts);
+ fail_malloc:
+        return -ENOMEM;
+}
+
+static void clear_contacts(dht_contact_msg_t ** contacts,
+                           size_t               len)
+{
+        size_t i;
+
+        assert(contacts != NULL);
+        if (*contacts == NULL)
+                return;
+
+        for (i = 0; i < len; ++i)
+                dht_contact_msg__free_unpacked((contacts)[i], NULL);
+
+        free(*contacts);
+        *contacts = NULL;
+}
+
+/* Start of actual tests */
+static struct dir_dht_config test_dht_config = {
+        .params = {
+                .alpha       = 3,
+                .k           = 8,
+                .t_expire    = 86400,
+                .t_refresh   = 900,
+            
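+                /* replicate at the same cadence as the bucket refresh */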
    .t_replicate = 900 +        } +}; + +static int test_dht_init_fini(void) +{ +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_start_stop(void) +{ +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        if (dht_start() < 0) { +                printf("Failed to start dht.\n"); +                goto fail_start; +        } + +        dht_stop(); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_start: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_val_entry_create_destroy(void) +{ +        struct val_entry * e; +        struct timespec    now; + +        TEST_START(); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = val_entry_create(test_val, now.tv_sec + 10); +        if (e == NULL) { +                printf("Failed to create val entry.\n"); +                goto fail_entry; +        } + +        val_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_entry_create_destroy(void) +{ +        struct dht_entry * e; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = dht_entry_create(dht.id.data); +        if (e == NULL) { +                printf("Failed to create dht entry.\n"); +                goto fail_entry; +        } + +        dht_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_entry_update_get_val(void) +{ +        struct dht_entry * e; +        struct val_entry * v; +        struct timespec    now; + +        TEST_START(); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = dht_entry_create(dht.id.data); +        if (e == NULL) { +                printf("Failed to create dht entry.\n"); +                goto fail_entry; +        } + +        if (dht_entry_get_val(e, test_val) != NULL) { +                printf("Found value in empty dht entry.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_val(e, test_val, now.tv_sec + 10) < 0) { +                printf("Failed to update dht entry value.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_val(e, test_val2) != NULL) { +                printf("Found value in dht entry with different key.\n"); +                goto fail_get; +        } + +        v = dht_entry_get_val(e, test_val); +        if (v == NULL) { +                printf("Failed to get value from dht 
entry.\n"); +                goto fail_get; +        } + +        if (v->val.len != test_val.len) { +                printf("Length in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if(memcmp(v->val.data, test_val.data, test_val.len) != 0) { +                printf("Data in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_val(e, test_val, now.tv_sec + 15) < 0) { +                printf("Failed to update exsting dht entry value.\n"); +                goto fail_get; +        } + +        if (v->t_exp != now.tv_sec + 15) { +                printf("Expiration time in dht entry value not updated.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_val(e, test_val, now.tv_sec + 5) < 0) { +                printf("Failed to update existing dht entry value (5).\n"); +                goto fail_get; +        } + +        if (v->t_exp != now.tv_sec + 15) { +                printf("Expiration time in dht entry shortened.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_val(e, test_val) != v) { +                printf("Wrong value in dht entry found after update.\n"); +                goto fail_get; +        } + +        dht_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_get: +        dht_entry_destroy(e); + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_entry_update_get_lval(void) +{ +        struct dht_entry * e; +        struct val_entry * v; +        struct timespec    now; + +        TEST_START(); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = dht_entry_create(dht.id.data); +        if (e == NULL) { +                printf("Failed to create dht entry.\n"); +                goto fail_entry; +        } + +        if (dht_entry_get_lval(e, test_val) != NULL) { +                printf("Found value in empty dht entry.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_lval(e, test_val) < 0) { +                printf("Failed to update dht entry value.\n"); +                goto fail_get; +        } + +        v = dht_entry_get_lval(e, test_val); +        if (v== NULL) { +                printf("Failed to get value from dht entry.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_lval(e, test_val2) != NULL) { +                printf("Found value in dht entry in vals.\n"); +                goto fail_get; +        } + +        if (v->val.len != test_val.len) { +                printf("Length in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if(memcmp(v->val.data, test_val.data, test_val.len) != 0) { +                printf("Data in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_lval(e, test_val) < 0) { +                printf("Failed to update existing dht entry value.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_lval(e, test_val) != v) { +                printf("Wrong value in dht entry found after update.\n"); +                goto fail_get; +        } + +        dht_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + 
+        return TEST_RC_SUCCESS; + + fail_get: +        dht_entry_destroy(e); + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_contact_create_destroy(void) +{ +        struct contact * c; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        c = contact_create(dht.id.data, dht.addr); +        if (c == NULL) { +                printf("Failed to create contact.\n"); +                goto fail_contact; +        } + +        contact_destroy(c); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_contact: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_update_bucket(void) +{ +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        if (fill_dht_with_contacts(1000) < 0) { +                printf("Failed to fill bucket with contacts.\n"); +                goto fail_update; +        } + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_update: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_contact_list(void) +{ +        struct list_head cl; +        ssize_t          len; +        ssize_t          items; + +        TEST_START(); + +        list_head_init(&cl); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        items = 5; + +        if (fill_dht_with_contacts(items) < 0) { +                printf("Failed to fill bucket with contacts.\n"); +                goto fail_fill; +        } + +        len = dht_kv_contact_list(dht.id.data, &cl, dht.k); +        if (len < 0) { +                printf("Failed to get contact list.\n"); +                goto fail_fill; +        } + +        if (len != items) { +                printf("Failed to get contacts (%zu != %zu).\n", len, items); +                goto fail_contact_list; +        } + +        contact_list_destroy(&cl); + +        items = 100; + +        if (fill_dht_with_contacts(items) < 0) { +                printf("Failed to fill bucket with contacts.\n"); +                goto fail_fill; +        } + +        len = dht_kv_contact_list(dht.id.data, &cl, items); +        if (len < 0) { +                printf("Failed to get contact list.\n"); +                goto fail_fill; +        } + +        if ((size_t) len < dht.k) { +                printf("Failed to get contacts (%zu < %zu).\n", len, dht.k); +                goto fail_contact_list; +        } + +        contact_list_destroy(&cl); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_contact_list: +        contact_list_destroy(&cl); + fail_fill: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_get_values(void) +{ +        buffer_t * vals; +        ssize_t    len; +        size_t     n = sizeof(uint64_t); + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        if (fill_store_with_random_values(dht.id.data, n, 3) < 0) { +           
     printf("Failed to fill store with random values.\n"); +                goto fail_fill; +        } + +        len = dht_kv_retrieve(dht.id.data, &vals); +        if (len < 0) { +                printf("Failed to get values from store.\n"); +                goto fail_fill; +        } + +        if (len != 3) { +                printf("Failed to get %ld values (%zu).\n", 3L, len); +                goto fail_get_values; +        } + +        freebufs(vals, len); + +        if (fill_store_with_random_values(dht.id.data, n, 20) < 0) { +                printf("Failed to fill store with random values.\n"); +                goto fail_fill; +        } + +        len = dht_kv_retrieve(dht.id.data, &vals); +        if (len < 0) { +                printf("Failed to get values from store.\n"); +                goto fail_fill; +        } + +        if (len != DHT_MAX_VALS) { +                printf("Failed to get %d values.\n", DHT_MAX_VALS); +                goto fail_get_values; +        } + +        freebufs(vals, len); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_get_values: +        freebufs(vals, len); + fail_fill: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_node_req_msg(void) +{ +        dht_msg_t * msg; +        dht_msg_t * upk; +        size_t      len; +        uint8_t *   buf; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        msg = dht_kv_find_node_req_msg(dht.id.data); +        if (msg == NULL) { +                printf("Failed to get find node request message.\n"); +                goto fail_msg; +        } + +        if (msg->code != DHT_FIND_NODE_REQ) { +                printf("Wrong code in find_node_req message (%s != %s).\n", +                        dht_code_str[msg->code], +                        dht_code_str[DHT_FIND_NODE_REQ]); +                goto fail_msg; +        } + +        len = dht_msg__get_packed_size(msg); +        if (len == 0) { +                printf("Failed to get packed length of find_node_req.\n"); +                goto fail_msg; +        } + +        buf = malloc(len); +        if (buf == NULL) { +                printf("Failed to malloc find_node_req buf.\n"); +                goto fail_msg; +        } + +        if (dht_msg__pack(msg, buf) != len) { +                printf("Failed to pack find_node_req message.\n"); +                goto fail_pack; +        } + +        upk = dht_msg__unpack(NULL, len, buf); +        if (upk == NULL) { +                printf("Failed to unpack find_value_req message.\n"); +                goto fail_unpack; +        } + +        free(buf); +        dht_msg__free_unpacked(msg, NULL); +        dht_msg__free_unpacked(upk, NULL); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_unpack: +        dht_msg__free_unpacked(msg, NULL); + fail_pack: +        free(buf); + fail_msg: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_node_rsp_msg(void) +{ +        dht_contact_msg_t ** contacts; +        dht_msg_t *          msg; +        dht_msg_t *          upk; +        size_t               len; +        uint8_t *            buf; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                
goto fail_init; +        } + +        msg = dht_kv_find_node_rsp_msg(dht.id.data, 0, &contacts, 0); +        if (msg == NULL) { +                printf("Failed to get find node response message.\n"); +                goto fail_msg; +        } + +        if (msg->code != DHT_FIND_NODE_RSP) { +                printf("Wrong code in find_node_rsp message (%s != %s).\n", +                       dht_code_str[msg->code], +                       dht_code_str[DHT_FIND_NODE_RSP]); +                goto fail_msg; +        } + +        len = dht_msg__get_packed_size(msg); +        if (len == 0) { +                printf("Failed to get packed length of find_node_rsp.\n"); +                goto fail_msg; +        } + +        buf = malloc(len); +        if (buf == NULL) { +                printf("Failed to malloc find_node_rsp buf.\n"); +                goto fail_msg; +        } + +        if (dht_msg__pack(msg, buf) != len) { +                printf("Failed to pack find_node_rsp message.\n"); +                goto fail_pack; +        } + +        upk = dht_msg__unpack(NULL, len, buf); +        if (upk == NULL) { +                printf("Failed to unpack find_node_rsp message.\n"); +                goto fail_unpack; +        } + +        free(buf); +        dht_msg__free_unpacked(msg, NULL); +        dht_msg__free_unpacked(upk, NULL); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_unpack: +        dht_msg__free_unpacked(msg, NULL); + fail_pack: +        free(buf); + fail_msg: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_node_rsp_msg_contacts(void) +{ +        dht_contact_msg_t ** contacts; +        dht_msg_t *          msg; +        dht_msg_t *          upk; +        uint8_t *            buf; +        size_t               len; +        ssize_t              n; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        if (fill_dht_with_contacts(100) < 0) { +                printf("Failed to fill bucket with contacts.\n"); +                goto fail_fill; +        } + +        n = dht_kv_get_contacts(dht.id.data, &contacts); +        if (n < 0) { +                printf("Failed to get contacts.\n"); +                goto fail_fill; +        } + +        if ((size_t) n < dht.k) { +                printf("Failed to get enough contacts (%zu < %zu).\n", n, dht.k); +                goto fail_fill; +        } + +        msg = dht_kv_find_node_rsp_msg(dht.id.data, 0, &contacts, n); +        if (msg == NULL) { +                printf("Failed to build find node response message.\n"); +                goto fail_msg; +        } + +        len = dht_msg__get_packed_size(msg); +        if (len == 0) { +                printf("Failed to get packed length of find_node_rsp.\n"); +                goto fail_msg; +        } + +        buf = malloc(len); +        if (buf == NULL) { +                printf("Failed to malloc find_node_rsp buf.\n"); +                goto fail_msg; +        } + +        if (dht_msg__pack(msg, buf) != len) { +                printf("Failed to pack find_node_rsp message.\n"); +                goto fail_pack; +        } + +        upk = dht_msg__unpack(NULL, len, buf); +        if (upk == NULL) { +                printf("Failed to unpack find_node_rsp message.\n"); +                goto fail_unpack; +        } + +        free(buf); +        
dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_unpack:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+        free(buf);
+ fail_msg:
+        clear_contacts(contacts, n);
+ fail_fill:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_req_msg(void)
+{
+        dht_msg_t * msg;
+        dht_msg_t * upk;
+        size_t      len;
+        uint8_t *   buf;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        msg = dht_kv_find_value_req_msg(dht.id.data);
+        if (msg == NULL) {
+                printf("Failed to build find value request message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_FIND_VALUE_REQ) {
+                printf("Wrong code in find_value_req message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_FIND_VALUE_REQ]);
+                goto fail_msg;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_req.\n");
+                goto fail_msg;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_req buf.\n");
+                goto fail_msg;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_value_req message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_value_req message.\n");
+                goto fail_unpack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_unpack:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+        free(buf);
+ fail_msg:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg(void)
+{
+        dht_msg_t * msg;
+        dht_msg_t * upk;
+        size_t      len;
+        uint8_t *   buf;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        msg = dht_kv_find_value_rsp_msg(dht.id.data, 0, NULL, 0, NULL, 0);
+        if (msg == NULL) {
+                printf("Failed to build find value response message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_FIND_VALUE_RSP) {
+                printf("Wrong code in find_value_rsp message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_FIND_VALUE_RSP]);
+                goto fail_msg;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_rsp.\n");
+                goto fail_msg;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_rsp buf.\n");
+                goto fail_msg;
+        }
+
+        if (dht_msg__pack(msg,
buf) != len) { +                printf("Failed to pack find_value_rsp message.\n"); +                goto fail_pack; +        } + +        upk = dht_msg__unpack(NULL, len, buf); +        if (upk == NULL) { +                printf("Failed to unpack find_value_rsp message.\n"); +                goto fail_unpack; +        } + +        free(buf); +        dht_msg__free_unpacked(msg, NULL); +        dht_msg__free_unpacked(upk, NULL); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_unpack: +        dht_msg__free_unpacked(msg, NULL); + fail_pack: +        free(buf); + fail_msg: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_value_rsp_msg_contacts(void) +{ +        dht_msg_t *          msg; +        dht_msg_t *          upk; +        size_t               len; +        uint8_t *            buf; +        dht_contact_msg_t ** contacts; +        ssize_t              n; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        if (fill_dht_with_contacts(100) < 0) { +                printf("Failed to fill bucket with contacts.\n"); +                goto fail_fill; +        } + +        n = dht_kv_get_contacts(dht.id.data, &contacts); +        if (n < 0) { +                printf("Failed to get contacts.\n"); +                goto fail_fill; +        } + +        if ((size_t) n < dht.k) { +                printf("Failed to get enough contacts (%zu < %zu).\n", n, dht.k); +                goto fail_fill; +        } + +        msg = dht_kv_find_value_rsp_msg(dht.id.data, 0, &contacts, n, NULL, 0); +        if (msg == NULL) { +                printf("Failed to build find value response message.\n"); +                goto fail_msg; +        } + +        len = dht_msg__get_packed_size(msg); +        if (len == 0) { +                printf("Failed to get packed length of find_value_rsp.\n"); +                goto fail_msg; +        } + +        buf = malloc(len); +        if (buf == NULL) { +                printf("Failed to malloc find_value_rsp buf.\n"); +                goto fail_msg; +        } + +        if (dht_msg__pack(msg, buf) != len) { +                printf("Failed to pack find_value_rsp message.\n"); +                goto fail_pack; +        } + +        upk = dht_msg__unpack(NULL, len, buf); +        if (upk == NULL) { +                printf("Failed to unpack find_value_rsp message.\n"); +                goto fail_unpack; +        } + +        free(buf); +        dht_msg__free_unpacked(msg, NULL); +        dht_msg__free_unpacked(upk, NULL); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_unpack: +        dht_msg__free_unpacked(msg, NULL); + fail_pack: +        free(buf); + fail_msg: +        clear_contacts(contacts, n); + fail_fill: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_value_rsp_msg_values(void) +{ +        dht_msg_t * msg; +        dht_msg_t * upk; +        size_t      len; +        uint8_t *   buf; +        buffer_t *  values; +        size_t      i; +        uint64_t    ck; + +        TEST_START(); + +        ck = generate_cookie(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        values = malloc(sizeof(*values) * 8); +     
   if (values == NULL) {
+                printf("Failed to malloc values.\n");
+                goto fail_values;
+        }
+
+        for (i = 0; i < 8; i++) {
+                if (random_value(&values[i]) < 0) {
+                        printf("Failed to create random value.\n");
+                        goto fail_fill;
+                }
+        }
+
+        msg = dht_kv_find_value_rsp_msg(dht.id.data, ck, NULL, 0, &values, 8);
+        if (msg == NULL) {
+                printf("Failed to build find value response message.\n");
+                goto fail_msg;
+        }
+
+        values = NULL; /* msg owns the values now */
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_rsp.\n");
+                goto fail_msg;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_rsp buf.\n");
+                goto fail_msg;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_value_rsp message.\n");
+                goto fail_unpack;
+        }
+
+        if (upk->code != DHT_FIND_VALUE_RSP) {
+                printf("Wrong code in find_value_rsp message (%s != %s).\n",
+                       dht_code_str[upk->code],
+                       dht_code_str[DHT_FIND_VALUE_RSP]);
+                goto fail_unpack;
+        }
+
+        if (upk->val == NULL) {
+                printf("No values in find_value_rsp message.\n");
+                goto fail_unpack;
+        }
+
+        if (upk->val->n_values != 8) {
+                printf("Not enough values in find_value_rsp (%zu != %lu).\n",
+                       upk->val->n_values, 8UL);
+                goto fail_unpack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        free(values);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_unpack:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_pack:
+        free(buf);
+ fail_msg:
+ fail_fill:
+        if (values != NULL) { /* NULL once ownership moved to msg */
+                while (i-- > 0)
+                        freebuf(values[i]);
+                free(values);
+        }
+ fail_values:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
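+/*
+ * Build a STORE message for the test key/value pair, check its
+ * opcode, validate it, and verify that it packs cleanly.
+ */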
+static int test_dht_kv_store_msg(void)
+{
+        dht_msg_t *     msg;
+        size_t          len;
+        uint8_t *       buf;
+        struct timespec now;
+
+        TEST_START();
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        msg = dht_kv_store_msg(dht.id.data, test_val, now.tv_sec + 10);
+        if (msg == NULL) {
+                printf("Failed to get store message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_STORE) {
+                printf("Wrong code in store message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_STORE]);
+                goto fail_store_msg;
+        }
+
+        if (dht_kv_validate_msg(msg) < 0) {
+                printf("Failed to validate store message.\n");
+                goto fail_store_msg;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed msg length.\n");
+                goto fail_store_msg; /* msg must be freed on this path */
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc store msg buf.\n");
+                goto fail_store_msg;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack store message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+
+        dht_msg__free_unpacked(msg, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_store_msg:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_query_contacts_req_rsp(void)
+{
+        dht_msg_t *          req;
+        dht_msg_t *          rsp;
+        dht_contact_msg_t ** contacts;
+        size_t               len = 2;
+        uint8_t *            key;
+
+        TEST_START();
+
+        sink_init();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(1) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_prep;
+        }
+
+        key = generate_id();
+        if (key == NULL) {
+                printf("Failed to generate key.\n");
+                goto fail_prep;
+        }
+
+        if (dht_kv_query_contacts(key, NULL) < 0) {
+                printf("Failed to query contacts.\n");
+                goto fail_query;
+        }
+
+        req = sink_read();
+        if (req == NULL) {
+                printf("Failed to read request from sink.\n");
+                goto fail_query;
+        }
+
+        if (dht_kv_validate_msg(req) < 0) {
+                printf("Failed to validate find node req.\n");
+                goto fail_val_req;
+        }
+
+        if (random_contact_list(&contacts, len) < 0) {
+                printf("Failed to create random contact.\n");
+                goto fail_val_req;
+        }
+
+        rsp = dht_kv_find_node_rsp_msg(key, req->find->cookie, &contacts, len);
+        if (rsp == NULL) {
+                printf("Failed to create find node response message.\n");
+                goto fail_rsp;
+        }
+
+        memcpy(rsp->src->id.data, dht.id.data, dht.id.len);
+        rsp->src->addr = generate_cookie();
+
+        if (dht_kv_validate_msg(rsp) < 0) {
+                printf("Failed to validate find node response message.\n");
+                goto fail_val_rsp;
+        }
+
+        do_dht_kv_find_node_rsp(rsp->node);
+
+        free(contacts); /* the contact entries are owned by rsp now */
+
+        dht_msg__free_unpacked(rsp, NULL);
+
+        free(key);
+
+        dht_msg__free_unpacked(req, NULL);
+
+        sink_fini();
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_val_rsp:
+        dht_msg__free_unpacked(rsp, NULL);
+ fail_rsp:
+        while (len-- > 0)
+                dht_contact_msg__free_unpacked(contacts[len], NULL);
+        free(contacts);
+ fail_val_req:
+        dht_msg__free_unpacked(req, NULL);
+ fail_query:
+        free(key);
+ fail_prep:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
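+/* Create and destroy a tracked request for our own id. */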
+static int test_dht_req_create_destroy(void)
+{
+        struct dht_req * req;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        req = dht_req_create(dht.id.data);
+        if (req == NULL) {
+                printf("Failed to create dht request.\n");
+                goto fail_req;
+        }
+
+        dht_req_destroy(req);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_req:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_unreg(void)
+{
+        TEST_START();
+
+        sink_init();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (dht_reg(dht.id.data) < 0) {
+                printf("Failed to register own id.\n");
+                goto fail_reg;
+        }
+
+        if (sink.len != 0) {
+                printf("Packet sent without contacts!\n");
+                goto fail_msg;
+        }
+
+        if (dht_unreg(dht.id.data) < 0) {
+                printf("Failed to unregister own id.\n");
+                goto fail_msg;
+        }
+
+        dht_fini();
+
+        sink_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_msg:
+        dht_unreg(dht.id.data);
+ fail_reg:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_unreg_contacts(void)
+{
+        dht_msg_t * msg;
+
+        TEST_START();
+
+        sink_init();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(4) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_reg;
+        }
+
+        if (dht_reg(dht.id.data) < 0) {
+                printf("Failed to register own id.\n");
+                goto fail_reg;
+        }
+
+        if (sink.len != dht.alpha) {
+                printf("Wrong number of store messages sent.\n");
+                goto fail_msg;
+        }
+
+        msg = sink_read();
+        if (msg == NULL) {
+                printf("Failed to read message from sink.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_STORE) {
+                printf("Wrong code in dht reg message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_STORE]);
+                goto fail_validation;
+        }
+
+        if (dht_kv_validate_msg(msg) < 0) {
+                printf("Failed to validate dht message.\n");
+                goto fail_validation;
+        }
+
+        if (dht_unreg(dht.id.data) < 0) {
+                printf("Failed to unregister own id.\n");
+                goto fail_validation;
+        }
+
+        dht_msg__free_unpacked(msg, NULL);
+
+        dht_fini();
+
+        sink_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_validation:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        sink_clear();
+        dht_unreg(dht.id.data);
+ fail_reg:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
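+/*
+ * Register our own id, then resolve it locally: the lookup must not
+ * return our own address, but must return an address that was
+ * explicitly stored for that id.
+ */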
+static int test_dht_reg_query_local(void)
+{
+        struct timespec now;
+        buffer_t        test_addr;
+
+        TEST_START();
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        if (addr_to_buf(1234321, &test_addr) < 0) {
+                printf("Failed to convert test address to buffer.\n");
+                goto fail_buf;
+        }
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (dht_reg(dht.id.data) < 0) {
+                printf("Failed to register own id.\n");
+                goto fail_reg;
+        }
+
+        if (dht_query(dht.id.data) == dht.addr) {
+                printf("Query for own id returned own address.\n");
+                goto fail_get;
+        }
+
+        if (dht_kv_store(dht.id.data, test_addr, now.tv_sec + 5) < 0) {
+                printf("Failed to publish value.\n");
+                goto fail_get;
+        }
+
+        if (dht_query(dht.id.data) != 1234321) {
+                printf("Failed to return remote addr.\n");
+                goto fail_get;
+        }
+
+        if (dht_unreg(dht.id.data) < 0) {
+                printf("Failed to unregister own id.\n");
+                goto fail_get;
+        }
+
+        freebuf(test_addr);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_get:
+        dht_unreg(dht.id.data);
+ fail_reg:
+        dht_fini();
+ fail_init:
+        freebuf(test_addr);
+ fail_buf:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_query(void)
+{
+        uint8_t *             key;
+        struct dir_dht_config cfg;
+
+        TEST_START();
+
+        sink_init();
+
+        cfg = test_dht_config;
+        cfg.peer = generate_cookie();
+
+        if (dht_init(&cfg)) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        key = generate_id();
+        if (key == NULL) {
+                printf("Failed to generate key.\n");
+                goto fail_key;
+        }
+
+        if (dht_query(key) != INVALID_ADDR) {
+                printf("Got an address for a key without contacts.\n");
+                goto fail_get;
+        }
+
+        if (sink.len != 0) {
+                printf("Packet sent without contacts!\n");
+                goto fail_test;
+        }
+
+        free(key);
+
+        dht_fini();
+
+        sink_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_test:
+        sink_clear();
+ fail_get:
+        free(key);
+ fail_key:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
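+/*
+ * With contacts in the table, a query for a random key must still
+ * fail locally, but a FIND_VALUE request must go out to the network.
+ */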
printf("Failed to read message.!\n"); +                goto fail_read; +        } + +        if (dht_kv_validate_msg(msg) < 0) { +                printf("Failed to validate dht message.\n"); +                goto fail_msg; +        } + +        if (msg->code != DHT_FIND_VALUE_REQ) { +                printf("Failed to validate dht message.\n"); +                goto fail_msg; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        free(key); + +        sink_clear(); + +        dht_fini(); + +        sink_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_read: +        sink_clear(); + fail_query: +        free(key); + fail_contacts: +        dht_fini(); + fail_init: +        sink_fini(); +        return TEST_RC_FAIL; +} + +int dht_test(int     argc, +             char ** argv) +{ +        int rc = 0; + +        (void) argc; +        (void) argv; + +        rc |= test_dht_init_fini(); +        rc |= test_dht_start_stop(); +        rc |= test_val_entry_create_destroy(); +        rc |= test_dht_entry_create_destroy(); +        rc |= test_dht_entry_update_get_val(); +        rc |= test_dht_entry_update_get_lval(); +        rc |= test_dht_kv_contact_create_destroy(); +        rc |= test_dht_kv_contact_list(); +        rc |= test_dht_kv_update_bucket(); +        rc |= test_dht_kv_get_values(); +        rc |= test_dht_kv_find_node_req_msg(); +        rc |= test_dht_kv_find_node_rsp_msg(); +        rc |= test_dht_kv_find_node_rsp_msg_contacts(); +        rc |= test_dht_kv_query_contacts_req_rsp(); +        rc |= test_dht_kv_find_value_req_msg(); +        rc |= test_dht_kv_find_value_rsp_msg(); +        rc |= test_dht_kv_find_value_rsp_msg_contacts(); +        rc |= test_dht_kv_find_value_rsp_msg_values(); +        rc |= test_dht_kv_store_msg(); +        rc |= test_dht_req_create_destroy(); +        rc |= test_dht_reg_unreg(); +        rc |= test_dht_reg_unreg_contacts(); +        rc |= test_dht_reg_query_local(); +        rc |= test_dht_query(); +        rc |= test_dht_query_contacts(); + +        return rc; +} diff --git a/src/ipcpd/unicast/dt.c b/src/ipcpd/unicast/dt.c index 0f504daa..e2679ffe 100644 --- a/src/ipcpd/unicast/dt.c +++ b/src/ipcpd/unicast/dt.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Data Transfer Component   * @@ -41,6 +41,7 @@  #include <ouroboros/fccntl.h>  #endif +#include "addr-auth.h"  #include "common/comp.h"  #include "common/connmgr.h"  #include "ca.h" @@ -59,7 +60,7 @@  #include <assert.h>  #define QOS_BLOCK_LEN   672 -#define RIB_FILE_STRLEN (189 + QOS_BLOCK_LEN * QOS_CUBE_MAX) +#define RIB_FILE_STRLEN (169 + RIB_TM_STRLEN + QOS_BLOCK_LEN * QOS_CUBE_MAX)  #define RIB_NAME_STRLEN 256  #ifndef CLOCK_REALTIME_COARSE @@ -144,6 +145,8 @@ static void dt_pci_shrink(struct shm_du_buff * sdb)  struct {          struct psched *    psched; +        uint64_t           addr; +          struct pff *       pff[QOS_CUBE_MAX];          struct routing_i * routing[QOS_CUBE_MAX];  #ifdef IPCP_FLOW_STATS @@ -186,7 +189,7 @@ static int dt_rib_read(const char * path,          char        str[QOS_BLOCK_LEN + 1];          char        addrstr[20];          char *      entry; -        char        tmstr[20]; +        char        tmstr[RIB_TM_STRLEN];          size_t      rxqlen = 0;          size_t      txqlen = 0;          struct tm * tm; @@ -209,13 +212,13 @@ static int dt_rib_read(const char * path,                  return 0;          
} -        if (dt.stat[fd].addr == ipcpi.dt_addr) +        if (dt.stat[fd].addr == dt.addr)                  sprintf(addrstr, "%s", dt.comps[fd].name);          else -                sprintf(addrstr, "%" PRIu64, dt.stat[fd].addr); +                sprintf(addrstr, ADDR_FMT32, ADDR_VAL32(&dt.stat[fd].addr)); -        tm = localtime(&dt.stat[fd].stamp); -        strftime(tmstr, sizeof(tmstr), "%F %T", tm); +        tm = gmtime(&dt.stat[fd].stamp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);          if (fd >= PROG_RES_FDS) {                  fccntl(fd, FLOWGRXQLEN, &rxqlen); @@ -223,11 +226,11 @@ static int dt_rib_read(const char * path,          }          sprintf(buf, -                "Flow established at:      %20s\n" +                "Flow established at:      %.*s\n"                  "Endpoint address:         %20s\n"                  "Queued packets (rx):      %20zu\n"                  "Queued packets (tx):      %20zu\n\n", -                tmstr, addrstr, rxqlen, txqlen); +                RIB_TM_STRLEN - 1, tmstr, addrstr, rxqlen, txqlen);          for (i = 0; i < QOS_CUBE_MAX; ++i) {                  sprintf(str,                          "Qos cube %3d:\n" @@ -285,48 +288,45 @@ static int dt_rib_readdir(char *** buf)          pthread_rwlock_rdlock(&dt.lock);          if (dt.n_flows < 1) { -                pthread_rwlock_unlock(&dt.lock); -                return 0; +                *buf = NULL; +                goto no_flows;          }          *buf = malloc(sizeof(**buf) * dt.n_flows); -        if (*buf == NULL) { -                pthread_rwlock_unlock(&dt.lock); -                return -ENOMEM; -        } +        if (*buf == NULL) +                goto fail_entries;          for (i = 0; i < PROG_MAX_FLOWS; ++i) {                  pthread_mutex_lock(&dt.stat[i].lock);                  if (dt.stat[i].stamp == 0) {                          pthread_mutex_unlock(&dt.stat[i].lock); -                        /* Optimization: skip unused res_fds. 
*/ -                        if (i < PROG_RES_FDS) -                                i = PROG_RES_FDS; -                        continue; +                        break;                  } +                pthread_mutex_unlock(&dt.stat[i].lock); +                  sprintf(entry, "%zu", i);                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        while (idx-- > 0) -                                free((*buf)[idx]); -                        free(buf); -                        pthread_mutex_unlock(&dt.stat[i].lock); -                        pthread_rwlock_unlock(&dt.lock); -                        return -ENOMEM; -                } +                if ((*buf)[idx] == NULL) +                        goto fail_entry;                  strcpy((*buf)[idx++], entry); -                pthread_mutex_unlock(&dt.stat[i].lock);          } -        assert((size_t) idx == dt.n_flows); - + no_flows:          pthread_rwlock_unlock(&dt.lock);          return idx; + + fail_entry: +        while (idx-- > 0) +                free((*buf)[idx]); +        free(*buf); + fail_entries: +        pthread_rwlock_unlock(&dt.lock); +        return -ENOMEM;  #else          (void) buf;          return 0; @@ -399,6 +399,7 @@ static void handle_event(void *       self,                           const void * o)  {          struct conn * c; +        int           fd;          (void) self; @@ -406,19 +407,20 @@ static void handle_event(void *       self,          switch (event) {          case NOTIFY_DT_CONN_ADD: +                fd = c->flow_info.fd;  #ifdef IPCP_FLOW_STATS -                stat_used(c->flow_info.fd, c->conn_info.addr); +                stat_used(fd, c->conn_info.addr);  #endif -                psched_add(dt.psched, c->flow_info.fd); -                log_dbg("Added fd %d to packet scheduler.", c->flow_info.fd); +                psched_add(dt.psched, fd); +                log_dbg("Added fd %d to packet scheduler.", fd);                  break;          case NOTIFY_DT_CONN_DEL: +                fd = c->flow_info.fd;  #ifdef IPCP_FLOW_STATS -                stat_used(c->flow_info.fd, INVALID_ADDR); +                stat_used(fd, INVALID_ADDR);  #endif -                psched_del(dt.psched, c->flow_info.fd); -                log_dbg("Removed fd %d from " -                        "packet scheduler.", c->flow_info.fd); +                psched_del(dt.psched, fd); +                log_dbg("Removed fd %d from packet scheduler.", fd);                  break;          default:                  break; @@ -435,7 +437,7 @@ static void packet_handler(int                  fd,          uint8_t *     head;          size_t        len; -        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +        len = shm_du_buff_len(sdb);  #ifndef IPCP_FLOW_STATS          (void)        fd; @@ -452,7 +454,7 @@ static void packet_handler(int                  fd,          head = shm_du_buff_head(sdb);          dt_pci_des(head, &dt_pci); -        if (dt_pci.dst_addr != ipcpi.dt_addr) { +        if (dt_pci.dst_addr != dt.addr) {                  if (dt_pci.ttl == 0) {                          log_dbg("TTL was zero.");                          ipcp_sdb_release(sdb); @@ -563,33 +565,36 @@ static void * dt_conn_handle(void * o)          return 0;  } -int dt_init(enum pol_routing pr, -            uint8_t          addr_size, -            uint8_t          eid_size, -            uint8_t          max_ttl) +int dt_init(struct dt_config cfg)  {          int              i;          int 
             j;          char             dtstr[RIB_NAME_STRLEN + 1]; -        int              pp; +        enum pol_pff     pp;          struct conn_info info;          memset(&info, 0, sizeof(info)); +        dt.addr = addr_auth_address(); +        if (dt.addr == INVALID_ADDR) { +                log_err("Failed to get address"); +                return -1; +        } +          strcpy(info.comp_name, DT_COMP);          strcpy(info.protocol, DT_PROTO);          info.pref_version = 1;          info.pref_syntax  = PROTO_FIXED; -        info.addr         = ipcpi.dt_addr; +        info.addr         = dt.addr; -        if (eid_size != 8) { /* only support 64 bits from now */ +        if (cfg.eid_size != 8) { /* only support 64 bits from now */                  log_warn("Invalid EID size. Only 64 bit is supported."); -                eid_size = 8; +                cfg.eid_size = 8;          } -        dt_pci_info.addr_size = addr_size; -        dt_pci_info.eid_size  = eid_size; -        dt_pci_info.max_ttl   = max_ttl; +        dt_pci_info.addr_size = cfg.addr_size; +        dt_pci_info.eid_size  = cfg.eid_size; +        dt_pci_info.max_ttl   = cfg.max_ttl;          dt_pci_info.qc_o      = dt_pci_info.addr_size;          dt_pci_info.ttl_o     = dt_pci_info.qc_o + QOS_LEN; @@ -597,18 +602,12 @@ int dt_init(enum pol_routing pr,          dt_pci_info.eid_o     = dt_pci_info.ecn_o + ECN_LEN;          dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size; -        if (notifier_reg(handle_event, NULL)) { -                log_err("Failed to register with notifier."); -                goto fail_notifier_reg; -        } -          if (connmgr_comp_init(COMPID_DT, &info)) {                  log_err("Failed to register with connmgr.");                  goto fail_connmgr_comp_init;          } -        pp = routing_init(pr); -        if (pp < 0) { +        if (routing_init(&cfg.routing, &pp) < 0) {                  log_err("Failed to init routing.");                  goto fail_routing;          } @@ -645,6 +644,7 @@ int dt_init(enum pol_routing pr,          for (i = 0; i < PROG_MAX_FLOWS; ++i)                  if (pthread_mutex_init(&dt.stat[i].lock, NULL)) { +                        log_err("Failed to init mutex for flow %d.", i);                          for (j = 0; j < i; ++j)                                  pthread_mutex_destroy(&dt.stat[j].lock);                          goto fail_stat_lock; @@ -652,9 +652,11 @@ int dt_init(enum pol_routing pr,          dt.n_flows = 0;  #endif -        sprintf(dtstr, "%s.%" PRIu64, DT, ipcpi.dt_addr); -        if (rib_reg(dtstr, &r_ops)) +        sprintf(dtstr, "%s." 
ADDR_FMT32, DT, ADDR_VAL32(&dt.addr));
+        if (rib_reg(dtstr, &r_ops)) {
+                log_err("Failed to register RIB.");
                 goto fail_rib_reg;
+        }
 
         return 0;
 
@@ -678,8 +680,6 @@
  fail_routing:
         connmgr_comp_fini(COMPID_DT);
  fail_connmgr_comp_init:
-        notifier_unreg(&handle_event);
- fail_notifier_reg:
         return -1;
 }
 
@@ -688,7 +688,7 @@ void dt_fini(void)
         char dtstr[RIB_NAME_STRLEN + 1];
         int i;
 
-        sprintf(dtstr, "%s.%" PRIu64, DT, ipcpi.dt_addr);
+        sprintf(dtstr, "%s." ADDR_FMT32, DT, ADDR_VAL32(&dt.addr));
         rib_unreg(dtstr);
 
 #ifdef IPCP_FLOW_STATS
         for (i = 0; i < PROG_MAX_FLOWS; ++i)
@@ -707,31 +707,53 @@
         routing_fini();
 
         connmgr_comp_fini(COMPID_DT);
-
-        notifier_unreg(&handle_event);
 }
 
 int dt_start(void)
 {
-        dt.psched = psched_create(packet_handler);
+        dt.psched = psched_create(packet_handler, ipcp_flow_read);
         if (dt.psched == NULL) {
                 log_err("Failed to create N-1 packet scheduler.");
-                return -1;
+                goto fail_psched;
+        }
+
+        if (notifier_reg(handle_event, NULL)) {
+                log_err("Failed to register with notifier.");
+                goto fail_notifier_reg;
         }
 
         if (pthread_create(&dt.listener, NULL, dt_conn_handle, NULL)) {
                 log_err("Failed to create listener thread.");
-                psched_destroy(dt.psched);
-                return -1;
+                goto fail_listener;
+        }
+
+        if (routing_start() < 0) {
+                log_err("Failed to start routing.");
+                goto fail_routing;
         }
 
         return 0;
+
+ fail_routing:
+        pthread_cancel(dt.listener);
+        pthread_join(dt.listener, NULL);
+ fail_listener:
+        notifier_unreg(&handle_event);
+ fail_notifier_reg:
+        psched_destroy(dt.psched);
+ fail_psched:
+        return -1;
 }
 
 void dt_stop(void)
 {
+        routing_stop();
+
         pthread_cancel(dt.listener);
         pthread_join(dt.listener, NULL);
+
+        notifier_unreg(&handle_event);
+
         psched_destroy(dt.psched);
 }
 
@@ -741,13 +763,13 @@ int dt_reg_comp(void * comp,
 {
         int eid;
 
-        assert(func);
+        assert(func != NULL);
 
         pthread_rwlock_wrlock(&dt.lock);
 
         eid = bmp_allocate(dt.res_fds);
         if (!bmp_is_id_valid(dt.res_fds, eid)) {
-                log_warn("Reserved EIDs depleted.");
+                log_err("Cannot allocate EID.");
                 pthread_rwlock_unlock(&dt.lock);
                 return -EBADF;
         }
@@ -762,11 +784,28 @@
         pthread_rwlock_unlock(&dt.lock);
 
 #ifdef IPCP_FLOW_STATS
-        stat_used(eid, ipcpi.dt_addr);
+        stat_used(eid, dt.addr);
#endif
         return eid;
 }
 
+void dt_unreg_comp(int eid)
+{
+        assert(eid >= 0 && eid < PROG_RES_FDS);
+
+        pthread_rwlock_wrlock(&dt.lock);
+
+        assert(dt.comps[eid].post_packet != NULL);
+
+        dt.comps[eid].post_packet = NULL;
+        dt.comps[eid].comp        = NULL;
+        dt.comps[eid].name        = NULL;
+
+        pthread_rwlock_unlock(&dt.lock);
+}
+
 int dt_write_packet(uint64_t             dst_addr,
                     qoscube_t            qc,
                     uint64_t             eid,
@@ -779,9 +818,9 @@ int dt_write_packet(uint64_t             dst_addr,
         size_t        len;
 
         assert(sdb);
-        assert(dst_addr != ipcpi.dt_addr);
+        assert(dst_addr != dt.addr);
 
-        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+        len = shm_du_buff_len(sdb);
 
 #ifdef IPCP_FLOW_STATS
         if (eid < PROG_RES_FDS) {
@@ -795,7 +834,8 @@
 #endif
         fd = pff_nhop(dt.pff[qc], dst_addr);
         if (fd < 0) {
-                log_dbg("Could not get nhop for addr %" PRIu64 ".", dst_addr);
+                log_dbg("Could not get nhop for " ADDR_FMT32 ".",
+                        ADDR_VAL32(&dst_addr));
 #ifdef IPCP_FLOW_STATS
                 if (eid < PROG_RES_FDS) {
                         pthread_mutex_lock(&dt.stat[eid].lock);
@@ -815,7 +855,7 @@
                 goto fail_write;
         }
 
-        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+        len = shm_du_buff_len(sdb);
 
         dt_pci.dst_addr = dst_addr;
         dt_pci.qc       = qc;
diff --git a/src/ipcpd/unicast/dt.h b/src/ipcpd/unicast/dt.h
index e1abbe28..2c5b7978 100644
--- a/src/ipcpd/unicast/dt.h
+++ b/src/ipcpd/unicast/dt.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Data Transfer component
  *
@@ -31,11 +31,7 @@
 #define DT_PROTO     "dtp"
 #define INVALID_ADDR 0
 
-int  dt_init(enum pol_routing pr,
-             uint8_t          addr_size,
-             uint8_t          eid_size,
-             uint8_t          max_ttl
-);
+int  dt_init(struct dt_config cfg);
 
 void dt_fini(void);
 
@@ -43,9 +39,11 @@
 int  dt_start(void);
 
 void dt_stop(void);
 
-int  dt_reg_comp(void * comp,
+int  dt_reg_comp(void *  comp,
                  void (* func)(void * comp, struct shm_du_buff * sdb),
-                 char * name);
+                 char *  name);
+
+void dt_unreg_comp(int eid);
 
 int  dt_write_packet(uint64_t             dst_addr,
                      qoscube_t            qc,
diff --git a/src/ipcpd/unicast/enroll.c b/src/ipcpd/unicast/enroll.c
deleted file mode 100644
index 500a3895..00000000
--- a/src/ipcpd/unicast/enroll.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define BUILD_IPCP_UNICAST
-
-#include "common/enroll.c"
diff --git a/src/ipcpd/unicast/fa.c b/src/ipcpd/unicast/fa.c
index 6e6d52f0..ac168bd9 100644
--- a/src/ipcpd/unicast/fa.c
+++ b/src/ipcpd/unicast/fa.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Flow allocator of the IPC Process
  *
@@ -31,6 +31,7 @@
 #define FA               "flow-allocator"
 #define OUROBOROS_PREFIX FA
 
+#include <ouroboros/endian.h>
 #include <ouroboros/logs.h>
 #include <ouroboros/fqueue.h>
 #include <ouroboros/errno.h>
@@ -40,6 +41,7 @@
 #include <ouroboros/random.h>
 #include <ouroboros/pthread.h>
 
+#include "addr-auth.h"
 #include "dir.h"
 #include "fa.h"
 #include "psched.h"
@@ -55,7 +57,7 @@
 #define CLOCK_REALTIME_COARSE CLOCK_REALTIME
 #endif
 
-#define TIMEOUT 10000 /* nanoseconds */
+#define TIMEOUT (10 * MILLION) /* nanoseconds */
 
 #define FLOW_REQ    0
 #define FLOW_REPLY  1
 
@@ -68,18 +70,17 @@ struct fa_msg {
         uint64_t s_addr;
         uint64_t r_eid;
         uint64_t s_eid;
-        uint8_t  code;
-        int8_t   response;
-        uint16_t ece;
-        /* QoS parameters from spec, aligned */
-        uint8_t  availability;
-        uint8_t  in_order;
-        uint32_t delay;
         uint64_t bandwidth;
+        int32_t  response;
+        uint32_t delay;
         uint32_t loss;
         uint32_t
@@ -133,7 +134,7 @@ static int fa_rib_read(const char * path,
         char             r_addrstr[21];
         char             s_eidstr[21];
         char             r_eidstr[21];
-        char             tmstr[20];
+        char             tmstr[RIB_TM_STRLEN];
         char             castr[1024];
         char *           entry;
         struct tm *      tm;
@@ -143,7 +144,7 @@ static int fa_rib_read(const char * path,
 
         fd = atoi(entry);
 
-        if (fd < 0 || fd > PROG_MAX_FLOWS)
+        if (fd < 0 || fd >= PROG_MAX_FLOWS)
                 return -1;
 
         if (len < 1536)
@@ -164,8 +165,8 @@ static int fa_rib_read(const char * path,
         sprintf(s_eidstr, "%" PRIu64, flow->s_eid);
         sprintf(r_eidstr, "%" PRIu64, flow->r_eid);
 
-        tm = localtime(&flow->stamp);
-        strftime(tmstr, sizeof(tmstr), "%F %T", tm);
+        tm = gmtime(&flow->stamp);
+        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
 
         ca_print_stats(flow->ctx, castr, 1024);
 
@@ -215,15 +216,13 @@ static int fa_rib_readdir(char *** buf)
         pthread_rwlock_rdlock(&fa.flows_lock);
 
         if (fa.n_flows < 1) {
-                pthread_rwlock_unlock(&fa.flows_lock);
-                return 0;
+                *buf = NULL;
+                goto no_flows;
         }
 
         *buf = malloc(sizeof(**buf) * fa.n_flows);
-        if (*buf == NULL) {
-                pthread_rwlock_unlock(&fa.flows_lock);
-                return -ENOMEM;
-        }
+        if (*buf == NULL)
+                goto fail_entries;
 
         for (i = 0; i < PROG_MAX_FLOWS; ++i) {
                 struct fa_flow * flow;
@@ -235,22 +234,25 @@ static int fa_rib_readdir(char *** buf)
                 sprintf(entry, "%zu", i);
 
                 (*buf)[idx] = malloc(strlen(entry) + 1);
-                if ((*buf)[idx] == NULL) {
-                        while (idx-- > 0)
-                                free((*buf)[idx]);
-                        free(buf);
-                        pthread_rwlock_unlock(&fa.flows_lock);
-                        return -ENOMEM;
-                }
+                if ((*buf)[idx] == NULL)
+                        goto fail_entry;
 
                 strcpy((*buf)[idx++], entry);
         }
 
         assert((size_t) idx == fa.n_flows);
-
+ no_flows:
         pthread_rwlock_unlock(&fa.flows_lock);
 
         return idx;
+
+ fail_entry:
+        while (idx-- > 0)
+                free((*buf)[idx]);
+        free(*buf);
+ fail_entries:
+        pthread_rwlock_unlock(&fa.flows_lock);
+        return -ENOMEM;
 #else
         (void) buf;
         return 0;
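Besides funnelling every error path of fa_rib_readdir() through labels, this hunk fixes the old rollback, which freed buf (the address of the caller's pointer) instead of *buf (the allocated array). The general shape, building an array of allocations and unwinding a partial result on failure, deserves spelling out; a sketch with hypothetical names:

#include <stdlib.h>
#include <string.h>

/* Allocate n copies of src; on any failure free what was built so far. */
static char ** alloc_entries(const char * src, size_t n)
{
        char ** buf;
        size_t  i;

        buf = malloc(n * sizeof(*buf));
        if (buf == NULL)
                return NULL;

        for (i = 0; i < n; ++i) {
                buf[i] = strdup(src);
                if (buf[i] == NULL)
                        goto fail_entry;
        }

        return buf;

 fail_entry:
        while (i-- > 0)          /* unwind only the completed entries */
                free(buf[i]);
        free(buf);               /* the array itself, not its address */
        return NULL;
}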
packet."); +                log_dbg("Failed to forward packet.");  #ifdef IPCP_FLOW_STATS                  pthread_rwlock_wrlock(&fa.flows_lock);                  ++flow->p_snd_f; @@ -435,167 +437,194 @@ static void fa_post_packet(void *               comp,          pthread_mutex_unlock(&fa.mtx);  } -static void * fa_handle_packet(void * o) +static size_t fa_wait_for_fa_msg(struct fa_msg * msg)  { -        struct timespec ts  = {0, TIMEOUT * 1000}; +        struct cmd * cmd; +        size_t       len; -        (void) o; +        pthread_mutex_lock(&fa.mtx); -        while (true) { -                struct timespec  abstime; -                int              fd; -                uint8_t          buf[MSGBUFSZ]; -                struct fa_msg *  msg; -                qosspec_t        qs; -                struct cmd *     cmd; -                size_t           len; -                size_t           msg_len; -                struct fa_flow * flow; +        pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx); -                pthread_mutex_lock(&fa.mtx); +        while (list_is_empty(&fa.cmds)) +                pthread_cond_wait(&fa.cond, &fa.mtx); -                pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx); +        cmd = list_last_entry(&fa.cmds, struct cmd, next); +        list_del(&cmd->next); -                while (list_is_empty(&fa.cmds)) -                        pthread_cond_wait(&fa.cond, &fa.mtx); +        pthread_cleanup_pop(true); -                cmd = list_last_entry(&fa.cmds, struct cmd, next); -                list_del(&cmd->next); +        len = shm_du_buff_len(cmd->sdb); +        if (len > MSGBUFSZ || len < sizeof(*msg)) { +                log_warn("Invalid flow allocation message (len: %zd).", len); +                free(cmd); +                return 0; /* No valid message */ +        } -                pthread_cleanup_pop(true); +        memcpy(msg, shm_du_buff_head(cmd->sdb), len); -                len = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb); +        ipcp_sdb_release(cmd->sdb); -                if (len > MSGBUFSZ) { -                        log_err("Message over buffer size."); -                        free(cmd); -                        continue; -                } +        free(cmd); -                msg = (struct fa_msg *) buf; +        return len; +} -                /* Depending on the message call the function in ipcp-dev.h */ +static int fa_handle_flow_req(struct fa_msg * msg, +                              size_t          len) +{ +        size_t           msg_len; +        int              fd; +        qosspec_t        qs; +        struct fa_flow * flow; +        uint8_t *        dst; +        buffer_t         data;  /* Piggbacked data on flow alloc request. 
@@ -435,167 +437,194 @@ static void fa_post_packet(void *               comp,
         pthread_mutex_unlock(&fa.mtx);
 }
 
-static void * fa_handle_packet(void * o)
+static size_t fa_wait_for_fa_msg(struct fa_msg * msg)
 {
-        struct timespec ts  = {0, TIMEOUT * 1000};
+        struct cmd * cmd;
+        size_t       len;
 
-        (void) o;
+        pthread_mutex_lock(&fa.mtx);
 
-        while (true) {
-                struct timespec  abstime;
-                int              fd;
-                uint8_t          buf[MSGBUFSZ];
-                struct fa_msg *  msg;
-                qosspec_t        qs;
-                struct cmd *     cmd;
-                size_t           len;
-                size_t           msg_len;
-                struct fa_flow * flow;
+        pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx);
 
-                pthread_mutex_lock(&fa.mtx);
+        while (list_is_empty(&fa.cmds))
+                pthread_cond_wait(&fa.cond, &fa.mtx);
 
-                pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx);
+        cmd = list_last_entry(&fa.cmds, struct cmd, next);
+        list_del(&cmd->next);
 
-                while (list_is_empty(&fa.cmds))
-                        pthread_cond_wait(&fa.cond, &fa.mtx);
+        pthread_cleanup_pop(true);
 
-                cmd = list_last_entry(&fa.cmds, struct cmd, next);
-                list_del(&cmd->next);
+        len = shm_du_buff_len(cmd->sdb);
+        if (len > MSGBUFSZ || len < sizeof(*msg)) {
+                log_warn("Invalid flow allocation message (len: %zd).", len);
+                free(cmd);
+                return 0; /* No valid message */
+        }
 
-                pthread_cleanup_pop(true);
+        memcpy(msg, shm_du_buff_head(cmd->sdb), len);
 
-                len = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb);
+        ipcp_sdb_release(cmd->sdb);
 
-                if (len > MSGBUFSZ) {
-                        log_err("Message over buffer size.");
-                        free(cmd);
-                        continue;
-                }
+        free(cmd);
 
-                msg = (struct fa_msg *) buf;
+        return len;
+}
 
-                /* Depending on the message call the function in ipcp-dev.h */
+static int fa_handle_flow_req(struct fa_msg * msg,
+                              size_t          len)
+{
+        size_t           msg_len;
+        int              fd;
+        qosspec_t        qs;
+        struct fa_flow * flow;
+        uint8_t *        dst;
+        buffer_t         data;  /* Piggybacked data on flow alloc request. */
 
-                memcpy(msg, shm_du_buff_head(cmd->sdb), len);
+        msg_len = sizeof(*msg) + ipcp_dir_hash_len();
+        if (len < msg_len) {
+                log_err("Invalid flow allocation request");
+                return -EPERM;
+        }
 
-                ipcp_sdb_release(cmd->sdb);
+        dst       = (uint8_t *)(msg + 1);
+        data.data = (uint8_t *) msg + msg_len;
+        data.len  = len - msg_len;
+
+        qs.delay        = ntoh32(msg->delay);
+        qs.bandwidth    = ntoh64(msg->bandwidth);
+        qs.availability = msg->availability;
+        qs.loss         = ntoh32(msg->loss);
+        qs.ber          = ntoh32(msg->ber);
+        qs.in_order     = msg->in_order;
+        qs.max_gap      = ntoh32(msg->max_gap);
+        qs.timeout      = ntoh32(msg->timeout);
+
+        fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_UNICAST_MPL, &data);
+        if (fd < 0)
+                return fd;
 
-                free(cmd);
+        flow = &fa.flows[fd];
 
-                switch (msg->code) {
-                case FLOW_REQ:
-                        msg_len = sizeof(*msg) + ipcp_dir_hash_len();
+        pthread_rwlock_wrlock(&fa.flows_lock);
 
-                        assert(len >= msg_len);
+        fa_flow_init(flow);
 
-                        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+        flow->s_eid  = gen_eid(fd);
+        flow->r_eid  = ntoh64(msg->s_eid);
+        flow->r_addr = ntoh64(msg->s_addr);
 
-                        pthread_mutex_lock(&ipcpi.alloc_lock);
+        pthread_rwlock_unlock(&fa.flows_lock);
 
-                        while (ipcpi.alloc_id != -1 &&
-                               ipcp_get_state() == IPCP_OPERATIONAL) {
-                                ts_add(&abstime, &ts, &abstime);
-                                pthread_cond_timedwait(&ipcpi.alloc_cond,
-                                                       &ipcpi.alloc_lock,
-                                                       &abstime);
-                        }
+        return fd;
+}
 
-                        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                                pthread_mutex_unlock(&ipcpi.alloc_lock);
-                                log_dbg("Won't allocate over non-operational"
-                                        "IPCP.");
-                                continue;
-                        }
+static int fa_handle_flow_reply(struct fa_msg * msg,
+                                size_t          len)
+{
+        int              fd;
+        struct fa_flow * flow;
+        buffer_t         data;  /* Piggybacked data on flow alloc request. */
+        time_t           mpl = IPCP_UNICAST_MPL;
+        int              response;
 
-                        assert(ipcpi.alloc_id == -1);
+        assert(len >= sizeof(*msg));
 
-                        qs.delay        = ntoh32(msg->delay);
-                        qs.bandwidth    = ntoh64(msg->bandwidth);
-                        qs.availability = msg->availability;
-                        qs.loss         = ntoh32(msg->loss);
-                        qs.ber          = ntoh32(msg->ber);
-                        qs.in_order     = msg->in_order;
-                        qs.max_gap      = ntoh32(msg->max_gap);
-                        qs.cypher_s     = ntoh16(msg->cypher_s);
+        data.data = (uint8_t *) msg + sizeof(*msg);
+        data.len  = len - sizeof(*msg);
 
-                        fd = ipcp_flow_req_arr((uint8_t *) (msg + 1),
-                                               ipcp_dir_hash_len(),
-                                               qs,
-                                               buf + msg_len,
-                                               len - msg_len);
-                        if (fd < 0) {
-                                pthread_mutex_unlock(&ipcpi.alloc_lock);
-                                log_err("Failed to get fd for flow.");
-                                continue;
-                        }
+        pthread_rwlock_wrlock(&fa.flows_lock);
 
-                        flow = &fa.flows[fd];
+        fd = eid_to_fd(ntoh64(msg->r_eid));
+        if (fd < 0) {
+                pthread_rwlock_unlock(&fa.flows_lock);
+                log_err("Flow reply for unknown EID %" PRIu64 ".",
+                        ntoh64(msg->r_eid));
+                return -ENOTALLOC;
+        }
 
-                        pthread_rwlock_wrlock(&fa.flows_lock);
+        flow = &fa.flows[fd];
 
-                        fa_flow_init(flow);
+        flow->r_eid = ntoh64(msg->s_eid);
+        response = ntoh32(msg->response);
 
-                        flow->s_eid  = gen_eid(fd);
-                        flow->r_eid  = ntoh64(msg->s_eid);
-                        flow->r_addr = ntoh64(msg->s_addr);
+        log_dbg("IPCP received msg response %d for flow on fd %d.",
+                response, fd);
 
-                        pthread_rwlock_unlock(&fa.flows_lock);
+        if (response < 0)
+                fa_flow_fini(flow);
+        else
+                psched_add(fa.psched, fd);
 
-                        ipcpi.alloc_id = fd;
-                        pthread_cond_broadcast(&ipcpi.alloc_cond);
+        pthread_rwlock_unlock(&fa.flows_lock);
 
-                        pthread_mutex_unlock(&ipcpi.alloc_lock);
+        if (ipcp_flow_alloc_reply(fd, response, mpl, &data) < 0) {
+                log_err("Failed to reply for flow allocation on fd %d.", fd);
+                return -EIRMD;
+        }
 
-                        break;
-                case FLOW_REPLY:
-                        assert(len >= sizeof(*msg));
+        return 0;
+}
 
-                        pthread_rwlock_wrlock(&fa.flows_lock);
+static int fa_handle_flow_update(struct fa_msg * msg,
+                                 size_t          len)
+{
+        struct fa_flow * flow;
+        int              fd;
 
-                        fd = eid_to_fd(ntoh64(msg->r_eid));
-                        if (fd < 0) {
-                                pthread_rwlock_unlock(&fa.flows_lock);
-                                break;
-                        }
+        (void) len;
+        assert(len >= sizeof(*msg));
 
-                        flow = &fa.flows[fd];
+        pthread_rwlock_wrlock(&fa.flows_lock);
 
-                        flow->r_eid = ntoh64(msg->s_eid);
+        fd = eid_to_fd(ntoh64(msg->r_eid));
+        if (fd < 0) {
+                pthread_rwlock_unlock(&fa.flows_lock);
+                log_err("Flow update for unknown EID %" PRIu64 ".",
+                        ntoh64(msg->r_eid));
+                return -EPERM;
+        }
 
-                        if (msg->response < 0)
-                                fa_flow_fini(flow);
-                        else
-                                psched_add(fa.psched, fd);
+        flow = &fa.flows[fd];
+#ifdef IPCP_FLOW_STATS
+        flow->u_rcv++;
+#endif
+        ca_ctx_update_ece(flow->ctx, ntoh16(msg->ece));
 
-                        pthread_rwlock_unlock(&fa.flows_lock);
+        pthread_rwlock_unlock(&fa.flows_lock);
 
-                        ipcp_flow_alloc_reply(fd,
-                                              msg->response,
-                                              buf + sizeof(*msg),
-                                              len - sizeof(*msg));
-                        break;
-                case FLOW_UPDATE:
-                        assert(len >= sizeof(*msg));
+        return 0;
+}
 
-                        pthread_rwlock_wrlock(&fa.flows_lock);
+static void * fa_handle_packet(void * o)
+{
+        (void) o;
 
-                        fd = eid_to_fd(ntoh64(msg->r_eid));
-                        if (fd < 0) {
-                                pthread_rwlock_unlock(&fa.flows_lock);
-                                break;
-                        }
+        while (true) {
+                uint8_t          buf[MSGBUFSZ];
+                struct fa_msg *  msg;
+                size_t           len;
 
-                        flow = &fa.flows[fd];
-#ifdef IPCP_FLOW_STATS
-                        flow->u_rcv++;
-#endif
-                        ca_ctx_update_ece(flow->ctx, ntoh16(msg->ece));
+                msg = (struct fa_msg *) buf;
 
-                        pthread_rwlock_unlock(&fa.flows_lock);
+                len = fa_wait_for_fa_msg(msg);
+                if (len == 0)
+                        continue;
 
+                switch (msg->code) {
+                case FLOW_REQ:
+                        if (fa_handle_flow_req(msg, len) < 0)
+                                log_err("Error handling flow alloc request.");
+                        break;
+                case FLOW_REPLY:
+                        if (fa_handle_flow_reply(msg, len) < 0)
+                                log_err("Error handling flow reply.");
+                        break;
+                case FLOW_UPDATE:
+                        if (fa_handle_flow_update(msg, len) < 0)
+                                log_err("Error handling flow update.");
                         break;
                 default:
-                        log_err("Got an unknown flow allocation message.");
+                        log_warn("Received unknown flow allocation message.");
                         break;
                 }
         }
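After this split, fa_handle_packet() is reduced to a wait-decode-dispatch loop: one helper per message code, each returning a status that is logged per case, instead of the old 150-line inline switch. The skeleton of such a loop, sketched with hypothetical codes and handlers:

#include <stdint.h>
#include <stdio.h>

enum { MSG_REQ, MSG_REPLY, MSG_UPDATE };   /* hypothetical codes */

struct msg {
        uint8_t code;
};

static int handle_req(const struct msg * m)    { (void) m; return 0; }
static int handle_reply(const struct msg * m)  { (void) m; return 0; }
static int handle_update(const struct msg * m) { (void) m; return 0; }

/* Dispatch one decoded message to its handler. */
static void dispatch(const struct msg * m)
{
        switch (m->code) {
        case MSG_REQ:
                if (handle_req(m) < 0)
                        fprintf(stderr, "Error handling request.\n");
                break;
        case MSG_REPLY:
                if (handle_reply(m) < 0)
                        fprintf(stderr, "Error handling reply.\n");
                break;
        case MSG_UPDATE:
                if (handle_update(m) < 0)
                        fprintf(stderr, "Error handling update.\n");
                break;
        default:
                fprintf(stderr, "Received unknown message.\n");
                break;
        }
}

int main(void)
{
        struct msg m = { MSG_REQ };
        dispatch(&m);
        return 0;
}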
@@ -622,19 +651,21 @@ int fa_init(void)
         if (pthread_cond_init(&fa.cond, &cattr))
                 goto fail_cond;
 
-        pthread_condattr_destroy(&cattr);
-
-        list_head_init(&fa.cmds);
-
         if (rib_reg(FA, &r_ops))
                 goto fail_rib_reg;
 
         fa.eid = dt_reg_comp(&fa, &fa_post_packet, FA);
         if ((int) fa.eid < 0)
-                goto fail_rib_reg;
+                goto fail_dt_reg;
+
+        list_head_init(&fa.cmds);
+
+        pthread_condattr_destroy(&cattr);
 
         return 0;
 
+ fail_dt_reg:
+        rib_unreg(FA);
  fail_rib_reg:
         pthread_cond_destroy(&fa.cond);
  fail_cond:
@@ -644,7 +675,6 @@ int fa_init(void)
  fail_mtx:
         pthread_rwlock_destroy(&fa.flows_lock);
  fail_rwlock:
-        log_err("Failed to initialize flow allocator.");
         return -1;
 }
@@ -663,7 +693,7 @@ int fa_start(void)
         int                 pol;
         int                 max;
 
-        fa.psched = psched_create(packet_handler);
+        fa.psched = psched_create(packet_handler, np1_flow_read);
         if (fa.psched == NULL) {
                 log_err("Failed to start packet scheduler.");
                 goto fail_psched;
@@ -700,7 +730,6 @@ int fa_start(void)
  fail_thread:
         psched_destroy(fa.psched);
  fail_psched:
-        log_err("Failed to start flow allocator.");
         return -1;
 }
@@ -712,11 +741,10 @@ void fa_stop(void)
 
         psched_destroy(fa.psched);
 }
 
-int fa_alloc(int             fd,
-             const uint8_t * dst,
-             qosspec_t       qs,
-             const void *    data,
-             size_t          dlen)
+int fa_alloc(int              fd,
+             const uint8_t *  dst,
+             qosspec_t        qs,
+             const buffer_t * data)
 {
         struct fa_msg *      msg;
         struct shm_du_buff * sdb;
@@ -732,7 +760,7 @@ int fa_alloc(int             fd,
 
         len = sizeof(*msg) + ipcp_dir_hash_len();
 
-        if (ipcp_sdb_reserve(&sdb, len + dlen))
+        if (ipcp_sdb_reserve(&sdb, len + data->len))
                 return -1;
 
         msg = (struct fa_msg *) shm_du_buff_head(sdb);
@@ -742,7 +770,7 @@ int fa_alloc(int             fd,
 
         msg->code         = FLOW_REQ;
         msg->s_eid        = hton64(eid);
-        msg->s_addr       = hton64(ipcpi.dt_addr);
+        msg->s_addr       = hton64(addr_auth_address());
         msg->delay        = hton32(qs.delay);
         msg->bandwidth    = hton64(qs.bandwidth);
         msg->availability = qs.availability;
@@ -750,12 +778,14 @@ int fa_alloc(int             fd,
         msg->ber          = hton32(qs.ber);
         msg->in_order     = qs.in_order;
         msg->max_gap      = hton32(qs.max_gap);
-        msg->cypher_s     = hton16(qs.cypher_s);
+        msg->timeout      = hton32(qs.timeout);
 
         memcpy(msg + 1, dst, ipcp_dir_hash_len());
-        memcpy(shm_du_buff_head(sdb) + len, data, dlen);
+        if (data->len > 0)
+                memcpy(shm_du_buff_head(sdb) + len, data->data, data->len);
 
         if (dt_write_packet(addr, qc, fa.eid, sdb)) {
+                log_err("Failed to send flow allocation request packet.");
                 ipcp_sdb_release(sdb);
                 return -1;
         }
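fa_alloc() (and fa_alloc_resp() below) now take the piggybacked payload as a single const buffer_t * instead of a separate pointer and length, and copy it only when data->len > 0. Assuming buffer_t is the usual pointer-plus-length pair (its definition lives elsewhere in the tree), the calling convention looks roughly like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed shape of buffer_t; the real definition is elsewhere. */
typedef struct {
        uint8_t * data;
        size_t    len;
} buffer_t;

/* Append an optional payload after a fixed-size header. */
static size_t build_pdu(uint8_t * pdu, size_t hdrlen, const buffer_t * data)
{
        memset(pdu, 0, hdrlen);              /* hypothetical header */
        if (data->len > 0)                   /* payload is optional */
                memcpy(pdu + hdrlen, data->data, data->len);
        return hdrlen + data->len;
}

int main(void)
{
        uint8_t  payload[4] = { 1, 2, 3, 4 };
        buffer_t data = { payload, sizeof(payload) };
        uint8_t  pdu[64];

        printf("pdu is %zu bytes\n", build_pdu(pdu, 16, &data));
        return 0;
}

Bundling the pair also removes the chance of passing a pointer with the wrong length, since the two travel together through every call.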
@@ -773,75 +803,66 @@
         return 0;
 }
 
-int fa_alloc_resp(int          fd,
-                  int          response,
-                  const void * data,
-                  size_t       len)
+int fa_alloc_resp(int              fd,
+                  int              response,
+                  const buffer_t * data)
 {
-        struct timespec      ts = {0, TIMEOUT * 1000};
-        struct timespec      abstime;
         struct fa_msg *      msg;
         struct shm_du_buff * sdb;
         struct fa_flow *     flow;
         qoscube_t            qc = QOS_CUBE_BE;
 
-        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
-
         flow = &fa.flows[fd];
 
-        pthread_mutex_lock(&ipcpi.alloc_lock);
-
-        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
-                ts_add(&abstime, &ts, &abstime);
-                pthread_cond_timedwait(&ipcpi.alloc_cond,
-                                       &ipcpi.alloc_lock,
-                                       &abstime);
+        if (ipcp_wait_flow_resp(fd) < 0) {
+                log_err("Failed to wait for flow response.");
+                goto fail_alloc_resp;
         }
 
-        if (ipcp_get_state() != IPCP_OPERATIONAL) {
-                pthread_mutex_unlock(&ipcpi.alloc_lock);
-                return -1;
-        }
-
-        ipcpi.alloc_id = -1;
-        pthread_cond_broadcast(&ipcpi.alloc_cond);
-
-        pthread_mutex_unlock(&ipcpi.alloc_lock);
-
-        if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + len)) {
-                fa_flow_fini(flow);
-                return -1;
+        if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + data->len)) {
+                log_err("Failed to reserve sdb (%zu bytes).",
+                        sizeof(*msg) + data->len);
+                goto fail_reserve;
         }
 
         msg = (struct fa_msg *) shm_du_buff_head(sdb);
 
         memset(msg, 0, sizeof(*msg));
 
-        pthread_rwlock_wrlock(&fa.flows_lock);
-
         msg->code     = FLOW_REPLY;
+        msg->response = hton32(response);
+        if (data->len > 0)
+                memcpy(msg + 1, data->data, data->len);
+
+        pthread_rwlock_rdlock(&fa.flows_lock);
+
         msg->r_eid    = hton64(flow->r_eid);
         msg->s_eid    = hton64(flow->s_eid);
-        msg->response = response;
-        memcpy(msg + 1, data, len);
+        pthread_rwlock_unlock(&fa.flows_lock);
+
+        if (dt_write_packet(flow->r_addr, qc, fa.eid, sdb)) {
+                log_err("Failed to send flow allocation response packet.");
+                goto fail_packet;
+        }
 
         if (response < 0) {
+                pthread_rwlock_wrlock(&fa.flows_lock);
                 fa_flow_fini(flow);
-                ipcp_sdb_release(sdb);
+                pthread_rwlock_unlock(&fa.flows_lock);
         } else {
                 psched_add(fa.psched, fd);
         }
 
-        if (dt_write_packet(flow->r_addr, qc, fa.eid, sdb)) {
-                fa_flow_fini(flow);
-                pthread_rwlock_unlock(&fa.flows_lock);
-                ipcp_sdb_release(sdb);
-                return -1;
-        }
+        return 0;
 
+ fail_packet:
+        ipcp_sdb_release(sdb);
+ fail_reserve:
+        pthread_rwlock_wrlock(&fa.flows_lock);
+        fa_flow_fini(flow);
         pthread_rwlock_unlock(&fa.flows_lock);
-
-        return 0;
+ fail_alloc_resp:
+        return -1;
 }
 
 int fa_dealloc(int fd)
@@ -857,7 +878,7 @@ int fa_dealloc(int fd)
 
         pthread_rwlock_unlock(&fa.flows_lock);
 
-        flow_dealloc(fd);
+        ipcp_flow_dealloc(fd);
 
         return 0;
 }
@@ -872,6 +893,7 @@ static int fa_update_remote(int      fd,
         uint64_t             r_addr;
 
         if (ipcp_sdb_reserve(&sdb, sizeof(*msg))) {
+                log_err("Failed to reserve sdb (%zu bytes).", sizeof(*msg));
                 return -1;
         }
 
@@ -895,6 +917,7 @@ static int fa_update_remote(int      fd,
 
         if (dt_write_packet(r_addr, qc, fa.eid, sdb)) {
+                log_err("Failed to send flow update packet.");
                 ipcp_sdb_release(sdb);
                 return -1;
         }
@@ -912,13 +935,14 @@ void  fa_np1_rcv(uint64_t             eid,
         int              fd;
         size_t           len;
 
-        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb);
+        len = shm_du_buff_len(sdb);
 
         pthread_rwlock_wrlock(&fa.flows_lock);
 
         fd = eid_to_fd(eid);
         if (fd < 0) {
                 pthread_rwlock_unlock(&fa.flows_lock);
+                log_dbg("Received packet for unknown EID %" PRIu64 ".", eid);
                 ipcp_sdb_release(sdb);
                 return;
         }
@@ -934,6 +958,7 @@ void  fa_np1_rcv(uint64_t             eid,
         pthread_rwlock_unlock(&fa.flows_lock);
 
         if (ipcp_flow_write(fd, sdb) < 0) {
+                log_dbg("Failed to write to flow %d.", fd);
                 ipcp_sdb_release(sdb);
 #ifdef IPCP_FLOW_STATS
                 pthread_rwlock_wrlock(&fa.flows_lock);
diff --git a/src/ipcpd/unicast/fa.h b/src/ipcpd/unicast/fa.h
index 376fdb20..1e716966 100644
--- a/src/ipcpd/unicast/fa.h
+++ b/src/ipcpd/unicast/fa.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Flow allocator of the IPC Process
  *
@@ -34,16 +34,14 @@ int  fa_start(void);
 
 void fa_stop(void);
 
-int  fa_alloc(int             fd,
-              const uint8_t * dst,
-              qosspec_t       qs,
-              const void *    data,
-              size_t          len);
+int  fa_alloc(int              fd,
+              const uint8_t *  dst,
+              qosspec_t        qs,
+              const buffer_t * data);
 
-int  fa_alloc_resp(int          fd,
-                   int          response,
-                   const void * data,
-                   size_t       len);
+int  fa_alloc_resp(int              fd,
+                   int              response,
+                   const buffer_t * data);
 
 int  fa_dealloc(int fd);
diff --git a/src/ipcpd/unicast/kademlia.proto b/src/ipcpd/unicast/kademlia.proto
deleted file mode 100644
index 58f5e787..00000000
--- a/src/ipcpd/unicast/kademlia.proto
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2021
- *
- * KAD protocol
- *
- *    Dimitri Staessens <dimitri@ouroboros.rocks>
- *    Sander Vrijders   <sander@ouroboros.rocks>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-syntax = "proto2";
-
-message kad_contact_msg {
-        required bytes  id   = 1;
-        required uint64 addr = 2;
-};
-
-message kad_msg {
-        required uint32 code              =  1;
-        required uint32 cookie            =  2;
-        required uint64 s_addr            =  3;
-        optional bytes  s_id              =  4;
-        optional bytes  key               =  5;
-        repeated uint64 addrs             =  6;
-        repeated kad_contact_msg contacts =  7;
-        // enrolment parameters
-        optional uint32 alpha             =  8;
-        optional uint32 b                 =  9;
-        optional uint32 k                 = 10;
-        optional uint32 t_expire          = 11;
-        optional uint32 t_refresh         = 12;
-        optional uint32 t_replicate       = 13;
-};
\ No newline at end of file
diff --git a/src/ipcpd/unicast/main.c b/src/ipcpd/unicast/main.c
index 018dd1c6..7989d3e1 100644
--- a/src/ipcpd/unicast/main.c
+++ b/src/ipcpd/unicast/main.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Unicast IPC Process
  *
@@ -32,16 +32,16 @@
 #define THIS_TYPE IPCP_UNICAST
 
 #include <ouroboros/errno.h>
-#include <ouroboros/hash.h>
 #include <ouroboros/ipcp-dev.h>
 #include <ouroboros/logs.h>
 #include <ouroboros/notifier.h>
+#include <ouroboros/random.h>
 #include <ouroboros/rib.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>
 
 #include "common/connmgr.h"
 #include "common/enroll.h"
-#include "addr_auth.h"
+#include "addr-auth.h"
 #include "ca.h"
 #include "dir.h"
 #include "dt.h"
@@ -55,237 +55,203 @@
 #include <assert.h>
 #include <inttypes.h>
 
-struct ipcp ipcpi;
-
-static int initialize_components(const struct ipcp_config * conf)
+static int initialize_components(struct ipcp_config * conf)
 {
-        ipcpi.layer_name = strdup(conf->layer_info.layer_name);
-        if (ipcpi.layer_name == NULL) {
-                log_err("Failed to set layer name.");
-                goto fail_layer_name;
-        }
-
-        ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo;
-
         assert(ipcp_dir_hash_len() != 0);
 
-        if (addr_auth_init(conf->addr_auth_type,
-                           &conf->addr_size)) {
+        if (addr_auth_init(conf->unicast.addr_auth_type,
+                           &conf->unicast.dt.addr_size)) {
                 log_err("Failed to init address authority.");
                 goto fail_addr_auth;
         }
 
-        ipcpi.dt_addr = addr_auth_address();
-        if (ipcpi.dt_addr == 0) {
-                log_err("Failed to get a valid address.");
-                goto fail_addr_auth;
-        }
-
-        log_dbg("IPCP got address %" PRIu64 ".", ipcpi.dt_addr);
+        log_info("IPCP got address %" PRIu64 ".", addr_auth_address());
 
-        if (ca_init(conf->cong_avoid)) {
+        if (ca_init(conf->unicast.cong_avoid)) {
                 log_err("Failed to initialize congestion avoidance.");
                 goto fail_ca;
         }
 
-        if (dt_init(conf->routing_type,
-                    conf->addr_size,
-                    conf->eid_size,
-                    conf->max_ttl)) {
+        if (dt_init(conf->unicast.dt)) {
                 log_err("Failed to initialize data transfer component.");
                 goto fail_dt;
         }
 
-        if (fa_init()) {
-                log_err("Failed to initialize flow allocator component.");
-                goto fail_fa;
-        }
+        ipcp_set_dir_hash_algo((enum hash_algo) conf->layer_info.dir_hash_algo);
 
-        if (dir_init()) {
+        if (dir_init(&conf->unicast.dir)) {
                 log_err("Failed to initialize directory.");
                 goto fail_dir;
         }
 
+        if (fa_init()) {
+                log_err("Failed to initialize flow allocator component.");
+                goto fail_fa;
+        }
+
         ipcp_set_state(IPCP_INIT);
 
         return 0;
 
- fail_dir:
-        fa_fini();
  fail_fa:
+        dir_fini();
+ fail_dir:
         dt_fini();
  fail_dt:
         ca_fini();
  fail_ca:
         addr_auth_fini();
  fail_addr_auth:
-        free(ipcpi.layer_name);
- fail_layer_name:
         return -1;
 }
 
 static void finalize_components(void)
 {
-        dir_fini();
-
         fa_fini();
 
+        dir_fini();
+
         dt_fini();
 
         ca_fini();
 
         addr_auth_fini();
-
-        free(ipcpi.layer_name);
 }
 
 static int start_components(void)
 {
-        assert(ipcp_get_state() == IPCP_INIT);
+        if (connmgr_start() < 0) {
+                log_err("Failed to start AP connection manager.");
+                goto fail_connmgr_start;
+        }
 
-        ipcp_set_state(IPCP_OPERATIONAL);
+        if (dt_start() < 0) {
+                log_err("Failed to start data transfer.");
+                goto fail_dt_start;
+        }
 
-        if (fa_start()) {
+        if (fa_start() < 0) {
                 log_err("Failed to start flow allocator.");
                 goto fail_fa_start;
         }
 
-        if (enroll_start()) {
+        if (enroll_start() < 0) {
                 log_err("Failed to start enrollment.");
                 goto fail_enroll_start;
         }
 
-        if (connmgr_start()) {
-                log_err("Failed to start AP connection manager.");
-                goto fail_connmgr_start;
+        if (dir_start() < 0) {
+                log_err("Failed to start directory.");
+                goto fail_dir_start;
         }
 
         return 0;
 
- fail_connmgr_start:
+ fail_dir_start:
         enroll_stop();
  fail_enroll_start:
         fa_stop();
  fail_fa_start:
+        dt_stop();
+ fail_dt_start:
+        connmgr_stop();
+ fail_connmgr_start:
         ipcp_set_state(IPCP_INIT);
         return -1;
 }
 
 static void stop_components(void)
 {
-        assert(ipcp_get_state() == IPCP_OPERATIONAL ||
-               ipcp_get_state() == IPCP_SHUTDOWN);
-
-        connmgr_stop();
+        dir_stop();
 
         enroll_stop();
 
         fa_stop();
 
-        ipcp_set_state(IPCP_INIT);
-}
+        dt_stop();
 
-static int bootstrap_components(void)
-{
-        if (dir_bootstrap()) {
-                log_err("Failed to bootstrap directory.");
-                dt_stop();
-                return -1;
-        }
+        connmgr_stop();
 
-        return 0;
+        ipcp_set_state(IPCP_BOOT);
 }
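start_components() and stop_components() are now exact mirrors: connmgr, dt, fa, enroll, dir going up; dir, enroll, fa, dt, connmgr coming down. One way to make that symmetry impossible to break, not how main.c does it, just the design idea, is to drive both directions from a single table:

#include <stdio.h>

struct comp {
        const char * name;
        int  (* start)(void);
        void (* stop)(void);
};

static int  ok_start(void)  { return 0; }
static void noop_stop(void) { }

static struct comp comps[] = {             /* listed in start order */
        { "connmgr", ok_start, noop_stop },
        { "dt",      ok_start, noop_stop },
        { "fa",      ok_start, noop_stop },
};

#define N_COMPS (sizeof(comps) / sizeof(comps[0]))

static int start_all(void)
{
        size_t i;

        for (i = 0; i < N_COMPS; ++i)
                if (comps[i].start() < 0)
                        goto fail;
        return 0;
 fail:
        while (i-- > 0)                    /* unwind what started */
                comps[i].stop();
        return -1;
}

static void stop_all(void)
{
        size_t i = N_COMPS;

        while (i-- > 0)                    /* always reverse order */
                comps[i].stop();
}

int main(void)
{
        if (start_all() == 0)
                stop_all();
        return 0;
}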
 
 static int unicast_ipcp_enroll(const char *        dst,
                                struct layer_info * info)
 {
-        struct conn conn;
+        struct ipcp_config * conf;
+        struct conn          conn;
+        uint8_t              id[ENROLL_ID_LEN];
 
-        if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn)) {
-                log_err("Failed to get connection.");
-                goto fail_er_flow;
+        if (random_buffer(id, ENROLL_ID_LEN) < 0) {
+                log_err("Failed to generate enrollment ID.");
+                goto fail_id;
         }
 
-        /* Get boot state from peer. */
-        if (enroll_boot(&conn)) {
-                log_err("Failed to get boot information.");
-                goto fail_enroll_boot;
+        log_info_id(id, "Requesting enrollment.");
+
+        if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn) < 0) {
+                log_err_id(id, "Failed to get connection.");
+                goto fail_id;
         }
 
-        if (initialize_components(enroll_get_conf())) {
-                log_err("Failed to initialize IPCP components.");
+        /* Get boot state from peer. */
+        if (enroll_boot(&conn, id) < 0) {
+                log_err_id(id, "Failed to get boot information.");
                 goto fail_enroll_boot;
         }
 
-        if (dt_start()) {
-                log_err("Failed to initialize IPCP components.");
-                goto fail_dt_start;
+        conf = enroll_get_conf();
+
+        *info = conf->layer_info;
+
+        if (initialize_components(conf) < 0) {
+                log_err_id(id, "Failed to initialize components.");
+                goto fail_enroll_boot;
         }
 
-        if (start_components()) {
-                log_err("Failed to start components.");
+        if (start_components() < 0) {
+                log_err_id(id, "Failed to start components.");
                 goto fail_start_comp;
         }
 
-        if (enroll_done(&conn, 0))
-                log_warn("Failed to confirm enrollment with peer.");
+        if (enroll_ack(&conn, id, 0) < 0)
+                log_err_id(id, "Failed to confirm enrollment.");
 
-        if (connmgr_dealloc(COMPID_ENROLL, &conn))
-                log_warn("Failed to deallocate enrollment flow.");
+        if (connmgr_dealloc(COMPID_ENROLL, &conn) < 0)
+                log_warn_id(id, "Failed to dealloc enrollment flow.");
 
-        log_info("Enrolled with %s.", dst);
-
-        info->dir_hash_algo = ipcpi.dir_hash_algo;
-        strcpy(info->layer_name, ipcpi.layer_name);
+        log_info_id(id, "Enrolled with %s.", dst);
 
         return 0;
 
  fail_start_comp:
-        dt_stop();
- fail_dt_start:
         finalize_components();
  fail_enroll_boot:
         connmgr_dealloc(COMPID_ENROLL, &conn);
- fail_er_flow:
+ fail_id:
         return -1;
 }
 
-static int unicast_ipcp_bootstrap(const struct ipcp_config * conf)
+static int unicast_ipcp_bootstrap(struct ipcp_config * conf)
 {
         assert(conf);
         assert(conf->type == THIS_TYPE);
 
-        enroll_bootstrap(conf);
-
-        if (initialize_components(conf)) {
+        if (initialize_components(conf) < 0) {
                 log_err("Failed to init IPCP components.");
                 goto fail_init;
         }
 
-        if (dt_start()) {
-                log_err("Failed to initialize IPCP components.");
-                goto fail_dt_start;
-        };
+        enroll_bootstrap(conf);
 
-        if (start_components()) {
+        if (start_components() < 0) {
                 log_err("Failed to init IPCP components.");
                 goto fail_start;
         }
 
-        if (bootstrap_components()) {
-                log_err("Failed to bootstrap IPCP components.");
-                goto fail_bootstrap;
-        }
-
-        log_dbg("Bootstrapped in layer %s.", conf->layer_info.layer_name);
-
         return 0;
 
- fail_bootstrap:
-        stop_components();
  fail_start:
-        dt_stop();
- fail_dt_start:
         finalize_components();
  fail_init:
         return -1;
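Enrollment attempts are now tagged with a random ENROLL_ID_LEN-byte identifier that prefixes every related log line (log_info_id() and friends), so concurrent or retried enrollments can be told apart in the logs. The tagging itself, sketched standalone assuming an 8-byte ID and plain printf-style logging:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ID_LEN 8   /* hypothetical; the real length is ENROLL_ID_LEN */

/* Render an ID as hex so it can prefix a log line. */
static void id_to_str(char * dst, const uint8_t * id)
{
        int i;

        for (i = 0; i < ID_LEN; ++i)
                sprintf(dst + 2 * i, "%02x", id[i]);
}

int main(void)
{
        uint8_t id[ID_LEN];
        char    idstr[2 * ID_LEN + 1];
        int     i;

        for (i = 0; i < ID_LEN; ++i)   /* stand-in for random_buffer() */
                id[i] = rand() & 0xFF;

        id_to_str(idstr, id);
        printf("[%s] Requesting enrollment.\n", idstr);

        return 0;
}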
@@ -318,38 +284,34 @@ int main(int    argc,
                 goto fail_init;
         }
 
-        if (notifier_init()) {
+        if (notifier_init() < 0) {
                 log_err("Failed to initialize notifier component.");
                 goto fail_notifier_init;
         }
 
-        if (connmgr_init()) {
+        if (connmgr_init() < 0) {
                 log_err("Failed to initialize connection manager.");
                 goto fail_connmgr_init;
         }
 
-        if (enroll_init()) {
+        if (enroll_init() < 0) {
                 log_err("Failed to initialize enrollment component.");
                 goto fail_enroll_init;
         }
 
-        if (ipcp_boot() < 0) {
-                log_err("Failed to boot IPCP.");
-                goto fail_boot;
-        }
-
-        if (ipcp_create_r(0)) {
-                log_err("Failed to notify IRMd we are initialized.");
-                ipcp_set_state(IPCP_NULL);
-                goto fail_create_r;
+        if (ipcp_start() < 0) {
+                log_err("Failed to start IPCP.");
+                goto fail_start;
         }
 
-        ipcp_shutdown();
+        ipcp_sigwait();
 
         if (ipcp_get_state() == IPCP_SHUTDOWN) {
-                dt_stop();
                 stop_components();
+                ipcp_stop();
                 finalize_components();
+        } else {
+                ipcp_stop();
         }
 
         enroll_fini();
@@ -362,17 +324,14 @@ int main(int    argc,
 
         exit(EXIT_SUCCESS);
 
- fail_create_r:
-        ipcp_shutdown();
- fail_boot:
+ fail_start:
         enroll_fini();
  fail_enroll_init:
         connmgr_fini();
  fail_connmgr_init:
         notifier_fini();
  fail_notifier_init:
-       ipcp_fini();
+        ipcp_fini();
  fail_init:
-        ipcp_create_r(-1);
         exit(EXIT_FAILURE);
 }
diff --git a/src/ipcpd/unicast/pff.c b/src/ipcpd/unicast/pff.c
index 03682184..9b2aa2b4 100644
--- a/src/ipcpd/unicast/pff.c
+++ b/src/ipcpd/unicast/pff.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * PDU Forwarding Function
  *
@@ -26,14 +26,11 @@
 #include <ouroboros/logs.h>
 
 #include "pff.h"
-#include "pol-pff-ops.h"
-#include "pol/alternate_pff.h"
-#include "pol/multipath_pff.h"
-#include "pol/simple_pff.h"
+#include "pff/pol.h"
 
 struct pff {
-        struct pol_pff_ops * ops;
-        struct pff_i *       pff_i;
+        struct pff_ops * ops;
+        struct pff_i *   pff_i;
 };
 
 struct pff * pff_create(enum pol_pff pol)
@@ -62,8 +59,10 @@ struct pff * pff_create(enum pol_pff pol)
         }
 
         pff->pff_i = pff->ops->create();
-        if (pff->pff_i == NULL)
+        if (pff->pff_i == NULL) {
+                log_err("Failed to create PFF instance.");
                 goto err;
+        }
 
         return pff;
  err:
diff --git a/src/ipcpd/unicast/pff.h b/src/ipcpd/unicast/pff.h
index 3ac9d5fb..f44e5531 100644
--- a/src/ipcpd/unicast/pff.h
+++ b/src/ipcpd/unicast/pff.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * PDU Forwarding Function
  *
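The pol_pff_ops to pff_ops rename tightens the naming of the policy vtable: each forwarding policy exports a struct of function pointers and pff_create() binds one of them at run time, the standard C idiom for pluggable backends. A minimal sketch of that idiom, with hypothetical policies rather than the PFF interface:

#include <stdio.h>

struct ops {                       /* one vtable per policy */
        const char * name;
        int (* nhop)(int addr);
};

static int simple_nhop(int addr)    { return addr % 2; }
static int alternate_nhop(int addr) { return addr % 3; }

static struct ops simple_ops    = { "simple",    simple_nhop    };
static struct ops alternate_ops = { "alternate", alternate_nhop };

enum pol { POL_SIMPLE, POL_ALTERNATE };

/* Bind a vtable at run time based on the configured policy. */
static struct ops * select_pol(enum pol pol)
{
        switch (pol) {
        case POL_SIMPLE:    return &simple_ops;
        case POL_ALTERNATE: return &alternate_ops;
        default:            return NULL;
        }
}

int main(void)
{
        struct ops * ops = select_pol(POL_ALTERNATE);

        printf("%s: nhop(7) = %d\n", ops->name, ops->nhop(7));
        return 0;
}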
diff --git a/src/ipcpd/unicast/pol/alternate_pff.c b/src/ipcpd/unicast/pff/alternate.c
index 18d3dfed..85e85914 100644
--- a/src/ipcpd/unicast/pol/alternate_pff.c
+++ b/src/ipcpd/unicast/pff/alternate.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Policy for PFF with alternate next hops
  *
@@ -28,7 +28,7 @@
 #include <ouroboros/list.h>
 
 #include "pft.h"
-#include "alternate_pff.h"
+#include "alternate.h"
 
 #include <string.h>
 #include <assert.h>
@@ -54,7 +54,7 @@ struct pff_i {
         pthread_rwlock_t lock;
 };
 
-struct pol_pff_ops alternate_pff_ops = {
+struct pff_ops alternate_pff_ops = {
         .create            = alternate_pff_create,
         .destroy           = alternate_pff_destroy,
         .lock              = alternate_pff_lock,
diff --git a/src/ipcpd/unicast/pol/alternate_pff.h b/src/ipcpd/unicast/pff/alternate.h
index 9c7cc08f..96207e74 100644
--- a/src/ipcpd/unicast/pol/alternate_pff.h
+++ b/src/ipcpd/unicast/pff/alternate.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Policy for PFF with alternate next hops
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H
 #define OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H
 
-#include "pol-pff-ops.h"
+#include "ops.h"
 
 struct pff_i * alternate_pff_create(void);
 
@@ -56,6 +56,6 @@ int            alternate_flow_state_change(struct pff_i * pff_i,
                                            int            fd,
                                            bool           up);
 
-extern struct pol_pff_ops alternate_pff_ops;
+extern struct pff_ops alternate_pff_ops;
 
 #endif /* OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H */
diff --git a/src/ipcpd/unicast/pol/multipath_pff.c b/src/ipcpd/unicast/pff/multipath.c
index 0d759ec4..cbab0f5f 100644
--- a/src/ipcpd/unicast/pol/multipath_pff.c
+++ b/src/ipcpd/unicast/pff/multipath.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Policy for PFF supporting multipath routing
  *
@@ -28,7 +28,7 @@
 #include <ouroboros/errno.h>
 
 #include "pft.h"
-#include "multipath_pff.h"
+#include "multipath.h"
 
 #include <string.h>
 #include <assert.h>
@@ -39,7 +39,7 @@ struct pff_i {
         pthread_rwlock_t lock;
 };
 
-struct pol_pff_ops multipath_pff_ops = {
+struct pff_ops multipath_pff_ops = {
         .create            = multipath_pff_create,
         .destroy           = multipath_pff_destroy,
         .lock              = multipath_pff_lock,
@@ -58,21 +58,23 @@ struct pff_i * multipath_pff_create(void)
 
         tmp = malloc(sizeof(*tmp));
         if (tmp == NULL)
-                return NULL;
+                goto fail_malloc;
 
-        if (pthread_rwlock_init(&tmp->lock, NULL)) {
-                free(tmp);
-                return NULL;
-        }
+        if (pthread_rwlock_init(&tmp->lock, NULL))
+                goto fail_rwlock;
 
         tmp->pft = pft_create(PFT_SIZE, false);
-        if (tmp->pft == NULL) {
-                pthread_rwlock_destroy(&tmp->lock);
-                free(tmp);
-                return NULL;
-        }
+        if (tmp->pft == NULL)
+                goto fail_pft;
 
         return tmp;
+
+ fail_pft:
+        pthread_rwlock_destroy(&tmp->lock);
+ fail_rwlock:
+        free(tmp);
+ fail_malloc:
+        return NULL;
 }
 
 void multipath_pff_destroy(struct pff_i * pff_i)
@@ -80,8 +82,8 @@ void multipath_pff_destroy(struct pff_i * pff_i)
         assert(pff_i);
 
         pft_destroy(pff_i->pft);
-
         pthread_rwlock_destroy(&pff_i->lock);
+
         free(pff_i);
 }
 
@@ -177,7 +179,7 @@ int multipath_pff_nhop(struct pff_i * pff_i,
 
         assert(pff_i);
 
-        pthread_rwlock_rdlock(&pff_i->lock);
+        pthread_rwlock_wrlock(&pff_i->lock);
 
         if (pft_lookup(pff_i->pft, addr, &fds, &len)) {
                 pthread_rwlock_unlock(&pff_i->lock);
@@ -189,7 +191,7 @@ int multipath_pff_nhop(struct pff_i * pff_i,
         assert(len > 0);
 
         /* Rotate fds left. */
-        memcpy(fds, fds + 1, (len - 1) * sizeof(*fds));
+        memmove(fds, fds + 1, (len - 1) * sizeof(*fds));
         fds[len - 1] = fd;
 
         pthread_rwlock_unlock(&pff_i->lock);
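Two correctness fixes in multipath_pff_nhop() above are easy to miss: the lock is upgraded from rdlock to wrlock because the rotation mutates the fd list, and memcpy becomes memmove because source (fds + 1) and destination (fds) overlap, which is undefined behaviour for memcpy. The rotate-left in isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
        int    fds[] = { 3, 5, 7, 9 };
        size_t len   = sizeof(fds) / sizeof(fds[0]);
        int    fd    = fds[0];

        /* Regions overlap, so memmove, never memcpy. */
        memmove(fds, fds + 1, (len - 1) * sizeof(*fds));
        fds[len - 1] = fd;

        printf("%d %d %d %d\n", fds[0], fds[1], fds[2], fds[3]);
        return 0;
}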
diff --git a/src/ipcpd/unicast/pol/multipath_pff.h b/src/ipcpd/unicast/pff/multipath.h
index 8168995e..0eb03476 100644
--- a/src/ipcpd/unicast/pol/multipath_pff.h
+++ b/src/ipcpd/unicast/pff/multipath.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Policy for PFF supporting multipath routing
  *
@@ -24,7 +24,7 @@
 #ifndef OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H
 #define OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H
 
-#include "pol-pff-ops.h"
+#include "ops.h"
 
 struct pff_i * multipath_pff_create(void);
 
@@ -53,6 +53,6 @@ void multipath_pff_flush(struct pff_i * pff_i);
 int            multipath_pff_nhop(struct pff_i * pff_i,
                                   uint64_t       addr);
 
-extern struct pol_pff_ops multipath_pff_ops;
+extern struct pff_ops multipath_pff_ops;
 
 #endif /* OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H */
diff --git a/src/ipcpd/unicast/pol-pff-ops.h b/src/ipcpd/unicast/pff/ops.h
index 85615a1f..16a31273 100644
--- a/src/ipcpd/unicast/pol-pff-ops.h
+++ b/src/ipcpd/unicast/pff/ops.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Pff policy ops
  *
@@ -20,14 +20,14 @@
  * Foundation, Inc., http://www.fsf.org/about/contact/.
  */
 
-#ifndef OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H
-#define OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H
+#ifndef OUROBOROS_IPCPD_UNICAST_PFF_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_PFF_OPS_H
 
 #include <stdbool.h>
 
 struct pff_i;
 
-struct pol_pff_ops {
+struct pff_ops {
         struct pff_i * (* create)(void);
 
         void           (* destroy)(struct pff_i * pff_i);
@@ -60,4 +60,4 @@ struct pol_pff_ops {
                                              bool           up);
 };
 
-#endif /* OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H */
+#endif /* OUROBOROS_IPCPD_UNICAST_PFF_OPS_H */
diff --git a/src/ipcpd/unicast/pol/pft.c b/src/ipcpd/unicast/pff/pft.c
index e42b4a98..8c436113 100644
--- a/src/ipcpd/unicast/pol/pft.c
+++ b/src/ipcpd/unicast/pff/pft.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Packet forwarding table (PFT) with chaining on collisions
  *
@@ -115,19 +115,11 @@ void pft_flush(struct pft * pft)
 
 static uint64_t hash(uint64_t key)
 {
-        void *   res;
-        uint64_t ret;
-        uint8_t  keys[4];
+        uint64_t res[2];
 
-        memcpy(keys, &key, 4);
+        mem_hash(HASH_MD5, res, (uint8_t *) &key, sizeof(key));
 
-        mem_hash(HASH_MD5, &res, keys, 4);
-
-        ret = (* (uint64_t *) res);
-
-        free(res);
-
-        return ret;
+        return res[0];
 }
 
 static uint64_t calc_key(struct pft * pft,
diff --git a/src/ipcpd/unicast/pol/pft.h b/src/ipcpd/unicast/pff/pft.h
index 011ad414..711dabcb 100644
--- a/src/ipcpd/unicast/pol/pft.h
+++ b/src/ipcpd/unicast/pff/pft.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Packet forwarding table (PFT) with chaining on collisions
  *
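The old pft hash() above digested only 4 of the key's 8 bytes and paid a heap allocation (and free) for every lookup; the new version hashes the full key into a 16-byte stack buffer and keeps the first 64 bits of the MD5 digest. The underlying pattern, hashing a fixed-size key into a uint64_t with no heap traffic, can be shown with a generic stand-in hash (FNV-1a here, not the MD5 the code actually uses):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* FNV-1a over all bytes of a 64-bit key; stack only, full key. */
static uint64_t hash_key(uint64_t key)
{
        const uint8_t * p = (const uint8_t *) &key;
        uint64_t        h = 0xcbf29ce484222325ULL;
        size_t          i;

        for (i = 0; i < sizeof(key); ++i) {
                h ^= p[i];
                h *= 0x100000001b3ULL;
        }

        return h;
}

int main(void)
{
        printf("%016llx\n", (unsigned long long) hash_key(42));
        return 0;
}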
diff --git a/src/ipcpd/unicast/pff/pol.h b/src/ipcpd/unicast/pff/pol.h
new file mode 100644
index 00000000..245b03c4
--- /dev/null
+++ b/src/ipcpd/unicast/pff/pol.h
@@ -0,0 +1,25 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * PDU Forwarding Function policies
+ *
+ *    Dimitri Staessens <dimitri@ouroboros.rocks>
+ *    Sander Vrijders   <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "alternate.h"
+#include "multipath.h"
+#include "simple.h"
diff --git a/src/ipcpd/unicast/pol/simple_pff.c b/src/ipcpd/unicast/pff/simple.c
index 13944aed..5f95e3ce 100644
--- a/src/ipcpd/unicast/pol/simple_pff.c
+++ b/src/ipcpd/unicast/pff/simple.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Simple PDU Forwarding Function
  *
@@ -27,7 +27,7 @@
 #include <ouroboros/errno.h>
 
 #include "pft.h"
-#include "simple_pff.h"
+#include "simple.h"
 
 #include <assert.h>
 #include <pthread.h>
@@ -37,7 +37,7 @@ struct pff_i {
         pthread_rwlock_t lock;
 };
 
-struct pol_pff_ops simple_pff_ops = {
+struct pff_ops simple_pff_ops = {
         .create            = simple_pff_create,
         .destroy           = simple_pff_destroy,
         .lock              = simple_pff_lock,
diff --git a/src/ipcpd/unicast/pol/simple_pff.h b/src/ipcpd/unicast/pff/simple.h
index 2b22c130..0966a186 100644
--- a/src/ipcpd/unicast/pol/simple_pff.h
+++ b/src/ipcpd/unicast/pff/simple.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Simple policy for PFF
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H
 #define OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H
 
-#include "pol-pff-ops.h"
+#include "ops.h"
 
 struct pff_i * simple_pff_create(void);
 
@@ -52,6 +52,6 @@ void           simple_pff_flush(struct pff_i * pff_i);
 int            simple_pff_nhop(struct pff_i * pff_i,
                                uint64_t       addr);
 
-extern struct pol_pff_ops simple_pff_ops;
+extern struct pff_ops simple_pff_ops;
 
 #endif /* OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H */
diff --git a/src/ipcpd/unicast/tests/CMakeLists.txt b/src/ipcpd/unicast/pff/tests/CMakeLists.txt
index 482711d5..65705714 100644
--- a/src/ipcpd/unicast/tests/CMakeLists.txt
+++ b/src/ipcpd/unicast/pff/tests/CMakeLists.txt
@@ -17,19 +17,20 @@ get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)
 create_test_sourcelist(${PARENT_DIR}_tests test_suite.c
   # Add new tests here
-  dht_test.c
+  pft_test.c
   )
 
-protobuf_generate_c(KAD_PROTO_SRCS KAD_PROTO_HDRS ../kademlia.proto)
-
-add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests}
-  ${KAD_PROTO_SRCS})
+add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests})
 
 target_link_libraries(${PARENT_DIR}_test ouroboros-common)
 
 add_dependencies(check ${PARENT_DIR}_test)
 
 set(tests_to_run ${${PARENT_DIR}_tests})
-remove(tests_to_run test_suite.c)
+if(CMAKE_VERSION VERSION_LESS "3.29.0")
+  remove(tests_to_run test_suite.c)
+else ()
+  list(POP_FRONT tests_to_run)
+endif()
 
 foreach (test ${tests_to_run})
   get_filename_component(test_name ${test} NAME_WE)
diff --git a/src/ipcpd/unicast/pol/tests/pft_test.c b/src/ipcpd/unicast/pff/tests/pft_test.c
index c48267eb..18287fb8 100644
--- a/src/ipcpd/unicast/pol/tests/pft_test.c
+++ b/src/ipcpd/unicast/pff/tests/pft_test.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Test of the hash table
  *
diff --git a/src/ipcpd/unicast/psched.c b/src/ipcpd/unicast/psched.c
index 33ac5afe..7e12148b 100644
--- a/src/ipcpd/unicast/psched.c
+++ b/src/ipcpd/unicast/psched.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Packet scheduler component
  *
@@ -50,6 +50,7 @@ static int qos_prio [] = {
 struct psched {
         fset_t *         set[QOS_CUBE_MAX];
         next_packet_fn_t callback;
+        read_fn_t        read;
         pthread_t        readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
 };
 
@@ -101,7 +102,7 @@ static void * packet_reader(void * o)
                                 notifier_event(NOTIFY_DT_FLOW_UP, &fd);
                                 break;
                         case FLOW_PKT:
-                                if (ipcp_flow_read(fd, &sdb))
+                                if (sched->read(fd, &sdb) < 0)
                                         continue;
 
                                 sched->callback(fd, qc, sdb);
@@ -117,7 +118,8 @@ static void * packet_reader(void * o)
         return (void *) 0;
 }
 
-struct psched * psched_create(next_packet_fn_t callback)
+struct psched * psched_create(next_packet_fn_t callback,
+                              read_fn_t        read)
 {
         struct psched *       psched;
         struct sched_info *   infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
@@ -131,6 +133,7 @@ struct psched * psched_create(next_packet_fn_t callback)
                 goto fail_malloc;
 
         psched->callback = callback;
+        psched->read     = read;
 
         for (i = 0; i < QOS_CUBE_MAX; ++i) {
                 psched->set[i] = fset_create();
diff --git a/src/ipcpd/unicast/psched.h b/src/ipcpd/unicast/psched.h
index 1f22b34b..831f8084 100644
--- a/src/ipcpd/unicast/psched.h
+++ b/src/ipcpd/unicast/psched.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Packet scheduler component
  *
@@ -30,7 +30,11 @@ typedef void (* next_packet_fn_t)(int                  fd,
                                   qoscube_t            qc,
                                   struct shm_du_buff * sdb);
 
-struct psched * psched_create(next_packet_fn_t callback);
+typedef int (* read_fn_t)(int                   fd,
+                          struct shm_du_buff ** sdb);
+
+struct psched * psched_create(next_packet_fn_t callback,
+                              read_fn_t        read);
 
 void            psched_destroy(struct psched * psched);
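psched_create() now receives the read function alongside the packet callback, which is why the dt component passes ipcp_flow_read and the flow allocator passes np1_flow_read earlier in this diff: the same scheduler drains both N-1 and N+1 flows without knowing which it is handling. Injecting I/O as a function pointer, in miniature:

#include <stdio.h>

typedef int (* read_fn_t)(int fd, int * pkt);

struct sched {
        read_fn_t read;              /* injected at creation */
};

static int read_a(int fd, int * pkt) { *pkt = fd + 100; return 0; }
static int read_b(int fd, int * pkt) { *pkt = fd + 200; return 0; }

static void drain_one(struct sched * s, int fd)
{
        int pkt;

        if (s->read(fd, &pkt) < 0)   /* no knowledge of the source */
                return;

        printf("got %d\n", pkt);
}

int main(void)
{
        struct sched sa = { read_a };
        struct sched sb = { read_b };

        drain_one(&sa, 1);
        drain_one(&sb, 1);
        return 0;
}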
diff --git a/src/ipcpd/unicast/routing.c b/src/ipcpd/unicast/routing.c
index 1b13ae0e..2ad7b234 100644
--- a/src/ipcpd/unicast/routing.c
+++ b/src/ipcpd/unicast/routing.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Routing component of the IPCP
  *
@@ -26,35 +26,30 @@
 #include "pff.h"
 #include "routing.h"
 
-#include "pol/link_state.h"
+#include "routing/pol.h"
 
-struct pol_routing_ops * r_ops;
+struct routing_ops * r_ops;
 
-int routing_init(enum pol_routing pr)
+int routing_init(struct routing_config * conf,
+                 enum pol_pff *          pff_type)
 {
-        enum pol_pff pff_type;
+        void * cfg;
 
-        switch (pr) {
+        switch (conf->pol) {
         case ROUTING_LINK_STATE:
-                pff_type = PFF_SIMPLE;
-                r_ops = &link_state_ops;
-                break;
-        case ROUTING_LINK_STATE_LFA:
-                pff_type = PFF_ALTERNATE;
-                r_ops = &link_state_ops;
-                break;
-        case ROUTING_LINK_STATE_ECMP:
-                pff_type=PFF_MULTIPATH;
                 r_ops = &link_state_ops;
+                cfg = &conf->ls;
                 break;
         default:
                 return -ENOTSUP;
         }
 
-        if (r_ops->init(pr))
-                return -1;
+        return r_ops->init(cfg, pff_type);
+}
 
-        return pff_type;
+int routing_start(void)
+{
+        return r_ops->start();
 }
 
 struct routing_i * routing_i_create(struct pff * pff)
@@ -67,6 +62,11 @@ void routing_i_destroy(struct routing_i * instance)
         return r_ops->routing_i_destroy(instance);
 }
 
+void routing_stop(void)
+{
+        r_ops->stop();
+}
+
 void routing_fini(void)
 {
         r_ops->fini();
diff --git a/src/ipcpd/unicast/routing.h b/src/ipcpd/unicast/routing.h
index 2eaaeb68..e14960b5 100644
--- a/src/ipcpd/unicast/routing.h
+++ b/src/ipcpd/unicast/routing.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Routing component of the IPCP
  *
@@ -30,10 +30,15 @@
 
 #include <stdint.h>
 
-int                routing_init(enum pol_routing pr);
+int                routing_init(struct routing_config * conf,
+                                enum pol_pff *          pff_type);
 
 void               routing_fini(void);
 
+int                routing_start(void);
+
+void               routing_stop(void);
+
 struct routing_i * routing_i_create(struct pff * pff);
 
 void               routing_i_destroy(struct routing_i * instance);
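routing_init() previously mixed two results in one return value, the selected PFF type on success and a negative error otherwise, which left no clean way to distinguish PFF_SIMPLE (0) from a status code. It now returns a plain status and hands the PFF type back through the enum pol_pff * out-parameter. The convention, sketched with a hypothetical subset of the types:

#include <errno.h>
#include <stdio.h>

enum pol_pff { PFF_SIMPLE, PFF_ALTERNATE };   /* hypothetical subset */

/* Status in the return value; the result through the pointer. */
static int pick_pff(int policy, enum pol_pff * pff_type)
{
        switch (policy) {
        case 0:
                *pff_type = PFF_SIMPLE;
                return 0;
        default:
                return -ENOTSUP;      /* *pff_type left untouched */
        }
}

int main(void)
{
        enum pol_pff type;

        if (pick_pff(0, &type) < 0)
                return 1;

        printf("pff type: %d\n", type);
        return 0;
}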
*/ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  struct vertex * v = list_entry(p, struct vertex, next);                  if (v->addr > addr)                          break; @@ -151,13 +154,13 @@ static struct vertex * add_vertex(struct graph * graph,          list_add_tail(&vertex->next, p);          /* Increase the index of the vertices to the right. */ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &vertex->next) {                  struct vertex * v = list_entry(p, struct vertex, next);                  if (v->addr > addr)                          v->index++;          } -        graph->nr_vertices++; +        ++graph->vertices.len;          return vertex;  } @@ -168,13 +171,13 @@ static void del_vertex(struct graph *  graph,          struct list_head * p;          struct list_head * h; -        assert(graph); -        assert(vertex); +        assert(graph != NULL); +        assert(vertex != NULL);          list_del(&vertex->next);          /* Decrease the index of the vertices to the right. */ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  struct vertex * v = list_entry(p, struct vertex, next);                  if (v->addr > vertex->addr)                          v->index--; @@ -187,7 +190,7 @@ static void del_vertex(struct graph *  graph,          free(vertex); -        graph->nr_vertices--; +        --graph->vertices.len;  }  struct graph * graph_create(void) @@ -203,8 +206,8 @@ struct graph * graph_create(void)                  return NULL;          } -        graph->nr_vertices = 0; -        list_head_init(&graph->vertices); +        graph->vertices.len = 0; +        list_head_init(&graph->vertices.list);          return graph;  } @@ -218,7 +221,7 @@ void graph_destroy(struct graph * graph)          pthread_mutex_lock(&graph->lock); -        list_for_each_safe(p, n, &graph->vertices) { +        list_for_each_safe(p, n, &graph->vertices.list) {                  struct vertex * e = list_entry(p, struct vertex, next);                  del_vertex(graph, e);          } @@ -227,6 +230,8 @@ void graph_destroy(struct graph * graph)          pthread_mutex_destroy(&graph->lock); +        assert(graph->vertices.len == 0); +          free(graph);  } @@ -240,63 +245,35 @@ int graph_update_edge(struct graph * graph,          struct vertex * nb;          struct edge *   nb_e; -        assert(graph); +        assert(graph != NULL);          pthread_mutex_lock(&graph->lock);          v = find_vertex_by_addr(graph, s_addr); -        if (v == NULL) { -                v = add_vertex(graph, s_addr); -                if (v == NULL) { -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add vertex."); -                        return -ENOMEM; -                } +        if (v == NULL && ((v = add_vertex(graph, s_addr)) == NULL)) { +                log_err("Failed to add src vertex."); +                goto fail_add_s;          }          nb = find_vertex_by_addr(graph, d_addr); -        if (nb == NULL) { -                nb = add_vertex(graph, d_addr); -                if (nb == NULL) { -                        if (list_is_empty(&v->edges)) -                                del_vertex(graph, v); -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add vertex."); -                        return -ENOMEM; -                } +        if (nb == NULL && 
((nb = add_vertex(graph, d_addr)) == NULL)) { +                log_err("Failed to add dst vertex."); +                goto fail_add_d;          }          e = find_edge_by_addr(v, d_addr); -        if (e == NULL) { -                e = add_edge(v, nb); -                if (e == NULL) { -                        if (list_is_empty(&v->edges)) -                                del_vertex(graph, v); -                        if (list_is_empty(&nb->edges)) -                                del_vertex(graph, nb); -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add edge."); -                        return -ENOMEM; -                } +        if (e == NULL && ((e = add_edge(v, nb)) == NULL)) { +                log_err("Failed to add edge to dst."); +                goto fail_add_edge_d;          }          e->announced++;          e->qs = qs;          nb_e = find_edge_by_addr(nb, s_addr); -        if (nb_e == NULL) { -                nb_e = add_edge(nb, v); -                if (nb_e == NULL) { -                        if (--e->announced == 0) -                                del_edge(e); -                        if (list_is_empty(&v->edges)) -                                del_vertex(graph, v); -                        if (list_is_empty(&nb->edges)) -                                del_vertex(graph, nb); -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add edge."); -                        return -ENOMEM; -                } +        if (nb_e == NULL && ((nb_e = add_edge(nb, v)) == NULL)) { +                log_err("Failed to add edge to src."); +                goto fail_add_edge_s;          }          nb_e->announced++; @@ -305,6 +282,19 @@ int graph_update_edge(struct graph * graph,          pthread_mutex_unlock(&graph->lock);          return 0; + fail_add_edge_s: +        if (--e->announced == 0) +                del_edge(e); + fail_add_edge_d: +        if (list_is_empty(&nb->edges)) +                del_vertex(graph, nb); + fail_add_d: +        if (list_is_empty(&v->edges)) +                del_vertex(graph, v); + fail_add_s: +        pthread_mutex_unlock(&graph->lock); +        return -ENOMEM; +  }  int graph_del_edge(struct graph * graph, @@ -322,30 +312,26 @@ int graph_del_edge(struct graph * graph,          v = find_vertex_by_addr(graph, s_addr);          if (v == NULL) { -                pthread_mutex_unlock(&graph->lock); -                log_err("No such source vertex."); -                return -1; +                log_err("Failed to find src vertex."); +                goto fail;          }          nb = find_vertex_by_addr(graph, d_addr);          if (nb == NULL) { -                pthread_mutex_unlock(&graph->lock);                  log_err("No such destination vertex."); -                return -1; +                goto fail;          }          e = find_edge_by_addr(v, d_addr);          if (e == NULL) { -                pthread_mutex_unlock(&graph->lock);                  log_err("No such source edge."); -                return -1; +                goto fail;          }          nb_e = find_edge_by_addr(nb, s_addr);          if (nb_e == NULL) { -                pthread_mutex_unlock(&graph->lock);                  log_err("No such destination edge."); -                return -1; +                goto fail;          }          if (--e->announced == 0) @@ -362,6 +348,10 @@ int graph_del_edge(struct graph * graph,          pthread_mutex_unlock(&graph->lock);          return 0; 
+ + fail: +        pthread_mutex_unlock(&graph->lock); +        return -1;  }  static int get_min_vertex(struct graph *   graph, @@ -381,7 +371,7 @@ static int get_min_vertex(struct graph *   graph,          *v = NULL; -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  if (!used[i] && dist[i] < min) {                          min = dist[i];                          index = i; @@ -413,24 +403,24 @@ static int dijkstra(struct graph *    graph,          assert(nhops);          assert(dist); -        *nhops = malloc(sizeof(**nhops) * graph->nr_vertices); +        *nhops = malloc(sizeof(**nhops) * graph->vertices.len);          if (*nhops == NULL)                  goto fail_pnhops; -        *dist = malloc(sizeof(**dist) * graph->nr_vertices); +        *dist = malloc(sizeof(**dist) * graph->vertices.len);          if (*dist == NULL)                  goto fail_pdist; -        used = malloc(sizeof(*used) * graph->nr_vertices); +        used = malloc(sizeof(*used) * graph->vertices.len);          if (used == NULL)                  goto fail_used;          /* Init the data structures */ -        memset(used, 0, sizeof(*used) * graph->nr_vertices); -        memset(*nhops, 0, sizeof(**nhops) * graph->nr_vertices); -        memset(*dist, 0, sizeof(**dist) * graph->nr_vertices); +        memset(used, 0, sizeof(*used) * graph->vertices.len); +        memset(*nhops, 0, sizeof(**nhops) * graph->vertices.len); +        memset(*dist, 0, sizeof(**dist) * graph->vertices.len); -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  (*dist)[i++]  = (v->addr == src) ? 0 : INT_MAX;          } @@ -527,7 +517,7 @@ static int graph_routing_table_simple(struct graph *     graph,          assert(dist);          /* We need at least 2 vertices for a table */ -        if (graph->nr_vertices < 2) +        if (graph->vertices.len < 2)                  goto fail_vertices;          if (dijkstra(graph, s_addr, &nhops, dist)) @@ -536,7 +526,7 @@ static int graph_routing_table_simple(struct graph *     graph,          list_head_init(table);          /* Now construct the routing table from the nhops. */ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  /* This is the src */ @@ -634,7 +624,7 @@ static int graph_routing_table_lfa(struct graph *     graph,                  addrs[j] = -1;          } -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  if (v->addr != s_addr) @@ -660,7 +650,7 @@ static int graph_routing_table_lfa(struct graph *     graph,          }          /* Loop though all nodes to see if we have a LFA for them. 
*/ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  if (v->addr == s_addr) @@ -717,14 +707,14 @@ static int graph_routing_table_ecmp(struct graph *     graph,          assert(graph);          assert(dist); -        if (graph-> nr_vertices < 2) +        if (graph->vertices.len < 2)                  goto fail_vertices; -        forwarding = malloc(sizeof(*forwarding) * graph->nr_vertices); +        forwarding = malloc(sizeof(*forwarding) * graph->vertices.len);          if (forwarding == NULL)                  goto fail_vertices; -        for (i = 0; i < graph->nr_vertices; ++i) +        for (i = 0; i < graph->vertices.len; ++i)                  list_head_init(&forwarding[i]);          if (dijkstra(graph, s_addr, &nhops, dist)) @@ -745,7 +735,7 @@ static int graph_routing_table_ecmp(struct graph *     graph,                  free(nhops); -                list_for_each(h, &graph->vertices) { +                list_for_each(h, &graph->vertices.list) {                          v = list_entry(h, struct vertex, next);                          if (tmp_dist[v->index] + 1 == (*dist)[v->index]) {                                  n = malloc(sizeof(*n)); @@ -763,7 +753,7 @@ static int graph_routing_table_ecmp(struct graph *     graph,          list_head_init(table);          i = 0; -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  if (v->addr == s_addr) {                          ++i; diff --git a/src/ipcpd/unicast/pol/graph.h b/src/ipcpd/unicast/routing/graph.h index 632cc5a0..8190cc6c 100644 --- a/src/ipcpd/unicast/pol/graph.h +++ b/src/ipcpd/unicast/routing/graph.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Undirected graph structure   * diff --git a/src/ipcpd/unicast/pol/link_state.c b/src/ipcpd/unicast/routing/link-state.c index 08d39372..e5edf539 100644 --- a/src/ipcpd/unicast/pol/link_state.c +++ b/src/ipcpd/unicast/routing/link-state.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Link state routing policy   * @@ -42,11 +42,12 @@  #include <ouroboros/rib.h>  #include <ouroboros/utils.h> +#include "addr-auth.h"  #include "common/comp.h"  #include "common/connmgr.h"  #include "graph.h"  #include "ipcp.h" -#include "link_state.h" +#include "link-state.h"  #include "pff.h"  #include <assert.h> @@ -54,9 +55,6 @@  #include <inttypes.h>  #include <string.h> -#define RECALC_TIME    4 -#define LS_UPDATE_TIME 15 -#define LS_TIMEO       60  #define LS_ENTRY_SIZE  104  #define LSDB           "lsdb" @@ -64,6 +62,12 @@  #define CLOCK_REALTIME_COARSE CLOCK_REALTIME  #endif +#define LINK_FMT ADDR_FMT32 "--" ADDR_FMT32 +#define LINK_VAL(src, dst) ADDR_VAL32(&src), ADDR_VAL32(&dst) + +#define LSU_FMT "LSU ["ADDR_FMT32 " -- " ADDR_FMT32 " seq: %09" PRIu64 "]" +#define LSU_VAL(src, dst, seqno) ADDR_VAL32(&src), ADDR_VAL32(&dst), seqno +  struct lsa {          uint64_t d_addr;          uint64_t s_addr; @@ -106,30 +110,45 @@ struct nb {  };  struct { -        struct list_head  nbs; -        size_t            nbs_len; +        uint64_t          addr; + +        enum routing_algo routing_algo; + +        struct ls_config  conf; +          fset_t *          mgmt_set; -        struct list_head  db; -        size_t            db_len; +        struct 
graph * graph; + +        struct { +                struct { +                        struct list_head list; +                        size_t           len; +                } nbs; + +                struct { +                        struct list_head list; +                        size_t           len; +                } db; -        pthread_rwlock_t  db_lock; +                pthread_rwlock_t lock; +        }; -        struct graph *    graph; +        struct { +                struct list_head list; +                pthread_mutex_t  mtx; +        } instances;          pthread_t         lsupdate;          pthread_t         lsreader;          pthread_t         listener; - -        struct list_head  routing_instances; -        pthread_mutex_t   routing_i_lock; - -        enum routing_algo routing_algo;  } ls; -struct pol_routing_ops link_state_ops = { -        .init              = link_state_init, +struct routing_ops link_state_ops = { +        .init              = (int (*)(void *, enum pol_pff *)) link_state_init,          .fini              = link_state_fini, +        .start             = link_state_start, +        .stop              = link_state_stop,          .routing_i_create  = link_state_routing_i_create,          .routing_i_destroy = link_state_routing_i_destroy  }; @@ -138,7 +157,7 @@ static int str_adj(struct adjacency * adj,                     char *             buf,                     size_t             len)  { -        char        tmbuf[64]; +        char        tmstr[RIB_TM_STRLEN];          char        srcbuf[64];          char        dstbuf[64];          char        seqnobuf[64]; @@ -149,15 +168,16 @@ static int str_adj(struct adjacency * adj,          if (len < LS_ENTRY_SIZE)                  return -1; -        tm = localtime(&adj->stamp); -        strftime(tmbuf, sizeof(tmbuf), "%F %T", tm); /* 19 chars */ +        tm = gmtime(&adj->stamp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); -        sprintf(srcbuf, "%" PRIu64, adj->src); -        sprintf(dstbuf, "%" PRIu64, adj->dst); +        sprintf(srcbuf, ADDR_FMT32, ADDR_VAL32(&adj->src)); +        sprintf(dstbuf, ADDR_FMT32, ADDR_VAL32(&adj->dst));          sprintf(seqnobuf, "%" PRIu64, adj->seqno); -        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\nupd: %20s\n", -                srcbuf, dstbuf, seqnobuf, tmbuf); +        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\n" +                "upd: %s\n", +                srcbuf, dstbuf, seqnobuf, tmstr);          return LS_ENTRY_SIZE;  } @@ -169,9 +189,9 @@ static struct adjacency * get_adj(const char * path)          assert(path); -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next); -                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst); +                sprintf(entry, LINK_FMT, LINK_VAL(a->src, a->dst));                  if (strcmp(entry, path) == 0)                          return a;          } @@ -194,7 +214,7 @@ static int lsdb_rib_getattr(const char *      path,          clock_gettime(CLOCK_REALTIME_COARSE, &now); -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock);          adj = get_adj(entry);          if (adj != NULL) { @@ -205,7 +225,7 @@ static int lsdb_rib_getattr(const char *      path,                  attr->size  = 0;          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return 0;  } @@ -223,9 +243,9 @@ static int lsdb_rib_read(const 
char * path,          entry = strstr(path, RIB_SEPARATOR) + 1;          assert(entry); -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        if (ls.db_len + ls.nbs_len == 0) +        if (ls.db.len + ls.nbs.len == 0)                  goto fail;          a = get_adj(entry); @@ -236,11 +256,11 @@ static int lsdb_rib_read(const char * path,          if (size < 0)                  goto fail; -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return size;   fail: -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -1;  } @@ -250,60 +270,52 @@ static int lsdb_rib_readdir(char *** buf)          char               entry[RIB_PATH_LEN + 1];          ssize_t            idx = 0; -        assert(buf); +        assert(buf != NULL); -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        if (ls.db_len + ls.nbs_len == 0) { -                pthread_rwlock_unlock(&ls.db_lock); -                return 0; +        if (ls.db.len + ls.nbs.len == 0) { +                *buf = NULL; +                goto no_entries;          } -        *buf = malloc(sizeof(**buf) * (ls.db_len + ls.nbs_len)); -        if (*buf == NULL) { -                pthread_rwlock_unlock(&ls.db_lock); -                return -ENOMEM; -        } -        list_for_each(p, &ls.nbs) { +        *buf = malloc(sizeof(**buf) * (ls.db.len + ls.nbs.len)); +        if (*buf == NULL) +                goto fail_entries; + +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                char * str = (nb->type == NB_DT ? "dt." : "mgmt."); -                sprintf(entry, "%s%" PRIu64, str, nb->addr); +                char * str = (nb->type == NB_DT ? 
".dt " : ".mgmt "); +                sprintf(entry, "%s" ADDR_FMT32 , str, ADDR_VAL32(&nb->addr));                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        while (idx-- > 0) -                                free((*buf)[idx]); -                        free(buf); -                        pthread_rwlock_unlock(&ls.db_lock); -                        return -ENOMEM; -                } - -                strcpy((*buf)[idx], entry); +                if ((*buf)[idx] == NULL) +                        goto fail_entry; -                idx++; +                strcpy((*buf)[idx++], entry);          } -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next); -                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst); +                sprintf(entry,  LINK_FMT, LINK_VAL(a->src, a->dst));                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        ssize_t j; -                        for (j = 0; j < idx; ++j) -                                free(*buf[j]); -                        free(buf); -                        pthread_rwlock_unlock(&ls.db_lock); -                        return -ENOMEM; -                } - -                strcpy((*buf)[idx], entry); +                if ((*buf)[idx] == NULL) +                        goto fail_entry; -                idx++; +                strcpy((*buf)[idx++], entry);          } - -        pthread_rwlock_unlock(&ls.db_lock); + no_entries: +        pthread_rwlock_unlock(&ls.lock);          return idx; + + fail_entry: +        while (idx-- > 0) +                free((*buf)[idx]); +        free(*buf); + fail_entries: +        pthread_rwlock_unlock(&ls.lock); +        return -ENOMEM;  }  static struct rib_ops r_ops = { @@ -319,28 +331,28 @@ static int lsdb_add_nb(uint64_t     addr,          struct list_head * p;          struct nb *        nb; -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * el = list_entry(p, struct nb, next); -                if (el->addr == addr && el->type == type) { -                        log_dbg("Already know %s neighbor %" PRIu64 ".", -                                type == NB_DT ? "dt" : "mgmt", addr); -                        if (el->fd != fd) { -                                log_warn("Existing neighbor assigned new fd."); -                                el->fd = fd; -                        } -                        pthread_rwlock_unlock(&ls.db_lock); -                        return -EPERM; -                } -                  if (addr > el->addr)                          break; +                if (el->addr != addr || el->type != type) +                        continue; + +                log_dbg("Already know %s neighbor " ADDR_FMT32 ".", +                        type == NB_DT ? 
"dt" : "mgmt", ADDR_VAL32(&addr)); +                if (el->fd != fd) { +                        log_warn("Existing neighbor assigned new fd."); +                        el->fd = fd; +                } +                pthread_rwlock_unlock(&ls.lock); +                return -EPERM;          }          nb = malloc(sizeof(*nb));          if (nb == NULL) { -                pthread_rwlock_unlock(&ls.db_lock); +                pthread_rwlock_unlock(&ls.lock);                  return -ENOMEM;          } @@ -350,12 +362,12 @@ static int lsdb_add_nb(uint64_t     addr,          list_add_tail(&nb->next, p); -        ++ls.nbs_len; +        ++ls.nbs.len; -        log_dbg("Type %s neighbor %" PRIu64 " added.", -                nb->type == NB_DT ? "dt" : "mgmt", addr); +        log_dbg("Type %s neighbor " ADDR_FMT32 " added.", +                nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr)); -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return 0;  } @@ -366,22 +378,23 @@ static int lsdb_del_nb(uint64_t addr,          struct list_head * p;          struct list_head * h; -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each_safe(p, h, &ls.nbs) { +        list_for_each_safe(p, h, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                if (nb->addr == addr && nb->fd == fd) { -                        list_del(&nb->next); -                        --ls.nbs_len; -                        pthread_rwlock_unlock(&ls.db_lock); -                        log_dbg("Type %s neighbor %" PRIu64 " deleted.", -                                nb->type == NB_DT ? "dt" : "mgmt", addr); -                        free(nb); -                        return 0; -                } +                if (nb->addr != addr || nb->fd != fd) +                        continue; + +                list_del(&nb->next); +                --ls.nbs.len; +                pthread_rwlock_unlock(&ls.lock); +                log_dbg("Type %s neighbor " ADDR_FMT32 " deleted.", +                        nb->type == NB_DT ? 
"dt" : "mgmt", ADDR_VAL32(&addr)); +                free(nb); +                return 0;          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -EPERM;  } @@ -391,18 +404,18 @@ static int nbr_to_fd(uint64_t addr)          struct list_head * p;          int                fd; -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next);                  if (nb->addr == addr && nb->type == NB_DT) {                          fd = nb->fd; -                        pthread_rwlock_unlock(&ls.db_lock); +                        pthread_rwlock_unlock(&ls.lock);                          return fd;                  }          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -1;  } @@ -417,8 +430,7 @@ static void calculate_pff(struct routing_i * instance)          assert(instance); -        if (graph_routing_table(ls.graph, ls.routing_algo, -                                ipcpi.dt_addr, &table)) +        if (graph_routing_table(ls.graph, ls.routing_algo, ls.addr, &table))                  return;          pff_lock(instance->pff); @@ -453,8 +465,8 @@ static void set_pff_modified(bool calc)  {          struct list_head * p; -        pthread_mutex_lock(&ls.routing_i_lock); -        list_for_each(p, &ls.routing_instances) { +        pthread_mutex_lock(&ls.instances.mtx); +        list_for_each(p, &ls.instances.list) {                  struct routing_i * inst =                          list_entry(p, struct routing_i, next);                  pthread_mutex_lock(&inst->lock); @@ -463,7 +475,7 @@ static void set_pff_modified(bool calc)                  if (calc)                          calculate_pff(inst);          } -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);  }  static int lsdb_add_link(uint64_t    src, @@ -480,9 +492,9 @@ static int lsdb_add_link(uint64_t    src,          clock_gettime(CLOCK_REALTIME_COARSE, &now); -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next);                  if (a->dst == dst && a->src == src) {                          if (a->seqno < seqno) { @@ -490,7 +502,7 @@ static int lsdb_add_link(uint64_t    src,                                  a->seqno = seqno;                                  ret = 0;                          } -                        pthread_rwlock_unlock(&ls.db_lock); +                        pthread_rwlock_unlock(&ls.lock);                          return ret;                  } @@ -500,7 +512,7 @@ static int lsdb_add_link(uint64_t    src,          adj = malloc(sizeof(*adj));          if (adj == NULL) { -                pthread_rwlock_unlock(&ls.db_lock); +                pthread_rwlock_unlock(&ls.lock);                  return -ENOMEM;          } @@ -511,12 +523,12 @@ static int lsdb_add_link(uint64_t    src,          list_add_tail(&adj->next, p); -        ls.db_len++; +        ls.db.len++;          if (graph_update_edge(ls.graph, src, dst, *qs))                  log_warn("Failed to add edge to graph."); -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          set_pff_modified(true); @@ -529,25 +541,25 @@ 
static int lsdb_del_link(uint64_t src,          struct list_head * p;          struct list_head * h; -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each_safe(p, h, &ls.db) { +        list_for_each_safe(p, h, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next);                  if (a->dst == dst && a->src == src) {                          list_del(&a->next);                          if (graph_del_edge(ls.graph, src, dst))                                  log_warn("Failed to delete edge from graph."); -                        ls.db_len--; +                        ls.db.len--; -                        pthread_rwlock_unlock(&ls.db_lock); +                        pthread_rwlock_unlock(&ls.lock);                          set_pff_modified(false);                          free(a);                          return 0;                  }          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -EPERM;  } @@ -570,7 +582,7 @@ static void * periodic_recalc_pff(void * o)                  if (modified)                          calculate_pff(inst); -                sleep(RECALC_TIME); +                sleep(ls.conf.t_recalc);          }          return (void *) 0; @@ -587,10 +599,20 @@ static void send_lsm(uint64_t src,          lsm.s_addr = hton64(src);          lsm.seqno  = hton64(seqno); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                if (nb->type == NB_MGMT) -                        flow_write(nb->fd, &lsm, sizeof(lsm)); +                if (nb->type != NB_MGMT) +                        continue; + +                if (flow_write(nb->fd, &lsm, sizeof(lsm)) < 0) +                        log_err("Failed to send LSM to " ADDR_FMT32, +                                ADDR_VAL32(&nb->addr)); +#ifdef DEBUG_PROTO_LS +                else +                        log_proto(LSU_FMT " --> " ADDR_FMT32, +                                LSU_VAL(src, dst, seqno), +                                ADDR_VAL32(&nb->addr)); +#endif          }  } @@ -604,9 +626,9 @@ static void lsdb_replicate(int fd)          list_head_init(©);          /* Lock the lsdb, copy the lsms and send outside of lock. 
*/ -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * adj;                  struct adjacency * cpy;                  adj = list_entry(p, struct adjacency, next); @@ -623,7 +645,7 @@ static void lsdb_replicate(int fd)                  list_add_tail(&cpy->next, ©);          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          list_for_each_safe(p, h, ©) {                  struct lsa         lsm; @@ -649,17 +671,17 @@ static void * lsupdate(void * o)          while (true) {                  clock_gettime(CLOCK_REALTIME_COARSE, &now); -                pthread_rwlock_wrlock(&ls.db_lock); +                pthread_rwlock_wrlock(&ls.lock); -                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); +                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock); -                list_for_each_safe(p, h, &ls.db) { +                list_for_each_safe(p, h, &ls.db.list) {                          struct adjacency * adj;                          adj = list_entry(p, struct adjacency, next); -                        if (now.tv_sec - adj->stamp > LS_TIMEO) { +                        if (now.tv_sec > adj->stamp + ls.conf.t_timeo) {                                  list_del(&adj->next); -                                log_dbg("%" PRIu64 " - %" PRIu64" timed out.", -                                        adj->src, adj->dst); +                                log_dbg(LINK_FMT " timed out.", +                                        LINK_VAL(adj->src, adj->dst));                                  if (graph_del_edge(ls.graph, adj->src,                                                     adj->dst))                                          log_err("Failed to del edge."); @@ -667,7 +689,7 @@ static void * lsupdate(void * o)                                  continue;                          } -                        if (adj->src == ipcpi.dt_addr) { +                        if (adj->src == ls.addr) {                                  adj->seqno++;                                  send_lsm(adj->src, adj->dst, adj->seqno);                                  adj->stamp = now.tv_sec; @@ -676,7 +698,7 @@ static void * lsupdate(void * o)                  pthread_cleanup_pop(true); -                sleep(LS_UPDATE_TIME); +                sleep(ls.conf.t_update);          }          return (void *) 0; @@ -708,15 +730,36 @@ static void forward_lsm(uint8_t * buf,                          int       in_fd)  {          struct list_head * p; +#ifdef DEBUG_PROTO_LS +        struct lsa lsm; -        pthread_rwlock_rdlock(&ls.db_lock); +        assert(buf); +        assert(len >= sizeof(struct lsa)); + +        memcpy(&lsm, buf, sizeof(lsm)); + +        lsm.s_addr = ntoh64(lsm.s_addr); +        lsm.d_addr = ntoh64(lsm.d_addr); +        lsm.seqno  = ntoh64(lsm.seqno); +#endif +        pthread_rwlock_rdlock(&ls.lock); -        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); +        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                if (nb->type == NB_MGMT && nb->fd != in_fd) -                        flow_write(nb->fd, buf, len); +                if (nb->type != NB_MGMT || nb->fd == in_fd) +                        continue; + +                
if (flow_write(nb->fd, buf, len) < 0) +                        log_err("Failed to forward LSM to " ADDR_FMT32, +                                ADDR_VAL32(&nb->addr)); +#ifdef DEBUG_PROTO_LS +                else +                        log_proto(LSU_FMT " --> " ADDR_FMT32 " [forwarded]", +                                LSU_VAL(lsm.s_addr, lsm.d_addr, lsm.seqno), +                                ADDR_VAL32(&nb->addr)); +#endif          }          pthread_cleanup_pop(true); @@ -729,13 +772,13 @@ static void cleanup_fqueue(void * fq)  static void * lsreader(void * o)  { -        fqueue_t *   fq; -        int          ret; -        uint8_t      buf[sizeof(struct lsa)]; -        int          fd; -        qosspec_t    qs; -        struct lsa * msg; -        size_t       len; +        fqueue_t * fq; +        int        ret; +        uint8_t    buf[sizeof(struct lsa)]; +        int        fd; +        qosspec_t  qs; +        struct lsa msg; +        size_t     len;          (void) o; @@ -758,15 +801,22 @@ static void * lsreader(void * o)                          if (fqueue_type(fq) != FLOW_PKT)                                  continue; -                        len = flow_read(fd, buf, sizeof(*msg)); -                        if (len <= 0 || len != sizeof(*msg)) +                        len = flow_read(fd, buf, sizeof(msg)); +                        if (len <= 0 || len != sizeof(msg))                                  continue; -                        msg = (struct lsa *) buf; - -                        if (lsdb_add_link(ntoh64(msg->s_addr), -                                          ntoh64(msg->d_addr), -                                          ntoh64(msg->seqno), +                        memcpy(&msg, buf, sizeof(msg)); +                        msg.s_addr = ntoh64(msg.s_addr); +                        msg.d_addr = ntoh64(msg.d_addr); +                        msg.seqno  = ntoh64(msg.seqno); +#ifdef DEBUG_PROTO_LS +                        log_proto(LSU_FMT " <-- " ADDR_FMT32, +                                  LSU_VAL(msg.s_addr, msg.d_addr, msg.seqno), +                                  ADDR_VAL32(&ls.addr)); +#endif +                        if (lsdb_add_link(msg.s_addr, +                                          msg.d_addr, +                                          msg.seqno,                                            &qs))                                  continue; @@ -787,14 +837,14 @@ static void flow_event(int  fd,          log_dbg("Notifying routing instances of flow event."); -        pthread_mutex_lock(&ls.routing_i_lock); +        pthread_mutex_lock(&ls.instances.mtx); -        list_for_each(p, &ls.routing_instances) { +        list_for_each(p, &ls.instances.list) {                  struct routing_i * ri = list_entry(p, struct routing_i, next);                  pff_flow_state_change(ri->pff, fd, up);          } -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);  }  static void handle_event(void *       self, @@ -816,17 +866,17 @@ static void handle_event(void *       self,          switch (event) {          case NOTIFY_DT_CONN_ADD: -                pthread_rwlock_rdlock(&ls.db_lock); +                pthread_rwlock_rdlock(&ls.lock); -                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); +                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock); -                send_lsm(ipcpi.dt_addr, c->conn_info.addr, 0); +                send_lsm(ls.addr, c->conn_info.addr, 0);                  
pthread_cleanup_pop(true);                  if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT))                          log_dbg("Failed to add neighbor to LSDB."); -                if (lsdb_add_link(ipcpi.dt_addr, c->conn_info.addr, 0, &qs)) +                if (lsdb_add_link(ls.addr, c->conn_info.addr, 0, &qs))                          log_dbg("Failed to add new adjacency to LSDB.");                  break;          case NOTIFY_DT_CONN_DEL: @@ -835,7 +885,7 @@ static void handle_event(void *       self,                  if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))                          log_dbg("Failed to delete neighbor from LSDB."); -                if (lsdb_del_link(ipcpi.dt_addr, c->conn_info.addr)) +                if (lsdb_del_link(ls.addr, c->conn_info.addr))                          log_dbg("Local link was not in LSDB.");                  break;          case NOTIFY_DT_CONN_QOS: @@ -886,11 +936,11 @@ struct routing_i * link_state_routing_i_create(struct pff * pff)                             periodic_recalc_pff, tmp))                  goto fail_pthread_create_lsupdate; -        pthread_mutex_lock(&ls.routing_i_lock); +        pthread_mutex_lock(&ls.instances.mtx); -        list_add(&tmp->next, &ls.routing_instances); +        list_add(&tmp->next, &ls.instances.list); -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);          return tmp; @@ -906,11 +956,11 @@ void link_state_routing_i_destroy(struct routing_i * instance)  {          assert(instance); -        pthread_mutex_lock(&ls.routing_i_lock); +        pthread_mutex_lock(&ls.instances.mtx);          list_del(&instance->next); -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);          pthread_cancel(instance->calculator); @@ -921,96 +971,146 @@ void link_state_routing_i_destroy(struct routing_i * instance)          free(instance);  } -int link_state_init(enum pol_routing pr) +int link_state_start(void) +{ +        if (notifier_reg(handle_event, NULL)) { +                log_err("Failed to register link-state with notifier."); +                goto fail_notifier_reg; +        } + +        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) { +                log_err("Failed to create lsupdate thread."); +                goto fail_pthread_create_lsupdate; +        } + +        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) { +                log_err("Failed to create lsreader thread."); +                goto fail_pthread_create_lsreader; +        } + +        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) { +                log_err("Failed to create listener thread."); +                goto fail_pthread_create_listener; +        } + +        return 0; + + fail_pthread_create_listener: +        pthread_cancel(ls.lsreader); +        pthread_join(ls.lsreader, NULL); + fail_pthread_create_lsreader: +        pthread_cancel(ls.lsupdate); +        pthread_join(ls.lsupdate, NULL); + fail_pthread_create_lsupdate: +        notifier_unreg(handle_event); + fail_notifier_reg: +        return -1; +} + +void link_state_stop(void) +{ +        pthread_cancel(ls.listener); +        pthread_cancel(ls.lsreader); +        pthread_cancel(ls.lsupdate); + +        pthread_join(ls.listener, NULL); +        pthread_join(ls.lsreader, NULL); +        pthread_join(ls.lsupdate, NULL); + +        notifier_unreg(handle_event); +} + + +int link_state_init(struct ls_config * conf, +                    enum pol_pff *  
   pff_type)  {          struct conn_info info; +        assert(conf != NULL); +        assert(pff_type != NULL); +          memset(&info, 0, sizeof(info)); +        ls.addr = addr_auth_address(); +          strcpy(info.comp_name, LS_COMP);          strcpy(info.protocol, LS_PROTO);          info.pref_version = 1;          info.pref_syntax  = PROTO_GPB; -        info.addr         = ipcpi.dt_addr; +        info.addr         = ls.addr; -        switch (pr) { -        case ROUTING_LINK_STATE: -                log_dbg("Using link state routing policy."); +        ls.conf = *conf; + +        switch (conf->pol) { +        case LS_SIMPLE: +                *pff_type = PFF_SIMPLE;                  ls.routing_algo = ROUTING_SIMPLE; +                log_dbg("Using Link State Routing policy.");                  break; -        case ROUTING_LINK_STATE_LFA: -                log_dbg("Using Loop-Free Alternates policy."); +        case LS_LFA:                  ls.routing_algo = ROUTING_LFA; +                *pff_type = PFF_ALTERNATE; +                log_dbg("Using Loop-Free Alternates policy.");                  break; -        case ROUTING_LINK_STATE_ECMP: -                log_dbg("Using Equal-Cost Multipath policy."); +        case LS_ECMP:                  ls.routing_algo = ROUTING_ECMP; +                *pff_type = PFF_MULTIPATH; +                log_dbg("Using Equal-Cost Multipath policy.");                  break;          default:                  goto fail_graph;          } +        log_dbg("LS update interval: %ld seconds.", ls.conf.t_update); +        log_dbg("LS link timeout   : %ld seconds.", ls.conf.t_timeo); +        log_dbg("LS recalc interval: %ld seconds.", ls.conf.t_recalc); +          ls.graph = graph_create();          if (ls.graph == NULL)                  goto fail_graph; -        if (notifier_reg(handle_event, NULL)) -                goto fail_notifier_reg; - -        if (pthread_rwlock_init(&ls.db_lock, NULL)) -                goto fail_db_lock_init; +        if (pthread_rwlock_init(&ls.lock, NULL)) { +                log_err("Failed to init lock."); +                goto fail_lock_init; +        } -        if (pthread_mutex_init(&ls.routing_i_lock, NULL)) +        if (pthread_mutex_init(&ls.instances.mtx, NULL)) { +                log_err("Failed to init instances mutex.");                  goto fail_routing_i_lock_init; +        } -        if (connmgr_comp_init(COMPID_MGMT, &info)) +        if (connmgr_comp_init(COMPID_MGMT, &info)) { +                log_err("Failed to init connmgr.");                  goto fail_connmgr_comp_init; +        }          ls.mgmt_set = fset_create(); -        if (ls.mgmt_set == NULL) +        if (ls.mgmt_set == NULL) { +                log_err("Failed to create fset.");                  goto fail_fset_create; +        } -        list_head_init(&ls.db); -        list_head_init(&ls.nbs); -        list_head_init(&ls.routing_instances); - -        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) -                goto fail_pthread_create_lsupdate; - -        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) -                goto fail_pthread_create_lsreader; - -        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) -                goto fail_pthread_create_listener; +        list_head_init(&ls.db.list); +        list_head_init(&ls.nbs.list); +        list_head_init(&ls.instances.list);          if (rib_reg(LSDB, &r_ops))                  goto fail_rib_reg; -        ls.db_len  = 0; -        ls.nbs_len = 0; +        ls.db.len  = 
0; +        ls.nbs.len = 0;          return 0;   fail_rib_reg: -        pthread_cancel(ls.listener); -        pthread_join(ls.listener, NULL); - fail_pthread_create_listener: -        pthread_cancel(ls.lsreader); -        pthread_join(ls.lsreader, NULL); - fail_pthread_create_lsreader: -        pthread_cancel(ls.lsupdate); -        pthread_join(ls.lsupdate, NULL); - fail_pthread_create_lsupdate:          fset_destroy(ls.mgmt_set);   fail_fset_create:          connmgr_comp_fini(COMPID_MGMT);   fail_connmgr_comp_init: -        pthread_mutex_destroy(&ls.routing_i_lock); +        pthread_mutex_destroy(&ls.instances.mtx);   fail_routing_i_lock_init: -        pthread_rwlock_destroy(&ls.db_lock); - fail_db_lock_init: -        notifier_unreg(handle_event); - fail_notifier_reg: +        pthread_rwlock_destroy(&ls.lock); + fail_lock_init:          graph_destroy(ls.graph);   fail_graph:          return -1; @@ -1023,33 +1123,23 @@ void link_state_fini(void)          rib_unreg(LSDB); -        notifier_unreg(handle_event); - -        pthread_cancel(ls.listener); -        pthread_cancel(ls.lsreader); -        pthread_cancel(ls.lsupdate); - -        pthread_join(ls.listener, NULL); -        pthread_join(ls.lsreader, NULL); -        pthread_join(ls.lsupdate, NULL); -          fset_destroy(ls.mgmt_set);          connmgr_comp_fini(COMPID_MGMT);          graph_destroy(ls.graph); -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each_safe(p, h, &ls.db) { +        list_for_each_safe(p, h, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next);                  list_del(&a->next);                  free(a);          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock); -        pthread_rwlock_destroy(&ls.db_lock); +        pthread_rwlock_destroy(&ls.lock); -        pthread_mutex_destroy(&ls.routing_i_lock); +        pthread_mutex_destroy(&ls.instances.mtx);  } diff --git a/src/ipcpd/unicast/pol/link_state.h b/src/ipcpd/unicast/routing/link-state.h index 05b0ae5d..69eb6781 100644 --- a/src/ipcpd/unicast/pol/link_state.h +++ b/src/ipcpd/unicast/routing/link-state.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Link state routing policy   * @@ -26,16 +26,21 @@  #define LS_COMP  "Management"  #define LS_PROTO "LSP" -#include "pol-routing-ops.h" +#include "ops.h" -int                link_state_init(enum pol_routing pr); +int                link_state_init(struct ls_config * ls, +                                   enum pol_pff *     pff_type);  void               link_state_fini(void); +int                link_state_start(void); + +void               link_state_stop(void); +  struct routing_i * link_state_routing_i_create(struct pff * pff);  void               link_state_routing_i_destroy(struct routing_i * instance); -extern struct pol_routing_ops link_state_ops; +extern struct routing_ops link_state_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_POL_LINK_STATE_H */ diff --git a/src/ipcpd/unicast/pol-routing-ops.h b/src/ipcpd/unicast/routing/ops.h index cea88582..4bf75c80 100644 --- a/src/ipcpd/unicast/pol-routing-ops.h +++ b/src/ipcpd/unicast/routing/ops.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Routing policy ops   * @@ -20,19 +20,24 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   
*/ -#ifndef OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H -#define OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H +#ifndef OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H +#define OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H  #include "pff.h" -struct pol_routing_ops { -        int                (* init)(enum pol_routing pr); +struct routing_ops { +        int                (* init)(void *         conf, +                                    enum pol_pff * pff_type);          void               (* fini)(void); +        int                (* start)(void); + +        void               (* stop)(void); +          struct routing_i * (* routing_i_create)(struct pff * pff);          void               (* routing_i_destroy)(struct routing_i * instance);  }; -#endif /* OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H */ diff --git a/src/ipcpd/unicast/routing/pol.h b/src/ipcpd/unicast/routing/pol.h new file mode 100644 index 00000000..b6a6f150 --- /dev/null +++ b/src/ipcpd/unicast/routing/pol.h @@ -0,0 +1,23 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Routing policies + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#include "link-state.h" diff --git a/src/ipcpd/unicast/pol/tests/CMakeLists.txt b/src/ipcpd/unicast/routing/tests/CMakeLists.txt index 34d80e8d..9d24bf03 100644 --- a/src/ipcpd/unicast/pol/tests/CMakeLists.txt +++ b/src/ipcpd/unicast/routing/tests/CMakeLists.txt @@ -18,7 +18,6 @@ get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)  create_test_sourcelist(${PARENT_DIR}_tests test_suite.c    # Add new tests here    graph_test.c -  pft_test.c    )  add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests}) @@ -27,7 +26,11 @@ target_link_libraries(${PARENT_DIR}_test ouroboros-common)  add_dependencies(check ${PARENT_DIR}_test)  set(tests_to_run ${${PARENT_DIR}_tests}) -remove(tests_to_run test_suite.c) +if(CMAKE_VERSION VERSION_LESS "3.29.0") +  remove(tests_to_run test_suite.c) +else () +  list(POP_FRONT tests_to_run) +endif()  foreach (test ${tests_to_run})    get_filename_component(test_name ${test} NAME_WE) diff --git a/src/ipcpd/unicast/pol/tests/graph_test.c b/src/ipcpd/unicast/routing/tests/graph_test.c index 217c7eab..d805640c 100644 --- a/src/ipcpd/unicast/pol/tests/graph_test.c +++ b/src/ipcpd/unicast/routing/tests/graph_test.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Test of the graph structure   * diff --git a/src/ipcpd/unicast/tests/dht_test.c b/src/ipcpd/unicast/tests/dht_test.c deleted file mode 100644 index 552af75c..00000000 --- a/src/ipcpd/unicast/tests/dht_test.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2021 - * - * Unit tests of the DHT - * - *    Dimitri Staessens <dimitri@ouroboros.rocks> - *    Sander Vrijders   <sander@ouroboros.rocks> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. 
- */ - -#define __DHT_TEST__ - -#include "dht.c" - -#include <pthread.h> -#include <time.h> -#include <stdlib.h> -#include <stdio.h> - -#define KEY_LEN  32 - -#define EXP      86400 -#define CONTACTS 1000 - -int dht_test(int     argc, -             char ** argv) -{ -        struct dht * dht; -        uint64_t     addr = 0x0D1F; -        uint8_t      key[KEY_LEN]; -        size_t       i; - -        (void) argc; -        (void) argv; - -        dht = dht_create(addr); -        if (dht == NULL) { -                printf("Failed to create dht.\n"); -                return -1; -        } - -        dht_destroy(dht); - -        dht = dht_create(addr); -        if (dht == NULL) { -                printf("Failed to re-create dht.\n"); -                return -1; -        } - -        if (dht_bootstrap(dht, KEY_LEN, EXP)) { -                printf("Failed to bootstrap dht.\n"); -                dht_destroy(dht); -                return -1; -        } - -        dht_destroy(dht); - -        dht = dht_create(addr); -        if (dht == NULL) { -                printf("Failed to re-create dht.\n"); -                return -1; -        } - -        if (dht_bootstrap(dht, KEY_LEN, EXP)) { -                printf("Failed to bootstrap dht.\n"); -                dht_destroy(dht); -                return -1; -        } - -        for (i = 0; i < CONTACTS; ++i) { -                uint64_t addr; -                random_buffer(&addr, sizeof(addr)); -                random_buffer(key, KEY_LEN); -                pthread_rwlock_wrlock(&dht->lock); -                if (dht_update_bucket(dht, key, addr)) { -                        pthread_rwlock_unlock(&dht->lock); -                        printf("Failed to update bucket.\n"); -                        dht_destroy(dht); -                        return -1; -                } -                pthread_rwlock_unlock(&dht->lock); -        } - -        dht_destroy(dht); - -        return 0; -}
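Note on the psched change above: the scheduler no longer calls ipcp_flow_read() directly; packet_reader() now invokes an injected read_fn_t and skips the packet on a negative return. A minimal sketch of a caller under the new two-argument psched_create() follows; the dt_* names and the ipcp-dev include are assumptions for illustration, not part of the patch.

#include <ouroboros/ipcp-dev.h>   /* for ipcp_flow_read(), assumed caller-side */

#include "psched.h"

/* Hypothetical read_fn_t: wraps the flow read the scheduler used to
 * perform itself; must return < 0 on failure, as packet_reader() checks. */
static int dt_read(int fd, struct shm_du_buff ** sdb)
{
        return ipcp_flow_read(fd, sdb);
}

/* Hypothetical next_packet_fn_t: receives each packet the scheduler reads. */
static void dt_next_packet(int fd, qoscube_t qc, struct shm_du_buff * sdb)
{
        (void) fd; (void) qc; (void) sdb;   /* forward the packet here */
}

static int dt_sched_start(struct psched ** ps)
{
        *ps = psched_create(dt_next_packet, dt_read);
        return *ps == NULL ? -1 : 0;
}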
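The routing API reshuffle works the same way across policies: routing_init() no longer encodes the PFF type in its return value but reports it through an out-parameter, and thread startup/shutdown moved into the new routing_start()/routing_stop() pair. A sketch of the resulting lifecycle, assuming a caller shaped like the data-transfer component (the dt_* function names are hypothetical):

static int dt_routing_bringup(struct routing_config * conf)
{
        enum pol_pff pff_type;

        /* Selects link_state_ops from conf->pol and sets pff_type. */
        if (routing_init(conf, &pff_type) < 0)
                return -1;

        /* Spawns the lsupdate, lsreader and listener threads. */
        if (routing_start() < 0) {
                routing_fini();
                return -1;
        }

        /* ... create PFF and routing instances using pff_type ... */

        return 0;
}

static void dt_routing_teardown(void)
{
        routing_stop();   /* cancels and joins the threads started above */
        routing_fini();
}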
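Likewise, the link-state timers that used to be the compile-time constants RECALC_TIME (4), LS_UPDATE_TIME (15) and LS_TIMEO (60) are now carried per instance in struct ls_config and logged at init. A hedged example configuration keeping the old defaults; the field names appear in the patch, everything else is an assumption:

/* Sketch: ls_config preserving the former compile-time defaults. */
struct ls_config conf = {
        .pol      = LS_SIMPLE,  /* or LS_LFA / LS_ECMP */
        .t_update = 15,         /* seconds between LSM refreshes, was LS_UPDATE_TIME */
        .t_timeo  = 60,         /* link expiry timeout, was LS_TIMEO */
        .t_recalc = 4           /* PFF recalculation period, was RECALC_TIME */
};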