Diffstat (limited to 'src/ipcpd/unicast')
-rw-r--r--     45  src/ipcpd/unicast/CMakeLists.txt
-rw-r--r--      9  src/ipcpd/unicast/addr-auth.c (renamed from src/ipcpd/unicast/addr_auth.c)
-rw-r--r--     10  src/ipcpd/unicast/addr-auth.h (renamed from src/ipcpd/unicast/addr_auth.h)
-rw-r--r--     42  src/ipcpd/unicast/addr-auth/flat.c (renamed from src/ipcpd/unicast/pol/flat.c)
-rw-r--r--      6  src/ipcpd/unicast/addr-auth/flat.h (renamed from src/ipcpd/unicast/pol/flat.h)
-rw-r--r--     10  src/ipcpd/unicast/addr-auth/ops.h (renamed from src/ipcpd/unicast/pol-addr-auth-ops.h)
-rw-r--r--     23  src/ipcpd/unicast/addr-auth/pol.h
-rw-r--r--      9  src/ipcpd/unicast/ca.c
-rw-r--r--      2  src/ipcpd/unicast/ca.h
-rw-r--r--     10  src/ipcpd/unicast/ca/mb-ecn.c (renamed from src/ipcpd/unicast/pol/ca-mb-ecn.c)
-rw-r--r--      6  src/ipcpd/unicast/ca/mb-ecn.h (renamed from src/ipcpd/unicast/pol/ca-mb-ecn.h)
-rw-r--r--      6  src/ipcpd/unicast/ca/nop.c (renamed from src/ipcpd/unicast/pol/ca-nop.c)
-rw-r--r--      6  src/ipcpd/unicast/ca/nop.h (renamed from src/ipcpd/unicast/pol/ca-nop.h)
-rw-r--r--     10  src/ipcpd/unicast/ca/ops.h (renamed from src/ipcpd/unicast/pol-ca-ops.h)
-rw-r--r--     24  src/ipcpd/unicast/ca/pol.h
-rw-r--r--      6  src/ipcpd/unicast/connmgr.c
-rw-r--r--   2842  src/ipcpd/unicast/dht.c
-rw-r--r--     74  src/ipcpd/unicast/dir.c
-rw-r--r--     11  src/ipcpd/unicast/dir.h
-rw-r--r--   4052  src/ipcpd/unicast/dir/dht.c
-rw-r--r--     31  src/ipcpd/unicast/dir/dht.h (renamed from src/ipcpd/unicast/dht.h)
-rw-r--r--     58  src/ipcpd/unicast/dir/dht.proto
-rw-r--r--     42  src/ipcpd/unicast/dir/ops.h
-rw-r--r--     23  src/ipcpd/unicast/dir/pol.h
-rw-r--r--     40  src/ipcpd/unicast/dir/tests/CMakeLists.txt
-rw-r--r--   1925  src/ipcpd/unicast/dir/tests/dht_test.c
-rw-r--r--    190  src/ipcpd/unicast/dt.c
-rw-r--r--     14  src/ipcpd/unicast/dt.h
-rw-r--r--      3  src/ipcpd/unicast/enroll.c
-rw-r--r--    437  src/ipcpd/unicast/fa.c
-rw-r--r--     18  src/ipcpd/unicast/fa.h
-rw-r--r--     45  src/ipcpd/unicast/kademlia.proto
-rw-r--r--    217  src/ipcpd/unicast/main.c
-rw-r--r--     15  src/ipcpd/unicast/pff.c
-rw-r--r--      2  src/ipcpd/unicast/pff.h
-rw-r--r--      6  src/ipcpd/unicast/pff/alternate.c (renamed from src/ipcpd/unicast/pol/alternate_pff.c)
-rw-r--r--      6  src/ipcpd/unicast/pff/alternate.h (renamed from src/ipcpd/unicast/pol/alternate_pff.h)
-rw-r--r--     34  src/ipcpd/unicast/pff/multipath.c (renamed from src/ipcpd/unicast/pol/multipath_pff.c)
-rw-r--r--      6  src/ipcpd/unicast/pff/multipath.h (renamed from src/ipcpd/unicast/pol/multipath_pff.h)
-rw-r--r--     10  src/ipcpd/unicast/pff/ops.h (renamed from src/ipcpd/unicast/pol-pff-ops.h)
-rw-r--r--     16  src/ipcpd/unicast/pff/pft.c (renamed from src/ipcpd/unicast/pol/pft.c)
-rw-r--r--      2  src/ipcpd/unicast/pff/pft.h (renamed from src/ipcpd/unicast/pol/pft.h)
-rw-r--r--     25  src/ipcpd/unicast/pff/pol.h
-rw-r--r--      6  src/ipcpd/unicast/pff/simple.c (renamed from src/ipcpd/unicast/pol/simple_pff.c)
-rw-r--r--      6  src/ipcpd/unicast/pff/simple.h (renamed from src/ipcpd/unicast/pol/simple_pff.h)
-rw-r--r--     13  src/ipcpd/unicast/pff/tests/CMakeLists.txt (renamed from src/ipcpd/unicast/tests/CMakeLists.txt)
-rw-r--r--      2  src/ipcpd/unicast/pff/tests/pft_test.c (renamed from src/ipcpd/unicast/pol/tests/pft_test.c)
-rw-r--r--      9  src/ipcpd/unicast/psched.c
-rw-r--r--      8  src/ipcpd/unicast/psched.h
-rw-r--r--     36  src/ipcpd/unicast/routing.c
-rw-r--r--      9  src/ipcpd/unicast/routing.h
-rw-r--r--    158  src/ipcpd/unicast/routing/graph.c (renamed from src/ipcpd/unicast/pol/graph.c)
-rw-r--r--      2  src/ipcpd/unicast/routing/graph.h (renamed from src/ipcpd/unicast/pol/graph.h)
-rw-r--r--    546  src/ipcpd/unicast/routing/link-state.c (renamed from src/ipcpd/unicast/pol/link_state.c)
-rw-r--r--     13  src/ipcpd/unicast/routing/link-state.h (renamed from src/ipcpd/unicast/pol/link_state.h)
-rw-r--r--     17  src/ipcpd/unicast/routing/ops.h (renamed from src/ipcpd/unicast/pol-routing-ops.h)
-rw-r--r--     23  src/ipcpd/unicast/routing/pol.h
-rw-r--r--      7  src/ipcpd/unicast/routing/tests/CMakeLists.txt (renamed from src/ipcpd/unicast/pol/tests/CMakeLists.txt)
-rw-r--r--      2  src/ipcpd/unicast/routing/tests/graph_test.c (renamed from src/ipcpd/unicast/pol/tests/graph_test.c)
-rw-r--r--     99  src/ipcpd/unicast/tests/dht_test.c

60 files changed, 7341 insertions(+), 3983 deletions(-)
diff --git a/src/ipcpd/unicast/CMakeLists.txt b/src/ipcpd/unicast/CMakeLists.txt
index 07f12540..ced045e6 100644
--- a/src/ipcpd/unicast/CMakeLists.txt
+++ b/src/ipcpd/unicast/CMakeLists.txt
@@ -13,8 +13,14 @@ include_directories(${CMAKE_SOURCE_DIR}/include)
 include_directories(${CMAKE_BINARY_DIR}/include)
 
 set(IPCP_UNICAST_TARGET ipcpd-unicast CACHE INTERNAL "")
+set(IPCP_UNICAST_MPL 10000 CACHE STRING
+    "Default maximum packet lifetime for the unicast IPCP, in ms")
+set(DEBUG_PROTO_DHT FALSE CACHE BOOL
+  "Add DHT protocol message output to debug logging")
+set(DEBUG_PROTO_LS FALSE CACHE BOOL
+  "Add link state protocol message output to debug logging")
 
-protobuf_generate_c(KAD_PROTO_SRCS KAD_PROTO_HDRS kademlia.proto)
+protobuf_generate_c(DHT_PROTO_SRCS DHT_PROTO_HDRS dir/dht.proto)
 
 math(EXPR PFT_EXPR "1 << 12")
 set(PFT_SIZE ${PFT_EXPR} CACHE STRING
@@ -29,34 +35,33 @@ if (HAVE_FUSE)
     endif ()
 endif ()
 
-set(SOURCE_FILES
+set(IPCP_UNICAST_SOURCE_FILES
   # Add source files here
-  addr_auth.c
+  addr-auth.c
   ca.c
   connmgr.c
-  dht.c
   dir.c
   dt.c
-  enroll.c
   fa.c
   main.c
   pff.c
   routing.c
   psched.c
   # Add policies last
-  pol/pft.c
-  pol/flat.c
-  pol/link_state.c
-  pol/graph.c
-  pol/simple_pff.c
-  pol/alternate_pff.c
-  pol/multipath_pff.c
-  pol/ca-mb-ecn.c
-  pol/ca-nop.c
+  addr-auth/flat.c
+  ca/mb-ecn.c
+  ca/nop.c
+  dir/dht.c
+  pff/simple.c
+  pff/alternate.c
+  pff/multipath.c
+  pff/pft.c
+  routing/link-state.c
+  routing/graph.c
   )
 
-add_executable(ipcpd-unicast ${SOURCE_FILES} ${IPCP_SOURCES}
-  ${KAD_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS})
+add_executable(ipcpd-unicast ${IPCP_UNICAST_SOURCE_FILES} ${IPCP_SOURCES} ${COMMON_SOURCES}
+  ${DHT_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS})
 
 target_link_libraries(ipcpd-unicast LINK_PUBLIC ouroboros-dev)
 
 include(AddCompileFlags)
@@ -66,8 +71,6 @@ endif ()
 
 install(TARGETS ipcpd-unicast RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
 
-add_subdirectory(pol/tests)
-
-if (NOT GNU)
-  add_subdirectory(tests)
-endif ()
+add_subdirectory(pff/tests)
+add_subdirectory(routing/tests)
+add_subdirectory(dir/tests)
diff --git a/src/ipcpd/unicast/addr_auth.c b/src/ipcpd/unicast/addr-auth.c
index e508d0cb..908a4aa1 100644
--- a/src/ipcpd/unicast/addr_auth.c
+++ b/src/ipcpd/unicast/addr-auth.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Address authority
  *
@@ -24,13 +24,12 @@
 
 #include <ouroboros/logs.h>
 
-#include "addr_auth.h"
-#include "pol-addr-auth-ops.h"
-#include "pol/flat.h"
+#include "addr-auth.h"
+#include "addr-auth/pol.h"
 
 #include <stdlib.h>
 
-struct pol_addr_auth_ops * ops;
+struct addr_auth_ops * ops;
 
 int addr_auth_init(enum pol_addr_auth type,
                    const void *       info)
diff --git a/src/ipcpd/unicast/addr_auth.h b/src/ipcpd/unicast/addr-auth.h
index d26d3eb7..0d2cd4c0 100644
--- a/src/ipcpd/unicast/addr_auth.h
+++ b/src/ipcpd/unicast/addr-auth.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Address authority
  *
@@ -27,6 +27,14 @@
 
 #include <stdint.h>
 
+#define ADDR_FMT32 "%02x.%02x.%02x.%02x"
+#define ADDR_VAL32(a) \
+        ((uint8_t *) a)[0], ((uint8_t *) a)[1], \
+        ((uint8_t *) a)[2], ((uint8_t *) a)[3]
+
+#define ADDR_FMT64 ADDR_FMT32 "." ADDR_FMT32
+#define ADDR_VAL64(a) ADDR_VAL32(a), ADDR_VAL32(a + 4)
+
 int      addr_auth_init(enum pol_addr_auth type,
                         const void *       info);
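The ADDR_FMT32/ADDR_VAL32 macros added to addr-auth.h above are printf-style helpers that print an address byte by byte. A minimal, self-contained sketch of how they can be used; the macro bodies are copied verbatim from the hunk above, while main() and the sample value are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Copied from the addr-auth.h hunk above. */
#define ADDR_FMT32 "%02x.%02x.%02x.%02x"
#define ADDR_VAL32(a) \
        ((uint8_t *) a)[0], ((uint8_t *) a)[1], \
        ((uint8_t *) a)[2], ((uint8_t *) a)[3]

int main(void)
{
        uint32_t addr = 0xdeadbeef;

        /* Prints the four bytes of addr in memory order,
         * e.g. "ef.be.ad.de" on a little-endian machine. */
        printf("Flat address: " ADDR_FMT32 "\n",
               ADDR_VAL32((uint8_t *) &addr));

        return 0;
}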
diff --git a/src/ipcpd/unicast/pol/flat.c b/src/ipcpd/unicast/addr-auth/flat.c
index f869f761..34ca1cef 100644
--- a/src/ipcpd/unicast/pol/flat.c
+++ b/src/ipcpd/unicast/addr-auth/flat.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Policy for flat addresses in a distributed way
  *
@@ -29,28 +29,21 @@
 #define OUROBOROS_PREFIX "flat-addr-auth"
 
 #include <ouroboros/logs.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/utils.h>
+#include <ouroboros/random.h>
 
+#include "addr-auth.h"
 #include "ipcp.h"
 #include "flat.h"
 
-#include <time.h>
-#include <stdlib.h>
-#include <math.h>
-#include <string.h>
-#include <assert.h>
-
-#define NAME_LEN 8
+#define NAME_LEN        8
+#define INVALID_ADDRESS 0
 
 struct {
-        uint8_t addr_size;
+        uint8_t  addr_size;
+        uint32_t addr;
 } flat;
 
-#define INVALID_ADDRESS 0
-
-struct pol_addr_auth_ops flat_ops = {
+struct addr_auth_ops flat_ops = {
         .init    = flat_init,
         .fini    = flat_fini,
         .address = flat_address
@@ -65,6 +58,15 @@ int flat_init(const void * info)
                 return -1;
         }
 
+#if defined (CONFIG_OUROBOROS_DEBUG) && defined (IPCP_DEBUG_LOCAL)
+        flat.addr = getpid();
+#else
+        while (flat.addr == INVALID_ADDRESS)
+                random_buffer(&flat.addr,sizeof(flat.addr));
+#endif
+        log_dbg("Flat address initialized to " ADDR_FMT32 ".",
+                ADDR_VAL32((uint8_t *) &flat.addr));
+
         return 0;
 }
 
@@ -75,13 +77,5 @@ int flat_fini(void)
 
 uint64_t flat_address(void)
 {
-        struct timespec t;
-        uint32_t        addr;
-
-        clock_gettime(CLOCK_REALTIME, &t);
-        srand(t.tv_nsec);
-
-        addr = (rand() % (RAND_MAX - 1) + 1) & 0xFFFFFFFF;
-
-        return addr;
+        return (uint64_t) flat.addr;
 }
diff --git a/src/ipcpd/unicast/pol/flat.h b/src/ipcpd/unicast/addr-auth/flat.h
index 21f7721a..d4b672c7 100644
--- a/src/ipcpd/unicast/pol/flat.h
+++ b/src/ipcpd/unicast/addr-auth/flat.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Policy for flat addresses in a distributed way
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_UNICAST_FLAT_H
 #define OUROBOROS_IPCPD_UNICAST_FLAT_H
 
-#include "pol-addr-auth-ops.h"
+#include "ops.h"
 
 int      flat_init(const void * info);
 
@@ -31,6 +31,6 @@ int      flat_fini(void);
 
 uint64_t flat_address(void);
 
-extern struct pol_addr_auth_ops flat_ops;
+extern struct addr_auth_ops flat_ops;
 
 #endif /* OUROBOROS_IPCPD_UNICAST_FLAT_H */
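flat_init() above now draws the address once with random_buffer() and redraws until it differs from the INVALID_ADDRESS sentinel (0), replacing the old per-call srand()/rand() scheme in flat_address(). A standalone sketch of that redraw loop; the /dev/urandom helper here is an assumed stand-in for ouroboros' random_buffer(), not the library code:

#include <stdint.h>
#include <stdio.h>

#define INVALID_ADDRESS 0

/* Stand-in for ouroboros' random_buffer(): fill buf from /dev/urandom. */
static int random_buffer(void * buf, size_t len)
{
        FILE * f = fopen("/dev/urandom", "rb");
        size_t n;

        if (f == NULL)
                return -1;

        n = fread(buf, 1, len, f);
        fclose(f);

        return n == len ? 0 : -1;
}

int main(void)
{
        uint32_t addr = INVALID_ADDRESS;

        /* Same loop as flat_init() above: redraw until the address
         * is distinguishable from the INVALID_ADDRESS sentinel. */
        while (addr == INVALID_ADDRESS)
                if (random_buffer(&addr, sizeof(addr)) < 0)
                        return -1;

        printf("address: %u\n", (unsigned) addr);

        return 0;
}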
diff --git a/src/ipcpd/unicast/pol-addr-auth-ops.h b/src/ipcpd/unicast/addr-auth/ops.h
index 395a5675..06b24cec 100644
--- a/src/ipcpd/unicast/pol-addr-auth-ops.h
+++ b/src/ipcpd/unicast/addr-auth/ops.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Address authority policy ops
  *
@@ -20,10 +20,10 @@
  * Foundation, Inc., http://www.fsf.org/about/contact/.
  */
 
-#ifndef OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H
-#define OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H
+#ifndef OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H
 
-struct pol_addr_auth_ops {
+struct addr_auth_ops {
         int      (* init)(const void * info);
 
         int      (* fini)(void);
@@ -31,4 +31,4 @@ struct pol_addr_auth_ops {
         uint64_t (* address)(void);
 };
 
-#endif /* OUROBOROS_IPCPD_UNICAST_POL_ADDR_AUTH_OPS_H */
+#endif /* OUROBOROS_IPCPD_UNICAST_ADDR_AUTH_OPS_H */
diff --git a/src/ipcpd/unicast/addr-auth/pol.h b/src/ipcpd/unicast/addr-auth/pol.h
new file mode 100644
index 00000000..844308c6
--- /dev/null
+++ b/src/ipcpd/unicast/addr-auth/pol.h
@@ -0,0 +1,23 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Address Authority policies
+ *
+ *    Dimitri Staessens <dimitri@ouroboros.rocks>
+ *    Sander Vrijders   <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "flat.h"
diff --git a/src/ipcpd/unicast/ca.c b/src/ipcpd/unicast/ca.c
index ddeb2849..1fcc9bb2 100644
--- a/src/ipcpd/unicast/ca.c
+++ b/src/ipcpd/unicast/ca.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Congestion Avoidance
  *
@@ -25,12 +25,10 @@
 #include <ouroboros/logs.h>
 
 #include "ca.h"
-#include "pol-ca-ops.h"
-#include "pol/ca-mb-ecn.h"
-#include "pol/ca-nop.h"
+#include "ca/pol.h"
 
 struct {
-        struct pol_ca_ops * ops;
+        struct ca_ops * ops;
 } ca;
 
 int ca_init(enum pol_cong_avoid pol)
@@ -51,7 +49,6 @@ int ca_init(enum pol_cong_avoid pol)
         return 0;
 }
 
-
 void ca_fini(void)
 {
         ca.ops = NULL;
diff --git a/src/ipcpd/unicast/ca.h b/src/ipcpd/unicast/ca.h
index 8b221790..ea803e17 100644
--- a/src/ipcpd/unicast/ca.h
+++ b/src/ipcpd/unicast/ca.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Congestion avoidance
  *
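As the ca.c and addr_auth.c hunks show, each component keeps a single pointer to an ops table and dispatches every call through it, so a policy is just a filled-in struct such as flat_ops or mb_ecn_ca_ops. A toy example of the same vtable pattern; the struct shape matches addr_auth_ops above, but the "fixed" policy and its names are illustrative, not part of the tree:

#include <stdint.h>
#include <stdio.h>

/* Same shape as struct addr_auth_ops above. */
struct addr_auth_ops {
        int      (* init)(const void * info);
        int      (* fini)(void);
        uint64_t (* address)(void);
};

static int      fixed_init(const void * info) { (void) info; return 0; }
static int      fixed_fini(void)              { return 0; }
static uint64_t fixed_address(void)           { return 42; }

/* A toy policy instance, analogous to flat_ops. */
static struct addr_auth_ops fixed_ops = {
        .init    = fixed_init,
        .fini    = fixed_fini,
        .address = fixed_address
};

int main(void)
{
        struct addr_auth_ops * ops = &fixed_ops; /* policy selection */

        if (ops->init(NULL) == 0)
                printf("addr: %llu\n", (unsigned long long) ops->address());

        return ops->fini();
}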
diff --git a/src/ipcpd/unicast/pol/ca-mb-ecn.c b/src/ipcpd/unicast/ca/mb-ecn.c
index 7a88718f..d9a204b0 100644
--- a/src/ipcpd/unicast/pol/ca-mb-ecn.c
+++ b/src/ipcpd/unicast/ca/mb-ecn.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Multi-bit ECN Congestion Avoidance
  *
@@ -29,9 +29,9 @@
 #include "config.h"
 
 #include <ouroboros/ipcp-dev.h>
-#include <ouroboros/time_utils.h>
+#include <ouroboros/time.h>
 
-#include "ca-mb-ecn.h"
+#include "mb-ecn.h"
 
 #include <inttypes.h>
 #include <stdlib.h>
@@ -65,7 +65,7 @@ struct mb_ecn_ctx {
         size_t          tx_slot;
 };
 
-struct pol_ca_ops mb_ecn_ca_ops = {
+struct ca_ops mb_ecn_ca_ops = {
         .ctx_create     = mb_ecn_ctx_create,
         .ctx_destroy    = mb_ecn_ctx_destroy,
         .ctx_update_snd = mb_ecn_ctx_update_snd,
@@ -187,7 +187,7 @@ ca_wnd_t mb_ecn_ctx_update_snd(void * _ctx,
 void mb_ecn_wnd_wait(ca_wnd_t wnd)
 {
         if (wnd.wait > 0) {
-                struct timespec s = {0, 0};
+                struct timespec s = TIMESPEC_INIT_S(0);
                 if (wnd.wait > BILLION) /* Don't care throttling < 1s */
                         s.tv_sec = 1;
                 else
diff --git a/src/ipcpd/unicast/pol/ca-mb-ecn.h b/src/ipcpd/unicast/ca/mb-ecn.h
index a90ae3e2..9a2c8b49 100644
--- a/src/ipcpd/unicast/pol/ca-mb-ecn.h
+++ b/src/ipcpd/unicast/ca/mb-ecn.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Multi-bit ECN Congestion Avoidance
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H
 #define OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H
 
-#include "pol-ca-ops.h"
+#include "ops.h"
 
 void *   mb_ecn_ctx_create(void);
 
@@ -51,6 +51,6 @@ ssize_t  mb_ecn_print_stats(void * ctx,
                             char * buf,
                             size_t len);
 
-extern struct pol_ca_ops mb_ecn_ca_ops;
+extern struct ca_ops mb_ecn_ca_ops;
 
 #endif /* OUROBOROS_IPCPD_UNICAST_CA_MB_ECN_H */
diff --git a/src/ipcpd/unicast/pol/ca-nop.c b/src/ipcpd/unicast/ca/nop.c
index db908c5c..617fc15b 100644
--- a/src/ipcpd/unicast/pol/ca-nop.c
+++ b/src/ipcpd/unicast/ca/nop.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Dummy Congestion Avoidance
  *
@@ -20,11 +20,11 @@
  * Foundation, Inc., http://www.fsf.org/about/contact/.
  */
 
-#include "ca-nop.h"
+#include "nop.h"
 
 #include <string.h>
 
-struct pol_ca_ops nop_ca_ops = {
+struct ca_ops nop_ca_ops = {
         .ctx_create     = nop_ctx_create,
         .ctx_destroy    = nop_ctx_destroy,
         .ctx_update_snd = nop_ctx_update_snd,
diff --git a/src/ipcpd/unicast/pol/ca-nop.h b/src/ipcpd/unicast/ca/nop.h
index 7b9d318f..248b198d 100644
--- a/src/ipcpd/unicast/pol/ca-nop.h
+++ b/src/ipcpd/unicast/ca/nop.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Dummy Congestion Avoidance
  *
@@ -23,7 +23,7 @@
 #ifndef OUROBOROS_IPCPD_UNICAST_CA_NOP_H
 #define OUROBOROS_IPCPD_UNICAST_CA_NOP_H
 
-#include "pol-ca-ops.h"
+#include "ops.h"
 
 void *   nop_ctx_create(void);
 
@@ -47,6 +47,6 @@ int      nop_calc_ecn(int       fd,
                       qoscube_t qc,
                       size_t    len);
 
-extern struct pol_ca_ops nop_ca_ops;
+extern struct ca_ops nop_ca_ops;
 
 #endif /* OUROBOROS_IPCPD_UNICAST_CA_NOP_H */
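The mb-ecn.c hunk above swaps the bare `{0, 0}` initializer for TIMESPEC_INIT_S(0) from <ouroboros/time.h>. A sketch of the capping logic around that line; the macro definition below is an assumed stand-in for the real header, which may differ:

#include <time.h>
#include <stdio.h>

/* Hypothetical stand-in for TIMESPEC_INIT_S() from <ouroboros/time.h>:
 * initialize a timespec to a whole number of seconds. */
#define TIMESPEC_INIT_S(s) { (s), 0 }

#define BILLION 1000000000ULL

int main(void)
{
        unsigned long long wait = 1500000000ULL; /* 1.5 s in ns */
        struct timespec    s    = TIMESPEC_INIT_S(0);

        /* Same capping as mb_ecn_wnd_wait() above: throttle at
         * most one second, otherwise sleep the exact remainder. */
        if (wait > BILLION)
                s.tv_sec = 1;
        else
                s.tv_nsec = wait;

        printf("sleep %lld s %ld ns\n", (long long) s.tv_sec, s.tv_nsec);

        return 0;
}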
diff --git a/src/ipcpd/unicast/pol-ca-ops.h b/src/ipcpd/unicast/ca/ops.h
index 88f6cf61..3a7b7248 100644
--- a/src/ipcpd/unicast/pol-ca-ops.h
+++ b/src/ipcpd/unicast/ca/ops.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Congestion avoidance policy ops
  *
@@ -20,12 +20,12 @@
  * Foundation, Inc., http://www.fsf.org/about/contact/.
  */
 
-#ifndef OUROBOROS_IPCPD_UNICAST_POL_CA_OPS_H
-#define OUROBOROS_IPCPD_UNICAST_POL_CA_OPS_H
+#ifndef OUROBOROS_IPCPD_UNICAST_CA_OPS_H
+#define OUROBOROS_IPCPD_UNICAST_CA_OPS_H
 
 #include "ca.h"
 
-struct pol_ca_ops {
+struct ca_ops {
         void *   (* ctx_create)(void);
 
         void     (* ctx_destroy)(void * ctx);
@@ -55,4 +55,4 @@ struct pol_ca_ops {
 };
 
-#endif /* OUROBOROS_IPCPD_UNICAST_POL_CA_OPS_H */
+#endif /* OUROBOROS_IPCPD_UNICAST_CA_OPS_H */
diff --git a/src/ipcpd/unicast/ca/pol.h b/src/ipcpd/unicast/ca/pol.h
new file mode 100644
index 00000000..db0a1a11
--- /dev/null
+++ b/src/ipcpd/unicast/ca/pol.h
@@ -0,0 +1,24 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Congestion avoidance policies
+ *
+ *    Dimitri Staessens <dimitri@ouroboros.rocks>
+ *    Sander Vrijders   <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#include "mb-ecn.h"
+#include "nop.h"
diff --git a/src/ipcpd/unicast/connmgr.c b/src/ipcpd/unicast/connmgr.c
index 904deff8..07568fb5 100644
--- a/src/ipcpd/unicast/connmgr.c
+++ b/src/ipcpd/unicast/connmgr.c
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Handles connections between components
  *
@@ -32,8 +32,4 @@
 
 #define BUILD_IPCP_UNICAST
 
-#ifdef IPCP_CONN_WAIT_DIR
- #include "dir.h"
-#endif
-
 #include "common/connmgr.c"
diff --git a/src/ipcpd/unicast/dht.c b/src/ipcpd/unicast/dht.c
deleted file mode 100644
index 2b668f9f..00000000
--- a/src/ipcpd/unicast/dht.c
+++ /dev/null
@@ -1,2842 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2021
- *
- * Distributed Hash Table based on Kademlia
- *
- *    Dimitri Staessens <dimitri@ouroboros.rocks>
- *    Sander Vrijders   <sander@ouroboros.rocks>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#if defined(__linux__) || defined(__CYGWIN__)
-#define _DEFAULT_SOURCE
-#else
-#define _POSIX_C_SOURCE 200112L
-#endif
-
-#include "config.h"
-
-#define DHT              "dht"
-#define OUROBOROS_PREFIX DHT
-
-#include <ouroboros/hash.h>
-#include <ouroboros/ipcp-dev.h>
-#include <ouroboros/bitmap.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/logs.h>
-#include <ouroboros/list.h>
-#include <ouroboros/notifier.h>
-#include <ouroboros/random.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/tpm.h>
-#include <ouroboros/utils.h>
-#include <ouroboros/pthread.h>
-
-#include "common/connmgr.h"
-#include "dht.h"
-#include "dt.h"
-
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <inttypes.h>
-#include <limits.h>
-
-#include "kademlia.pb-c.h"
-typedef KadMsg kad_msg_t;
-typedef KadContactMsg kad_contact_msg_t;
-
-#ifndef CLOCK_REALTIME_COARSE
-#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
-#endif
-
-#define DHT_MAX_REQS  2048 /* KAD recommends rnd(), bmp can be changed.    */
-#define KAD_ALPHA     3    /* Parallel factor, proven optimal value.       */
-#define KAD_K         8    /* Replication factor, MDHT value.              */
-#define KAD_T_REPL    900  /* Replication time, tied to k. MDHT value.     */
-#define KAD_T_REFR    900  /* Refresh time stale bucket, MDHT value.       */
-#define KAD_T_JOIN    8    /* Response time to wait for a join.            */
-#define KAD_T_RESP    5    /* Response time to wait for a response.        */
-#define KAD_R_PING    2    /* Ping retries before declaring peer dead.     */
-#define KAD_QUEER     15   /* Time to declare peer questionable.           */
-#define KAD_BETA      8    /* Bucket split factor, must be 1, 2, 4 or 8.   */
-#define KAD_RESP_RETR 6    /* Number of retries on sending a response.     */
-#define KAD_JOIN_RETR 8    /* Number of retries sending a join.            */
-#define KAD_JOIN_INTV 1    /* Time (seconds) between join retries.         */
-#define HANDLE_TIMEO  1000 /* Timeout for dht_handle_packet tpm check (ms) */
-#define DHT_RETR_ADDR 1    /* Number of addresses to return on retrieve    */
-
-enum dht_state {
-        DHT_INIT = 0,
-        DHT_SHUTDOWN,
-        DHT_JOINING,
-        DHT_RUNNING,
-};
-
-enum kad_code {
-        KAD_JOIN = 0,
-        KAD_FIND_NODE,
-        KAD_FIND_VALUE,
-        /* Messages without a response below. */
-        KAD_STORE,
-        KAD_RESPONSE
-};
-
-enum kad_req_state {
-        REQ_NULL = 0,
-        REQ_INIT,
-        REQ_PENDING,
-        REQ_RESPONSE,
-        REQ_DONE,
-        REQ_DESTROY
-};
-
-enum lookup_state {
-        LU_NULL = 0,
-        LU_INIT,
-        LU_PENDING,
-        LU_UPDATE,
-        LU_COMPLETE,
-        LU_DESTROY
-};
-
-struct kad_req {
-        struct list_head   next;
-
-        uint32_t           cookie;
-        enum kad_code      code;
-        uint8_t *          key;
-        uint64_t           addr;
-
-        enum kad_req_state state;
-        pthread_cond_t     cond;
-        pthread_mutex_t    lock;
-
-        time_t             t_exp;
-};
-
-struct cookie_el {
-        struct list_head next;
-
-        uint32_t         cookie;
-};
-
-struct lookup {
-        struct list_head  next;
-
-        struct list_head  cookies;
-
-        uint8_t *         key;
-
-        struct list_head  contacts;
-        size_t            n_contacts;
-
-        uint64_t *        addrs;
-        size_t            n_addrs;
-
-        enum lookup_state state;
-        pthread_cond_t    cond;
-        pthread_mutex_t   lock;
-};
-
-struct val {
-        struct list_head next;
-
-        uint64_t         addr;
-
-        time_t           t_exp;
-        time_t           t_rep;
-};
-
-struct ref_entry {
-        struct list_head next;
-
-        uint8_t *        key;
-
-        time_t           t_rep;
-};
-
-struct dht_entry {
-        struct list_head next;
-
-        uint8_t *        key;
-        size_t           n_vals;
-        struct list_head vals;
-};
-
-struct contact {
-        struct list_head next;
-
-        uint8_t *        id;
-        uint64_t         addr;
-
-        size_t           fails;
-        time_t           t_seen;
-};
-
-struct bucket {
-        struct list_head contacts;
-        size_t           n_contacts;
-
-        struct list_head alts;
-        size_t           n_alts;
-
-        time_t           t_refr;
-
-        size_t           depth;
-        uint8_t          mask;
-
-        struct bucket *  parent;
-        struct bucket *  children[1L << KAD_BETA];
-};
-
-struct cmd {
-        struct list_head     next;
-
-        struct shm_du_buff * sdb;
-};
-
-struct dht {
-        size_t           alpha;
-        size_t           b;
-        size_t           k;
-
-        time_t           t_expire;
-        time_t           t_refresh;
-        time_t           t_replic;
-        time_t           t_repub;
-
-        uint8_t *        id;
-        uint64_t         addr;
-
-        struct bucket *  buckets;
-
-        struct list_head entries;
-
-        struct list_head refs;
-
-        struct list_head lookups;
-
-        struct list_head requests;
-        struct bmp *     cookies;
-
-        enum dht_state   state;
-        struct list_head cmds;
-        pthread_cond_t   cond;
-        pthread_mutex_t  mtx;
-
-        pthread_rwlock_t lock;
-
-        uint64_t         eid;
-
-        struct tpm *     tpm;
-
-        pthread_t        worker;
-};
-
-struct join_info {
-        struct dht * dht;
-        uint64_t     addr;
-};
-
-struct packet_info {
-        struct dht *         dht;
-        struct shm_du_buff * sdb;
-};
-
-static uint8_t * dht_dup_key(const uint8_t * key,
-                             size_t          len)
-{
-        uint8_t * dup;
-
-        dup = malloc(sizeof(*dup) * len);
-        if (dup == NULL)
-                return NULL;
-
-        memcpy(dup, key, len);
-
-        return dup;
-}
-
-static enum dht_state dht_get_state(struct dht * dht)
-{
-        enum dht_state state;
-
-        pthread_mutex_lock(&dht->mtx);
-
-        state = dht->state;
-
-        pthread_mutex_unlock(&dht->mtx);
-
-        return state;
-}
-
-static int dht_set_state(struct dht *   dht,
-                         enum dht_state state)
-{
-        pthread_mutex_lock(&dht->mtx);
-
-        if (state == DHT_JOINING && dht->state != DHT_INIT) {
-                 pthread_mutex_unlock(&dht->mtx);
-                 return -1;
-        }
-
-        dht->state = state;
-
-        pthread_cond_broadcast(&dht->cond);
-
-        pthread_mutex_unlock(&dht->mtx);
-
-        return 0;
-}
-
-int dht_wait_running(struct dht * dht)
-{
-        int ret = 0;
-
-        pthread_mutex_lock(&dht->mtx);
-
-        pthread_cleanup_push(__cleanup_mutex_unlock, &dht->mtx);
-
-        while (dht->state == DHT_JOINING)
-                pthread_cond_wait(&dht->cond, &dht->mtx);
-
-        if (dht->state != DHT_RUNNING)
-                ret = -1;
-
-        pthread_cleanup_pop(true);
-
-        return ret;
-}
-
-static uint8_t * create_id(size_t len)
-{
-        uint8_t * id;
-
-        id = malloc(len);
-        if (id == NULL)
-                return NULL;
-
-        if (random_buffer(id, len) < 0) {
-                free(id);
-                return NULL;
-        }
-
-        return id;
-}
-
-static void kad_req_create(struct dht * dht,
-                           kad_msg_t *  msg,
-                           uint64_t     addr)
-{
-        struct kad_req *   req;
-        pthread_condattr_t cattr;
-        struct timespec    t;
-        size_t             b;
-
-        req = malloc(sizeof(*req));
-        if (req == NULL)
-                return;
-
-        list_head_init(&req->next);
-
-        clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
-        req->t_exp  = t.tv_sec + KAD_T_RESP;
-        req->addr   = addr;
-        req->state  = REQ_INIT;
-        req->cookie = msg->cookie;
-        req->code   = msg->code;
-        req->key    = NULL;
-
-        pthread_rwlock_rdlock(&dht->lock);
-        b = dht->b;
-        pthread_rwlock_unlock(&dht->lock);
-
-        if (msg->has_key) {
-                req->key = dht_dup_key(msg->key.data, b);
-                if (req->key == NULL) {
-                        free(req);
-                        return;
-                }
-        }
-
-        if (pthread_mutex_init(&req->lock, NULL)) {
-                free(req->key);
-                free(req);
-                return;
-        }
-
-        pthread_condattr_init(&cattr);
-#ifndef __APPLE__
-        pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
-#endif
-
-        if (pthread_cond_init(&req->cond, &cattr)) {
-                pthread_condattr_destroy(&cattr);
-                pthread_mutex_destroy(&req->lock);
-                free(req->key);
-                free(req);
-                return;
-        }
-
-        pthread_condattr_destroy(&cattr);
-
-        pthread_rwlock_wrlock(&dht->lock);
-
-        list_add(&req->next, &dht->requests);
-
-        pthread_rwlock_unlock(&dht->lock);
-}
-
-static void cancel_req_destroy(void * o)
-{
-        struct kad_req * req = (struct kad_req *) o;
-
-        pthread_mutex_unlock(&req->lock);
-
-        pthread_cond_destroy(&req->cond);
-        pthread_mutex_destroy(&req->lock);
-
-        if (req->key != NULL)
-                free(req->key);
-
-        free(req);
-}
-
-static void kad_req_destroy(struct kad_req * req)
-{
-        assert(req);
-
-        pthread_mutex_lock(&req->lock);
-
-        switch (req->state) {
-        case REQ_DESTROY:
-                pthread_mutex_unlock(&req->lock);
-                return;
-        case REQ_PENDING:
-                req->state = REQ_DESTROY;
-                pthread_cond_signal(&req->cond);
-                break;
-        case REQ_INIT:
-        case REQ_DONE:
-                req->state = REQ_NULL;
-                break;
-        case REQ_RESPONSE:
-        case REQ_NULL:
-        default:
-                break;
-        }
-
-        pthread_cleanup_push(cancel_req_destroy, req);
-
-        while (req->state != REQ_NULL && req->state != REQ_DONE)
-                pthread_cond_wait(&req->cond, &req->lock);
-
-        pthread_cleanup_pop(true);
-}
-
-static int kad_req_wait(struct kad_req * req,
-                        time_t           t)
-{
-        struct timespec timeo = {t, 0};
-        struct timespec abs;
-        int ret = 0;
-
-        assert(req);
-
-        clock_gettime(PTHREAD_COND_CLOCK, &abs);
-
-        ts_add(&abs, &timeo, &abs);
-
-        pthread_mutex_lock(&req->lock);
-
-        req->state = REQ_PENDING;
-
-        pthread_cleanup_push(__cleanup_mutex_unlock, &req->lock);
-
-        while (req->state == REQ_PENDING && ret != -ETIMEDOUT)
-                ret = -pthread_cond_timedwait(&req->cond, &req->lock, &abs);
-
-        switch(req->state) {
-        case REQ_DESTROY:
-                ret = -1;
-                req->state = REQ_NULL;
-                pthread_cond_signal(&req->cond);
-                break;
-        case REQ_PENDING: /* ETIMEDOUT */
-        case REQ_RESPONSE:
-                req->state = REQ_DONE;
-                pthread_cond_broadcast(&req->cond);
-                break;
-        default:
-                break;
-        }
-
-        pthread_cleanup_pop(true);
-
-        return ret;
-}
-
-static void kad_req_respond(struct kad_req * req)
-{
-        pthread_mutex_lock(&req->lock);
-
-        req->state = REQ_RESPONSE;
-        pthread_cond_signal(&req->cond);
-
-        pthread_mutex_unlock(&req->lock);
-}
-
-static struct contact * contact_create(const uint8_t * id,
-                                       size_t          len,
-                                       uint64_t        addr)
-{
-        struct contact * c;
-        struct timespec  t;
-
-        c = malloc(sizeof(*c));
-        if (c == NULL)
-                return NULL;
-
-        list_head_init(&c->next);
-
-        clock_gettime(CLOCK_REALTIME_COARSE, &t);
-
-        c->addr   = addr;
-        c->fails  = 0;
-        c->t_seen = t.tv_sec;
-        c->id     = dht_dup_key(id, len);
-        if (c->id == NULL) {
-                free(c);
-                return NULL;
-        }
-
-        return c;
-}
-
-static void contact_destroy(struct contact * c)
-{
-        if (c != NULL)
-                free(c->id);
-
-        free(c);
-}
-
-static struct bucket * iter_bucket(struct bucket * b,
-                                   const uint8_t * id)
-{
-        uint8_t byte;
-        uint8_t mask;
-
-        assert(b);
-
-        if (b->children[0] == NULL)
-                return b;
-
-        byte = id[(b->depth * KAD_BETA) / CHAR_BIT];
-
-        mask = ((1L << KAD_BETA) - 1) & 0xFF;
-
-        byte >>= (CHAR_BIT - KAD_BETA) -
-                (((b->depth) * KAD_BETA) & (CHAR_BIT - 1));
-
-        return iter_bucket(b->children[(byte & mask)], id);
-}
-
-static struct bucket * dht_get_bucket(struct dht *    dht,
-                                      const uint8_t * id)
-{
-        assert(dht->buckets);
-
-        return iter_bucket(dht->buckets, id);
-}
-
-/*
- * If someone builds a network where the n (n > k) closest nodes all
- * have IDs starting with the same 64 bits: by all means, change this.
- */
-static uint64_t dist(const uint8_t * src,
-                     const uint8_t * dst)
-{
-        return betoh64(*((uint64_t *) src) ^ *((uint64_t *) dst));
-}
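dist() above is the Kademlia XOR metric, restricted to the first 64 bits of the two IDs and interpreted in network byte order. A self-contained illustration; betoh64_demo() is a portable stand-in for ouroboros' betoh64() helper, and the sample IDs are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable stand-in for ouroboros' betoh64(): big-endian to host. */
static uint64_t betoh64_demo(uint64_t x)
{
        const uint8_t * b = (const uint8_t *) &x;
        uint64_t        v = 0;
        int             i;

        for (i = 0; i < 8; ++i)
                v = (v << 8) | b[i];

        return v;
}

int main(void)
{
        uint8_t  a[8] = { 0, 0, 0, 0, 0, 0, 0, 0x01 };
        uint8_t  b[8] = { 0, 0, 0, 0, 0, 0, 0, 0x03 };
        uint64_t xa;
        uint64_t xb;

        memcpy(&xa, a, 8);
        memcpy(&xb, b, 8);

        /* As in dist() above: XOR the first 64 bits of the IDs and
         * read the result big-endian. 0x01 ^ 0x03 == 0x02, so this
         * prints dist = 2 regardless of host endianness. */
        printf("dist = %llu\n", (unsigned long long) betoh64_demo(xa ^ xb));

        return 0;
}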
pthread_condattr_init(&cattr); -#ifndef __APPLE__ -        pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK); -#endif - -        if (pthread_cond_init(&lu->cond, &cattr)) -                goto fail_cond; - -        pthread_condattr_destroy(&cattr); - -        pthread_rwlock_wrlock(&dht->lock); - -        list_add(&lu->next, &dht->lookups); - -        lu->n_contacts = dht_contact_list(dht, &lu->contacts, id); - -        pthread_rwlock_unlock(&dht->lock); - -        return lu; - - fail_cond: -        pthread_condattr_destroy(&cattr); -        pthread_mutex_destroy(&lu->lock); - fail_mutex: -        free(lu->key); - fail_id: -        free(lu); - fail_malloc: -        return NULL; -} - -static void cancel_lookup_destroy(void * o) -{ -        struct lookup *    lu; -        struct list_head * p; -        struct list_head * h; - -        lu = (struct lookup *) o; - -        if (lu->key != NULL) -                free(lu->key); -        if (lu->addrs != NULL) -                free(lu->addrs); - -        list_for_each_safe(p, h, &lu->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                list_del(&c->next); -                contact_destroy(c); -        } - -        list_for_each_safe(p, h, &lu->cookies) { -                struct cookie_el * c = list_entry(p, struct cookie_el, next); -                list_del(&c->next); -                free(c); -        } - -        pthread_mutex_unlock(&lu->lock); - -        pthread_mutex_destroy(&lu->lock); - -        free(lu); -} - -static void lookup_destroy(struct lookup * lu) -{ -        assert(lu); - -        pthread_mutex_lock(&lu->lock); - -        switch (lu->state) { -        case LU_DESTROY: -                pthread_mutex_unlock(&lu->lock); -                return; -        case LU_PENDING: -                lu->state = LU_DESTROY; -                pthread_cond_broadcast(&lu->cond); -                break; -        case LU_INIT: -        case LU_UPDATE: -        case LU_COMPLETE: -                lu->state = LU_NULL; -                break; -        case LU_NULL: -        default: -                break; -        } - -        pthread_cleanup_push(cancel_lookup_destroy, lu); - -        while (lu->state != LU_NULL) -                pthread_cond_wait(&lu->cond, &lu->lock); - -        pthread_cleanup_pop(true); -} - -static void lookup_update(struct dht *    dht, -                          struct lookup * lu, -                          kad_msg_t *     msg) -{ -        struct list_head * p = NULL; -        struct list_head * h; -        struct contact *   c = NULL; -        size_t             n; -        size_t             pos = 0; -        bool               mod = false; - -        assert(lu); -        assert(msg); - -        if (dht_get_state(dht) != DHT_RUNNING) -                return; - -        pthread_mutex_lock(&lu->lock); - -        list_for_each_safe(p, h, &lu->cookies) { -                struct cookie_el * e = list_entry(p, struct cookie_el, next); -                if (e->cookie == msg->cookie) { -                        list_del(&e->next); -                        free(e); -                        break; -                } -        } - -        if (lu->state == LU_COMPLETE) { -                pthread_mutex_unlock(&lu->lock); -                return; -        } - -        if (msg->n_addrs > 0) { -                if (lu->addrs == NULL) { -                        lu->addrs = malloc(sizeof(*lu->addrs) * msg->n_addrs); -                        for (n = 0; n < msg->n_addrs; ++n) -                      
          lu->addrs[n] = msg->addrs[n]; -                        lu->n_addrs = msg->n_addrs; -                } - -                lu->state = LU_COMPLETE; -                pthread_cond_broadcast(&lu->cond); -                pthread_mutex_unlock(&lu->lock); -                return; -        } - -        pthread_cleanup_push(__cleanup_mutex_unlock, &lu->lock); - -        while (lu->state == LU_INIT) { -                pthread_rwlock_unlock(&dht->lock); -                pthread_cond_wait(&lu->cond, &lu->lock); -                pthread_rwlock_rdlock(&dht->lock); -        } - -        pthread_cleanup_pop(false); - -        for (n = 0; n < msg->n_contacts; ++n) { -                c = contact_create(msg->contacts[n]->id.data, -                                   dht->b, msg->contacts[n]->addr); -                if (c == NULL) -                        continue; - -                pos = 0; - -                list_for_each(p, &lu->contacts) { -                        struct contact * e; -                        e = list_entry(p, struct contact, next); -                        if (!memcmp(e->id, c->id, dht->b)) { -                                contact_destroy(c); -                                c = NULL; -                                break; -                        } - -                        if (dist(c->id, lu->key) > dist(e->id, lu->key)) -                                break; - -                        pos++; -                } - -                if (c == NULL) -                        continue; - -                if (lu->n_contacts < dht->k) { -                        list_add_tail(&c->next, p); -                        ++lu->n_contacts; -                        mod = true; -                } else if (pos == dht->k) { -                        contact_destroy(c); -                } else { -                        struct contact * d; -                        list_add_tail(&c->next, p); -                        d = list_last_entry(&lu->contacts, -                                            struct contact, next); -                        list_del(&d->next); -                        assert(lu->contacts.prv != &d->next); -                        contact_destroy(d); -                        mod = true; -                } -        } - -        if (list_is_empty(&lu->cookies) && !mod) -                lu->state = LU_COMPLETE; -        else -                lu->state = LU_UPDATE; - -        pthread_cond_broadcast(&lu->cond); -        pthread_mutex_unlock(&lu->lock); -        return; -} - -static ssize_t lookup_get_addrs(struct lookup * lu, -                                uint64_t *      addrs) -{ -        ssize_t n; - -        assert(lu); - -        pthread_mutex_lock(&lu->lock); - -        for (n = 0; (size_t) n < lu->n_addrs; ++n) -                addrs[n] = lu->addrs[n]; - -        assert((size_t) n == lu->n_addrs); - -        pthread_mutex_unlock(&lu->lock); - -        return n; -} - -static ssize_t lookup_contact_addrs(struct lookup * lu, -                                    uint64_t *      addrs) -{ -        struct list_head * p; -        ssize_t            n = 0; - -        assert(lu); -        assert(addrs); - -        pthread_mutex_lock(&lu->lock); - -        list_for_each(p, &lu->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                addrs[n] = c->addr; -                n++; -        } - -        pthread_mutex_unlock(&lu->lock); - -        return n; -} - -static void lookup_new_addrs(struct lookup * lu, -                             
uint64_t *      addrs) -{ -        struct list_head * p; -        size_t             n = 0; - -        assert(lu); -        assert(addrs); - -        pthread_mutex_lock(&lu->lock); - -        /* Uses fails to check if the contact has been contacted. */ -        list_for_each(p, &lu->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                if (c->fails == 0) { -                        c->fails = 1; -                        addrs[n] = c->addr; -                        n++; -                } - -                if (n == KAD_ALPHA) -                        break; -        } - -        assert(n <= KAD_ALPHA); - -        addrs[n] = 0; - -        pthread_mutex_unlock(&lu->lock); -} - -static void lookup_set_state(struct lookup *   lu, -                             enum lookup_state state) -{ -        pthread_mutex_lock(&lu->lock); - -        lu->state = state; -        pthread_cond_broadcast(&lu->cond); - -        pthread_mutex_unlock(&lu->lock); -} - -static void cancel_lookup_wait(void * o) -{ -        struct lookup * lu = (struct lookup *) o; -        lu->state = LU_NULL; -        pthread_mutex_unlock(&lu->lock); -        lookup_destroy(lu); -} - -static enum lookup_state lookup_wait(struct lookup * lu) -{ -        struct timespec   timeo = {KAD_T_RESP, 0}; -        struct timespec   abs; -        enum lookup_state state; -        int               ret = 0; - -        clock_gettime(PTHREAD_COND_CLOCK, &abs); - -        ts_add(&abs, &timeo, &abs); - -        pthread_mutex_lock(&lu->lock); - -        if (lu->state == LU_INIT || lu->state == LU_UPDATE) -                lu->state = LU_PENDING; - -        pthread_cleanup_push(cancel_lookup_wait, lu); - -        while (lu->state == LU_PENDING && ret != -ETIMEDOUT) -                ret = -pthread_cond_timedwait(&lu->cond, &lu->lock, &abs); - -        pthread_cleanup_pop(false); - -        if (ret == -ETIMEDOUT) -                lu->state = LU_COMPLETE; - -        state = lu->state; - -        pthread_mutex_unlock(&lu->lock); - -        return state; -} - -static struct kad_req * dht_find_request(struct dht * dht, -                                         kad_msg_t *  msg) -{ -        struct list_head * p; - -        assert(dht); -        assert(msg); - -        list_for_each(p, &dht->requests) { -                struct kad_req * r = list_entry(p, struct kad_req, next); -                if (r->cookie == msg->cookie) -                        return r; -        } - -        return NULL; -} - -static struct lookup * dht_find_lookup(struct dht *    dht, -                                       uint32_t        cookie) -{ -        struct list_head * p; -        struct list_head * p2; -        struct list_head * h2; - -        assert(dht); -        assert(cookie > 0); - -        list_for_each(p, &dht->lookups) { -                struct lookup * l = list_entry(p, struct lookup, next); -                pthread_mutex_lock(&l->lock); -                list_for_each_safe(p2, h2, &l->cookies) { -                        struct cookie_el * e; -                        e = list_entry(p2, struct cookie_el, next); -                        if (e->cookie == cookie) { -                                list_del(&e->next); -                                free(e); -                                pthread_mutex_unlock(&l->lock); -                                return l; -                        } -                } -                pthread_mutex_unlock(&l->lock); -        } - -        return NULL; -} - -static struct val * 
val_create(uint64_t addr, -                               time_t   exp) -{ -        struct val *    v; -        struct timespec t; - -        v = malloc(sizeof(*v)); -        if (v == NULL) -                return NULL; - -        list_head_init(&v->next); -        v->addr = addr; - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        v->t_exp = t.tv_sec + exp; -        v->t_rep = t.tv_sec + KAD_T_REPL; - -        return v; -} - -static void val_destroy(struct val * v) -{ -        assert(v); - -        free(v); -} - -static struct ref_entry * ref_entry_create(struct dht *    dht, -                                           const uint8_t * key) -{ -        struct ref_entry * e; -        struct timespec    t; - -        assert(dht); -        assert(key); - -        e = malloc(sizeof(*e)); -        if (e == NULL) -                return NULL; - -        e->key = dht_dup_key(key, dht->b); -        if (e->key == NULL) { -                free(e); -                return NULL; -        } - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        e->t_rep = t.tv_sec + dht->t_repub; - -        return e; -} - -static void ref_entry_destroy(struct ref_entry * e) -{ -        free(e->key); -        free(e); -} - -static struct dht_entry * dht_entry_create(struct dht *    dht, -                                           const uint8_t * key) -{ -        struct dht_entry * e; - -        assert(dht); -        assert(key); - -        e = malloc(sizeof(*e)); -        if (e == NULL) -                return NULL; - -        list_head_init(&e->next); -        list_head_init(&e->vals); - -        e->n_vals = 0; - -        e->key = dht_dup_key(key, dht->b); -        if (e->key == NULL) { -                free(e); -                return NULL; -        } - -        return e; -} - -static void dht_entry_destroy(struct dht_entry * e) -{ -        struct list_head * p; -        struct list_head * h; - -        assert(e); - -        list_for_each_safe(p, h, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                list_del(&v->next); -                val_destroy(v); -        } - -        free(e->key); - -        free(e); -} - -static int dht_entry_add_addr(struct dht_entry * e, -                              uint64_t           addr, -                              time_t             exp) -{ -        struct list_head * p; -        struct val * val; -        struct timespec t; - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); - -        list_for_each(p, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                if (v->addr == addr) { -                        if (v->t_exp < t.tv_sec + exp) { -                                v->t_exp = t.tv_sec + exp; -                                v->t_rep = t.tv_sec + KAD_T_REPL; -                        } - -                        return 0; -                } -        } - -        val = val_create(addr, exp); -        if (val == NULL) -                return -ENOMEM; - -        list_add(&val->next, &e->vals); -        ++e->n_vals; - -        return 0; -} - - -static void dht_entry_del_addr(struct dht_entry * e, -                               uint64_t           addr) -{ -        struct list_head * p; -        struct list_head * h; - -        assert(e); - -        list_for_each_safe(p, h, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                if (v->addr == addr) { -                        list_del(&v->next); -                        val_destroy(v); -     
                   --e->n_vals; -                } -        } - -        if (e->n_vals == 0) { -                list_del(&e->next); -                dht_entry_destroy(e); -        } -} - -static uint64_t dht_entry_get_addr(struct dht *       dht, -                                   struct dht_entry * e) -{ -        struct list_head * p; - -        assert(e); -        assert(!list_is_empty(&e->vals)); - -        list_for_each(p, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                if (v->addr != dht->addr) -                        return v->addr; -        } - -        return 0; -} - -/* Forward declaration. */ -static struct lookup * kad_lookup(struct dht *    dht, -                                  const uint8_t * key, -                                  enum kad_code   code); - - -/* Build a refresh list. */ -static void bucket_refresh(struct dht *       dht, -                           struct bucket *    b, -                           time_t             t, -                           struct list_head * r) -{ -        size_t i; - -        if (*b->children != NULL) -                for (i = 0; i < (1L << KAD_BETA); ++i) -                        bucket_refresh(dht, b->children[i], t, r); - -        if (b->n_contacts == 0) -                return; - -        if (t > b->t_refr) { -                struct contact * c; -                struct contact * d; -                c = list_first_entry(&b->contacts, struct contact, next); -                d = contact_create(c->id, dht->b, c->addr); -                if (c != NULL) -                        list_add(&d->next, r); -                return; -        } -} - - -static struct bucket * bucket_create(void) -{ -        struct bucket * b; -        struct timespec t; -        size_t          i; - -        b = malloc(sizeof(*b)); -        if (b == NULL) -                return NULL; - -        list_head_init(&b->contacts); -        b->n_contacts = 0; - -        list_head_init(&b->alts); -        b->n_alts = 0; - -        clock_gettime(CLOCK_REALTIME_COARSE, &t); -        b->t_refr = t.tv_sec + KAD_T_REFR; - -        for (i = 0; i < (1L << KAD_BETA); ++i) -                b->children[i]  = NULL; - -        b->parent = NULL; -        b->depth = 0; - -        return b; -} - -static void bucket_destroy(struct bucket * b) -{ -        struct list_head * p; -        struct list_head * h; -        size_t             i; - -        assert(b); - -        for (i = 0; i < (1L << KAD_BETA); ++i) -                if (b->children[i] != NULL) -                        bucket_destroy(b->children[i]); - -        list_for_each_safe(p, h, &b->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                list_del(&c->next); -                contact_destroy(c); -                --b->n_contacts; -        } - -        list_for_each_safe(p, h, &b->alts) { -                struct contact * c = list_entry(p, struct contact, next); -                list_del(&c->next); -                contact_destroy(c); -                --b->n_contacts; -        } - -        free(b); -} - -static bool bucket_has_id(struct bucket * b, -                          const uint8_t * id) -{ -        uint8_t mask; -        uint8_t byte; - -        if (b->depth == 0) -                return true; - -        byte = id[(b->depth * KAD_BETA) / CHAR_BIT]; - -        mask = ((1L << KAD_BETA) - 1) & 0xFF; - -        byte >>= (CHAR_BIT - KAD_BETA) - -                (((b->depth - 1) * KAD_BETA) & (CHAR_BIT - 1)); - -        return 
((byte & mask) == b->mask); -} - -static int split_bucket(struct bucket * b) -{ -        struct list_head * p; -        struct list_head * h; -        uint8_t mask = 0; -        size_t i; -        size_t c; - -        assert(b); -        assert(b->n_alts == 0); -        assert(b->n_contacts); -        assert(b->children[0] == NULL); - -        c = b->n_contacts; - -        for (i = 0; i < (1L << KAD_BETA); ++i) { -                b->children[i] = bucket_create(); -                if (b->children[i] == NULL) { -                        size_t j; -                        for (j = 0; j < i; ++j) -                                bucket_destroy(b->children[j]); -                        return -1; -                } - -                b->children[i]->depth  = b->depth + 1; -                b->children[i]->mask   = mask; -                b->children[i]->parent = b; - -                list_for_each_safe(p, h, &b->contacts) { -                        struct contact * c; -                        c = list_entry(p, struct contact, next); -                        if (bucket_has_id(b->children[i], c->id)) { -                                list_del(&c->next); -                                --b->n_contacts; -                                list_add(&c->next, &b->children[i]->contacts); -                                ++b->children[i]->n_contacts; -                        } -                } - -                mask++; -        } - -        for (i = 0; i < (1L << KAD_BETA); ++i) -                if (b->children[i]->n_contacts == c) -                        split_bucket(b->children[i]); - -        return 0; -} - -/* Locked externally to mandate update as (final) part of join transaction. */ -static int dht_update_bucket(struct dht *    dht, -                             const uint8_t * id, -                             uint64_t        addr) -{ -        struct list_head * p; -        struct list_head * h; -        struct bucket *    b; -        struct contact *   c; - -        assert(dht); - -        b = dht_get_bucket(dht, id); -        if (b == NULL) -                return -1; - -        c = contact_create(id, dht->b, addr); -        if (c == NULL) -                return -1; - -        list_for_each_safe(p, h, &b->contacts) { -                struct contact * d = list_entry(p, struct contact, next); -                if (d->addr == addr) { -                        list_del(&d->next); -                        contact_destroy(d); -                        --b->n_contacts; -                } -        } - -        if (b->n_contacts == dht->k) { -                if (bucket_has_id(b, dht->id)) { -                        list_add_tail(&c->next, &b->contacts); -                        ++b->n_contacts; -                        if (split_bucket(b)) { -                                list_del(&c->next); -                                contact_destroy(c); -                                --b->n_contacts; -                        } -                } else if (b->n_alts == dht->k) { -                        struct contact * d; -                        d = list_first_entry(&b->alts, struct contact, next); -                        list_del(&d->next); -                        contact_destroy(d); -                        list_add_tail(&c->next, &b->alts); -                } else { -                        list_add_tail(&c->next, &b->alts); -                        ++b->n_alts; -                } -        } else { -                list_add_tail(&c->next, &b->contacts); -                ++b->n_contacts; -        } - -     
   return 0; -} - -static int send_msg(struct dht * dht, -                    kad_msg_t *  msg, -                    uint64_t     addr) -{ -#ifndef __DHT_TEST__ -        struct shm_du_buff * sdb; -        size_t               len; -#endif -        int                  retr = 0; - -        if (msg->code == KAD_RESPONSE) -                retr = KAD_RESP_RETR; - -        pthread_rwlock_wrlock(&dht->lock); - -        if (dht->id != NULL) { -                msg->has_s_id = true; -                msg->s_id.data = dht->id; -                msg->s_id.len  = dht->b; -        } - -        msg->s_addr = dht->addr; - -        if (msg->code < KAD_STORE) { -                msg->cookie = bmp_allocate(dht->cookies); -                if (!bmp_is_id_valid(dht->cookies, msg->cookie)) { -                        pthread_rwlock_unlock(&dht->lock); -                        goto fail_bmp_alloc; -                } -        } - -        pthread_rwlock_unlock(&dht->lock); - -#ifndef __DHT_TEST__ -        len = kad_msg__get_packed_size(msg); -        if (len == 0) -                goto fail_msg; - -        while (true) { -                if (ipcp_sdb_reserve(&sdb, len)) -                        goto fail_msg; - -                kad_msg__pack(msg, shm_du_buff_head(sdb)); - -                if (dt_write_packet(addr, QOS_CUBE_BE, dht->eid, sdb) == 0) -                        break; - -                ipcp_sdb_release(sdb); - -                sleep(1); - -                if (--retr < 0) -                        goto fail_msg; -        } - -#else -        (void) addr; -        (void) retr; -#endif /* __DHT_TEST__ */ - -        if (msg->code < KAD_STORE && dht_get_state(dht) != DHT_SHUTDOWN) -                kad_req_create(dht, msg, addr); - -        return msg->cookie; -#ifndef __DHT_TEST__ - fail_msg: -        pthread_rwlock_wrlock(&dht->lock); -        bmp_release(dht->cookies, msg->cookie); -        pthread_rwlock_unlock(&dht->lock); -#endif /* !__DHT_TEST__ */ - fail_bmp_alloc: -        return -1; -} - -static struct dht_entry * dht_find_entry(struct dht *    dht, -                                         const uint8_t * key) -{ -        struct list_head * p; - -        list_for_each(p, &dht->entries) { -                struct dht_entry * e = list_entry(p, struct dht_entry, next); -                if (!memcmp(key, e->key, dht->b)) -                        return e; -        } - -        return NULL; -} - -static int kad_add(struct dht *              dht, -                   const kad_contact_msg_t * contacts, -                   ssize_t                   n, -                   time_t                    exp) -{ -        struct dht_entry * e; - -        pthread_rwlock_wrlock(&dht->lock); - -        while (n-- > 0) { -                if (contacts[n].id.len != dht->b) -                        log_warn("Bad key length in contact data."); - -                e = dht_find_entry(dht, contacts[n].id.data); -                if (e != NULL) { -                        if (dht_entry_add_addr(e, contacts[n].addr, exp)) -                                goto fail; -                } else { -                        e = dht_entry_create(dht, contacts[n].id.data); -                        if (e == NULL) -                                goto fail; - -                        if (dht_entry_add_addr(e, contacts[n].addr, exp)) { -                                dht_entry_destroy(e); -                                goto fail; -                        } - -                        list_add(&e->next, &dht->entries); -                } -        } 
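The split_bucket() routine above fans a full bucket out over 2^KAD_BETA children, where the next KAD_BETA bits of a contact ID select the child whose mask they match. A standalone sketch of that indexing step (child_index is a hypothetical helper, not taken from the file):

#include <limits.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: which of the 2^beta children of a bucket at
 * trie depth `depth` an ID falls into, reading the next beta bits.
 * Assumes beta is 1, 2, 4 or 8 so a group never straddles a byte,
 * matching the constraint on KAD_BETA. */
static unsigned child_index(const uint8_t * id,
                            size_t          depth,
                            unsigned        beta)
{
        size_t   bit  = depth * beta;   /* first bit of the group */
        size_t   byte = bit / CHAR_BIT; /* byte holding the group */
        unsigned off  = bit % CHAR_BIT; /* offset within the byte */

        return (id[byte] >> (CHAR_BIT - off - beta)) & ((1u << beta) - 1);
}

A contact then belongs in the child whose mask equals child_index(id, depth, KAD_BETA), which mirrors the comparison bucket_has_id() performs.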
- -        pthread_rwlock_unlock(&dht->lock); -        return 0; - - fail: -        pthread_rwlock_unlock(&dht->lock); -        return -ENOMEM; -} - -static int wait_resp(struct dht * dht, -                     kad_msg_t *  msg, -                     time_t       timeo) -{ -        struct kad_req * req; - -        assert(dht); -        assert(msg); - -        pthread_rwlock_rdlock(&dht->lock); - -        req = dht_find_request(dht, msg); -        if (req == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -EPERM; -        } - -        pthread_rwlock_unlock(&dht->lock); - -        return kad_req_wait(req, timeo); -} - -static int kad_store(struct dht *    dht, -                     const uint8_t * key, -                     uint64_t        addr, -                     uint64_t        r_addr, -                     time_t          ttl) -{ -        kad_msg_t msg = KAD_MSG__INIT; -        kad_contact_msg_t cmsg = KAD_CONTACT_MSG__INIT; -        kad_contact_msg_t * cmsgp[1]; - -        cmsg.id.data = (uint8_t *) key; -        cmsg.addr    = addr; - -        pthread_rwlock_rdlock(&dht->lock); - -        cmsg.id.len  = dht->b; - -        pthread_rwlock_unlock(&dht->lock); - -        cmsgp[0] = &cmsg; - -        msg.code         = KAD_STORE; -        msg.has_t_expire = true; -        msg.t_expire     = ttl; -        msg.n_contacts   = 1; -        msg.contacts     = cmsgp; - -        if (send_msg(dht, &msg, r_addr) < 0) -                return -1; - -        return 0; -} - -static ssize_t kad_find(struct dht *     dht, -                        struct lookup *  lu, -                        const uint64_t * addrs, -                        enum kad_code    code) -{ -        kad_msg_t msg  = KAD_MSG__INIT; -        ssize_t   sent = 0; - -        assert(dht); -        assert(lu->key); - -        msg.code = code; - -        msg.has_key       = true; -        msg.key.data      = (uint8_t *) lu->key; -        msg.key.len       = dht->b; - -        while (*addrs != 0) { -                struct cookie_el * c; -                int ret; - -                if (*addrs == dht->addr) { -                        ++addrs; -                        continue; -                } - -                ret = send_msg(dht, &msg, *addrs); -                if (ret < 0) -                        break; - -                c = malloc(sizeof(*c)); -                if (c == NULL) -                        break; - -                c->cookie = (uint32_t) ret; - -                pthread_mutex_lock(&lu->lock); - -                list_add_tail(&c->next, &lu->cookies); - -                pthread_mutex_unlock(&lu->lock); - -                ++sent; -                ++addrs; -        } - -        return sent; -} - -static void lookup_detach(struct dht *    dht, -                          struct lookup * lu) -{ -        pthread_rwlock_wrlock(&dht->lock); - -        list_del(&lu->next); - -        pthread_rwlock_unlock(&dht->lock); -} - -static struct lookup * kad_lookup(struct dht *    dht, -                                  const uint8_t * id, -                                  enum kad_code   code) -{ -        uint64_t          addrs[KAD_ALPHA + 1]; -        enum lookup_state state; -        struct lookup *   lu; - -        lu = lookup_create(dht, id); -        if (lu == NULL) -                return NULL; - -        lookup_new_addrs(lu, addrs); - -        if (addrs[0] == 0) { -                lookup_detach(dht, lu); -                lookup_destroy(lu); -                return NULL; -        } - -        if 
(kad_find(dht, lu, addrs, code) == 0) { -                lookup_detach(dht, lu); -                return lu; -        } - -        while ((state = lookup_wait(lu)) != LU_COMPLETE) { -                switch (state) { -                case LU_UPDATE: -                        lookup_new_addrs(lu, addrs); -                        if (addrs[0] == 0) -                                break; - -                        kad_find(dht, lu, addrs, code); -                        break; -                case LU_DESTROY: -                        lookup_detach(dht, lu); -                        lookup_set_state(lu, LU_NULL); -                        return NULL; -                default: -                        break; -                } -        } - -        assert(state == LU_COMPLETE); - -        lookup_detach(dht, lu); - -        return lu; -} - -static void kad_publish(struct dht *    dht, -                        const uint8_t * key, -                        uint64_t        addr, -                        time_t          exp) -{ -        struct lookup * lu; -        uint64_t      * addrs; -        ssize_t         n; -        size_t          k; -        time_t          t_expire; - - -        assert(dht); -        assert(key); - -        pthread_rwlock_rdlock(&dht->lock); - -        k        = dht->k; -        t_expire = dht->t_expire; - -        pthread_rwlock_unlock(&dht->lock); - -        addrs = malloc(k * sizeof(*addrs)); -        if (addrs == NULL) -                return; - -        lu = kad_lookup(dht, key, KAD_FIND_NODE); -        if (lu == NULL) { -                free(addrs); -                return; -        } - -        n = lookup_contact_addrs(lu, addrs); - -        while (n-- > 0) { -                if (addrs[n] == dht->addr) { -                        kad_contact_msg_t msg = KAD_CONTACT_MSG__INIT; -                        msg.id.data = (uint8_t *) key; -                        msg.id.len  = dht->b; -                        msg.addr    = addr; -                        kad_add(dht, &msg, 1, exp); -                } else { -                        if (kad_store(dht, key, addr, addrs[n], t_expire)) -                                log_warn("Failed to send store message."); -                } -        } - -        lookup_destroy(lu); - -        free(addrs); -} - -static int kad_join(struct dht * dht, -                    uint64_t     addr) -{ -        kad_msg_t       msg = KAD_MSG__INIT; - -        msg.code = KAD_JOIN; - -        msg.has_alpha       = true; -        msg.has_b           = true; -        msg.has_k           = true; -        msg.has_t_refresh   = true; -        msg.has_t_replicate = true; -        msg.alpha           = KAD_ALPHA; -        msg.k               = KAD_K; -        msg.t_refresh       = KAD_T_REFR; -        msg.t_replicate     = KAD_T_REPL; - -        pthread_rwlock_rdlock(&dht->lock); - -        msg.b               = dht->b; - -        pthread_rwlock_unlock(&dht->lock); - -        if (send_msg(dht, &msg, addr) < 0) -                return -1; - -        if (wait_resp(dht, &msg, KAD_T_JOIN) < 0) -                return -1; - -        dht->id = create_id(dht->b); -        if (dht->id == NULL) -                return -1; - -        pthread_rwlock_wrlock(&dht->lock); - -        dht_update_bucket(dht, dht->id, dht->addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -static void dht_dead_peer(struct dht * dht, -                          uint8_t *    key, -                          uint64_t     addr) -{ -        struct list_head * p; -        
struct list_head * h; -        struct bucket *    b; - -        b = dht_get_bucket(dht, key); - -        list_for_each_safe(p, h, &b->contacts) { -                struct contact * c = list_entry(p, struct contact, next); -                if (b->n_contacts + b->n_alts <= dht->k) { -                        ++c->fails; -                        return; -                } - -                if (c->addr == addr) { -                        list_del(&c->next); -                        contact_destroy(c); -                        --b->n_contacts; -                        break; -                } -        } - -        while (b->n_contacts < dht->k && b->n_alts > 0) { -                struct contact * c; -                c = list_first_entry(&b->alts, struct contact, next); -                list_del(&c->next); -                --b->n_alts; -                list_add(&c->next, &b->contacts); -                ++b->n_contacts; -        } -} - -static int dht_del(struct dht *    dht, -                   const uint8_t * key, -                   uint64_t        addr) -{ -        struct dht_entry * e; - -        pthread_rwlock_wrlock(&dht->lock); - -        e = dht_find_entry(dht, key); -        if (e == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -EPERM; -        } - -        dht_entry_del_addr(e, addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -static buffer_t dht_retrieve(struct dht *    dht, -                             const uint8_t * key) -{ -        struct dht_entry * e; -        struct list_head * p; -        buffer_t           buf; -        uint64_t *         pos; -        size_t             addrs = 0; - -        pthread_rwlock_rdlock(&dht->lock); - -        e = dht_find_entry(dht, key); -        if (e == NULL) -                goto fail; - -        buf.len = MIN(DHT_RETR_ADDR, e->n_vals); -        if (buf.len == 0) -                goto fail; - -        pos = malloc(sizeof(dht->addr) * buf.len); -        if (pos == NULL) -                goto fail; - -        buf.data = (uint8_t *) pos; - -        list_for_each(p, &e->vals) { -                struct val * v = list_entry(p, struct val, next); -                *pos++ = v->addr; -                if (++addrs >= buf.len) -                        break; -        } - -        pthread_rwlock_unlock(&dht->lock); - -        return buf; - - fail: -        pthread_rwlock_unlock(&dht->lock); -        buf.len = 0; - -        return buf; -} - -static ssize_t dht_get_contacts(struct dht *          dht, -                                const uint8_t *       key, -                                kad_contact_msg_t *** msgs) -{ -        struct list_head   l; -        struct list_head * p; -        struct list_head * h; -        size_t             len; -        size_t             i = 0; - -        list_head_init(&l); - -        pthread_rwlock_wrlock(&dht->lock); - -        len = dht_contact_list(dht, &l, key); -        if (len == 0) { -                pthread_rwlock_unlock(&dht->lock); -                *msgs = NULL; -                return 0; -        } - -        *msgs = malloc(len * sizeof(**msgs)); -        if (*msgs == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return 0; -        } - -        list_for_each_safe(p, h, &l) { -                struct contact * c = list_entry(p, struct contact, next); -                (*msgs)[i] = malloc(sizeof(***msgs)); -                if ((*msgs)[i] == NULL) { -                        pthread_rwlock_unlock(&dht->lock); -        
                while (i > 0) -                                free(*msgs[--i]); -                        free(*msgs); -                        *msgs = NULL; -                        return 0; -                } - -                kad_contact_msg__init((*msgs)[i]); - -                (*msgs)[i]->id.data = c->id; -                (*msgs)[i]->id.len  = dht->b; -                (*msgs)[i++]->addr  = c->addr; -                list_del(&c->next); -                free(c); -        } - -        pthread_rwlock_unlock(&dht->lock); - -        return i; -} - -static time_t gcd(time_t a, -                  time_t b) -{ -        if (a == 0) -                return b; - -        return gcd(b % a, a); -} - -static void * work(void * o) -{ -        struct dht *       dht; -        struct timespec    now; -        struct list_head * p; -        struct list_head * h; -        struct list_head   reflist; -        time_t             intv; -        struct lookup *    lu; - -        dht = (struct dht *) o; - -        pthread_rwlock_rdlock(&dht->lock); - -        intv = gcd(dht->t_expire, dht->t_repub); -        intv = gcd(intv, gcd(KAD_T_REPL, KAD_T_REFR)) / 2; - -        pthread_rwlock_unlock(&dht->lock); - -        list_head_init(&reflist); - -        while (true) { -                clock_gettime(CLOCK_REALTIME_COARSE, &now); - -                pthread_rwlock_wrlock(&dht->lock); - -                /* Republish registered hashes. */ -                list_for_each(p, &dht->refs) { -                        struct ref_entry * e; -                        uint8_t *          key; -                        uint64_t           addr; -                        time_t             t_expire; -                        e = list_entry(p, struct ref_entry, next); -                        if (now.tv_sec > e->t_rep) { -                                key = dht_dup_key(e->key, dht->b); -                                if (key == NULL) -                                        continue; -                                addr = dht->addr; -                                t_expire = dht->t_expire; -                                e->t_rep = now.tv_sec + dht->t_repub; - -                                pthread_rwlock_unlock(&dht->lock); -                                kad_publish(dht, key, addr, t_expire); -                                pthread_rwlock_wrlock(&dht->lock); -                                free(key); -                        } -                } - -                /* Remove stale entries and republish if necessary. 
*/ -                list_for_each_safe(p, h, &dht->entries) { -                        struct list_head * p1; -                        struct list_head * h1; -                        struct dht_entry * e; -                        uint8_t *          key; -                        time_t             t_expire; -                        e = list_entry (p, struct dht_entry, next); -                        list_for_each_safe(p1, h1, &e->vals) { -                                struct val * v; -                                uint64_t     addr; -                                v = list_entry(p1, struct val, next); -                                if (now.tv_sec > v->t_exp) { -                                        list_del(&v->next); -                                        val_destroy(v); -                                        continue; -                                } - -                                if (now.tv_sec > v->t_rep) { -                                        key  = dht_dup_key(e->key, dht->b); -                                        addr = v->addr; -                                        t_expire = dht->t_expire = now.tv_sec; -                                        v->t_rep = now.tv_sec + dht->t_replic; -                                        pthread_rwlock_unlock(&dht->lock); -                                        kad_publish(dht, key, addr, t_expire); -                                        pthread_rwlock_wrlock(&dht->lock); -                                        free(key); -                                } -                        } -                } - -                /* Check the requests list for unresponsive nodes. */ -                list_for_each_safe(p, h, &dht->requests) { -                        struct kad_req * r; -                        r = list_entry(p, struct kad_req, next); -                        if (now.tv_sec > r->t_exp) { -                                list_del(&r->next); -                                bmp_release(dht->cookies, r->cookie); -                                dht_dead_peer(dht, r->key, r->addr); -                                kad_req_destroy(r); -                        } -                } - -                /* Refresh unaccessed buckets. */ -                bucket_refresh(dht, dht->buckets, now.tv_sec, &reflist); - -                pthread_rwlock_unlock(&dht->lock); - -                list_for_each_safe(p, h, &reflist) { -                        struct contact * c; -                        c = list_entry(p, struct contact, next); -                        lu = kad_lookup(dht, c->id, KAD_FIND_NODE); -                        if (lu != NULL) -                                lookup_destroy(lu); -                        list_del(&c->next); -                        contact_destroy(c); -                } - -                sleep(intv); -        } - -        return (void *) 0; -} - -static int kad_handle_join_resp(struct dht *     dht, -                                struct kad_req * req, -                                kad_msg_t *      msg) -{ -        assert(dht); -        assert(req); -        assert(msg); - -        /* We might send version numbers later to warn of updates if needed. 
*/ -        if (!(msg->has_alpha && msg->has_b && msg->has_k && msg->has_t_expire && -              msg->has_t_refresh && msg->has_t_replicate)) { -                log_warn("Join refused by remote."); -                return -1; -        } - -        if (msg->b < sizeof(uint64_t)) { -                log_err("Hash sizes less than 8 bytes unsupported."); -                return -1; -        } - -        pthread_rwlock_wrlock(&dht->lock); - -        dht->buckets = bucket_create(); -        if (dht->buckets == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -1; -        } - -        /* Likely corrupt packet. The member will refuse, we might here too. */ -        if (msg->alpha != KAD_ALPHA || msg->k != KAD_K) -                log_warn("Different kademlia parameters detected."); - -        if (msg->t_replicate != KAD_T_REPL) -                log_warn("Different kademlia replication time detected."); - -        if (msg->t_refresh != KAD_T_REFR) -                log_warn("Different kademlia refresh time detected."); - -        dht->k        = msg->k; -        dht->b        = msg->b; -        dht->t_expire = msg->t_expire; -        dht->t_repub  = MAX(1, dht->t_expire - 10); - -        if (pthread_create(&dht->worker, NULL, work, dht)) { -                bucket_destroy(dht->buckets); -                pthread_rwlock_unlock(&dht->lock); -                return -1; -        } - -        kad_req_respond(req); - -        dht_update_bucket(dht, msg->s_id.data, msg->s_addr); - -        pthread_rwlock_unlock(&dht->lock); - -        log_dbg("Enrollment of DHT completed."); - -        return 0; -} - -static int kad_handle_find_resp(struct dht *     dht, -                                struct kad_req * req, -                                kad_msg_t *      msg) -{ -        struct lookup * lu; - -        assert(dht); -        assert(req); -        assert(msg); - -        pthread_rwlock_rdlock(&dht->lock); - -        lu = dht_find_lookup(dht, req->cookie); -        if (lu == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -1; -        } - -        lookup_update(dht, lu, msg); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -static void kad_handle_response(struct dht * dht, -                                kad_msg_t *  msg) -{ -        struct kad_req * req; - -        assert(dht); -        assert(msg); - -        pthread_rwlock_wrlock(&dht->lock); - -        req = dht_find_request(dht, msg); -        if (req == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return; -        } - -        bmp_release(dht->cookies, req->cookie); -        list_del(&req->next); - -        pthread_rwlock_unlock(&dht->lock); - -        switch(req->code) { -        case KAD_JOIN: -                if (kad_handle_join_resp(dht, req, msg)) -                        log_err("Enrollment of DHT failed."); -                break; -        case KAD_FIND_VALUE: -        case KAD_FIND_NODE: -                if (dht_get_state(dht) != DHT_RUNNING) -                        break; -                kad_handle_find_resp(dht, req, msg); -                break; -        default: -                break; -        } - -        kad_req_destroy(req); -} - -int dht_bootstrap(struct dht * dht, -                  size_t       b, -                  time_t       t_expire) -{ -        assert(dht); - -        pthread_rwlock_wrlock(&dht->lock); - -        dht->id = create_id(b); -        if (dht->id == NULL) -                goto fail_id; - - 
       dht->buckets = bucket_create(); -        if (dht->buckets == NULL) -                goto fail_buckets; - -        dht->buckets->depth = 0; -        dht->buckets->mask  = 0; - -        dht->b        = b / CHAR_BIT; -        dht->t_expire = MAX(2, t_expire); -        dht->t_repub  = MAX(1, t_expire - 10); -        dht->k        = KAD_K; - -        if (pthread_create(&dht->worker, NULL, work, dht)) -                goto fail_pthread_create; - -        dht->state = DHT_RUNNING; - -        dht_update_bucket(dht, dht->id, dht->addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; - - fail_pthread_create: -        bucket_destroy(dht->buckets); -        dht->buckets = NULL; - fail_buckets: -        free(dht->id); -        dht->id = NULL; - fail_id: -        pthread_rwlock_unlock(&dht->lock); -        return -1; -} - -static struct ref_entry * ref_entry_get(struct dht *    dht, -                                        const uint8_t * key) -{ -        struct list_head * p; - -        list_for_each(p, &dht->refs) { -                struct ref_entry * r = list_entry(p, struct ref_entry, next); -                if (!memcmp(key, r->key, dht-> b) ) -                        return r; -        } - -        return NULL; -} - -int dht_reg(struct dht *    dht, -            const uint8_t * key) -{ -        struct ref_entry * e; -        uint64_t           addr; -        time_t             t_expire; - -        assert(dht); -        assert(key); -        assert(dht->addr != 0); - -        if (dht_wait_running(dht)) -                return -1; - -        pthread_rwlock_wrlock(&dht->lock); - -        if (ref_entry_get(dht, key) != NULL) { -                log_dbg("Name already registered."); -                pthread_rwlock_unlock(&dht->lock); -                return 0; -        } - -        e = ref_entry_create(dht, key); -        if (e == NULL) { -                pthread_rwlock_unlock(&dht->lock); -                return -ENOMEM; -        } - -        list_add(&e->next, &dht->refs); - -        t_expire = dht->t_expire; -        addr = dht->addr; - -        pthread_rwlock_unlock(&dht->lock); - -        kad_publish(dht, key, addr, t_expire); - -        return 0; -} - -int dht_unreg(struct dht *    dht, -              const uint8_t * key) -{ -        struct list_head * p; -        struct list_head * h; - -        assert(dht); -        assert(key); - -        if (dht_get_state(dht) != DHT_RUNNING) -                return -1; - -        pthread_rwlock_wrlock(&dht->lock); - -        list_for_each_safe(p, h, &dht->refs) { -                struct ref_entry * r = list_entry(p, struct ref_entry, next); -                if (!memcmp(key, r->key, dht-> b) ) { -                        list_del(&r->next); -                        ref_entry_destroy(r); -                } -        } - -        dht_del(dht, key, dht->addr); - -        pthread_rwlock_unlock(&dht->lock); - -        return 0; -} - -uint64_t dht_query(struct dht *    dht, -                   const uint8_t * key) -{ -        struct dht_entry * e; -        struct lookup *    lu; -        uint64_t           addrs[KAD_K]; -        size_t             n; - -        addrs[0] = 0; - -        if (dht_wait_running(dht)) -                return 0; - -        pthread_rwlock_rdlock(&dht->lock); - -        e = dht_find_entry(dht, key); -        if (e != NULL) -                addrs[0] = dht_entry_get_addr(dht, e); - -        pthread_rwlock_unlock(&dht->lock); - -        if (addrs[0] != 0) -                return addrs[0]; - -        lu = kad_lookup(dht, 
key, KAD_FIND_VALUE); -        if (lu == NULL) -                return 0; - -        n = lookup_get_addrs(lu, addrs); -        if (n == 0) { -                lookup_destroy(lu); -                return 0; -        } - -        lookup_destroy(lu); - -        /* Current behaviour is anycast and return the first peer address. */ -        if (addrs[0] != dht->addr) -                return addrs[0]; - -        if (n > 1) -                return addrs[1]; - -        return 0; -} - -static void * dht_handle_packet(void * o) -{ -        struct dht * dht = (struct dht *) o; - -        assert(dht); - -        while (true) { -                kad_msg_t *          msg; -                kad_contact_msg_t ** cmsgs; -                kad_msg_t            resp_msg = KAD_MSG__INIT; -                uint64_t             addr; -                buffer_t             buf; -                size_t               i; -                size_t               b; -                size_t               t_expire; -                struct cmd *         cmd; - -                pthread_mutex_lock(&dht->mtx); - -                pthread_cleanup_push(__cleanup_mutex_unlock, &dht->mtx); - -                while (list_is_empty(&dht->cmds)) -                        pthread_cond_wait(&dht->cond, &dht->mtx); - -                cmd = list_last_entry(&dht->cmds, struct cmd, next); -                list_del(&cmd->next); - -                pthread_cleanup_pop(true); - -                i = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb); - -                msg = kad_msg__unpack(NULL, i, shm_du_buff_head(cmd->sdb)); -#ifndef __DHT_TEST__ -                ipcp_sdb_release(cmd->sdb); -#endif -                free(cmd); - -                if (msg == NULL) { -                        log_err("Failed to unpack message."); -                        continue; -                } - -                if (msg->code != KAD_RESPONSE && dht_wait_running(dht)) { -                        kad_msg__free_unpacked(msg, NULL); -                        log_dbg("Got a request message when not running."); -                        continue; -                } - -                pthread_rwlock_rdlock(&dht->lock); - -                b        = dht->b; -                t_expire = dht->t_expire; - -                pthread_rwlock_unlock(&dht->lock); - -                if (msg->has_key && msg->key.len != b) { -                        kad_msg__free_unpacked(msg, NULL); -                        log_warn("Bad key in message."); -                        continue; -                } - -                if (msg->has_s_id && !msg->has_b && msg->s_id.len != b) { -                        kad_msg__free_unpacked(msg, NULL); -                        log_warn("Bad source ID in message of type %d.", -                                 msg->code); -                        continue; -                } - -                tpm_dec(dht->tpm); - -                addr = msg->s_addr; - -                resp_msg.code   = KAD_RESPONSE; -                resp_msg.cookie = msg->cookie; - -                switch(msg->code) { -                case KAD_JOIN: -                        /* Refuse enrollee on check fails. */ -                        if (msg->alpha != KAD_ALPHA || msg->k != KAD_K) { -                                log_warn("Parameter mismatch. 
" -                                         "DHT enrolment refused."); -                                break; -                        } - -                        if (msg->t_replicate != KAD_T_REPL) { -                                log_warn("Replication time mismatch. " -                                         "DHT enrolment refused."); - -                                break; -                        } - -                        if (msg->t_refresh != KAD_T_REFR) { -                                log_warn("Refresh time mismatch. " -                                         "DHT enrolment refused."); -                                break; -                        } - -                        resp_msg.has_alpha       = true; -                        resp_msg.has_b           = true; -                        resp_msg.has_k           = true; -                        resp_msg.has_t_expire    = true; -                        resp_msg.has_t_refresh   = true; -                        resp_msg.has_t_replicate = true; -                        resp_msg.alpha           = KAD_ALPHA; -                        resp_msg.b               = b; -                        resp_msg.k               = KAD_K; -                        resp_msg.t_expire        = t_expire; -                        resp_msg.t_refresh       = KAD_T_REFR; -                        resp_msg.t_replicate     = KAD_T_REPL; -                        break; -                case KAD_FIND_VALUE: -                        buf = dht_retrieve(dht, msg->key.data); -                        if (buf.len != 0) { -                                resp_msg.n_addrs = buf.len; -                                resp_msg.addrs   = (uint64_t *) buf.data; -                                break; -                        } -                        /* FALLTHRU */ -                case KAD_FIND_NODE: -                        /* Return k closest contacts. 
*/ -                        resp_msg.n_contacts = -                                dht_get_contacts(dht, msg->key.data, &cmsgs); -                        resp_msg.contacts = cmsgs; -                        break; -                case KAD_STORE: -                        if (msg->n_contacts < 1) { -                                log_warn("No contacts in store message."); -                                break; -                        } - -                        if (!msg->has_t_expire) { -                                log_warn("No expiry time in store message."); -                                break; -                        } - -                        kad_add(dht, *msg->contacts, msg->n_contacts, -                                msg->t_expire); -                        break; -                case KAD_RESPONSE: -                        kad_handle_response(dht, msg); -                        break; -                default: -                        assert(false); -                        break; -                } - -                if (msg->code != KAD_JOIN) { -                        pthread_rwlock_wrlock(&dht->lock); -                        if (dht_get_state(dht) == DHT_JOINING && -                            dht->buckets == NULL) { -                                pthread_rwlock_unlock(&dht->lock); -                                goto finish; -                        } - -                        if (dht_update_bucket(dht, msg->s_id.data, addr)) -                                log_warn("Failed to update bucket."); -                        pthread_rwlock_unlock(&dht->lock); -                } - -                if (msg->code < KAD_STORE && send_msg(dht, &resp_msg, addr) < 0) -                                log_warn("Failed to send response."); - - finish: -                kad_msg__free_unpacked(msg, NULL); - -                if (resp_msg.n_addrs > 0) -                        free(resp_msg.addrs); - -                if (resp_msg.n_contacts == 0) { -                        tpm_inc(dht->tpm); -                        continue; -                } - -                for (i = 0; i < resp_msg.n_contacts; ++i) -                        kad_contact_msg__free_unpacked(resp_msg.contacts[i], -                                                       NULL); -                free(resp_msg.contacts); - -                tpm_inc(dht->tpm); -        } - -        return (void *) 0; -} - -static void dht_post_packet(void *               comp, -                            struct shm_du_buff * sdb) -{ -        struct cmd * cmd; -        struct dht * dht = (struct dht *) comp; - -        if (dht_get_state(dht) == DHT_SHUTDOWN) { -#ifndef __DHT_TEST__ -                ipcp_sdb_release(sdb); -#endif -                return; -        } - -        cmd = malloc(sizeof(*cmd)); -        if (cmd == NULL) { -                log_err("Command failed. 
Out of memory."); -                return; -        } - -        cmd->sdb = sdb; - -        pthread_mutex_lock(&dht->mtx); - -        list_add(&cmd->next, &dht->cmds); - -        pthread_cond_signal(&dht->cond); - -        pthread_mutex_unlock(&dht->mtx); -} - -void dht_destroy(struct dht * dht) -{ -        struct list_head * p; -        struct list_head * h; - -        if (dht == NULL) -                return; - -#ifndef __DHT_TEST__ -        tpm_stop(dht->tpm); - -        tpm_destroy(dht->tpm); -#endif -        if (dht_get_state(dht) == DHT_RUNNING) { -                dht_set_state(dht, DHT_SHUTDOWN); -                pthread_cancel(dht->worker); -                pthread_join(dht->worker, NULL); -        } - -        pthread_rwlock_wrlock(&dht->lock); - -        list_for_each_safe(p, h, &dht->cmds) { -                struct cmd * c = list_entry(p, struct cmd, next); -                list_del(&c->next); -#ifndef __DHT_TEST__ -                ipcp_sdb_release(c->sdb); -#endif -                free(c); -        } - -        list_for_each_safe(p, h, &dht->entries) { -                struct dht_entry * e = list_entry(p, struct dht_entry, next); -                list_del(&e->next); -                dht_entry_destroy(e); -        } - -        list_for_each_safe(p, h, &dht->requests) { -                struct kad_req * r = list_entry(p, struct kad_req, next); -                list_del(&r->next); -                kad_req_destroy(r); -        } - -        list_for_each_safe(p, h, &dht->refs) { -                struct ref_entry * e = list_entry(p, struct ref_entry, next); -                list_del(&e->next); -                ref_entry_destroy(e); -        } - -        list_for_each_safe(p, h, &dht->lookups) { -                struct lookup * l = list_entry(p, struct lookup, next); -                list_del(&l->next); -                lookup_destroy(l); -        } - -        pthread_rwlock_unlock(&dht->lock); - -        if (dht->buckets != NULL) -                bucket_destroy(dht->buckets); - -        bmp_destroy(dht->cookies); - -        pthread_mutex_destroy(&dht->mtx); - -        pthread_rwlock_destroy(&dht->lock); - -        free(dht->id); - -        free(dht); -} - -static void * join_thr(void * o) -{ -        struct join_info * info = (struct join_info *) o; -        struct lookup *    lu; -        size_t             retr = 0; - -        assert(info); - -        while (kad_join(info->dht, info->addr)) { -                if (dht_get_state(info->dht) == DHT_SHUTDOWN) { -                        log_dbg("DHT enrollment aborted."); -                        goto finish; -                } - -                if (retr++ == KAD_JOIN_RETR) { -                        dht_set_state(info->dht, DHT_INIT); -                        log_warn("DHT enrollment attempt failed."); -                        goto finish; -                } - -                sleep(KAD_JOIN_INTV); -        } - -        dht_set_state(info->dht, DHT_RUNNING); - -        lu = kad_lookup(info->dht, info->dht->id, KAD_FIND_NODE); -        if (lu != NULL) -                lookup_destroy(lu); - - finish: -        free(info); - -        return (void *) 0; -} - -static void handle_event(void *       self, -                         int          event, -                         const void * o) -{ -        struct dht * dht = (struct dht *) self; - -        if (event == NOTIFY_DT_CONN_ADD) { -                pthread_t          thr; -                struct join_info * inf; -                struct conn *      c     = (struct conn *) o; -             
   struct timespec    slack = {0, DHT_ENROLL_SLACK * MILLION}; - -                /* Give the pff some time to update for the new link. */ -                nanosleep(&slack, NULL); - -                switch(dht_get_state(dht)) { -                case DHT_INIT: -                        inf = malloc(sizeof(*inf)); -                        if (inf == NULL) -                                break; - -                        inf->dht  = dht; -                        inf->addr = c->conn_info.addr; - -                        if (dht_set_state(dht, DHT_JOINING) == 0 || -                            dht_wait_running(dht)) { -                                if (pthread_create(&thr, NULL, join_thr, inf)) { -                                        dht_set_state(dht, DHT_INIT); -                                        free(inf); -                                        return; -                                } -                                pthread_detach(thr); -                        } else { -                                free(inf); -                        } -                        break; -                case DHT_RUNNING: -                        /* -                         * FIXME: this lookup for effiency reasons -                         * causes a SEGV when stressed with rapid -                         * enrollments. -                         * lu = kad_lookup(dht, dht->id, KAD_FIND_NODE); -                         * if (lu != NULL) -                         *         lookup_destroy(lu); -                         */ -                        break; -                default: -                        break; -                } -        } -} - -struct dht * dht_create(uint64_t addr) -{ -        struct dht * dht; - -        dht = malloc(sizeof(*dht)); -        if (dht == NULL) -                goto fail_malloc; - -        dht->buckets = NULL; - -        list_head_init(&dht->entries); -        list_head_init(&dht->requests); -        list_head_init(&dht->refs); -        list_head_init(&dht->lookups); -        list_head_init(&dht->cmds); - -        if (pthread_rwlock_init(&dht->lock, NULL)) -                goto fail_rwlock; - -        if (pthread_mutex_init(&dht->mtx, NULL)) -                goto fail_mutex; - -        if (pthread_cond_init(&dht->cond, NULL)) -                goto fail_cond; - -        dht->cookies = bmp_create(DHT_MAX_REQS, 1); -        if (dht->cookies == NULL) -                goto fail_bmp; - -        dht->b    = 0; -        dht->addr = addr; -        dht->id   = NULL; -#ifndef __DHT_TEST__ -        dht->tpm = tpm_create(2, 1, dht_handle_packet, dht); -        if (dht->tpm == NULL) -                goto fail_tpm_create; - -        if (tpm_start(dht->tpm)) -                goto fail_tpm_start; - -        dht->eid   = dt_reg_comp(dht, &dht_post_packet, DHT); -        if ((int) dht->eid < 0) -                goto fail_tpm_start; - -        notifier_reg(handle_event, dht); -#else -        (void) handle_event; -        (void) dht_handle_packet; -        (void) dht_post_packet; -#endif -        dht->state = DHT_INIT; - -        return dht; -#ifndef __DHT_TEST__ - fail_tpm_start: -        tpm_destroy(dht->tpm); - fail_tpm_create: -        bmp_destroy(dht->cookies); -#endif - fail_bmp: -        pthread_cond_destroy(&dht->cond); - fail_cond: -        pthread_mutex_destroy(&dht->mtx); - fail_mutex: -        pthread_rwlock_destroy(&dht->lock); - fail_rwlock: -        free(dht); - fail_malloc: -        return NULL; -} diff --git a/src/ipcpd/unicast/dir.c b/src/ipcpd/unicast/dir.c index 
a30908b8..2b305626 100644
--- a/src/ipcpd/unicast/dir.c
+++ b/src/ipcpd/unicast/dir.c
@@ -1,7 +1,7 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
- * Directory
+ * Directory Management
  *
  *    Dimitri Staessens <dimitri@ouroboros.rocks>
  *    Sander Vrijders   <sander@ouroboros.rocks>
@@ -34,8 +34,7 @@
 #include <ouroboros/utils.h>
 
 #include "dir.h"
-#include "dht.h"
-#include "ipcp.h"
+#include "dir/pol.h"
 
 #include <stdlib.h>
 #include <string.h>
@@ -43,60 +42,59 @@
 #include <inttypes.h>
 #include <limits.h>
 
-#define KAD_B (hash_len(ipcpi.dir_hash_algo) * CHAR_BIT)
+struct {
+        struct dir_ops * ops;
+} dir;
 
-struct ipcp icpci;
-struct dht * dht;
-
-int dir_init(void)
+int dir_init(struct dir_config * conf)
 {
-        dht = dht_create(ipcpi.dt_addr);
-        if (dht == NULL)
-                return -ENOMEM;
+        void * cfg;
+
+        assert(conf != NULL);
+
+        switch (conf->pol) {
+        case DIR_DHT:
+                log_info("Using DHT policy.");
+                dir.ops = &dht_dir_ops;
+                cfg = &conf->dht;
+                break;
+        default: /* DIR_INVALID */
+                log_err("Invalid directory policy %d.", conf->pol);
+                return -EINVAL;
+        }
 
-        return 0;
+        assert(dir.ops->init != NULL);
+
+        return dir.ops->init(cfg);
 }
 
 void dir_fini(void)
 {
-        dht_destroy(dht);
+        dir.ops->fini();
+        dir.ops = NULL;
 }
 
-int dir_bootstrap(void) {
-        log_dbg("Bootstrapping directory.");
-
-        /* TODO: get parameters for bootstrap from IRM tool. */
-        if (dht_bootstrap(dht, KAD_B, 86400)) {
-                dht_destroy(dht);
-                return -ENOMEM;
-        }
-
-        log_info("Directory bootstrapped.");
+int dir_start(void)
+{
+        return dir.ops->start();
+}
 
-        return 0;
+void dir_stop(void)
+{
+        dir.ops->stop();
 }
 
 int dir_reg(const uint8_t * hash)
 {
-        return dht_reg(dht, hash);
+        return dir.ops->reg(hash);
 }
 
 int dir_unreg(const uint8_t * hash)
 {
-        return dht_unreg(dht, hash);
+        return dir.ops->unreg(hash);
}
 
 uint64_t dir_query(const uint8_t * hash)
 {
-        return dht_query(dht, hash);
-}
-
-int dir_wait_running(void)
-{
-        if (dht_wait_running(dht)) {
-                log_warn("Directory did not bootstrap.");
-                return -1;
-        }
-
-        return 0;
+        return dir.ops->query(hash);
 }
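The refactored dir.c above routes every directory call through a policy vtable instead of calling the DHT directly. A minimal self-contained sketch of that dispatch pattern (all names here are invented for illustration, and the struct is reduced to two ops):

#include <stdio.h>

/* Reduced, hypothetical version of the ops-vtable pattern dir.c uses:
 * one ops pointer, selected once at init, all calls forwarded. */
struct dir_ops {
        int (*init)(void * cfg);
        int (*reg)(const unsigned char * hash);
};

static int dummy_init(void * cfg)
{
        (void) cfg;
        return 0;
}

static int dummy_reg(const unsigned char * hash)
{
        (void) hash;
        return 0;
}

static struct dir_ops dummy_dir_ops = {
        .init = dummy_init,
        .reg  = dummy_reg
};

static struct {
        struct dir_ops * ops;
} dir;

int main(void)
{
        dir.ops = &dummy_dir_ops;     /* what dir_init() does per policy */

        printf("init: %d\n", dir.ops->init(NULL));
        printf("reg:  %d\n", dir.ops->reg(NULL));

        return 0;
}

Adding a second policy then only touches the switch in dir_init(); callers of dir_reg() and dir_query() never change.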
diff --git a/src/ipcpd/unicast/dir.h b/src/ipcpd/unicast/dir.h
index 8aa79638..dbfde19f 100644
--- a/src/ipcpd/unicast/dir.h
+++ b/src/ipcpd/unicast/dir.h
@@ -1,5 +1,5 @@
 /*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
  *
  * Directory
  *
@@ -25,11 +25,14 @@
 #include <inttypes.h>
 
-int      dir_init(void);
+/* may update the config! */
+int      dir_init(struct dir_config * conf);
 
 void     dir_fini(void);
 
-int      dir_bootstrap(void);
+int      dir_start(void);
+
+void     dir_stop(void);
 
 int      dir_reg(const uint8_t * hash);
 
@@ -37,6 +40,4 @@ int      dir_unreg(const uint8_t * hash);
 
 uint64_t dir_query(const uint8_t * hash);
 
-int      dir_wait_running(void);
-
 #endif /* OUROBOROS_IPCPD_UNICAST_DIR_H */
diff --git a/src/ipcpd/unicast/dir/dht.c b/src/ipcpd/unicast/dir/dht.c
new file mode 100644
index 00000000..6b06def9
--- /dev/null
+++ b/src/ipcpd/unicast/dir/dht.c
@@ -0,0 +1,4052 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2024
+ *
+ * Distributed Hash Table based on Kademlia
+ *
+ *    Dimitri Staessens <dimitri@ouroboros.rocks>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * version 2.1 as published by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#if !defined (__DHT_TEST__)
+  #if defined(__linux__) || defined(__CYGWIN__)
+    #define _DEFAULT_SOURCE
+  #else
+    #define _POSIX_C_SOURCE 200112L
+  #endif
+#endif
+
+#include "config.h"
+
+#define DHT              "dht"
+#define OUROBOROS_PREFIX DHT
+
+#include <ouroboros/endian.h>
+#include <ouroboros/hash.h>
+#include <ouroboros/ipcp-dev.h>
+#include <ouroboros/bitmap.h>
+#include <ouroboros/errno.h>
+#include <ouroboros/logs.h>
+#include <ouroboros/list.h>
+#include <ouroboros/random.h>
+#include <ouroboros/rib.h>
+#include <ouroboros/time.h>
+#include <ouroboros/tpm.h>
+#include <ouroboros/utils.h>
+#include <ouroboros/pthread.h>
+
+#include "addr-auth.h"
+#include "common/connmgr.h"
+#include "dht.h"
+#include "dt.h"
+#include "ipcp.h"
+#include "ops.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <inttypes.h>
+#include <limits.h>
+
+#include "dht.pb-c.h"
+typedef DhtMsg              dht_msg_t;
+typedef DhtContactMsg       dht_contact_msg_t;
+typedef DhtStoreMsg         dht_store_msg_t;
+typedef DhtFindReqMsg       dht_find_req_msg_t;
+typedef DhtFindNodeRspMsg   dht_find_node_rsp_msg_t;
+typedef DhtFindValueRspMsg  dht_find_value_rsp_msg_t;
+typedef ProtobufCBinaryData binary_data_t;
+
+#ifndef CLOCK_REALTIME_COARSE
+#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
+#endif
+
+#define DHT_MAX_REQS  128  /* KAD recommends rnd(), bmp can be changed.    */
+#define DHT_WARN_REQS 100  /* Warn if number of requests exceeds this.     */
+#define DHT_MAX_VALS  8    /* Max number of values to return for a key.    */
+#define DHT_T_CACHE   60   /* Max cache time for values (s)                */
+#define DHT_T_RESP    2    /* Response time to wait for a response (s).    */
+#define DHT_N_REPUB   5    /* Republish if expiry within n replications.   */
+#define DHT_R_PING    2    /* Ping retries before declaring peer dead.     */
+#define DHT_QUEER     15   /* Time to declare peer questionable.           */
+#define DHT_BETA      8    /* Bucket split factor, must be 1, 2, 4 or 8.   */
+#define DHT_RESP_RETR 6    /* Number of retries on sending a response.     */
+#define HANDLE_TIMEO  1000 /* Timeout for dht_handle_packet tpm check (ms) */
+#define DHT_INVALID   0    /* Invalid cookie value.                        */
+
+#define KEY_FMT "K<" HASH_FMT64 ">"
+#define KEY_VAL(key) HASH_VAL64(key)
+
+#define VAL_FMT "V<" HASH_FMT64 ">"
+#define VAL_VAL(val) HASH_VAL64((val).data)
+
+#define KV_FMT "<" HASH_FMT64 ", " HASH_FMT64 ">"
+#define KV_VAL(key, val) HASH_VAL64(key), HASH_VAL64((val).data)
+
+#define PEER_FMT "[" HASH_FMT64 "|" ADDR_FMT32 "]"
+#define PEER_VAL(id, addr) HASH_VAL64(id), ADDR_VAL32(&(addr))
+
+#define DHT_CODE(msg) dht_code_str[(msg)->code]
+
+#define TX_HDR_FMT "%s --> " PEER_FMT
+#define TX_HDR_VAL(msg, id, addr) DHT_CODE(msg), PEER_VAL(id, addr)
+
+#define RX_HDR_FMT "%s <-- " PEER_FMT
+#define RX_HDR_VAL(msg) DHT_CODE(msg), \
+        PEER_VAL(msg->src->id.data, msg->src->addr)
+
+#define CK_FMT "|" HASH_FMT64 "|"
+#define CK_VAL(cookie) HASH_VAL64(&(cookie))
+
+#define IS_REQUEST(code) \
+        (code == DHT_FIND_NODE_REQ || code == DHT_FIND_VALUE_REQ)
+
+enum dht_code {
+        DHT_STORE,
+        DHT_FIND_NODE_REQ,
+        DHT_FIND_NODE_RSP,
+        DHT_FIND_VALUE_REQ,
+        DHT_FIND_VALUE_RSP
+};
+
+const char * dht_code_str[] = {
+        "DHT_STORE",
+        "DHT_FIND_NODE_REQ",
+        "DHT_FIND_NODE_RSP",
+        "DHT_FIND_VALUE_REQ",
+        "DHT_FIND_VALUE_RSP"
+};
+
+enum dht_state {
+        DHT_NULL = 0,
+        DHT_INIT,
+        DHT_RUNNING
+};
+
+struct val_entry {
+        struct list_head next;
+
+        buffer_t         val;
+
+        time_t           t_exp;   /* Expiry time           */
+        time_t           t_repl;  /* Last replication time */
+};
+
+struct dht_entry {
+        struct list_head next;
+
+        uint8_t *        key;
+
+        struct {
+                struct list_head list;
+                size_t           len;
+        } vals;  /* We don't own these, only replicate */
+
+        struct {
+                struct list_head list;
+                size_t           len;
+        } lvals; /* We own these, must be republished  */
+};
+
+struct contact {
+        struct list_head next;
+
+        uint8_t *        id;
+        uint64_t         addr;
+
+        size_t           fails;
+        time_t           t_seen;
+};
+
+struct peer_entry {
+        struct list_head next;
+
+        uint64_t         cookie;
+        uint8_t *        id;
+        uint64_t         addr;
+        enum dht_code    code;
+
+        time_t           t_sent;
+};
+
+struct dht_req {
+        struct list_head next;
+
+        uint8_t *        key;
+        time_t           t_exp;
+
+        struct {
+                struct list_head list;
+                size_t           len;
+        } peers;
+
+        struct {
+                struct list_head list;
+                size_t           len;
+        } cache;
+};
+
+struct bucket {
+        struct {
+                struct list_head list;
+                size_t           len;
+        } contacts;
+
+        struct {
+                struct list_head list;
+                size_t           len;
+        } alts;
+
+        time_t           t_refr;
+
+        size_t           depth;
+        uint8_t          mask;
+
+        struct bucket *  parent;
+        struct bucket *  children[1L << DHT_BETA];
+};
+
+struct cmd {
+        struct list_head next;
+        buffer_t         cbuf;
+};
+
+struct dir_ops dht_dir_ops = {
+        .init  = (int (*)(void *)) dht_init,
+        .fini  = dht_fini,
+        .start = dht_start,
+        .stop  = dht_stop,
+        .reg   = dht_reg,
+        .unreg = dht_unreg,
+        .query = dht_query
+};
+
+struct {
+        struct { /* Kademlia parameters */
+                uint32_t alpha;     /* Number of concurrent requests   */
+                size_t   k;         /* Number of replicas to store     */
+                time_t   t_expire;  /* Expiry time for values (s)      */
+                time_t   t_refresh; /* Refresh time for contacts (s)   */
+                time_t   t_repl;    /* Replication time for values (s) */
+        };
+
+        buffer_t       id;
+
+        time_t         t0;    /* Creation time               */
+        uint64_t       addr;  /* Our own address             */
+        uint64_t       peer;  /* Enrollment peer address     */
+        uint64_t       magic; /* Magic cookie for retransmit */
+
+        uint64_t       eid;   /* Entity ID                   */
+
+        struct tpm *   tpm;
+        pthread_t      worker;
+
+        enum dht_state state;
+
+        struct {
+                struct {
+                        struct bucket * root;
+                } contacts;
+
+                struct {
+                        struct list_head list;
+                        size_t           len;
+                        size_t           vals;
+                        size_t           lvals;
+                } kv;
+
+                pthread_rwlock_t lock;
+        } db;
+
+        struct {
+                struct list_head list;
+                size_t           len;
+                pthread_cond_t   cond;
+                pthread_mutex_t  mtx;
+        } reqs;
+
+        struct {
+                struct list_head list;
+                pthread_cond_t   cond;
+                pthread_mutex_t  mtx;
+        } cmds;
+} dht;
+
+
+/* DHT RIB */
+
+static const char * dht_dir[] = {
+        "database",
+        "stats",
+        NULL
+};
+
+const char * dht_stats = \
+        "DHT: " HASH_FMT64 "\n"
+        "  Created: %s\n"
+        "  Address: " ADDR_FMT32 "\n"
+        "  Kademlia parameters:\n"
+        "     Number of concurrent requests (alpha): %10zu\n"
+        "     Number of replicas (k):                %10zu\n"
+        "     Expiry time for values (s):            %10ld\n"
+        "     Refresh time for contacts (s):         %10ld\n"
+        "     Replication time for values (s):       %10ld\n"
+        "  Number of keys:                           %10zu\n"
+        "  Number of local values:                   %10zu\n"
+        "  Number of non-local values:               %10zu\n";
+
+static int dht_rib_statfile(char * buf,
+                            size_t len)
+{
+        struct tm * tm;
+        char        tmstr[RIB_TM_STRLEN];
+        size_t      keys;
+        size_t      vals;
+        size_t      lvals;
+
+        assert(buf != NULL);
+        assert(len > 0);
+
+        pthread_rwlock_rdlock(&dht.db.lock);
+
+        keys  = dht.db.kv.len;
+        lvals = dht.db.kv.lvals;
+        vals  = dht.db.kv.vals;
+
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        tm = gmtime(&dht.t0);
+        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+        snprintf(buf, len, dht_stats,
+                 HASH_VAL64(dht.id.data),
+                 tmstr,
+                 ADDR_VAL32(&dht.addr),
+                 (size_t) dht.alpha, dht.k,
+                 dht.t_expire, dht.t_refresh, dht.t_repl,
+                 keys, lvals, vals);
+
+        return strlen(buf);
+}
+
+static size_t dht_db_file_len(void)
+{
+        size_t sz;
+        size_t vals;
+
+        sz = 18; /* DHT database + 2 * \n */
+
+        pthread_rwlock_rdlock(&dht.db.lock);
+
+        if (dht.db.kv.len == 0) {
+                pthread_rwlock_unlock(&dht.db.lock);
+                sz += 14; /* No entries */
+                return sz;
+        }
+
+        sz += 39 * 3 + 1; /* tally + extra newline */
+        sz += dht.db.kv.len * (25 + 19 + 23 + 1);
+
+        vals = dht.db.kv.vals + dht.db.kv.lvals;
+
+        sz += vals * (48 + 2 * RIB_TM_STRLEN);
+
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        return sz;
+}
+
+static int dht_rib_dbfile(char * buf,
+                          size_t len)
+{
+        struct tm * tm;
+        char        tmstr[RIB_TM_STRLEN];
+        char        exstr[RIB_TM_STRLEN];
+        size_t      i = 0;
+        struct list_head * p;
+
+        assert(buf != NULL);
+        assert(len > 0);
+
+        pthread_rwlock_rdlock(&dht.db.lock);
+
+        if (dht.db.kv.len == 0) {
+                i += snprintf(buf, len, "  No entries.\n");
+                pthread_rwlock_unlock(&dht.db.lock);
+                return i;
+        }
+
+        i += snprintf(buf + i, len - i, "DHT database:\n\n");
+        i += snprintf(buf + i, len - i,
+                      "Number of keys:             %10zu\n"
+                      "Number of local values:     %10zu\n"
+                      "Number of non-local values: %10zu\n\n",
+                      dht.db.kv.len, dht.db.kv.lvals, dht.db.kv.vals);
+
+        list_for_each(p, &dht.db.kv.list) {
+                struct dht_entry * e = list_entry(p, struct dht_entry, next);
+                struct list_head * h;
+
+                i += snprintf(buf + i, len - i, "Key: " KEY_FMT "\n",
+                              KEY_VAL(e->key));
+                i += snprintf(buf + i, len - i, "  Local entries:\n");
+
+                list_for_each(h, &e->lvals.list) {
+                        struct val_entry * v;
+
+                        v = list_entry(h, struct val_entry, next);
+
+                        tm = gmtime(&v->t_repl);
+                        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+                        tm = gmtime(&v->t_exp);
+                        strftime(exstr, sizeof(exstr), RIB_TM_FORMAT, tm);
+
+                        i += snprintf(buf + i, len - i,
+                                "    " VAL_FMT
+                                ", t_replicated=%.*s, t_expire=%.*s\n",
+                                VAL_VAL(v->val),
+                                RIB_TM_STRLEN, tmstr,
+                                RIB_TM_STRLEN, exstr);
+                }
+
+                i += snprintf(buf + i, len - i, "\n");
+
+                i += snprintf(buf + i, len - i, "  Non-local entries:\n");
+
+                list_for_each(h, &e->vals.list) {
+                        struct val_entry * v;
+
+                        v = list_entry(h, struct val_entry, next);
+
+                        tm = gmtime(&v->t_repl);
+                        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);
+
+                        tm = gmtime(&v->t_exp);
+                        strftime(exstr, sizeof(exstr), RIB_TM_FORMAT, tm);
+
+                        i += snprintf(buf + i, len - i,
+                                "    " VAL_FMT
+                                ", t_replicated=%.*s, t_expire=%.*s\n",
+                                VAL_VAL(v->val),
+                                RIB_TM_STRLEN, tmstr,
+                                RIB_TM_STRLEN, exstr);
+
+                }
+        }
+
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        return i;
+}
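A side note on the `i += snprintf(buf + i, len - i, ...)` accumulation used in dht_rib_dbfile() above: snprintf() returns the number of characters the output would have needed, so after a truncation `i` can exceed `len` and the next `len - i` underflows, since both are size_t. The code pre-sizes the buffer via dht_db_file_len() and dht_rib_getattr(), so this should not trigger in practice; a defensive variant would bound the offset, roughly like this (buf_appendf is a hypothetical helper, not in the patch):

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical helper: append to buf at offset off, never letting the
 * returned offset run past len, even when vsnprintf() truncates. */
static size_t buf_appendf(char *       buf,
                          size_t       len,
                          size_t       off,
                          const char * fmt,
                          ...)
{
        va_list ap;
        int     n;

        if (off >= len)
                return off; /* buffer full: avoid len - off underflow */

        va_start(ap, fmt);
        n = vsnprintf(buf + off, len - off, fmt, ap);
        va_end(ap);

        if (n < 0)
                return off;

        return off + (size_t) n > len ? len : off + (size_t) n;
}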
+
+        return i;
+}
+
+static int dht_rib_read(const char * path,
+                        char *       buf,
+                        size_t       len)
+{
+        char * entry;
+
+        entry = strstr(path, RIB_SEPARATOR) + 1;
+
+        if (strcmp(entry, "database") == 0) {
+                return dht_rib_dbfile(buf, len);
+        } else if (strcmp(entry, "stats") == 0) {
+                return dht_rib_statfile(buf, len);
+        }
+
+        return 0;
+}
+
+static int dht_rib_readdir(char *** buf)
+{
+        int i = 0;
+
+        while (dht_dir[i++] != NULL);
+
+        *buf = malloc(sizeof(**buf) * i);
+        if (*buf == NULL)
+                goto fail_buf;
+
+        i = 0;
+
+        while (dht_dir[i] != NULL) {
+                (*buf)[i] = strdup(dht_dir[i]);
+                if ((*buf)[i] == NULL)
+                        goto fail_dup;
+                i++;
+        }
+
+        return i;
+ fail_dup:
+        freepp(char, *buf, i);
+ fail_buf:
+        return -ENOMEM;
+}
+
+static int dht_rib_getattr(const char *      path,
+                           struct rib_attr * attr)
+{
+        struct timespec now;
+        char *          entry;
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        attr->mtime = now.tv_sec;
+
+        entry = strstr(path, RIB_SEPARATOR) + 1;
+
+        if (strcmp(entry, "database") == 0) {
+                attr->size = dht_db_file_len();
+        } else if (strcmp(entry, "stats") == 0) {
+                attr->size = 545; /* fixed size of the stats file */
+        }
+
+        return 0;
+}
+
+static struct rib_ops r_ops = {
+        .read    = dht_rib_read,
+        .readdir = dht_rib_readdir,
+        .getattr = dht_rib_getattr
+};
+
+/* Helper functions */
+
+static uint8_t * generate_id(void)
+{
+        uint8_t * id;
+
+        if (dht.id.len < sizeof(uint64_t)) {
+                log_err("DHT ID length is too short (%zu < %zu).",
+                        dht.id.len, sizeof(uint64_t));
+                return NULL;
+        }
+
+        id = malloc(dht.id.len);
+        if (id == NULL) {
+                log_err("Failed to malloc ID.");
+                goto fail_id;
+        }
+
+        if (random_buffer(id, dht.id.len) < 0) {
+                log_err("Failed to generate random ID.");
+                goto fail_rnd;
+        }
+
+        return id;
+ fail_rnd:
+        free(id);
+ fail_id:
+        return NULL;
+}
+
+static uint64_t generate_cookie(void)
+{
+        uint64_t cookie = DHT_INVALID;
+
+        while (cookie == DHT_INVALID)
+                random_buffer((uint8_t *) &cookie, sizeof(cookie));
+
+        return cookie;
+}
+
+/*
+ * If someone builds a network where the n (n > k) closest nodes all
+ * have IDs starting with the same 64 bits: by all means, change this.
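+ *
+ * Example: for 8-bit IDs 0x3a (00111010) and 0x2e (00101110), the
+ * XOR metric gives 0x3a ^ 0x2e = 0x14 (00010100); the longer the
+ * shared prefix of two IDs, the smaller their distance. dist()
+ * below applies this to the first 64 bits of the IDs only.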
+ */
+static uint64_t dist(const uint8_t * src,
+                     const uint8_t * dst)
+{
+        assert(dht.id.len >= sizeof(uint64_t));
+
+        return betoh64(*((uint64_t *) src) ^ *((uint64_t *) dst));
+}
+
+#define IS_CLOSER(x, y) (dist((x), dht.id.data) < dist((y), dht.id.data))
+
+static int addr_to_buf(const uint64_t addr,
+                       buffer_t *     buf)
+{
+        size_t   len;
+        uint64_t _addr;
+
+        len = sizeof(addr);
+        _addr = hton64(addr);
+
+        assert(buf != NULL);
+
+        buf->data = malloc(len);
+        if (buf->data == NULL)
+                goto fail_malloc;
+
+        buf->len = sizeof(_addr);
+        memcpy(buf->data, &_addr, sizeof(_addr));
+
+        return 0;
+ fail_malloc:
+        return -ENOMEM;
+}
+
+static int buf_to_addr(const buffer_t buf,
+                       uint64_t *     addr)
+{
+        assert(addr != NULL);
+        assert(buf.data != NULL);
+
+        if (buf.len != sizeof(*addr))
+                return -EINVAL;
+
+        *addr = ntoh64(*((uint64_t *) buf.data));
+
+        if (*addr == dht.addr)
+                *addr = INVALID_ADDR;
+
+        return 0;
+}
+
+static uint8_t * dht_dup_key(const uint8_t * key)
+{
+        uint8_t * dup;
+
+        assert(key != NULL);
+        assert(dht.id.len != 0);
+
+        dup = malloc(dht.id.len);
+        if (dup == NULL)
+                return NULL;
+
+        memcpy(dup, key, dht.id.len);
+
+        return dup;
+}
+
+/* DHT */
+
+static struct val_entry * val_entry_create(const buffer_t val,
+                                           time_t         exp)
+{
+        struct val_entry * e;
+        struct timespec    now;
+
+        assert(val.data != NULL);
+        assert(val.len > 0);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+#ifndef __DHT_TEST_ALLOW_EXPIRED__
+        if (exp < now.tv_sec)
+                return NULL; /* Refuse to add expired values */
+#endif
+        e = malloc(sizeof(*e));
+        if (e == NULL)
+                goto fail_entry;
+
+        list_head_init(&e->next);
+
+        e->val.len = val.len;
+        e->val.data = malloc(val.len);
+        if (e->val.data == NULL)
+                goto fail_val;
+
+        memcpy(e->val.data, val.data, val.len);
+
+        e->t_repl  = 0;
+        e->t_exp   = exp;
+
+        return e;
+
+ fail_val:
+        free(e);
+ fail_entry:
+        return NULL;
+}
+
+static void val_entry_destroy(struct val_entry * v)
+{
+        assert(v->val.data != NULL);
+
+        freebuf(v->val);
+        free(v);
+}
+
+static struct dht_entry * dht_entry_create(const uint8_t * key)
+{
+        struct dht_entry * e;
+
+        assert(key != NULL);
+
+        e = malloc(sizeof(*e));
+        if (e == NULL)
+                goto fail_entry;
+
+        list_head_init(&e->next);
+        list_head_init(&e->vals.list);
+        list_head_init(&e->lvals.list);
+
+        e->vals.len = 0;
+        e->lvals.len = 0;
+
+        e->key = dht_dup_key(key);
+        if (e->key == NULL)
+                goto fail_key;
+
+        return e;
+ fail_key:
+        free(e);
+ fail_entry:
+        return NULL;
+}
+
+static void dht_entry_destroy(struct dht_entry * e)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        assert(e != NULL);
+
+        list_for_each_safe(p, h, &e->vals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                list_del(&v->next);
+                val_entry_destroy(v);
+                --e->vals.len;
+                --dht.db.kv.vals;
+        }
+
+        list_for_each_safe(p, h, &e->lvals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                list_del(&v->next);
+                val_entry_destroy(v);
+                --e->lvals.len;
+                --dht.db.kv.lvals;
+        }
+
+        assert(e->vals.len == 0 && e->lvals.len == 0);
+
+        free(e->key);
+
+        free(e);
+}
+
+static struct val_entry * dht_entry_get_lval(const struct dht_entry * e,
+                                             const buffer_t           val)
+{
+        struct list_head * p;
+
+        assert(e != NULL);
+        assert(val.data != NULL);
+        assert(val.len > 0);
+
+        list_for_each(p, &e->lvals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (bufcmp(&v->val, &val) == 0)
+                        return v;
+        }
+
+        return NULL;
+}
+
+static struct val_entry * dht_entry_get_val(const struct dht_entry * e,
+                                            const buffer_t           val)
+{
+        struct list_head * p;
+
+        assert(e != NULL);
+        assert(val.data != NULL);
+        assert(val.len > 0);
+
+        list_for_each(p, &e->vals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (bufcmp(&v->val, &val) == 0)
+                        return v;
+        }
+
+        return NULL;
+}
+
+static int dht_entry_update_val(struct dht_entry * e,
+                                buffer_t           val,
+                                time_t             exp)
+{
+        struct val_entry * v;
+        struct timespec    now;
+
+        assert(e != NULL);
+        assert(val.data != NULL);
+        assert(val.len > 0);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        if (exp < now.tv_sec)
+                return -EINVAL; /* Refuse to add expired values */
+
+        if (dht_entry_get_lval(e, val) != NULL) {
+                log_dbg(KV_FMT " Val already in lvals.", KV_VAL(e->key, val));
+                return 0; /* Registered locally; don't add a remote copy */
+        }
+
+        v = dht_entry_get_val(e, val);
+        if (v == NULL) {
+                v = val_entry_create(val, exp);
+                if (v == NULL)
+                        return -ENOMEM;
+
+                list_add_tail(&v->next, &e->vals.list);
+                ++e->vals.len;
+                ++dht.db.kv.vals;
+
+                return 0;
+        }
+
+        if (v->t_exp < exp)
+                v->t_exp  = exp;
+
+        return 0;
+}
+
+static int dht_entry_update_lval(struct dht_entry * e,
+                                 buffer_t           val)
+{
+        struct val_entry * v;
+        struct timespec    now;
+
+        assert(e != NULL);
+        assert(val.data != NULL);
+        assert(val.len > 0);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        v = dht_entry_get_lval(e, val);
+        if (v == NULL) {
+                log_dbg(KV_FMT " Adding lval.", KV_VAL(e->key, val));
+                v = val_entry_create(val, now.tv_sec + dht.t_expire);
+                if (v == NULL)
+                        return -ENOMEM;
+
+                list_add_tail(&v->next, &e->lvals.list);
+                ++e->lvals.len;
+                ++dht.db.kv.lvals;
+
+                return 0;
+        }
+
+        return 0;
+}
+
+static int dht_entry_remove_lval(struct dht_entry * e,
+                                 buffer_t           val)
+{
+        struct val_entry * v;
+
+        assert(e != NULL);
+        assert(val.data != NULL);
+        assert(val.len > 0);
+
+        v = dht_entry_get_lval(e, val);
+        if (v == NULL)
+                return -ENOENT;
+
+        log_dbg(KV_FMT " Removing lval.", KV_VAL(e->key, val));
+
+        list_del(&v->next);
+        val_entry_destroy(v);
+        --e->lvals.len;
+        --dht.db.kv.lvals;
+
+        return 0;
+}
+
+#define IS_EXPIRED(v, now) ((now)->tv_sec > (v)->t_exp)
+static void dht_entry_remove_expired_vals(struct dht_entry * e)
+{
+        struct list_head * p;
+        struct list_head * h;
+        struct timespec    now;
+
+        assert(e != NULL);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        list_for_each_safe(p, h, &e->vals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (!IS_EXPIRED(v, &now))
+                        continue;
+
+                log_dbg(KV_FMT " Value expired.", KV_VAL(e->key, v->val));
+                list_del(&v->next);
+                val_entry_destroy(v);
+                --e->vals.len;
+                --dht.db.kv.vals;
+        }
+}
+
+static struct dht_entry * __dht_kv_find_entry(const uint8_t * key)
+{
+        struct list_head * p;
+
+        assert(key != NULL);
+
+        list_for_each(p, &dht.db.kv.list) {
+                struct dht_entry * e = list_entry(p, struct dht_entry, next);
+                if (!memcmp(key, e->key, dht.id.len))
+                        return e;
+        }
+
+        return NULL;
+}
+
+static void dht_kv_remove_expired_entries(void)
+{
+        struct list_head * p;
+        struct list_head * h;
+        struct timespec    now;
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        pthread_rwlock_wrlock(&dht.db.lock);
+
+        list_for_each_safe(p, h, &dht.db.kv.list) {
+                struct dht_entry * e = list_entry(p, struct dht_entry, next);
+                dht_entry_remove_expired_vals(e);
+                if (e->lvals.len > 0 || e->vals.len > 0)
+                        continue;
+
+                log_dbg(KEY_FMT " Entry removed.
", KEY_VAL(e->key)); +                list_del(&e->next); +                dht_entry_destroy(e); +                --dht.db.kv.len; +        } + +        pthread_rwlock_unlock(&dht.db.lock); +} + + +static struct contact * contact_create(const uint8_t * id, +                                       uint64_t        addr) +{ +        struct contact * c; +        struct timespec  t; + +        c = malloc(sizeof(*c)); +        if (c == NULL) +                return NULL; + +        list_head_init(&c->next); + +        clock_gettime(CLOCK_REALTIME_COARSE, &t); + +        c->addr   = addr; +        c->fails  = 0; +        c->t_seen = t.tv_sec; +        c->id     = dht_dup_key(id); +        if (c->id == NULL) { +                free(c); +                return NULL; +        } + +        return c; +} + +static void contact_destroy(struct contact * c) +{ +        assert(c != NULL); +        assert(list_is_empty(&c->next)); + +        free(c->id); +        free(c); +} + +static struct dht_req * dht_req_create(const uint8_t * key) +{ +        struct dht_req * req; +        struct timespec  now; + +        assert(key != NULL); + +        clock_gettime(PTHREAD_COND_CLOCK, &now); + +        req = malloc(sizeof(*req)); +        if (req == NULL) +                goto fail_malloc; + +        list_head_init(&req->next); + +        req->t_exp = now.tv_sec + DHT_T_RESP; + +        list_head_init(&req->peers.list); +        req->peers.len = 0; + +        req->key = dht_dup_key(key); +        if (req->key == NULL) +                goto fail_dup_key; + +        list_head_init(&req->cache.list); +        req->cache.len = 0; + +        return req; + + fail_dup_key: +        free(req); + fail_malloc: +        return NULL; +} + +static void dht_req_destroy(struct dht_req * req) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(req); +        assert(req->key); + +        list_for_each_safe(p, h, &req->peers.list) { +                struct peer_entry * e = list_entry(p, struct peer_entry, next); +                list_del(&e->next); +                free(e->id); +                free(e); +                --req->peers.len; +        } + +        list_for_each_safe(p, h, &req->cache.list) { +                struct val_entry * e = list_entry(p, struct val_entry, next); +                list_del(&e->next); +                val_entry_destroy(e); +                --req->cache.len; +        } + +        free(req->key); + +        assert(req->peers.len == 0); + +        free(req); +} + +static struct peer_entry * dht_req_get_peer(struct dht_req *    req, +                                            struct peer_entry * e) +{ +        struct list_head * p; + +        list_for_each(p, &req->peers.list) { +                struct peer_entry * x = list_entry(p, struct peer_entry, next); +                if (x->addr == e->addr) +                        return x; +        } + +        return NULL; +} + +#define IS_MAGIC(peer) ((peer)->cookie == dht.magic) +void dht_req_add_peer(struct dht_req * req, +                      struct peer_entry * e) +{ +        struct peer_entry * x; /* existing */ +        struct list_head *  p; /* iterator */ +        size_t              pos = 0; + +        assert(req   != NULL); +        assert(e     != NULL); +        assert(e->id != NULL); + +        /* +         * Dedupe messages to the same peer, unless +         *   1) The previous request was FIND_NODE and now it's FIND_VALUE +         *   2) We urgently need contacts from emergency peer (magic cookie) +         */ +  
      x = dht_req_get_peer(req, e);
+        if (x != NULL && x->code >= e->code && !IS_MAGIC(e))
+                goto skip;
+
+        /* Find how this contact ranks in distance to the key */
+        list_for_each(p, &req->peers.list) {
+                struct peer_entry * y = list_entry(p, struct peer_entry, next);
+                if (IS_CLOSER(y->id, e->id)) {
+                        pos++;
+                        continue;
+                }
+                break;
+        }
+
+        /* Add the peer if it ranks among the alpha closest,
+         * or if it is the emergency (magic cookie) peer */
+        if (pos < dht.alpha || IS_MAGIC(e)) {
+                x = malloc(sizeof(*x));
+                if (x == NULL) {
+                        log_err("Failed to malloc peer entry.");
+                        goto skip;
+                }
+
+                x->cookie = e->cookie;
+                x->addr   = e->addr;
+                x->code   = e->code;
+                x->t_sent = e->t_sent;
+                x->id     = dht_dup_key(e->id);
+                if (x->id == NULL) {
+                        log_err("Failed to dup peer ID.");
+                        free(x);
+                        goto skip;
+                }
+
+                if (IS_MAGIC(e))
+                        list_add(&x->next, p);
+                else
+                        list_add_tail(&x->next, p);
+                ++req->peers.len;
+                return;
+        }
+ skip:
+        list_del(&e->next);
+        free(e->id);
+        free(e);
+}
+
+static size_t dht_req_add_peers(struct dht_req *   req,
+                                struct list_head * pl)
+{
+        struct list_head *  p;
+        struct list_head *  h;
+        size_t              n = 0;
+
+        assert(req != NULL);
+        assert(pl  != NULL);
+
+        list_for_each_safe(p, h, pl) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                dht_req_add_peer(req, e);
+        }
+
+        return n;
+}
+
+static bool dht_req_has_peer(struct dht_req * req,
+                             uint64_t         cookie)
+{
+        struct list_head * p;
+
+        assert(req != NULL);
+
+        list_for_each(p, &req->peers.list) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                if (e->cookie == cookie)
+                        return true;
+        }
+
+        return false;
+}
+
+static void peer_list_destroy(struct list_head * pl)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        assert(pl != NULL);
+
+        list_for_each_safe(p, h, pl) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                list_del(&e->next);
+                free(e->id);
+                free(e);
+        }
+}
+
+static int dht_kv_create_peer_list(struct list_head * cl,
+                                   struct list_head * pl,
+                                   enum dht_code      code)
+{
+        struct list_head *  p;
+        struct list_head *  h;
+        struct timespec     now;
+        size_t              len;
+
+        assert(cl != NULL);
+        assert(pl != NULL);
+        assert(list_is_empty(pl));
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        len = 0;
+
+        list_for_each_safe(p, h, cl) {
+                struct contact * c = list_entry(p, struct contact, next);
+                struct peer_entry * e;
+                if (len++ == dht.alpha)
+                        break;
+
+                e = malloc(sizeof(*e));
+        
        if (e == NULL) +                        return -ENOMEM; + +                e->cookie = generate_cookie(); +                e->code   = code; +                e->addr   = c->addr; +                e->t_sent = now.tv_sec; + +                e->id = c->id; + +                list_add_tail(&e->next, pl); + +                list_del(&c->next); +                c->id = NULL; /* we stole the id */ +                contact_destroy(c); +        } + +        return 0; +} + +static struct dht_req * __dht_kv_req_get_req(const uint8_t * key) +{ +        struct list_head * p; + +        list_for_each(p, &dht.reqs.list) { +                struct dht_req * r = list_entry(p, struct dht_req, next); +                if (memcmp(r->key, key, dht.id.len) == 0) +                        return r; +        } + +        return NULL; +} + +static struct dht_req * __dht_kv_get_req_cache(const uint8_t * key) +{ +        struct dht_req * req; + +        assert(key != NULL); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) +                return NULL; + +        if (req->cache.len == 0) +                return NULL; + +        return req; +} + +static void __dht_kv_req_remove(const uint8_t * key) +{ +        struct dht_req * req; + +        assert(key != NULL); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) +                return; + +        list_del(&req->next); +        --dht.reqs.len; + +        dht_req_destroy(req); +} + +static struct dht_req * __dht_kv_get_req_peer(const uint8_t * key, +                                              uint64_t        cookie) +{ +        struct dht_req * req; + +        assert(key != NULL); + +        req = __dht_kv_req_get_req(key); +        if (req == NULL) +                return NULL; + +        if (!dht_req_has_peer(req, cookie)) +                return NULL; + +        return req; +} + +static bool dht_kv_has_req(const uint8_t * key, +                           uint64_t        cookie) +{ +        bool found; + +        pthread_mutex_lock(&dht.reqs.mtx); + +        found = __dht_kv_get_req_peer(key, cookie) != NULL; + +        pthread_mutex_unlock(&dht.reqs.mtx); + +        return found; +} + +/* + * This will filter the peer list for addresses that still need to be + * contacted. 
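+ *
+ * Example: a peer that was already sent FIND_VALUE(key) is dropped
+ * from the list here, while a peer only sent FIND_NODE(key) may be
+ * contacted again with the stronger FIND_VALUE request. Peers that
+ * survive the filter are recorded in the request and stay on the
+ * list for the caller to send to.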
+ */
+static int dht_kv_update_req(const uint8_t *    key,
+                             struct list_head * pl)
+{
+        struct dht_req * req;
+        struct timespec  now;
+
+        assert(key != NULL);
+        assert(pl != NULL);
+        assert(!list_is_empty(pl));
+
+        clock_gettime(PTHREAD_COND_CLOCK, &now);
+
+        pthread_mutex_lock(&dht.reqs.mtx);
+
+        req = __dht_kv_req_get_req(key);
+        if (req == NULL) {
+                if (dht.reqs.len == DHT_MAX_REQS) {
+                        log_err(KEY_FMT " Max reqs reached (%zu).",
+                                KEY_VAL(key), dht.reqs.len);
+                        peer_list_destroy(pl);
+                        goto fail_req;
+                }
+                req = dht_req_create(key);
+                if (req == NULL) {
+                        log_err(KEY_FMT " Failed to create req.",
+                                KEY_VAL(key));
+                        goto fail_req;
+                }
+                list_add_tail(&req->next, &dht.reqs.list);
+                ++dht.reqs.len;
+        }
+
+        if (req->cache.len > 0) /* Already have values */
+                peer_list_destroy(pl);
+
+        dht_req_add_peers(req, pl);
+        req->t_exp = now.tv_sec + DHT_T_RESP;
+
+        if (dht.reqs.len > DHT_WARN_REQS) {
+                log_warn("Number of outstanding requests (%zu) exceeds %u.",
+                         dht.reqs.len, DHT_WARN_REQS);
+        }
+
+        pthread_mutex_unlock(&dht.reqs.mtx);
+
+        return 0;
+ fail_req:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        return -1;
+}
+
+static int dht_kv_respond_req(uint8_t *       key,
+                              binary_data_t * vals,
+                              size_t          len)
+{
+        struct dht_req * req;
+        struct timespec  now;
+        size_t i;
+
+        assert(key != NULL);
+        assert(vals != NULL);
+        assert(len > 0);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        pthread_mutex_lock(&dht.reqs.mtx);
+
+        req = __dht_kv_req_get_req(key);
+        if (req == NULL) {
+                log_dbg(KEY_FMT " Failed to find req.", KEY_VAL(key));
+                goto fail_req;
+        }
+
+        for (i = 0; i < len; ++i) {
+                struct val_entry * e;
+                buffer_t val;
+                val.data = vals[i].data;
+                val.len = vals[i].len;
+                e = val_entry_create(val, now.tv_sec + DHT_T_CACHE);
+                if (e == NULL) {
+                        log_err("Failed to create val_entry.");
+                        continue;
+                }
+
+                list_add_tail(&e->next, &req->cache.list);
+                ++req->cache.len;
+        }
+
+        pthread_cond_broadcast(&dht.reqs.cond);
+
+        pthread_mutex_unlock(&dht.reqs.mtx);
+
+        return 0;
+ fail_req:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        return -1;
+}
+
+static ssize_t dht_kv_wait_req(const uint8_t * key,
+                               buffer_t **     vals)
+{
+        struct list_head * p;
+        struct dht_req *   req;
+        struct timespec    t;
+#ifdef __DHT_TEST__
+        struct timespec    intv = TIMESPEC_INIT_MS(10);
+#else
+        struct timespec    intv = TIMESPEC_INIT_S(DHT_T_RESP);
+#endif
+        size_t             max;
+        size_t             i = 0;
+        int                ret = 0;
+
+        assert(key != NULL);
+        assert(vals != NULL);
+
+        clock_gettime(PTHREAD_COND_CLOCK, &t);
+
+        ts_add(&t, &intv, &t);
+
+        pthread_mutex_lock(&dht.reqs.mtx);
+
+        pthread_cleanup_push(__cleanup_mutex_unlock, &dht.reqs.mtx);
+
+        while ((req = __dht_kv_get_req_cache(key)) == NULL) {
+                ret = pthread_cond_timedwait(&dht.reqs.cond, &dht.reqs.mtx, &t);
+                if (ret == ETIMEDOUT)
+                        break;
+        }
+
+        pthread_cleanup_pop(false);
+
+        if (ret == ETIMEDOUT) {
+                log_warn(KEY_FMT " Req timed out.", KEY_VAL(key));
+                __dht_kv_req_remove(key);
+                goto timedout;
+        }
+
+        max = MIN(req->cache.len, DHT_MAX_VALS);
+        if (max == 0)
+                goto no_vals;
+
+        *vals = malloc(max * sizeof(**vals));
+        if (*vals == NULL) {
+                log_err(KEY_FMT " Failed to malloc val buffer.",
+                        KEY_VAL(key));
+                goto fail_vals;
+        }
+
+        memset(*vals, 0, max * sizeof(**vals));
+
+        list_for_each(p, &req->cache.list) {
+                struct val_entry * v;
+                if (i == max)
+                        break; /* We have enough values */
+                v = list_entry(p, struct val_entry, next);
+                (*vals)[i].data = malloc(v->val.len);
+                if ((*vals)[i].data == NULL)
+                        goto fail_val_data;
+
+                (*vals)[i].len = v->val.len;
+                memcpy((*vals)[i++].data, v->val.data, v->val.len);
+        }
+
+        pthread_mutex_unlock(&dht.reqs.mtx);
+
+        return i;
+ no_vals:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        *vals = NULL;
+        return 0;
+ fail_val_data:
+        freebufs(*vals, i);
+ fail_vals:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        return -ENOMEM;
+ timedout:
+        pthread_mutex_unlock(&dht.reqs.mtx);
+        return -ETIMEDOUT;
+}
+
+static struct bucket * iter_bucket(struct bucket * b,
+                                   const uint8_t * id)
+{
+        uint8_t byte;
+        uint8_t mask;
+
+        assert(b != NULL);
+
+        if (b->children[0] == NULL)
+                return b;
+
+        byte = id[(b->depth * DHT_BETA) / CHAR_BIT];
+
+        mask = ((1L << DHT_BETA) - 1) & 0xFF;
+
+        byte >>= (CHAR_BIT - DHT_BETA) -
+                (((b->depth) * DHT_BETA) & (CHAR_BIT - 1));
+
+        return iter_bucket(b->children[(byte & mask)], id);
+}
+
+static struct bucket * __dht_kv_get_bucket(const uint8_t * id)
+{
+        assert(dht.db.contacts.root != NULL);
+
+        return iter_bucket(dht.db.contacts.root, id);
+}
+
+static void contact_list_add(struct list_head * l,
+                             struct contact *   c)
+{
+        struct list_head * p;
+
+        assert(l != NULL);
+        assert(c != NULL);
+
+        list_for_each(p, l) {
+                struct contact * e = list_entry(p, struct contact, next);
+                if (IS_CLOSER(e->id, c->id))
+                        continue;
+                break;
+        }
+
+        list_add_tail(&c->next, p);
+}
+
+static ssize_t dht_kv_contact_list(const uint8_t *    key,
+                                   struct list_head * l,
+                                   size_t             max)
+{
+        struct list_head * p;
+        struct bucket *    b;
+        struct timespec    t;
+        size_t             i;
+        size_t             len = 0;
+
+        assert(l   != NULL);
+        assert(key != NULL);
+        assert(list_is_empty(l));
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &t);
+
+        max = MIN(max, dht.k);
+
+        pthread_rwlock_rdlock(&dht.db.lock);
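+
+        /*
+         * Take contacts from the bucket covering the key; if that
+         * bucket is not full and not the root, widen to its parent's
+         * children so up to max (<= k) contacts can be returned.
+         */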
+
+        b = __dht_kv_get_bucket(key);
+        if (b == NULL) {
+                log_err(KEY_FMT " Failed to get bucket.", KEY_VAL(key));
+                goto fail_bucket;
+        }
+
+        b->t_refr = t.tv_sec + dht.t_refresh;
+
+        if (b->contacts.len == dht.k || b->parent == NULL) {
+                list_for_each(p, &b->contacts.list) {
+                        struct contact * c;
+                        struct contact * d;
+                        c = list_entry(p, struct contact, next);
+                        if (c->addr == dht.addr)
+                                continue;
+                        d = contact_create(c->id, c->addr);
+                        if (d == NULL)
+                                continue;
+                        contact_list_add(l, d);
+                        if (++len == max)
+                                break;
+                }
+        } else {
+                struct bucket * par = b->parent;
+                for (i = 0; i < (1L << DHT_BETA) && len < max; ++i) {
+                        list_for_each(p, &par->children[i]->contacts.list) {
+                                struct contact * c;
+                                struct contact * d;
+                                c = list_entry(p, struct contact, next);
+                                if (c->addr == dht.addr)
+                                        continue;
+                                d = contact_create(c->id, c->addr);
+                                if (d == NULL)
+                                        continue;
+                                contact_list_add(l, d);
+                                if (++len == max)
+                                        break;
+                        }
+                }
+        }
+
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        return len;
+ fail_bucket:
+        pthread_rwlock_unlock(&dht.db.lock);
+        return -1;
+}
+
+static void contact_list_destroy(struct list_head * l)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        assert(l != NULL);
+
+        list_for_each_safe(p, h, l) {
+                struct contact * c = list_entry(p, struct contact, next);
+                list_del(&c->next);
+                contact_destroy(c);
+        }
+}
+
+static ssize_t dht_kv_get_contacts(const uint8_t *       key,
+                                   dht_contact_msg_t *** msgs)
+{
+        struct list_head   cl;
+        struct list_head * p;
+        struct list_head * h;
+        ssize_t            len;
+        size_t             i = 0;
+
+        assert(key != NULL);
+        assert(msgs != NULL);
+
+        list_head_init(&cl);
+
+        len = dht_kv_contact_list(key, &cl, dht.k);
+        if (len <= 0) {
+                *msgs = NULL;
+                return 0;
+        }
+
+        *msgs = malloc(len * sizeof(**msgs));
+        if (*msgs == NULL)
+                goto fail_msgs;
+
+        list_for_each_safe(p, h, &cl) {
+                struct contact * c;
+                (*msgs)[i] = malloc(sizeof(***msgs));
+                if ((*msgs)[i] == NULL)
+                        goto fail_contact;
+
+                dht_contact_msg__init((*msgs)[i]);
+                c = list_entry(p, struct contact, next);
+                list_del(&c->next);
+                (*msgs)[i]->id.data = c->id;
+                (*msgs)[i]->id.len  = dht.id.len;
+                (*msgs)[i++]->addr  = c->addr;
+                free(c);
+        }
+
+        return i;
+ fail_contact:
+        while (i-- > 
0) +                dht_contact_msg__free_unpacked((*msgs)[i], NULL); +        free(*msgs); +        *msgs = NULL; + fail_msgs: +        contact_list_destroy(&cl); +        return -ENOMEM; +} + +/* Build a refresh list. */ +static void __dht_kv_bucket_refresh_list(struct bucket *    b, +                                         time_t             t, +                                         struct list_head * r) +{ +        struct contact * c; +        struct contact * d; + +        assert(b != NULL); + +        if (t < b->t_refr) +                return; + +        if (*b->children != NULL) { +                size_t i; +                for (i = 0; i < (1L << DHT_BETA); ++i) +                        __dht_kv_bucket_refresh_list(b->children[i], t, r); +        } + +        if (b->contacts.len == 0) +                return; + +        c = list_first_entry(&b->contacts.list, struct contact, next); +        if (t > c->t_seen + dht.t_refresh) { +                d = contact_create(c->id, c->addr); +                if (d != NULL) +                        list_add(&d->next, r); +        } +} + +static struct bucket * bucket_create(void) +{ +        struct bucket * b; +        struct timespec t; +        size_t          i; + +        b = malloc(sizeof(*b)); +        if (b == NULL) +                return NULL; + +        list_head_init(&b->contacts.list); +        b->contacts.len = 0; + +        list_head_init(&b->alts.list); +        b->alts.len = 0; + +        clock_gettime(CLOCK_REALTIME_COARSE, &t); +        b->t_refr = t.tv_sec + dht.t_refresh; + +        for (i = 0; i < (1L << DHT_BETA); ++i) +                b->children[i]  = NULL; + +        b->parent = NULL; +        b->depth = 0; +        b->mask  = 0; + +        return b; +} + +static void bucket_destroy(struct bucket * b) +{ +        struct list_head * p; +        struct list_head * h; +        size_t             i; + +        assert(b != NULL); + +        for (i = 0; i < (1L << DHT_BETA); ++i) +                if (b->children[i] != NULL) +                        bucket_destroy(b->children[i]); + +        list_for_each_safe(p, h, &b->contacts.list) { +                struct contact * c = list_entry(p, struct contact, next); +                list_del(&c->next); +                contact_destroy(c); +                --b->contacts.len; +        } + +        list_for_each_safe(p, h, &b->alts.list) { +                struct contact * c = list_entry(p, struct contact, next); +                list_del(&c->next); +                contact_destroy(c); +                --b->alts.len; +        } + +        free(b); +} + +static bool bucket_has_id(struct bucket * b, +                          const uint8_t * id) +{ +        uint8_t mask; +        uint8_t byte; + +        if (b->depth == 0) +                return true; + +        byte = id[(b->depth * DHT_BETA) / CHAR_BIT]; + +        mask = ((1L << DHT_BETA) - 1) & 0xFF; + +        byte >>= (CHAR_BIT - DHT_BETA) - +                (((b->depth - 1) * DHT_BETA) & (CHAR_BIT - 1)); + +        return ((byte & mask) == b->mask); +} + +static int move_contacts(struct bucket * b, +                         struct bucket * c) +{ +        struct list_head * p; +        struct list_head * h; +        struct contact *   d; + +        assert(b != NULL); +        assert(c != NULL); + +        list_for_each_safe(p, h, &b->contacts.list) { +                d = list_entry(p, struct contact, next); +                if (bucket_has_id(c, d->id)) { +                        list_del(&d->next); +                        
--b->contacts.len;
+                        list_add_tail(&d->next, &c->contacts.list);
+                        ++c->contacts.len;
+                }
+        }
+
+        return 0;
+}
+
+static int split_bucket(struct bucket * b)
+{
+        uint8_t mask = 0;
+        size_t i;
+        size_t b_len;
+
+        assert(b);
+        assert(b->alts.len == 0);
+        assert(b->contacts.len != 0);
+        assert(b->children[0] == NULL);
+
+        b_len = b->contacts.len;
+
+        for (i = 0; i < (1L << DHT_BETA); ++i) {
+                b->children[i] = bucket_create();
+                if (b->children[i] == NULL)
+                        goto fail_child;
+
+                b->children[i]->depth  = b->depth + 1;
+                b->children[i]->mask   = mask;
+                b->children[i]->parent = b;
+
+                move_contacts(b, b->children[i]);
+
+                mask++;
+        }
+
+        for (i = 0; i < (1L << DHT_BETA); ++i)
+                if (b->children[i]->contacts.len == b_len)
+                        split_bucket(b->children[i]);
+
+        return 0;
+ fail_child:
+        while (i-- > 0) {
+                bucket_destroy(b->children[i]);
+                b->children[i] = NULL; /* Restore the leaf invariant */
+        }
+        return -1;
+}
+
+static int dht_kv_update_contacts(const uint8_t * id,
+                                  uint64_t        addr)
+{
+        struct list_head * p;
+        struct list_head * h;
+        struct bucket *    b;
+        struct contact *   c;
+
+        assert(id != NULL);
+        assert(addr != INVALID_ADDR);
+
+        pthread_rwlock_wrlock(&dht.db.lock);
+
+        b = __dht_kv_get_bucket(id);
+        if (b == NULL) {
+                log_err(PEER_FMT " Failed to get bucket.", PEER_VAL(id, addr));
+                goto fail_update;
+        }
+
+        c = contact_create(id, addr);
+        if (c == NULL) {
+                log_err(PEER_FMT " Failed to create contact.",
+                        PEER_VAL(id, addr));
+                goto fail_update;
+        }
+
+        list_for_each_safe(p, h, &b->contacts.list) {
+                struct contact * d = list_entry(p, struct contact, next);
+                if (d->addr == addr) {
+                        list_del(&d->next);
+                        contact_destroy(d);
+                        --b->contacts.len;
+                }
+        }
+
+        if (b->contacts.len == dht.k) {
+                if (bucket_has_id(b, dht.id.data)) {
+                        list_add_tail(&c->next, &b->contacts.list);
+                        ++b->contacts.len;
+                        if (split_bucket(b)) {
+                                list_del(&c->next);
+                                contact_destroy(c);
+                                --b->contacts.len;
+                        }
+                } else if (b->alts.len == dht.k) {
+                        struct contact * d;
+                        d = list_first_entry(&b->alts.list,
+                                struct contact, next);
+                        list_del(&d->next);
+                        contact_destroy(d);
+                        --b->alts.len;
+                        list_add_tail(&c->next, &b->alts.list);
+                        ++b->alts.len;
+                } else {
+                        list_add_tail(&c->next, &b->alts.list);
+                        ++b->alts.len;
+                }
+        } else {
+                list_add_tail(&c->next, &b->contacts.list);
+                ++b->contacts.len;
+        }
+
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        return 0;
+ fail_update:
+        
pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static time_t gcd(time_t a, +                  time_t b) +{ +        if (a == 0) +                return b; + +        return gcd(b % a, a); +} + +static dht_contact_msg_t * dht_kv_src_contact_msg(void) +{ +        dht_contact_msg_t * src; + +        src = malloc(sizeof(*src)); +        if (src == NULL) +                goto fail_malloc; + +        dht_contact_msg__init(src); + +        src->id.data = dht_dup_key(dht.id.data); +        if (src->id.data == NULL) +                goto fail_id; + +        src->id.len  = dht.id.len; +        src->addr    = dht.addr; + +        return src; + fail_id: +        dht_contact_msg__free_unpacked(src, NULL); + fail_malloc: +        return NULL; +} + +static dht_msg_t * dht_kv_find_req_msg(const uint8_t * key, +                                       enum dht_code   code) +{ +        dht_msg_t * msg; + +        assert(key != NULL); + +        msg = malloc(sizeof(*msg)); +        if (msg == NULL) +                goto fail_malloc; + +        dht_msg__init(msg); +        msg->code = code; + +        msg->src = dht_kv_src_contact_msg(); +        if (msg->src == NULL) +                goto fail_msg; + +        msg->find = malloc(sizeof(*msg->find)); +        if (msg->find == NULL) +                goto fail_msg; + +        dht_find_req_msg__init(msg->find); + +        msg->find->key.data = dht_dup_key(key); +        if (msg->find->key.data == NULL) +                goto fail_msg; + +        msg->find->key.len = dht.id.len; +        msg->find->cookie  = DHT_INVALID; + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_malloc: +        return NULL; +} + +static dht_msg_t * dht_kv_find_node_req_msg(const uint8_t * key) +{ +        return dht_kv_find_req_msg(key, DHT_FIND_NODE_REQ); +} + +static dht_msg_t * dht_kv_find_value_req_msg(const uint8_t * key) +{ +        return dht_kv_find_req_msg(key, DHT_FIND_VALUE_REQ); +} + +static dht_msg_t * dht_kv_find_node_rsp_msg(uint8_t *             key, +                                            uint64_t              cookie, +                                            dht_contact_msg_t *** contacts, +                                            size_t                len) +{ +        dht_msg_t * msg; + +        msg = malloc(sizeof(*msg)); +        if (msg == NULL) +                goto fail_malloc; + +        dht_msg__init(msg); +        msg->code = DHT_FIND_NODE_RSP; + +        msg->src = dht_kv_src_contact_msg(); +        if (msg->src == NULL) +                goto fail_msg; + +        msg->node = malloc(sizeof(*msg->node)); +        if (msg->node == NULL) +                goto fail_msg; + +        dht_find_node_rsp_msg__init(msg->node); + +        msg->node->key.data = dht_dup_key(key); +        if (msg->node->key.data == NULL) +                goto fail_msg; + +        msg->node->cookie     = cookie; +        msg->node->key.len    = dht.id.len; +        msg->node->n_contacts = len; +        if (len != 0) { /* Steal the ptr */ +                msg->node->contacts = *contacts; +                *contacts = NULL; +        } + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_malloc: +        return NULL; +} + +static dht_msg_t * dht_kv_find_value_rsp_msg(uint8_t *             key, +                                             uint64_t              cookie, +                                             dht_contact_msg_t *** contacts, +                                             size_t       
         n_contacts, +                                             buffer_t **           vals, +                                             size_t                n_vals) +{ +        dht_msg_t * msg; + +        msg = dht_kv_find_node_rsp_msg(key, cookie, contacts, n_contacts); +        if (msg == NULL) +                goto fail_node_rsp; + +        msg->code = DHT_FIND_VALUE_RSP; + +        msg->val = malloc(sizeof(*msg->val)); +        if (msg->val == NULL) +                goto fail_msg; + +        dht_find_value_rsp_msg__init(msg->val); + +        msg->val->n_values = n_vals; +        if (n_vals != 0)  /* Steal the ptr */ +                msg->val->values = (binary_data_t *) *vals; + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_node_rsp: +        return NULL; +} + +static dht_msg_t * dht_kv_store_msg(const uint8_t * key, +                                    const buffer_t  val, +                                    time_t          exp) +{ +        dht_msg_t * msg; + +        assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        msg = malloc(sizeof(*msg)); +        if (msg == NULL) +                goto fail_malloc; + +        dht_msg__init(msg); + +        msg->code = DHT_STORE; + +        msg->src = dht_kv_src_contact_msg(); +        if (msg->src == NULL) +                goto fail_msg; + +        msg->store = malloc(sizeof(*msg->store)); +        if (msg->store == NULL) +                goto fail_msg; + +        dht_store_msg__init(msg->store); + +        msg->store->key.data = dht_dup_key(key); +        if (msg->store->key.data == NULL) +                goto fail_msg; + +        msg->store->key.len = dht.id.len; +        msg->store->val.data = malloc(val.len); +        if (msg->store->val.data == NULL) +                goto fail_msg; + +        memcpy(msg->store->val.data, val.data, val.len); + +        msg->store->val.len = val.len; +        msg->store->exp = exp; + +        return msg; + + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_malloc: +        return NULL; +} + +static ssize_t dht_kv_retrieve(const uint8_t * key, +                               buffer_t **     vals) +{ +        struct dht_entry * e; +        struct list_head * p; +        size_t             n; +        size_t             i; + +        assert(key  != NULL); + +        pthread_rwlock_rdlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) +                goto no_vals; + +        n = MIN(DHT_MAX_VALS, e->vals.len + e->lvals.len); +        if (n == 0) +                goto no_vals; + +        *vals = malloc(n * sizeof(**vals)); +        if (*vals == NULL) +                goto fail_vals; + +        memset(*vals, 0, n * sizeof(**vals)); + +        i = 0; + +        list_for_each(p, &e->vals.list) { +                struct val_entry * v; +                if (i == n) +                        break; /* We have enough values */ +                v = list_entry(p, struct val_entry, next); +                (*vals)[i].data = malloc(v->val.len); +                if ((*vals)[i].data == NULL) +                        goto fail_val_data; + +                (*vals)[i].len = v->val.len; +                memcpy((*vals)[i++].data, v->val.data, v->val.len); +        } + +        list_for_each(p, &e->lvals.list) { +                struct val_entry * v; +                if (i == n) +                        break; /* We have enough values */ +                v = list_entry(p, struct val_entry, next); +  
              (*vals)[i].data = malloc(v->val.len); +                if ((*vals)[i].data == NULL) +                        goto fail_val_data; + +                (*vals)[i].len = v->val.len; +                memcpy((*vals)[i++].data, v->val.data, v->val.len); +        } + +        pthread_rwlock_unlock(&dht.db.lock); + +        return (ssize_t) i; + + fail_val_data: +        pthread_rwlock_unlock(&dht.db.lock); +        freebufs(*vals, i); +        *vals = NULL; +        return -ENOMEM; + fail_vals: +        pthread_rwlock_unlock(&dht.db.lock); +        return -ENOMEM; + no_vals: +        pthread_rwlock_unlock(&dht.db.lock); +        *vals = NULL; +        return 0; +} + +static void __cleanup_dht_msg(void * msg) +{ +        dht_msg__free_unpacked((dht_msg_t *) msg, NULL); +} + +#ifdef DEBUG_PROTO_DHT +static void dht_kv_debug_msg(dht_msg_t * msg) +{ +        struct tm *   tm; +        char          tmstr[RIB_TM_STRLEN]; +        time_t        stamp; +        size_t        i; + +        if (msg == NULL) +                return; + +        pthread_cleanup_push(__cleanup_dht_msg, msg); + +        switch (msg->code) { +        case DHT_STORE: +                log_proto("  key: " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->store->key.data), +                          msg->store->key.len); +                log_proto("  val: " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->store->val.data), +                          msg->store->val.len); +                stamp = msg->store->exp; +                tm = gmtime(&stamp); +                strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); +                log_proto("  exp: %s.", tmstr); +                break; +        case DHT_FIND_NODE_REQ: +                /* FALLTHRU */ +        case DHT_FIND_VALUE_REQ: +                log_proto("  cookie: " HASH_FMT64, +                          HASH_VAL64(&msg->find->cookie)); +                log_proto("  key:    " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->find->key.data), +                          msg->find->key.len); +                break; +        case DHT_FIND_VALUE_RSP: +                log_proto("  cookie: " HASH_FMT64, +                          HASH_VAL64(&msg->node->cookie)); +                log_proto("  key:    " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->node->key.data), +                          msg->node->key.len); +                log_proto("  values: [%zd]", msg->val->n_values); +                for (i = 0; i < msg->val->n_values; i++) +                        log_proto("    " HASH_FMT64 " [%zu bytes]", +                                  HASH_VAL64(msg->val->values[i].data), +                                  msg->val->values[i].len); +                log_proto("  contacts: [%zd]", msg->node->n_contacts); +                for (i = 0; i < msg->node->n_contacts; i++) { +                        dht_contact_msg_t * c = msg->node->contacts[i]; +                        log_proto("    " PEER_FMT, +                                  PEER_VAL(c->id.data, c->addr)); +                } +                break; +        case DHT_FIND_NODE_RSP: +                log_proto("  cookie: " HASH_FMT64, +                        HASH_VAL64(&msg->node->cookie)); +                log_proto("  key:    " HASH_FMT64 " [%zu bytes]", +                          HASH_VAL64(msg->node->key.data), msg->node->key.len); +                log_proto("  contacts: [%zd]", msg->node->n_contacts); +                for (i = 0; i < 
msg->node->n_contacts; i++) {
+                        dht_contact_msg_t * c = msg->node->contacts[i];
+                        log_proto("    " PEER_FMT,
+                                  PEER_VAL(c->id.data, c->addr));
+                }
+
+                break;
+        default:
+                break;
+        }
+
+        pthread_cleanup_pop(false);
+}
+
+static void dht_kv_debug_msg_snd(dht_msg_t * msg,
+                                 uint8_t *   id,
+                                 uint64_t    addr)
+{
+        if (msg == NULL)
+                return;
+
+        log_proto(TX_HDR_FMT ".", TX_HDR_VAL(msg, id, addr));
+
+        dht_kv_debug_msg(msg);
+}
+
+static void dht_kv_debug_msg_rcv(dht_msg_t * msg)
+{
+        if (msg == NULL)
+                return;
+
+        log_proto(RX_HDR_FMT ".", RX_HDR_VAL(msg));
+
+        dht_kv_debug_msg(msg);
+}
+#endif
+
+#ifndef __DHT_TEST__
+static int dht_send_msg(dht_msg_t * msg,
+                        uint64_t    addr)
+{
+        size_t               len;
+        struct shm_du_buff * sdb;
+
+        if (msg == NULL)
+                return 0;
+
+        assert(addr != INVALID_ADDR && addr != dht.addr);
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                log_warn("%s failed to pack.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        if (ipcp_sdb_reserve(&sdb, len)) {
+                log_warn("%s failed to get sdb.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        dht_msg__pack(msg, shm_du_buff_head(sdb));
+
+        if (dt_write_packet(addr, QOS_CUBE_BE, dht.eid, sdb) < 0) {
+                log_warn("%s write failed.", DHT_CODE(msg));
+                goto fail_send;
+        }
+
+        return 0;
+ fail_send:
+        ipcp_sdb_release(sdb);
+ fail_msg:
+        return -1;
+}
+#else /* function for testing */
+static int dht_send_msg(dht_msg_t * msg,
+                        uint64_t    addr)
+{
+        buffer_t buf;
+
+        assert(msg != NULL);
+        assert(addr != INVALID_ADDR && addr != dht.addr);
+
+        buf.len = dht_msg__get_packed_size(msg);
+        if (buf.len == 0) {
+                log_warn("%s failed to pack.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        buf.data = malloc(buf.len);
+        if (buf.data == NULL) {
+                log_warn("%s failed to malloc buf.", DHT_CODE(msg));
+                goto fail_msg;
+        }
+
+        dht_msg__pack(msg, buf.data);
+
+        if (sink_send_msg(&buf, addr) < 0) {
+                log_warn("%s write failed.", DHT_CODE(msg));
+                goto fail_send;
+        }
+
+        return 0;
+ fail_send:
+        freebuf(buf);
+ fail_msg:
+        return -1;
+}
+#endif /* __DHT_TEST__ */
+
+static void __cleanup_peer_list(void * pl)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        assert(pl != NULL);
+
+        list_for_each_safe(p, h, (struct list_head *) pl) {
+                struct peer_entry * e = list_entry(p, struct peer_entry, next);
+                list_del(&e->next);
+                free(e->id);
+                free(e);
+        }
+}
+
+
+static int dht_kv_send_msgs(dht_msg_t *        msg,
+                            struct list_head * pl)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        pthread_cleanup_push(__cleanup_dht_msg, msg);
+        pthread_cleanup_push(__cleanup_peer_list, pl);
+
+        list_for_each_safe(p, h, pl) {
+                struct peer_entry * e = list_entry(p, struct 
peer_entry, next); +                if (IS_REQUEST(msg->code)) { +                        msg->find->cookie = e->cookie; +                        assert(msg->find->cookie != DHT_INVALID); +                } +                if (dht_send_msg(msg, e->addr) < 0) +                        continue; + +#ifdef DEBUG_PROTO_DHT +                dht_kv_debug_msg_snd(msg, e->id, e->addr); +#endif +                list_del(&e->next); +                free(e->id); +                free(e); +        } + +        pthread_cleanup_pop(false); +        pthread_cleanup_pop(false); + +        return list_is_empty(pl) ? 0 : -1; +} + +static int dht_kv_get_peer_list_for_msg(dht_msg_t *        msg, +                                        struct list_head * pl) +{ +        struct list_head   cl;  /* contact list       */ +        uint8_t *          key; /* key in the request */ +        size_t             max; + +        assert(msg != NULL); + +        assert(list_is_empty(pl)); + +        max = msg->code == DHT_STORE ? dht.k : dht.alpha; + +        switch (msg->code) { +        case DHT_FIND_NODE_REQ: +                /* FALLTHRU */ +        case DHT_FIND_VALUE_REQ: +                key = msg->find->key.data; +                break; +        case DHT_STORE: +                key = msg->store->key.data; +                break; +        default: +                log_err("Invalid DHT msg code (%d).", msg->code); +                return -1; +        } + +        list_head_init(&cl); + +        if (dht_kv_contact_list(key, &cl, max) < 0) { +                log_err(KEY_FMT " Failed to get contact list.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        if (list_is_empty(&cl)) { +                log_warn(KEY_FMT " No available contacts.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        if (dht_kv_create_peer_list(&cl, pl, msg->code) < 0) { +                log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key)); +                goto fail_peers; +        } + +        contact_list_destroy(&cl); +        return 0; + fail_peers: +        contact_list_destroy(&cl); + fail_contacts: +        return -1; +} + +static int dht_kv_store_remote(const uint8_t * key, +                               const buffer_t  val, +                               time_t          exp) +{ +        dht_msg_t *      msg; +        struct timespec  now; +        struct list_head pl; + +        assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        msg = dht_kv_store_msg(key, val, exp); +        if (msg == NULL) { +                log_err(KV_FMT " Failed to create %s.", +                        KV_VAL(key, val), dht_code_str[DHT_STORE]); +                goto fail_msg; +        } + +        list_head_init(&pl); + +        if (dht_kv_get_peer_list_for_msg(msg, &pl) < 0) { +                log_dbg(KV_FMT " Failed to get peer list.", KV_VAL(key, val)); +                goto fail_peer_list; +        } + +        if (dht_kv_send_msgs(msg, &pl) < 0) { +                log_warn(KV_FMT " Failed to send any %s msg.", +                         KV_VAL(key, val), DHT_CODE(msg)); +                goto fail_msgs; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        return 0; + fail_msgs: +        peer_list_destroy(&pl); + fail_peer_list: +        dht_msg__free_unpacked(msg, NULL); + fail_msg: +        return -1; +} + +/* recursive lookup, start with pl NULL */ +static int dht_kv_query_contacts(const 
uint8_t *    key, +                                 struct list_head * pl) +{ +        struct list_head p; + +        dht_msg_t * msg; + +        assert(key != NULL); + +        msg = dht_kv_find_node_req_msg(key); +        if (msg == NULL) { +                log_err(KEY_FMT " Failed to create %s msg.", +                        KEY_VAL(key), dht_code_str[DHT_FIND_NODE_REQ]); +                goto fail_msg; +        } + +        if (pl == NULL) { +                list_head_init(&p); +                pl = &p; +        } + +        if (list_is_empty(pl) && dht_kv_get_peer_list_for_msg(msg, pl) < 0) { +                log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key)); +                goto fail_peer_list; +        } + +        if (dht_kv_update_req(key, pl) < 0) { +                log_warn(KEY_FMT " Failed to update req.", KEY_VAL(key)); +                goto fail_update; +        } + +        if (dht_kv_send_msgs(msg, pl)) { +                log_warn(KEY_FMT " Failed to send any %s msg.", +                         KEY_VAL(key), DHT_CODE(msg)); +                goto fail_update; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        return 0; + fail_update: +        peer_list_destroy(pl); + fail_peer_list: +        dht_msg__free_unpacked(msg, NULL); + fail_msg: +        return -1; +} + +/* recursive lookup, start with pl NULL */ +static ssize_t dht_kv_query_remote(const uint8_t *    key, +                                   buffer_t **        vals, +                                   struct list_head * pl) +{ +        struct list_head p; +        dht_msg_t *      msg; + +        assert(key != NULL); + +        msg = dht_kv_find_value_req_msg(key); +        if (msg == NULL) { +                log_err(KEY_FMT " Failed to create value req.", KEY_VAL(key)); +                goto fail_msg; +        } + +        if (pl == NULL) { +                list_head_init(&p); +                pl = &p; +        } + +        if (list_is_empty(pl) && dht_kv_get_peer_list_for_msg(msg, pl) < 0) { +                log_warn(KEY_FMT " Failed to get peer list.", KEY_VAL(key)); +                goto fail_peer_list; +        } + +        if (dht_kv_update_req(key, pl) < 0) { +                log_err(KEY_FMT " Failed to update request.", KEY_VAL(key)); +                goto fail_update; +        } + +        if (dht_kv_send_msgs(msg, pl)) { +                log_warn(KEY_FMT " Failed to send %s msg.", +                         KEY_VAL(key), DHT_CODE(msg)); +                goto fail_update; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        if (vals == NULL) /* recursive lookup, already waiting */ +                return 0; + +        return dht_kv_wait_req(key, vals); + fail_update: +        peer_list_destroy(pl); + fail_peer_list: +        dht_msg__free_unpacked(msg, NULL); + fail_msg: +        return -1; +} + +static void __add_dht_kv_entry(struct dht_entry * e) +{ +        struct list_head * p; + +        assert(e != NULL); + +        list_for_each(p, &dht.db.kv.list) { +                struct dht_entry * d = list_entry(p, struct dht_entry, next); +                if (IS_CLOSER(d->key, e->key)) +                        continue; +                break; +        } + +        list_add_tail(&e->next, p); +        ++dht.db.kv.len; +} + +/* incoming store message */ +static int dht_kv_store(const uint8_t * key, +                        const buffer_t  val, +                        time_t          exp) +{ +        struct dht_entry * e; +        bool               new = false; + +       
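+        /* Entries are kept ordered by XOR distance to our own ID by
+         * __add_dht_kv_entry() above, so the replication walk in
+         * dht_kv_next_values() can skip past keys already visited. */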
 assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) { +                log_dbg(KV_FMT " Adding entry (store).", KV_VAL(key, val)); +                e = dht_entry_create(key); +                if (e == NULL) +                        goto fail; + +                new = true; + +                __add_dht_kv_entry(e); +        } + +        if (dht_entry_update_val(e, val, exp) < 0) +                goto fail_add; + +        pthread_rwlock_unlock(&dht.db.lock); + +        return 0; + fail_add: +        if (new) { +                list_del(&e->next); +                dht_entry_destroy(e); +                --dht.db.kv.len; +        } + fail: +        pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static int dht_kv_publish(const uint8_t * key, +                          const buffer_t  val) +{ +        struct dht_entry * e; +        struct timespec    now; +        bool               new = false; + +        assert(key != NULL); +        assert(val.data != NULL); +        assert(val.len > 0); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) { +                log_dbg(KV_FMT " Adding entry (publish).", KV_VAL(key, val)); +                e = dht_entry_create(key); +                if (e == NULL) +                        goto fail; + +                __add_dht_kv_entry(e); +                new = true; +        } + +        if (dht_entry_update_lval(e, val) < 0) +                goto fail_add; + +        pthread_rwlock_unlock(&dht.db.lock); + +        dht_kv_store_remote(key, val, now.tv_sec + dht.t_expire); + +        return 0; + fail_add: +        if (new) { +                list_del(&e->next); +                dht_entry_destroy(e); +                --dht.db.kv.len; +        } + fail: +        pthread_rwlock_unlock(&dht.db.lock); +        return -1; +} + +static int dht_kv_unpublish(const uint8_t * key, +                            const buffer_t  val) +{ +        struct dht_entry * e; +        int                rc; + +        assert(key != NULL); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) +                goto no_entry; + +        rc = dht_entry_remove_lval(e, val); + +        pthread_rwlock_unlock(&dht.db.lock); + +        return rc; + no_entry: +        pthread_rwlock_unlock(&dht.db.lock); +        return -ENOENT; + +} + +/* message validation */ +static int dht_kv_validate_store_msg(const dht_store_msg_t * store) +{ +        if (store == NULL) { +                log_warn("Store in msg is NULL."); +                return -EINVAL; +        } + +        if (store->key.data == NULL || store->key.len == 0) { +                log_warn("Invalid key in DHT store msg."); +                return -EINVAL; +        } + +        if (store->key.len != dht.id.len) { +                log_warn("Invalid key length in DHT store msg."); +                return -EINVAL; +        } + +        if (store->val.data == NULL || store->val.len == 0) { +                log_warn("Invalid value in DHT store msg."); +                return -EINVAL; +        } + +        return 0; +} + +static int validate_find_req_msg(const dht_find_req_msg_t * req) +{ +        if (req == NULL) { +                log_warn("Request in msg is NULL."); +                return -EINVAL; +        } + + 
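+        /* Keys must have the same length as our own ID (dht.id.len):
+         * routing works on XOR distance between equal-length keys,
+         * so anything else is rejected below. */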
       if (req->key.data == NULL || req->key.len == 0) {
+                log_warn("Find request without key.");
+                return -EINVAL;
+        }
+
+        if (req->key.len != dht.id.len) {
+                log_warn("Invalid key length in request msg.");
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int validate_node_rsp_msg(const dht_find_node_rsp_msg_t * rsp)
+{
+        if (rsp == NULL) {
+                log_warn("Node rsp in msg is NULL.");
+                return -EINVAL;
+        }
+
+        if (rsp->key.data == NULL) {
+                log_warn("Invalid key in DHT response msg.");
+                return -EINVAL;
+        }
+
+        if (rsp->key.len != dht.id.len) {
+                log_warn("Invalid key length in DHT response msg.");
+                return -EINVAL;
+        }
+
+        if (!dht_kv_has_req(rsp->key.data, rsp->cookie)) {
+                log_warn(KEY_FMT " No request " CK_FMT  ".",
+                         KEY_VAL(rsp->key.data), CK_VAL(rsp->cookie));
+
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int validate_value_rsp_msg(const dht_find_value_rsp_msg_t * rsp)
+{
+        if (rsp == NULL) {
+                log_warn("Invalid DHT find value response msg.");
+                return -EINVAL;
+        }
+
+        if (rsp->values == NULL && rsp->n_values > 0) {
+                log_warn("DHT response value count does not match data.");
+                return -EINVAL;
+        }
+
+        if (rsp->n_values == 0 && rsp->values != NULL) {
+                log_dbg("DHT response did not set values NULL.");
+                return 0;
+        }
+
+        return 0;
+}
+
+static int dht_kv_validate_msg(dht_msg_t * msg)
+{
+        assert(msg != NULL);
+
+        if (msg->src->id.len != dht.id.len) {
+                log_warn("%s Invalid source contact ID.", DHT_CODE(msg));
+                return -EINVAL;
+        }
+
+        if (msg->src->addr == INVALID_ADDR) {
+                log_warn("%s Invalid source address.", DHT_CODE(msg));
+                return -EINVAL;
+        }
+
+        switch (msg->code) {
+        case DHT_FIND_VALUE_REQ:
+                /* FALLTHRU */
+        case DHT_FIND_NODE_REQ:
+                if (validate_find_req_msg(msg->find) < 0)
+                        return -EINVAL;
+                break;
+        case DHT_FIND_VALUE_RSP:
+                if (validate_value_rsp_msg(msg->val) < 0)
+                        return -EINVAL;
+                /* FALLTHRU */
+        case DHT_FIND_NODE_RSP:
+                if (validate_node_rsp_msg(msg->node) < 0)
+                        return -EINVAL;
+                break;
+        case DHT_STORE:
+                if (dht_kv_validate_store_msg(msg->store) < 0)
+                        return -EINVAL;
+                break;
+        default:
+                log_warn("Invalid DHT msg code (%d).", msg->code);
+                return -ENOENT;
+        }
+
+        return 0;
+}
+
+static void do_dht_kv_store(const dht_store_msg_t * store)
+{
+        struct tm * tm;
+        char        tmstr[RIB_TM_STRLEN];
+        buffer_t    val;
+        uint8_t *   key;
+        time_t      exp;
+
+        assert(store != NULL);
+
+        val.data = store->val.data;
+        val.len  = store->val.len;
+        key      = store->key.data;
+        exp      = store->exp;
+
+        if (dht_kv_store(store->key.data, val, store->exp) < 0) {
+                log_err(KV_FMT " Failed to store.", KV_VAL(key, val));
+                return;
+        }
+
+        
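+        /* The timestamp below is formatted only for the debug log;
+         * the store above already recorded the raw expiry value. */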
tm = gmtime(&exp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); +        log_dbg(KV_FMT " Stored value until %s.", KV_VAL(key, val), tmstr); +} + +static dht_msg_t * do_dht_kv_find_node_req(const dht_find_req_msg_t * req) +{ +        dht_contact_msg_t ** contacts; +        dht_msg_t *          rsp; +        uint8_t *            key; +        uint64_t             cookie; +        ssize_t              len; + +        assert(req  != NULL); + +        key    = req->key.data; +        cookie = req->cookie; + +        len = dht_kv_get_contacts(key, &contacts); +        if (len < 0) { +                log_warn(KEY_FMT " Failed to get contacts.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        rsp = dht_kv_find_node_rsp_msg(key, cookie, &contacts, len); +        if (rsp == NULL) { +                log_err(KEY_FMT " Failed to create %s.", KEY_VAL(key), +                        dht_code_str[DHT_FIND_NODE_RSP]); +                goto fail_msg; +        } + +        assert(rsp->code == DHT_FIND_NODE_RSP); + +        log_info(KEY_FMT " Responding with %zd contacts", KEY_VAL(key), len); + +        return rsp; + fail_msg: +        while (len-- > 0) +                dht_contact_msg__free_unpacked(contacts[len], NULL); +        free(contacts); + fail_contacts: +        return NULL; +} + +static void dht_kv_process_node_rsp(dht_contact_msg_t ** contacts, +                                    size_t               len, +                                    struct list_head *   pl, +                                    enum dht_code        code) +{ +        struct timespec now; +        size_t          i; + +        assert(contacts != NULL); +        assert(len > 0); +        assert(pl != NULL); +        assert(list_is_empty(pl)); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        for (i = 0; i < len; i++) { +                dht_contact_msg_t * c = contacts[i]; +                struct peer_entry * e; +                if (c->addr == dht.addr) +                        continue; + +                if (dht_kv_update_contacts(c->id.data, c->addr) < 0) +                        log_warn(PEER_FMT " Failed to update contacts.", +                                 PEER_VAL(c->id.data, c->addr)); + +                e = malloc(sizeof(*e)); +                if (e == NULL) { +                        log_err(PEER_FMT " Failed to malloc entry.", +                                PEER_VAL(c->id.data, c->addr)); +                        continue; +                } + +                e->id = dht_dup_key(c->id.data); +                if (e->id == NULL) { +                        log_warn(PEER_FMT " Failed to duplicate id.", +                                 PEER_VAL(c->id.data, c->addr)); +                        free(e); +                        continue; +                } + +                e->cookie = generate_cookie(); +                e->code   = code; +                e->addr   = c->addr; +                e->t_sent = now.tv_sec; + +                list_add_tail(&e->next, pl); +        } +} + +static dht_msg_t * do_dht_kv_find_value_req(const dht_find_req_msg_t * req) +{ +        dht_contact_msg_t ** contacts; +        ssize_t              n_contacts; +        buffer_t *           vals; +        ssize_t              n_vals; +        dht_msg_t *          rsp; +        uint8_t *            key; +        uint64_t             cookie; + +        assert(req != NULL); + +        key    = req->key.data; +        cookie = req->cookie; + +        n_contacts = dht_kv_get_contacts(key, 
&contacts); +        if (n_contacts < 0) { +                log_warn(KEY_FMT " Failed to get contacts.", KEY_VAL(key)); +                goto fail_contacts; +        } + +        assert(n_contacts > 0 || contacts == NULL); + +        n_vals = dht_kv_retrieve(key, &vals); +        if (n_vals < 0) { +                log_dbg(KEY_FMT " Failed to get values.", KEY_VAL(key)); +                goto fail_vals; +        } + +        if (n_vals == 0) +                log_dbg(KEY_FMT " No values found.", KEY_VAL(key)); + +        rsp = dht_kv_find_value_rsp_msg(key, cookie, &contacts, n_contacts, +                                        &vals, n_vals); +        if (rsp == NULL) { +                log_err(KEY_FMT " Failed to create %s msg.", +                        KEY_VAL(key), dht_code_str[DHT_FIND_VALUE_RSP]); +                goto fail_msg; +        } + +        log_info(KEY_FMT " Responding with %zd contacts, %zd values.", +                 KEY_VAL(req->key.data), n_contacts, n_vals); + +        return rsp; + + fail_msg: +        freebufs(vals, n_vals); + fail_vals: +        while (n_contacts-- > 0) +                dht_contact_msg__free_unpacked(contacts[n_contacts], NULL); +        free(contacts); + fail_contacts: +        return NULL; +} + +static void do_dht_kv_find_node_rsp(const dht_find_node_rsp_msg_t * rsp) +{ +        struct list_head pl; + +        assert(rsp != NULL); + +        list_head_init(&pl); + +        dht_kv_process_node_rsp(rsp->contacts, rsp->n_contacts, &pl, +                                DHT_FIND_NODE_REQ); + +        if (list_is_empty(&pl)) +                goto no_contacts; + +        if (dht_kv_update_req(rsp->key.data, &pl) < 0) { +                log_err(KEY_FMT " Failed to update request.", +                        KEY_VAL(rsp->key.data)); +                goto fail_update; +        } + +        dht_kv_query_contacts(rsp->key.data, &pl); + +        return; + + fail_update: +        peer_list_destroy(&pl); + no_contacts: +        return; +} + +static void do_dht_kv_find_value_rsp(const dht_find_node_rsp_msg_t  * node, +                                     const dht_find_value_rsp_msg_t * val) +{ +        struct list_head pl; +        uint8_t *        key; + +        assert(node != NULL); +        assert(val != NULL); + +        list_head_init(&pl); + +        key = node->key.data; + +        dht_kv_process_node_rsp(node->contacts, node->n_contacts, &pl, +                                DHT_FIND_VALUE_REQ); + +        if (val->n_values > 0) { +                log_dbg(KEY_FMT " %zd new values received.", +                        KEY_VAL(key), val->n_values); +                if (dht_kv_respond_req(key, val->values, val->n_values) < 0) +                        log_warn(KEY_FMT " Failed to respond to request.", +                                 KEY_VAL(key)); +                peer_list_destroy(&pl); +                return; /* done! 
*/ +        } + +        if (list_is_empty(&pl)) +                goto no_contacts; + +        if (dht_kv_update_req(key, &pl) < 0) { +                log_err(KEY_FMT " Failed to update request.", KEY_VAL(key)); +                goto fail_update; +        } + +        dht_kv_query_remote(key, NULL, &pl); + +        return; + fail_update: +        peer_list_destroy(&pl); + no_contacts: +        return; +} + +static dht_msg_t * dht_wait_for_dht_msg(void) +{ +        dht_msg_t *  msg; +        struct cmd * cmd; + +        pthread_mutex_lock(&dht.cmds.mtx); + +        pthread_cleanup_push(__cleanup_mutex_unlock, &dht.cmds.mtx); + +        while (list_is_empty(&dht.cmds.list)) +                pthread_cond_wait(&dht.cmds.cond, &dht.cmds.mtx); + +        cmd = list_last_entry(&dht.cmds.list, struct cmd, next); +        list_del(&cmd->next); + +        pthread_cleanup_pop(true); + +        msg = dht_msg__unpack(NULL, cmd->cbuf.len, cmd->cbuf.data); +        if (msg == NULL) +                log_warn("Failed to unpack DHT msg."); + +        freebuf(cmd->cbuf); +        free(cmd); + +        return msg; +} + +static void do_dht_msg(dht_msg_t * msg) +{ +        dht_msg_t * rsp = NULL; +        uint8_t *   id; +        uint64_t    addr; + +#ifdef DEBUG_PROTO_DHT +        dht_kv_debug_msg_rcv(msg); +#endif +        if (dht_kv_validate_msg(msg) == -EINVAL) { +                log_warn("%s Validation failed.", DHT_CODE(msg)); +                dht_msg__free_unpacked(msg, NULL); +                return; +        } + +        id =   msg->src->id.data; +        addr = msg->src->addr; + +        if (dht_kv_update_contacts(id, addr) < 0) +                log_warn(PEER_FMT " Failed to update contact from msg src.", +                         PEER_VAL(id, addr)); + +        pthread_cleanup_push(__cleanup_dht_msg, msg); + +        switch(msg->code) { +        case DHT_FIND_VALUE_REQ: +                rsp = do_dht_kv_find_value_req(msg->find); +                break; +        case DHT_FIND_NODE_REQ: +                rsp = do_dht_kv_find_node_req(msg->find); +                break; +        case DHT_STORE: +                do_dht_kv_store(msg->store); +                break; +        case DHT_FIND_NODE_RSP: +                do_dht_kv_find_node_rsp(msg->node); +                break; +        case DHT_FIND_VALUE_RSP: +                do_dht_kv_find_value_rsp(msg->node, msg->val); +                break; +        default: +                assert(false); /* already validated */ +        } + +        pthread_cleanup_pop(true); + +        if (rsp == NULL) +                return; + +        pthread_cleanup_push(__cleanup_dht_msg, rsp); + +        dht_send_msg(rsp, addr); + +        pthread_cleanup_pop(true); /* free rsp */ +} + +static void * dht_handle_packet(void * o) +{ +        (void) o; + +        while (true) { +                dht_msg_t * msg; + +                msg = dht_wait_for_dht_msg(); +                if (msg == NULL) +                        continue; + +                tpm_begin_work(dht.tpm); + +                do_dht_msg(msg); + +                tpm_end_work(dht.tpm); +        } + +        return (void *) 0; +} +#ifndef __DHT_TEST__ +static void dht_post_packet(void *               comp, +                            struct shm_du_buff * sdb) +{ +        struct cmd * cmd; + +        (void) comp; + +        cmd = malloc(sizeof(*cmd)); +        if (cmd == NULL) { +                log_err("Command malloc failed."); +                goto fail_cmd; +        } + +        cmd->cbuf.data = 
malloc(shm_du_buff_len(sdb));
+        if (cmd->cbuf.data == NULL) {
+                log_err("Command buffer malloc failed.");
+                goto fail_buf;
+        }
+
+        cmd->cbuf.len = shm_du_buff_len(sdb);
+
+        memcpy(cmd->cbuf.data, shm_du_buff_head(sdb), cmd->cbuf.len);
+
+        ipcp_sdb_release(sdb);
+
+        pthread_mutex_lock(&dht.cmds.mtx);
+
+        list_add(&cmd->next, &dht.cmds.list);
+
+        pthread_cond_signal(&dht.cmds.cond);
+
+        pthread_mutex_unlock(&dht.cmds.mtx);
+
+        return;
+
+ fail_buf:
+        free(cmd);
+ fail_cmd:
+        ipcp_sdb_release(sdb);
+        return;
+}
+#endif
+
+int dht_reg(const uint8_t * key)
+{
+        buffer_t val;
+
+        if (addr_to_buf(dht.addr, &val) < 0) {
+                log_err("Failed to convert address to buffer.");
+                goto fail_a2b;
+        }
+
+        if (dht_kv_publish(key, val)) {
+                log_err(KV_FMT " Failed to publish.", KV_VAL(key, val));
+                goto fail_publish;
+        }
+
+        freebuf(val);
+
+        return 0;
+ fail_publish:
+        freebuf(val);
+ fail_a2b:
+        return -1;
+}
+
+int dht_unreg(const uint8_t * key)
+{
+        buffer_t val;
+
+        if (addr_to_buf(dht.addr, &val) < 0) {
+                log_err("Failed to convert address to buffer.");
+                goto fail_a2b;
+        }
+
+        if (dht_kv_unpublish(key, val)) {
+                log_err(KV_FMT " Failed to unpublish.", KV_VAL(key, val));
+                goto fail_unpublish;
+        }
+
+        freebuf(val);
+
+        return 0;
+ fail_unpublish:
+        freebuf(val);
+ fail_a2b:
+        return -1;
+}
+
+uint64_t dht_query(const uint8_t * key)
+{
+        buffer_t *       vals;
+        ssize_t          n;
+        uint64_t         addr;
+
+        n = dht_kv_retrieve(key, &vals);
+        if (n < 0) {
+                log_err(KEY_FMT " Failed to query db.", KEY_VAL(key));
+                goto fail_vals;
+        }
+
+        if (n == 0) {
+                assert(vals == NULL);
+
+                log_dbg(KEY_FMT " No local values.", KEY_VAL(key));
+                n = dht_kv_query_remote(key, &vals, NULL);
+                if (n < 0) {
+                        log_warn(KEY_FMT " Failed to query DHT.", KEY_VAL(key));
+                        goto fail_vals;
+                }
+                if (n == 0) {
+                        log_dbg(KEY_FMT " No values.", KEY_VAL(key));
+                        goto no_vals;
+                }
+        }
+
+        if (buf_to_addr(vals[0], &addr) < 0) {
+                log_err(VAL_FMT " Failed addr conversion.", VAL_VAL(vals[0]));
+                goto fail_b2a;
+        }
+
+        if (n > 1 && addr == INVALID_ADDR && buf_to_addr(vals[1], &addr) < 0) {
+                log_err(VAL_FMT " Failed addr conversion.", VAL_VAL(vals[1]));
+                goto fail_b2a;
+        }
+
+        freebufs(vals, n);
+
+        return addr;
+ fail_b2a:
+        freebufs(vals, n);
+        return INVALID_ADDR;
+ no_vals:
+        free(vals);
+ fail_vals:
+        return INVALID_ADDR;
+}
+
+static int emergency_peer(struct list_head * pl)
+{
+        struct peer_entry * e;
+        struct timespec     now;
+
+        assert(pl != NULL);
+        assert(list_is_empty(pl));
+
+        if (dht.peer == INVALID_ADDR)
+                return -1;
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        e = malloc(sizeof(*e));
+        if (e == NULL) {
+                log_err("Failed to malloc emergency peer entry.");
+     
           goto fail_malloc; +        } + +        e->id = dht_dup_key(dht.id.data); +        if (e->id == NULL) { +                log_err("Failed to duplicate DHT ID for emergency peer."); +                goto fail_id; +        } + +        e->addr   = dht.peer; +        e->cookie = dht.magic; +        e->code   = DHT_FIND_NODE_REQ; +        e->t_sent = now.tv_sec; + +        list_add_tail(&e->next, pl); + +        return 0; + fail_id: +        free(e); + fail_malloc: +        return -ENOMEM; +} + +static int dht_kv_seed_bootstrap_peer(void) +{ +        struct list_head pl; + +        list_head_init(&pl); + +        if (dht.peer == INVALID_ADDR) { +                log_dbg("No-one to contact."); +                return 0; +        } + +        if (emergency_peer(&pl) < 0) { +                log_err("Could not create emergency peer."); +                goto fail_peer; +        } + +        log_dbg("Pinging emergency peer " ADDR_FMT32 ".", +                ADDR_VAL32(&dht.peer)); + +        if (dht_kv_query_contacts(dht.id.data, &pl) < 0) { +                log_warn("Failed to bootstrap peer."); +                goto fail_query; +        } + +        peer_list_destroy(&pl); + +        return 0; + fail_query: +        peer_list_destroy(&pl); + fail_peer: +        return -EAGAIN; +} + +static void dht_kv_check_contacts(void) +{ +        struct list_head cl; +        struct list_head pl; + +        list_head_init(&cl); + +        dht_kv_contact_list(dht.id.data, &cl, dht.k); + +        if (!list_is_empty(&cl)) +                goto success; + +        contact_list_destroy(&cl); + +        list_head_init(&pl); + +        if (dht.peer == INVALID_ADDR) { +                log_dbg("No-one to contact."); +                return; +        } + +        if (emergency_peer(&pl) < 0) { +                log_err("Could not create emergency peer."); +                goto fail_peer; +        } + +        log_dbg("No contacts found, using emergency peer " ADDR_FMT32 ".", +                ADDR_VAL32(&dht.peer)); + +        dht_kv_query_contacts(dht.id.data, &pl); + +        peer_list_destroy(&pl); + +        return; + success: +        contact_list_destroy(&cl); +        return; + fail_peer: +        return; +} + +static void dht_kv_remove_expired_reqs(void) +{ +        struct list_head * p; +        struct list_head * h; +        struct timespec    now; + +        clock_gettime(PTHREAD_COND_CLOCK, &now); + +        pthread_mutex_lock(&dht.reqs.mtx); + +        list_for_each_safe(p, h, &dht.reqs.list) { +                struct dht_req * e; +                e = list_entry(p, struct dht_req, next); +                if (IS_EXPIRED(e, &now)) { +                        log_dbg(KEY_FMT " Removing expired request.", +                                KEY_VAL(e->key)); +                        list_del(&e->next); +                        dht_req_destroy(e); +                        --dht.reqs.len; +                } +        } + +        pthread_mutex_unlock(&dht.reqs.mtx); +} + +static void value_list_destroy(struct list_head * vl) +{ +        struct list_head * p; +        struct list_head * h; + +        assert(vl != NULL); + +        list_for_each_safe(p, h, vl) { +                struct val_entry * v = list_entry(p, struct val_entry, next); +                list_del(&v->next); +                val_entry_destroy(v); +        } +} + +#define MUST_REPLICATE(v, now) ((now)->tv_sec > (v)->t_repl + dht.t_repl) +#define MUST_REPUBLISH(v, now) /* Close to expiry deadline */ \ +        (((v)->t_exp - (now)->tv_sec) < 
(DHT_N_REPUB * dht.t_repl))
+static void dht_entry_get_repl_lists(const struct dht_entry * e,
+                                     struct list_head *       repl,
+                                     struct list_head *       rebl,
+                                     struct timespec *        now)
+{
+        struct list_head * p;
+        struct val_entry * n;
+
+        list_for_each(p, &e->vals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (MUST_REPLICATE(v, now) && !IS_EXPIRED(v, now)) {
+                        n = val_entry_create(v->val, v->t_exp);
+                        if (n == NULL)
+                                continue;
+
+                        list_add_tail(&n->next, repl);
+                }
+        }
+
+        list_for_each(p, &e->lvals.list) {
+                struct val_entry * v = list_entry(p, struct val_entry, next);
+                if (MUST_REPLICATE(v, now) && MUST_REPUBLISH(v, now)) {
+                        /* Add expire time here, to allow creating val_entry */
+                        n = val_entry_create(v->val, now->tv_sec + dht.t_expire);
+                        if (n == NULL)
+                                continue;
+
+                        list_add_tail(&n->next, rebl);
+                }
+        }
+}
+
+static int dht_kv_next_values(uint8_t *          key,
+                              struct list_head * repl,
+                              struct list_head * rebl)
+{
+        struct timespec    now;
+        struct list_head * p;
+        struct list_head * h;
+        struct dht_entry * e = NULL;
+
+        assert(key != NULL);
+        assert(repl != NULL);
+        assert(rebl != NULL);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        assert(list_is_empty(repl));
+        assert(list_is_empty(rebl));
+
+        pthread_rwlock_rdlock(&dht.db.lock);
+
+        if (dht.db.kv.len == 0)
+                goto no_entries;
+
+        list_for_each_safe(p, h, &dht.db.kv.list) {
+                struct dht_entry * d = list_entry(p, struct dht_entry, next);
+                if (IS_CLOSER(d->key, key))
+                        continue;  /* Already processed */
+                if (memcmp(d->key, key, dht.id.len) == 0)
+                        continue;  /* Processed on the previous pass */
+                e = d;
+                break;
+        }
+
+        if (e != NULL) {
+                memcpy(key, e->key, dht.id.len);
+                dht_entry_get_repl_lists(e, repl, rebl, &now);
+        }
+ no_entries:
+        pthread_rwlock_unlock(&dht.db.lock);
+
+        return list_is_empty(repl) && list_is_empty(rebl) ? 
-ENOENT : 0; +} + +static void dht_kv_replicate_value(const uint8_t *         key, +                                   struct val_entry *      v, +                                   const struct timespec * now) +{ +        assert(MUST_REPLICATE(v, now)); + +        (void) now; + +        if (dht_kv_store_remote(key, v->val, v->t_exp) == 0) { +                log_dbg(KV_FMT " Replicated.", KV_VAL(key, v->val)); +                return; +        } + +        log_dbg(KV_FMT " Replication failed.", KV_VAL(key, v->val)); + +        list_del(&v->next); +        val_entry_destroy(v); +} + +static void dht_kv_republish_value(const uint8_t *  key, +                            struct val_entry *      v, +                            const struct timespec * now) +{ +        assert(MUST_REPLICATE(v, now)); + +        if (MUST_REPUBLISH(v, now)) +                assert(v->t_exp >= now->tv_sec + dht.t_expire); + +        if (dht_kv_store_remote(key, v->val, v->t_exp) == 0) { +                log_dbg(KV_FMT " Republished.", KV_VAL(key, v->val)); +                return; +        } + +        if (MUST_REPUBLISH(v, now)) +                log_warn(KV_FMT " Republish failed.", KV_VAL(key, v->val)); +        else +                log_dbg(KV_FMT " Replication failed.", KV_VAL(key, v->val)); + +        list_del(&v->next); +        val_entry_destroy(v); +} + +static void dht_kv_update_replication_times(const uint8_t *         key, +                                            struct list_head *      repl, +                                            struct list_head *      rebl, +                                            const struct timespec * now) +{ +        struct dht_entry * e; +        struct list_head * p; +        struct list_head * h; +        struct val_entry * v; + +        assert(key != NULL); +        assert(repl != NULL); +        assert(rebl != NULL); +        assert(now != NULL); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        e = __dht_kv_find_entry(key); +        if (e == NULL) { +                pthread_rwlock_unlock(&dht.db.lock); +                return; +        } + +        list_for_each_safe(p, h, repl) { +                struct val_entry * x; +                v = list_entry(p, struct val_entry, next); +                x = dht_entry_get_val(e, v->val); +                if (x == NULL) { +                        log_err(KV_FMT " Not in vals.", KV_VAL(key, v->val)); +                        continue; +                } + +                x->t_repl = now->tv_sec; + +                list_del(&v->next); +                val_entry_destroy(v); +        } + +        list_for_each_safe(p, h, rebl) { +                struct val_entry * x; +                v = list_entry(p, struct val_entry, next); +                x = dht_entry_get_lval(e, v->val); +                if (x == NULL) { +                        log_err(KV_FMT " Not in lvals.", KV_VAL(key, v->val)); +                        continue; +                } + +                x->t_repl = now->tv_sec; +                if (v->t_exp > x->t_exp) { +                        x->t_exp = v->t_exp; /* update expiration time */ +                } + +                list_del(&v->next); +                val_entry_destroy(v); +        } + +        pthread_rwlock_unlock(&dht.db.lock); +} + +static void __cleanup_value_list(void * o) +{ +        return value_list_destroy((struct list_head *) o); +} + +static void dht_kv_replicate_values(const uint8_t *    key, +                                    struct list_head * repl, +                           
         struct list_head * rebl) +{ +        struct timespec    now; +        struct list_head * p; +        struct list_head * h; + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_cleanup_push(__cleanup_value_list, repl); +        pthread_cleanup_push(__cleanup_value_list, rebl); + +        list_for_each_safe(p, h, repl) { +                struct val_entry * v; +                v = list_entry(p, struct val_entry, next); +                dht_kv_replicate_value(key, v, &now); +        } + +        list_for_each_safe(p, h, rebl) { +                struct val_entry * v; +                v = list_entry(p, struct val_entry, next); +                dht_kv_republish_value(key, v, &now); +        } + +        pthread_cleanup_pop(false); +        pthread_cleanup_pop(false); + +        /* removes non-replicated items from the list */ +        dht_kv_update_replication_times(key, repl, rebl, &now); + +        if (list_is_empty(repl) && list_is_empty(rebl)) +                return; + +        log_warn(KEY_FMT " Failed to update replication times.", KEY_VAL(key)); +} + +static void dht_kv_replicate(void) +{ +        struct list_head repl; /* list of values to replicate       */ +        struct list_head rebl; /* list of local values to republish */ +        uint8_t *        key; + +        key = dht_dup_key(dht.id.data); /* dist == 0 */ +        if (key == NULL) { +                log_err("Replicate: Failed to duplicate DHT ID."); +                return; +        } + +        list_head_init(&repl); +        list_head_init(&rebl); + +        pthread_cleanup_push(free, key); + +        while (dht_kv_next_values(key, &repl, &rebl) == 0) { +                dht_kv_replicate_values(key, &repl, &rebl); +                if (!list_is_empty(&repl)) { +                        log_warn(KEY_FMT " Replication items left.", +                                 KEY_VAL(key)); +                        value_list_destroy(&repl); +                } + +                if (!list_is_empty(&rebl)) { +                        log_warn(KEY_FMT " Republish items left.", +                                 KEY_VAL(key)); +                        value_list_destroy(&rebl); +                } +        } + +        pthread_cleanup_pop(true); +} + +static void dht_kv_refresh_contacts(void) +{ +        struct list_head * p; +        struct list_head * h; +        struct list_head   rl; /* refresh list */ +        struct timespec    now; + +        list_head_init(&rl); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        pthread_rwlock_rdlock(&dht.db.lock); + +        __dht_kv_bucket_refresh_list(dht.db.contacts.root, now.tv_sec, &rl); + +        pthread_rwlock_unlock(&dht.db.lock); + +        list_for_each_safe(p, h, &rl) { +                struct contact * c; +                c = list_entry(p, struct contact, next); +                log_dbg(PEER_FMT " Refreshing contact.", +                        PEER_VAL(c->id, c->addr)); +                dht_kv_query_contacts(c->id, NULL); +                list_del(&c->next); +                contact_destroy(c); +        } + +        assert(list_is_empty(&rl)); +} + +static void (*tasks[])(void) = { +        dht_kv_check_contacts, +        dht_kv_remove_expired_entries, +        dht_kv_remove_expired_reqs, +        dht_kv_replicate, +        dht_kv_refresh_contacts, +        NULL +}; + +static void * work(void * o) +{ +        struct timespec now = TIMESPEC_INIT_MS(1); +        time_t          intv; +        size_t          n; /* number of tasks */ + +        n = 
sizeof(tasks) / sizeof(tasks[0]) - 1; /* last is NULL */
+
+        (void) o;
+
+        while (dht_kv_seed_bootstrap_peer() == -EAGAIN) {
+                ts_add(&now, &now, &now); /* exponential backoff */
+                if (now.tv_sec > 1)       /* cap at 1 second     */
+                        now.tv_sec = 1;
+                nanosleep(&now, NULL);
+        }
+
+        intv = gcd(dht.t_expire, (dht.t_expire - DHT_N_REPUB * dht.t_repl));
+        intv = gcd(intv, gcd(dht.t_repl, dht.t_refresh)) / 2;
+        intv = MAX(1, intv / n);
+
+        log_dbg("DHT worker starting, full task cycle is %ld seconds.",
+                (long) (intv * n));
+
+        while (true) {
+                int i = 0;
+                while (tasks[i] != NULL) {
+                        tasks[i++]();
+                        sleep(intv);
+                }
+        }
+
+        return (void *) 0;
+}
+
+int dht_start(void)
+{
+        dht.state = DHT_RUNNING;
+
+        if (tpm_start(dht.tpm))
+                goto fail_tpm_start;
+
+#ifndef __DHT_TEST__
+        if (pthread_create(&dht.worker, NULL, work, NULL)) {
+                log_err("Failed to create DHT worker thread.");
+                goto fail_worker;
+        }
+
+        dht.eid = dt_reg_comp(&dht, &dht_post_packet, DHT);
+        if ((int) dht.eid < 0) {
+                log_err("Failed to register DHT component.");
+                goto fail_reg;
+        }
+#else
+        (void) work;
+#endif
+        return 0;
+#ifndef __DHT_TEST__
+ fail_reg:
+        pthread_cancel(dht.worker);
+        pthread_join(dht.worker, NULL);
+ fail_worker:
+        tpm_stop(dht.tpm);
+#endif
+ fail_tpm_start:
+        dht.state = DHT_INIT;
+        return -1;
+}
+
+void dht_stop(void)
+{
+        assert(dht.state == DHT_RUNNING);
+
+#ifndef __DHT_TEST__
+        dt_unreg_comp(dht.eid);
+
+        pthread_cancel(dht.worker);
+        pthread_join(dht.worker, NULL);
+#endif
+        tpm_stop(dht.tpm);
+
+        dht.state = DHT_INIT;
+}
+
+int dht_init(struct dir_dht_config * conf)
+{
+        struct timespec now;
+        pthread_condattr_t cattr;
+
+        assert(conf != NULL);
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+#ifndef __DHT_TEST__
+        dht.id.len    = ipcp_dir_hash_len();
+        dht.addr      = addr_auth_address();
+#else
+        dht.id.len    = DHT_TEST_KEY_LEN;
+        dht.addr      = DHT_TEST_ADDR;
+#endif
+        dht.t0        = now.tv_sec;
+        dht.alpha     = conf->params.alpha;
+        dht.k         = conf->params.k;
+        dht.t_expire  = conf->params.t_expire;
+        dht.t_refresh = conf->params.t_refresh;
+        dht.t_repl    = conf->params.t_replicate;
+        dht.peer      = conf->peer;
+
+        dht.magic = generate_cookie();
+
+        /* Send my address on enrollment */
+        conf->peer    = dht.addr;
+
+        dht.id.data = generate_id();
+        if (dht.id.data == NULL) {
+                log_err("Failed to create DHT ID.");
+                goto fail_id;
+        }
+
+        list_head_init(&dht.cmds.list);
+
+        if (pthread_mutex_init(&dht.cmds.mtx, NULL)) {
+                log_err("Failed to initialize command mutex.");
+                goto fail_cmds_mutex;
+        }
+
+        if (pthread_cond_init(&dht.cmds.cond, NULL)) {
+                log_err("Failed to initialize command condvar.");
+                goto fail_cmds_cond;
+        }
+
+        list_head_init(&dht.reqs.list);
+        dht.reqs.len = 0;
+
+        if (pthread_mutex_init(&dht.reqs.mtx, NULL)) {
+                log_err("Failed to initialize request 
mutex."); +                goto fail_reqs_mutex; +        } + +        if (pthread_condattr_init(&cattr)) { +                log_err("Failed to initialize request condvar attributes."); +                goto fail_cattr; +        } +#ifndef __APPLE__ +        if (pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK)) { +                log_err("Failed to set request condvar clock."); +                goto fail_cattr; +        } +#endif +        if (pthread_cond_init(&dht.reqs.cond, &cattr)) { +                log_err("Failed to initialize request condvar."); +                goto fail_reqs_cond; +        } + +        list_head_init(&dht.db.kv.list); +        dht.db.kv.len   = 0; +        dht.db.kv.vals  = 0; +        dht.db.kv.lvals = 0; + +        if (pthread_rwlock_init(&dht.db.lock, NULL)) { +                log_err("Failed to initialize store rwlock."); +                goto fail_rwlock; +        } + +        dht.db.contacts.root = bucket_create(); +        if (dht.db.contacts.root == NULL) { +                log_err("Failed to create DHT buckets."); +                goto fail_buckets; +        } + +        if (rib_reg(DHT, &r_ops) < 0) { +                log_err("Failed to register DHT RIB operations."); +                goto fail_rib_reg; +        } + +        dht.tpm = tpm_create(2, 1, dht_handle_packet, NULL); +        if (dht.tpm == NULL) { +                log_err("Failed to create TPM for DHT."); +                goto fail_tpm_create; +        } + +        if (dht_kv_update_contacts(dht.id.data, dht.addr) < 0) +                log_warn("Failed to update contacts with DHT ID."); + +        pthread_condattr_destroy(&cattr); +#ifndef __DHT_TEST__ +        log_info("DHT initialized."); +        log_dbg("  ID: " HASH_FMT64 " [%zu bytes].", +                HASH_VAL64(dht.id.data), dht.id.len); +        log_dbg("  address: " ADDR_FMT32 ".", ADDR_VAL32(&dht.addr)); +        log_dbg("  peer: " ADDR_FMT32 ".", ADDR_VAL32(&dht.peer)); +        log_dbg("  magic cookie: " HASH_FMT64 ".", HASH_VAL64(&dht.magic)); +        log_info("  parameters: alpha=%u, k=%zu, t_expire=%ld, " +                "t_refresh=%ld, t_replicate=%ld.", +                dht.alpha, dht.k, dht.t_expire, dht.t_refresh, dht.t_repl); +#endif +        dht.state = DHT_INIT; + +        return 0; + + fail_tpm_create: +        rib_unreg(DHT); + fail_rib_reg: +        bucket_destroy(dht.db.contacts.root); + fail_buckets: +        pthread_rwlock_destroy(&dht.db.lock); + fail_rwlock: +        pthread_cond_destroy(&dht.reqs.cond); + fail_reqs_cond: +        pthread_condattr_destroy(&cattr); + fail_cattr: +        pthread_mutex_destroy(&dht.reqs.mtx); + fail_reqs_mutex: +        pthread_cond_destroy(&dht.cmds.cond); + fail_cmds_cond: +        pthread_mutex_destroy(&dht.cmds.mtx); + fail_cmds_mutex: +        freebuf(dht.id); + fail_id: +        return -1; +} + +void dht_fini(void) +{ +        struct list_head * p; +        struct list_head * h; + +        rib_unreg(DHT); + +        tpm_destroy(dht.tpm); + +        pthread_mutex_lock(&dht.cmds.mtx); + +        list_for_each_safe(p, h, &dht.cmds.list) { +                struct cmd * c = list_entry(p, struct cmd, next); +                list_del(&c->next); +                freebuf(c->cbuf); +                free(c); +        } + +        pthread_mutex_unlock(&dht.cmds.mtx); + +        pthread_cond_destroy(&dht.cmds.cond); +        pthread_mutex_destroy(&dht.cmds.mtx); + +        pthread_mutex_lock(&dht.reqs.mtx); + +        list_for_each_safe(p, h, &dht.reqs.list) { +                
struct dht_req * r = list_entry(p, struct dht_req, next); +                list_del(&r->next); +                dht_req_destroy(r); +                dht.reqs.len--; +        } + +        pthread_mutex_unlock(&dht.reqs.mtx); + +        pthread_cond_destroy(&dht.reqs.cond); +        pthread_mutex_destroy(&dht.reqs.mtx); + +        pthread_rwlock_wrlock(&dht.db.lock); + +        list_for_each_safe(p, h, &dht.db.kv.list) { +                struct dht_entry * e = list_entry(p, struct dht_entry, next); +                list_del(&e->next); +                dht_entry_destroy(e); +                dht.db.kv.len--; +        } + +        if (dht.db.contacts.root != NULL) +                bucket_destroy(dht.db.contacts.root); + +        pthread_rwlock_unlock(&dht.db.lock); + +        pthread_rwlock_destroy(&dht.db.lock); + +        assert(dht.db.kv.len == 0); +        assert(dht.db.kv.vals == 0); +        assert(dht.db.kv.lvals == 0); +        assert(dht.reqs.len == 0); + +        freebuf(dht.id); +} diff --git a/src/ipcpd/unicast/dht.h b/src/ipcpd/unicast/dir/dht.h index df394714..852a5130 100644 --- a/src/ipcpd/unicast/dht.h +++ b/src/ipcpd/unicast/dir/dht.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Distributed Hash Table based on Kademlia   * @@ -20,33 +20,30 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   */ -#ifndef OUROBOROS_IPCPD_UNICAST_DHT_H -#define OUROBOROS_IPCPD_UNICAST_DHT_H +#ifndef OUROBOROS_IPCPD_UNICAST_DIR_DHT_H +#define OUROBOROS_IPCPD_UNICAST_DIR_DHT_H  #include <ouroboros/ipcp-dev.h> +#include "ops.h" +  #include <stdint.h>  #include <sys/types.h> -struct dht; +int      dht_init(struct dir_dht_config * conf); -struct dht * dht_create(uint64_t addr); +void     dht_fini(void); -int          dht_bootstrap(struct dht * dht, -                           size_t       b, -                           time_t       t_expire); +int      dht_start(void); -void         dht_destroy(struct dht * dht); +void     dht_stop(void); -int          dht_reg(struct dht *    dht, -                     const uint8_t * key); +int      dht_reg(const uint8_t * key); -int          dht_unreg(struct dht *    dht, -                       const uint8_t * key); +int      dht_unreg(const uint8_t * key); -uint64_t     dht_query(struct dht *    dht, -                       const uint8_t * key); +uint64_t dht_query(const uint8_t * key); -int          dht_wait_running(struct dht * dht); +extern struct dir_ops dht_dir_ops; -#endif /* OUROBOROS_IPCPD_UNICAST_DHT_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_DIR_DHT_H */ diff --git a/src/ipcpd/unicast/dir/dht.proto b/src/ipcpd/unicast/dir/dht.proto new file mode 100644 index 00000000..ea74805f --- /dev/null +++ b/src/ipcpd/unicast/dir/dht.proto @@ -0,0 +1,58 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * DHT protocol, based on Kademlia + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +syntax = "proto2"; + +message dht_contact_msg { +        required bytes  id   = 1; +        required uint64 addr = 2; +} + +message dht_find_req_msg { +        required uint64 cookie = 1; +        required bytes key     = 2; +} + +message dht_find_node_rsp_msg { +        required uint64          cookie   = 1; +        required bytes           key      = 2; +        repeated dht_contact_msg contacts = 3; +} + +message dht_find_value_rsp_msg { +        repeated bytes values = 1; +} + +message dht_store_msg { +        required bytes  key = 1; +        required bytes  val = 2; +        required uint32 exp = 3; +} + +message dht_msg { +        required uint32                 code  = 1; +        required dht_contact_msg        src   = 2; +        optional dht_store_msg          store = 3; +        optional dht_find_req_msg       find  = 4; +        optional dht_find_node_rsp_msg  node  = 5; +        optional dht_find_value_rsp_msg val   = 6; +} diff --git a/src/ipcpd/unicast/dir/ops.h b/src/ipcpd/unicast/dir/ops.h new file mode 100644 index 00000000..8c6e5eb5 --- /dev/null +++ b/src/ipcpd/unicast/dir/ops.h @@ -0,0 +1,42 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Directory policy ops + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#ifndef OUROBOROS_IPCPD_UNICAST_DIR_OPS_H +#define OUROBOROS_IPCPD_UNICAST_DIR_OPS_H + +struct dir_ops { +        int      (* init)(void * config); + +        void     (* fini)(void); + +        int      (* start)(void); + +        void     (* stop)(void); + +        int      (* reg)(const uint8_t * hash); + +        int      (* unreg)(const uint8_t * hash); + +        uint64_t (* query)(const uint8_t * hash); +}; + +#endif /* OUROBOROS_IPCPD_UNICAST_DIR_OPS_H */ diff --git a/src/ipcpd/unicast/dir/pol.h b/src/ipcpd/unicast/dir/pol.h new file mode 100644 index 00000000..eae4b2e7 --- /dev/null +++ b/src/ipcpd/unicast/dir/pol.h @@ -0,0 +1,23 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Directory policies + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#include "dht.h" diff --git a/src/ipcpd/unicast/dir/tests/CMakeLists.txt b/src/ipcpd/unicast/dir/tests/CMakeLists.txt new file mode 100644 index 00000000..f62ed993 --- /dev/null +++ b/src/ipcpd/unicast/dir/tests/CMakeLists.txt @@ -0,0 +1,40 @@ +get_filename_component(CURRENT_SOURCE_PARENT_DIR +  ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(CURRENT_BINARY_PARENT_DIR +  ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +include_directories(${CURRENT_SOURCE_PARENT_DIR}) +include_directories(${CURRENT_BINARY_PARENT_DIR}) + +include_directories(${CMAKE_SOURCE_DIR}/include) +include_directories(${CMAKE_BINARY_DIR}/include) + +get_filename_component(PARENT_PATH ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(PARENT_DIR ${PARENT_PATH} NAME) + +create_test_sourcelist(${PARENT_DIR}_tests test_suite.c +  # Add new tests here +  dht_test.c +  ) + +protobuf_generate_c(DHT_PROTO_SRCS KAD_PROTO_HDRS ../dht.proto) +add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests} +  ${DHT_PROTO_SRCS}) +target_link_libraries(${PARENT_DIR}_test ouroboros-common) + +add_dependencies(check ${PARENT_DIR}_test) + +set(tests_to_run ${${PARENT_DIR}_tests}) +if(CMAKE_VERSION VERSION_LESS "3.29.0") +  remove(tests_to_run test_suite.c) +else () +  list(POP_FRONT tests_to_run) +endif() + +foreach (test ${tests_to_run}) +  get_filename_component(test_name ${test} NAME_WE) +  add_test(${test_name} ${C_TEST_PATH}/${PARENT_DIR}_test ${test_name}) +endforeach (test) diff --git a/src/ipcpd/unicast/dir/tests/dht_test.c b/src/ipcpd/unicast/dir/tests/dht_test.c new file mode 100644 index 00000000..cb6b0f9f --- /dev/null +++ b/src/ipcpd/unicast/dir/tests/dht_test.c @@ -0,0 +1,1925 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Unit tests of the DHT + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */
+
+#define __DHT_TEST__
+
+#if defined(__linux__) || defined(__CYGWIN__)
+#define _DEFAULT_SOURCE
+#else
+#define _POSIX_C_SOURCE 200112L
+#endif
+
+#include <ouroboros/test.h>
+#include <ouroboros/list.h>
+#include <ouroboros/utils.h>
+
+#include "dht.pb-c.h"
+
+#include <assert.h>
+#include <inttypes.h>
+#include <time.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define DHT_MAX_RAND_SIZE 64
+#define DHT_TEST_KEY_LEN  32
+#define DHT_TEST_ADDR     0x1234567890abcdefULL
+
+/* Packet sink for DHT tests, used by the dht code included below */
+struct {
+        bool   enabled;
+
+        struct list_head list;
+        size_t len;
+} sink;
+
+struct message {
+        struct   list_head next;
+        void *   msg;
+        uint64_t dst;
+};
+
+static int sink_send_msg(buffer_t * pkt,
+                         uint64_t  addr)
+{
+        struct message *   m;
+
+        assert(pkt  != NULL);
+        assert(addr != 0);
+
+        assert(!list_is_empty(&sink.list) || sink.len == 0);
+
+        if (!sink.enabled)
+                goto finish;
+
+        m = malloc(sizeof(*m));
+        if (m == NULL) {
+                printf("Failed to malloc message.\n");
+                goto fail_malloc;
+        }
+
+        m->msg = dht_msg__unpack(NULL, pkt->len, pkt->data);
+        if (m->msg == NULL)
+                goto fail_unpack;
+
+        m->dst = addr;
+
+        list_add_tail(&m->next, &sink.list);
+
+        ++sink.len;
+ finish:
+        freebuf(*pkt);
+
+        return 0;
+ fail_unpack:
+        free(m);
+ fail_malloc:
+        freebuf(*pkt);
+        return -1;
+}
+
+#include "dht.c"
+
+/* Test helpers */
+
+static void sink_init(void)
+{
+        list_head_init(&sink.list);
+        sink.len = 0;
+        sink.enabled = true;
+}
+
+static void sink_clear(void)
+{
+        struct list_head * p;
+        struct list_head * h;
+
+        list_for_each_safe(p, h, &sink.list) {
+                struct message * m = list_entry(p, struct message, next);
+                list_del(&m->next);
+                dht_msg__free_unpacked((dht_msg_t *) m->msg, NULL);
+                free(m);
+                --sink.len;
+        }
+
+        assert(list_is_empty(&sink.list));
+}
+
+static void sink_fini(void)
+{
+        sink_clear();
+
+        assert(list_is_empty(&sink.list) && sink.len == 0);
+}
+
+static dht_msg_t * sink_read(void)
+{
+        struct message * m;
+        dht_msg_t *      msg;
+
+        assert(!list_is_empty(&sink.list) || sink.len == 0);
+
+        if (list_is_empty(&sink.list))
+                return NULL;
+
+        m = list_first_entry(&sink.list, struct message, next);
+
+        --sink.len;
+
+        list_del(&m->next);
+
+        msg = m->msg;
+
+        free(m);
+
+        return (dht_msg_t *) msg;
+}
+
+static const buffer_t test_val = {
+        .data = (uint8_t *) "test_value",
+        .len = 10
+};
+
+static const buffer_t test_val2 = {
+        .data = (uint8_t *) "test_value_2",
+        .len = 12
+};
+
+static int random_value_len(buffer_t * b)
+{
+        assert(b != NULL);
+        assert(b->len > 0 && b->len <= DHT_MAX_RAND_SIZE);
+
+        b->data = malloc(b->len);
+        if (b->data == NULL)
+                goto fail_malloc;
+
+        random_buffer(b->data, b->len);
+
+        return 0;
+
+ fail_malloc:
+        return -ENOMEM;
+}
+
+static int random_value(buffer_t * b)
+{
+        assert(b != NULL);
+
+        b->len = rand() % DHT_MAX_RAND_SIZE + 1;
+
+        return random_value_len(b);
+}
+
+static int fill_dht_with_contacts(size_t 
+static int fill_dht_with_contacts(size_t n)
+{
+        size_t    i;
+        uint8_t * id;
+
+        for (i = 0; i < n; i++) {
+                uint64_t addr = generate_cookie();
+                id = generate_id();
+                if (id == NULL)
+                        goto fail_id;
+
+                if (dht_kv_update_contacts(id, addr) < 0)
+                        goto fail_update;
+                free(id);
+        }
+
+        return 0;
+
+ fail_update:
+        free(id);
+ fail_id:
+        return -1;
+}
+
+static int fill_store_with_random_values(const uint8_t * key,
+                                         size_t          len,
+                                         size_t          n_values)
+{
+        buffer_t        val;
+        struct timespec now;
+        size_t          i;
+        uint8_t *       _key;
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        for (i = 0; i < n_values; ++i) {
+                if (key != NULL)
+                        _key = (uint8_t *) key;
+                else {
+                        _key = generate_id();
+                        if (_key == NULL)
+                                goto fail_key;
+                }
+
+                if (len == 0)
+                        val.len = rand() % DHT_MAX_RAND_SIZE + 1;
+                else
+                        val.len = len;
+
+                if (random_value_len(&val) < 0)
+                        goto fail_value;
+
+                if (dht_kv_store(_key, val, now.tv_sec + 10) < 0)
+                        goto fail_store;
+
+                freebuf(val);
+                if (key == NULL)
+                        free(_key);
+        }
+
+        return 0;
+
+ fail_store:
+        freebuf(val);
+ fail_value:
+        if (key == NULL) /* never free the caller's key */
+                free(_key);
+ fail_key:
+        return -1;
+}
+
+static int random_contact_list(dht_contact_msg_t *** contacts,
+                               size_t                max)
+{
+        size_t i;
+
+        assert(contacts != NULL);
+        assert(max > 0);
+
+        *contacts = malloc(max * sizeof(**contacts));
+        if (*contacts == NULL)
+                goto fail_malloc;
+
+        for (i = 0; i < max; i++) {
+                (*contacts)[i] = malloc(sizeof(*(*contacts)[i]));
+                if ((*contacts)[i] == NULL)
+                        goto fail_contacts;
+
+                dht_contact_msg__init((*contacts)[i]);
+
+                (*contacts)[i]->id.data = generate_id();
+                if ((*contacts)[i]->id.data == NULL)
+                        goto fail_contact;
+
+                (*contacts)[i]->id.len = dht.id.len;
+                (*contacts)[i]->addr = generate_cookie();
+        }
+
+        return 0;
+
+ fail_contact:
+        dht_contact_msg__free_unpacked((*contacts)[i], NULL);
+ fail_contacts:
+        while (i-- > 0)
+                dht_contact_msg__free_unpacked((*contacts)[i], NULL);
+        free(*contacts);
+ fail_malloc:
+        return -ENOMEM;
+}
+
+static void clear_contacts(dht_contact_msg_t ** contacts,
+                           size_t               len)
+{
+        size_t i;
+
+        if (contacts == NULL)
+                return;
+
+        for (i = 0; i < len; ++i)
+                dht_contact_msg__free_unpacked(contacts[i], NULL);
+
+        free(contacts);
+}
+
+/* Start of actual tests */
+static struct dir_dht_config test_dht_config = {
+        .params = {
+                .alpha       = 3,
+                .k           = 8,
+                .t_expire    = 86400,
+                .t_refresh   = 900,
    .t_replicate = 900 +        } +}; + +static int test_dht_init_fini(void) +{ +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_start_stop(void) +{ +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        if (dht_start() < 0) { +                printf("Failed to start dht.\n"); +                goto fail_start; +        } + +        dht_stop(); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_start: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_val_entry_create_destroy(void) +{ +        struct val_entry * e; +        struct timespec    now; + +        TEST_START(); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = val_entry_create(test_val, now.tv_sec + 10); +        if (e == NULL) { +                printf("Failed to create val entry.\n"); +                goto fail_entry; +        } + +        val_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_entry_create_destroy(void) +{ +        struct dht_entry * e; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = dht_entry_create(dht.id.data); +        if (e == NULL) { +                printf("Failed to create dht entry.\n"); +                goto fail_entry; +        } + +        dht_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_entry_update_get_val(void) +{ +        struct dht_entry * e; +        struct val_entry * v; +        struct timespec    now; + +        TEST_START(); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = dht_entry_create(dht.id.data); +        if (e == NULL) { +                printf("Failed to create dht entry.\n"); +                goto fail_entry; +        } + +        if (dht_entry_get_val(e, test_val) != NULL) { +                printf("Found value in empty dht entry.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_val(e, test_val, now.tv_sec + 10) < 0) { +                printf("Failed to update dht entry value.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_val(e, test_val2) != NULL) { +                printf("Found value in dht entry with different key.\n"); +                goto fail_get; +        } + +        v = dht_entry_get_val(e, test_val); +        if (v == NULL) { +                printf("Failed to get value from dht 
entry.\n"); +                goto fail_get; +        } + +        if (v->val.len != test_val.len) { +                printf("Length in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if(memcmp(v->val.data, test_val.data, test_val.len) != 0) { +                printf("Data in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_val(e, test_val, now.tv_sec + 15) < 0) { +                printf("Failed to update exsting dht entry value.\n"); +                goto fail_get; +        } + +        if (v->t_exp != now.tv_sec + 15) { +                printf("Expiration time in dht entry value not updated.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_val(e, test_val, now.tv_sec + 5) < 0) { +                printf("Failed to update existing dht entry value (5).\n"); +                goto fail_get; +        } + +        if (v->t_exp != now.tv_sec + 15) { +                printf("Expiration time in dht entry shortened.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_val(e, test_val) != v) { +                printf("Wrong value in dht entry found after update.\n"); +                goto fail_get; +        } + +        dht_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_get: +        dht_entry_destroy(e); + fail_entry: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_entry_update_get_lval(void) +{ +        struct dht_entry * e; +        struct val_entry * v; +        struct timespec    now; + +        TEST_START(); + +        clock_gettime(CLOCK_REALTIME_COARSE, &now); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        e = dht_entry_create(dht.id.data); +        if (e == NULL) { +                printf("Failed to create dht entry.\n"); +                goto fail_entry; +        } + +        if (dht_entry_get_lval(e, test_val) != NULL) { +                printf("Found value in empty dht entry.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_lval(e, test_val) < 0) { +                printf("Failed to update dht entry value.\n"); +                goto fail_get; +        } + +        v = dht_entry_get_lval(e, test_val); +        if (v== NULL) { +                printf("Failed to get value from dht entry.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_lval(e, test_val2) != NULL) { +                printf("Found value in dht entry in vals.\n"); +                goto fail_get; +        } + +        if (v->val.len != test_val.len) { +                printf("Length in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if(memcmp(v->val.data, test_val.data, test_val.len) != 0) { +                printf("Data in dht entry does not match expected.\n"); +                goto fail_get; +        } + +        if (dht_entry_update_lval(e, test_val) < 0) { +                printf("Failed to update existing dht entry value.\n"); +                goto fail_get; +        } + +        if (dht_entry_get_lval(e, test_val) != v) { +                printf("Wrong value in dht entry found after update.\n"); +                goto fail_get; +        } + +        dht_entry_destroy(e); + +        dht_fini(); + +        TEST_SUCCESS(); + 
+        return TEST_RC_SUCCESS;
+
+ fail_get:
+        dht_entry_destroy(e);
+ fail_entry:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_contact_create_destroy(void)
+{
+        struct contact * c;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        c = contact_create(dht.id.data, dht.addr);
+        if (c == NULL) {
+                printf("Failed to create contact.\n");
+                goto fail_contact;
+        }
+
+        contact_destroy(c);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_contact:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_update_bucket(void)
+{
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(1000) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_update;
+        }
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_update:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_contact_list(void)
+{
+        struct list_head cl;
+        ssize_t          len;
+        ssize_t          items;
+
+        TEST_START();
+
+        list_head_init(&cl);
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        items = 5;
+
+        if (fill_dht_with_contacts(items) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_fill;
+        }
+
+        len = dht_kv_contact_list(dht.id.data, &cl, dht.k);
+        if (len < 0) {
+                printf("Failed to get contact list.\n");
+                goto fail_fill;
+        }
+
+        if (len != items) {
+                printf("Failed to get contacts (%zd != %zd).\n", len, items);
+                goto fail_contact_list;
+        }
+
+        contact_list_destroy(&cl);
+
+        items = 100;
+
+        if (fill_dht_with_contacts(items) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_fill;
+        }
+
+        len = dht_kv_contact_list(dht.id.data, &cl, items);
+        if (len < 0) {
+                printf("Failed to get contact list.\n");
+                goto fail_fill;
+        }
+
+        if ((size_t) len < dht.k) {
+                printf("Failed to get contacts (%zd < %zu).\n", len, dht.k);
+                goto fail_contact_list;
+        }
+
+        contact_list_destroy(&cl);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_contact_list:
+        contact_list_destroy(&cl);
+ fail_fill:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_get_values(void)
+{
+        buffer_t * vals;
+        ssize_t    len;
+        size_t     n = sizeof(uint64_t);
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
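+        /* Store 3 values under one key; retrieval should return all 3. */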
     printf("Failed to fill store with random values.\n"); +                goto fail_fill; +        } + +        len = dht_kv_retrieve(dht.id.data, &vals); +        if (len < 0) { +                printf("Failed to get values from store.\n"); +                goto fail_fill; +        } + +        if (len != 3) { +                printf("Failed to get %ld values (%zu).\n", 3L, len); +                goto fail_get_values; +        } + +        freebufs(vals, len); + +        if (fill_store_with_random_values(dht.id.data, n, 20) < 0) { +                printf("Failed to fill store with random values.\n"); +                goto fail_fill; +        } + +        len = dht_kv_retrieve(dht.id.data, &vals); +        if (len < 0) { +                printf("Failed to get values from store.\n"); +                goto fail_fill; +        } + +        if (len != DHT_MAX_VALS) { +                printf("Failed to get %d values.\n", DHT_MAX_VALS); +                goto fail_get_values; +        } + +        freebufs(vals, len); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_get_values: +        freebufs(vals, len); + fail_fill: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_node_req_msg(void) +{ +        dht_msg_t * msg; +        dht_msg_t * upk; +        size_t      len; +        uint8_t *   buf; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                goto fail_init; +        } + +        msg = dht_kv_find_node_req_msg(dht.id.data); +        if (msg == NULL) { +                printf("Failed to get find node request message.\n"); +                goto fail_msg; +        } + +        if (msg->code != DHT_FIND_NODE_REQ) { +                printf("Wrong code in find_node_req message (%s != %s).\n", +                        dht_code_str[msg->code], +                        dht_code_str[DHT_FIND_NODE_REQ]); +                goto fail_msg; +        } + +        len = dht_msg__get_packed_size(msg); +        if (len == 0) { +                printf("Failed to get packed length of find_node_req.\n"); +                goto fail_msg; +        } + +        buf = malloc(len); +        if (buf == NULL) { +                printf("Failed to malloc find_node_req buf.\n"); +                goto fail_msg; +        } + +        if (dht_msg__pack(msg, buf) != len) { +                printf("Failed to pack find_node_req message.\n"); +                goto fail_pack; +        } + +        upk = dht_msg__unpack(NULL, len, buf); +        if (upk == NULL) { +                printf("Failed to unpack find_value_req message.\n"); +                goto fail_unpack; +        } + +        free(buf); +        dht_msg__free_unpacked(msg, NULL); +        dht_msg__free_unpacked(upk, NULL); + +        dht_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + + fail_unpack: +        dht_msg__free_unpacked(msg, NULL); + fail_pack: +        free(buf); + fail_msg: +        dht_fini(); + fail_init: +        TEST_FAIL(); +        return TEST_RC_FAIL; +} + +static int test_dht_kv_find_node_rsp_msg(void) +{ +        dht_contact_msg_t ** contacts; +        dht_msg_t *          msg; +        dht_msg_t *          upk; +        size_t               len; +        uint8_t *            buf; + +        TEST_START(); + +        if (dht_init(&test_dht_config) < 0) { +                printf("Failed to create dht.\n"); +                
+                goto fail_init;
+        }
+
+        msg = dht_kv_find_node_rsp_msg(dht.id.data, 0, &contacts, 0);
+        if (msg == NULL) {
+                printf("Failed to get find node response message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_FIND_NODE_RSP) {
+                printf("Wrong code in find_node_rsp message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_FIND_NODE_RSP]);
+                goto fail_buf;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_node_rsp.\n");
+                goto fail_buf;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_node_rsp buf.\n");
+                goto fail_buf;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_node_rsp message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_node_rsp message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_buf:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_node_rsp_msg_contacts(void)
+{
+        dht_contact_msg_t ** contacts;
+        dht_msg_t *          msg;
+        dht_msg_t *          upk;
+        uint8_t *            buf;
+        size_t               len;
+        ssize_t              n;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(100) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_fill;
+        }
+
+        n = dht_kv_get_contacts(dht.id.data, &contacts);
+        if (n < 0) {
+                printf("Failed to get contacts.\n");
+                goto fail_fill;
+        }
+
+        if ((size_t) n < dht.k) {
+                printf("Failed to get enough contacts (%zd < %zu).\n",
+                       n, dht.k);
+                goto fail_msg;
+        }
+
+        msg = dht_kv_find_node_rsp_msg(dht.id.data, 0, &contacts, n);
+        if (msg == NULL) {
+                printf("Failed to build find node response message.\n");
+                goto fail_msg;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_node_rsp.\n");
+                goto fail_buf;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_node_rsp buf.\n");
+                goto fail_buf;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_node_rsp message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_node_rsp message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        free(contacts); /* the contacts themselves were consumed by msg */
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_buf:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        clear_contacts(contacts, n);
+ fail_fill:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_req_msg(void)
+{
+        dht_msg_t * msg;
+        dht_msg_t * upk;
+        size_t      len;
+        uint8_t *   buf;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        msg = dht_kv_find_value_req_msg(dht.id.data);
+        if (msg == NULL) {
+                printf("Failed to build find value request message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_FIND_VALUE_REQ) {
+                printf("Wrong code in find_value_req message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_FIND_VALUE_REQ]);
+                goto fail_buf;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_req.\n");
+                goto fail_buf;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_req buf.\n");
+                goto fail_buf;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_value_req message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_value_req message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_buf:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg(void)
+{
+        dht_msg_t * msg;
+        dht_msg_t * upk;
+        size_t      len;
+        uint8_t *   buf;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        msg = dht_kv_find_value_rsp_msg(dht.id.data, 0, NULL, 0, NULL, 0);
+        if (msg == NULL) {
+                printf("Failed to build find value response message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_FIND_VALUE_RSP) {
+                printf("Wrong code in find_value_rsp message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_FIND_VALUE_RSP]);
+                goto fail_buf;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_rsp.\n");
+                goto fail_buf;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_rsp buf.\n");
+                goto fail_buf;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_buf:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg_contacts(void)
+{
+        dht_msg_t *          msg;
+        dht_msg_t *          upk;
+        size_t               len;
+        uint8_t *            buf;
+        dht_contact_msg_t ** contacts;
+        ssize_t              n;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(100) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_fill;
+        }
+
+        n = dht_kv_get_contacts(dht.id.data, &contacts);
+        if (n < 0) {
+                printf("Failed to get contacts.\n");
+                goto fail_fill;
+        }
+
+        if ((size_t) n < dht.k) {
+                printf("Failed to get enough contacts (%zd < %zu).\n",
+                       n, dht.k);
+                goto fail_msg;
+        }
+
+        msg = dht_kv_find_value_rsp_msg(dht.id.data, 0, &contacts, n, NULL, 0);
+        if (msg == NULL) {
+                printf("Failed to build find value response message.\n");
+                goto fail_msg;
+        }
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_rsp.\n");
+                goto fail_buf;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_rsp buf.\n");
+                goto fail_buf;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        free(contacts); /* the contacts themselves were consumed by msg */
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_buf:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        clear_contacts(contacts, n);
+ fail_fill:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_find_value_rsp_msg_values(void)
+{
+        dht_msg_t * msg;
+        dht_msg_t * upk;
+        size_t      len;
+        uint8_t *   buf;
+        buffer_t *  values;
+        size_t      i;
+        uint64_t    ck;
+
+        TEST_START();
+
+        ck = generate_cookie();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
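+        /* Build 8 random values; the rsp message takes ownership of them. */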
+        values = malloc(sizeof(*values) * 8);
+        if (values == NULL) {
+                printf("Failed to malloc values.\n");
+                goto fail_values;
+        }
+
+        for (i = 0; i < 8; i++) {
+                if (random_value(&values[i]) < 0) {
+                        printf("Failed to create random value.\n");
+                        goto fail_fill;
+                }
+        }
+
+        msg = dht_kv_find_value_rsp_msg(dht.id.data, ck, NULL, 0, &values, 8);
+        if (msg == NULL) {
+                printf("Failed to build find value response message.\n");
+                goto fail_msg;
+        }
+
+        values = NULL; /* msg owns the values now */
+
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed length of find_value_rsp.\n");
+                goto fail_buf;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc find_value_rsp buf.\n");
+                goto fail_buf;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        upk = dht_msg__unpack(NULL, len, buf);
+        if (upk == NULL) {
+                printf("Failed to unpack find_value_rsp message.\n");
+                goto fail_pack;
+        }
+
+        if (upk->code != DHT_FIND_VALUE_RSP) {
+                printf("Wrong code in find_value_rsp message (%s != %s).\n",
+                       dht_code_str[upk->code],
+                       dht_code_str[DHT_FIND_VALUE_RSP]);
+                goto fail_check;
+        }
+
+        if (upk->val == NULL) {
+                printf("No values in find_value_rsp message.\n");
+                goto fail_check;
+        }
+
+        if (upk->val->n_values != 8) {
+                printf("Wrong number of values in find_value_rsp (%zu != 8).\n",
+                       upk->val->n_values);
+                goto fail_check;
+        }
+
+        free(buf);
+        dht_msg__free_unpacked(msg, NULL);
+        dht_msg__free_unpacked(upk, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_check:
+        dht_msg__free_unpacked(upk, NULL);
+ fail_pack:
+        free(buf);
+ fail_buf:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+ fail_fill:
+        if (values != NULL) {
+                while (i-- > 0)
+                        freebuf(values[i]);
+                free(values);
+        }
+ fail_values:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_store_msg(void)
+{
+        dht_msg_t *     msg;
+        size_t          len;
+        uint8_t *       buf;
+        struct timespec now;
+
+        TEST_START();
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        msg = dht_kv_store_msg(dht.id.data, test_val, now.tv_sec + 10);
+        if (msg == NULL) {
+                printf("Failed to get store message.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_STORE) {
+                printf("Wrong code in store message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_STORE]);
+                goto fail_store_msg;
+        }
+
+        if (dht_kv_validate_msg(msg) < 0) {
+                printf("Failed to validate store message.\n");
+                goto fail_store_msg;
+        }
+
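+        /* The packed size must match what dht_msg__pack() writes. */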
+        len = dht_msg__get_packed_size(msg);
+        if (len == 0) {
+                printf("Failed to get packed msg length.\n");
+                goto fail_store_msg;
+        }
+
+        buf = malloc(len);
+        if (buf == NULL) {
+                printf("Failed to malloc store msg buf.\n");
+                goto fail_store_msg;
+        }
+
+        if (dht_msg__pack(msg, buf) != len) {
+                printf("Failed to pack store message.\n");
+                goto fail_pack;
+        }
+
+        free(buf);
+
+        dht_msg__free_unpacked(msg, NULL);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_pack:
+        free(buf);
+ fail_store_msg:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_kv_query_contacts_req_rsp(void)
+{
+        dht_msg_t *          req;
+        dht_msg_t *          rsp;
+        dht_contact_msg_t ** contacts;
+        size_t               len = 2;
+        uint8_t *            key;
+
+        TEST_START();
+
+        sink_init();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(1) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_prep;
+        }
+
+        key = generate_id();
+        if (key == NULL) {
+                printf("Failed to generate key.\n");
+                goto fail_prep;
+        }
+
+        if (dht_kv_query_contacts(key, NULL) < 0) {
+                printf("Failed to query contacts.\n");
+                goto fail_query;
+        }
+
+        req = sink_read();
+        if (req == NULL) {
+                printf("Failed to read request from sink.\n");
+                goto fail_query;
+        }
+
+        if (dht_kv_validate_msg(req) < 0) {
+                printf("Failed to validate find node req.\n");
+                goto fail_val_req;
+        }
+
+        if (random_contact_list(&contacts, len) < 0) {
+                printf("Failed to create random contact list.\n");
+                goto fail_val_req;
+        }
+
+        rsp = dht_kv_find_node_rsp_msg(key, req->find->cookie, &contacts, len);
+        if (rsp == NULL) {
+                printf("Failed to create find node response message.\n");
+                goto fail_rsp;
+        }
+
+        memcpy(rsp->src->id.data, dht.id.data, dht.id.len);
+        rsp->src->addr = generate_cookie();
+
+        if (dht_kv_validate_msg(rsp) < 0) {
+                printf("Failed to validate find node response message.\n");
+                goto fail_val_rsp;
+        }
+
+        do_dht_kv_find_node_rsp(rsp->node);
+
+        free(contacts); /* the contacts themselves were consumed by rsp */
+
+        dht_msg__free_unpacked(rsp, NULL);
+
+        free(key);
+
+        dht_msg__free_unpacked(req, NULL);
+
+        sink_fini();
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_val_rsp:
+        dht_msg__free_unpacked(rsp, NULL);
+ fail_rsp:
+        while (len-- > 0)
+                dht_contact_msg__free_unpacked(contacts[len], NULL);
+        free(contacts);
+ fail_val_req:
+        dht_msg__free_unpacked(req, NULL);
+ fail_query:
+        free(key);
+ fail_prep:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_req_create_destroy(void)
+{
+        struct dht_req * req;
+
+        TEST_START();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        req = dht_req_create(dht.id.data);
+        if (req == NULL) {
+                printf("Failed to create dht request.\n");
+                goto fail_req;
+        }
+
+        dht_req_destroy(req);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_req:
+        dht_fini();
+ fail_init:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_unreg(void)
+{
+        TEST_START();
+
+        sink_init();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (dht_reg(dht.id.data) < 0) {
+                printf("Failed to register own id.\n");
+                goto fail_reg;
+        }
+
+        if (sink.len != 0) {
+                printf("Packet sent without contacts!\n");
+                goto fail_msg;
+        }
+
+        if (dht_unreg(dht.id.data) < 0) {
+                printf("Failed to unregister own id.\n");
+                goto fail_msg;
+        }
+
+        dht_fini();
+
+        sink_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_msg:
+        dht_unreg(dht.id.data);
+ fail_reg:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_unreg_contacts(void)
+{
+        dht_msg_t * msg;
+
+        TEST_START();
+
+        sink_init();
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(4) < 0) {
+                printf("Failed to fill bucket with contacts.\n");
+                goto fail_reg;
+        }
+
+        if (dht_reg(dht.id.data) < 0) {
+                printf("Failed to register own id.\n");
+                goto fail_reg;
+        }
+
+        if (sink.len != dht.alpha) {
+                printf("Wrong number of packets sent.\n");
+                goto fail_msg;
+        }
+
+        msg = sink_read();
+        if (msg == NULL) {
+                printf("Failed to read message from sink.\n");
+                goto fail_msg;
+        }
+
+        if (msg->code != DHT_STORE) {
+                printf("Wrong code in dht reg message (%s != %s).\n",
+                       dht_code_str[msg->code],
+                       dht_code_str[DHT_STORE]);
+                goto fail_validation;
+        }
+
+        if (dht_kv_validate_msg(msg) < 0) {
+                printf("Failed to validate dht message.\n");
+                goto fail_validation;
+        }
+
+        if (dht_unreg(dht.id.data) < 0) {
+                printf("Failed to unregister own id.\n");
+                goto fail_validation;
+        }
+
+        dht_msg__free_unpacked(msg, NULL);
+
+        dht_fini();
+
+        sink_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_validation:
+        dht_msg__free_unpacked(msg, NULL);
+ fail_msg:
+        sink_clear();
+        dht_unreg(dht.id.data);
+ fail_reg:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_reg_query_local(void)
+{
+        struct timespec now;
+        buffer_t        test_addr;
+
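+        /* A value stored under our own id should be returned by dht_query. */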
+        TEST_START();
+
+        clock_gettime(CLOCK_REALTIME_COARSE, &now);
+
+        if (addr_to_buf(1234321, &test_addr) < 0) {
+                printf("Failed to convert test address to buffer.\n");
+                goto fail_buf;
+        }
+
+        if (dht_init(&test_dht_config) < 0) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (dht_reg(dht.id.data) < 0) {
+                printf("Failed to register own id.\n");
+                goto fail_reg;
+        }
+
+        if (dht_query(dht.id.data) == dht.addr) {
+                printf("Query for own id returned own address.\n");
+                goto fail_get;
+        }
+
+        if (dht_kv_store(dht.id.data, test_addr, now.tv_sec + 5) < 0) {
+                printf("Failed to publish value.\n");
+                goto fail_get;
+        }
+
+        if (dht_query(dht.id.data) != 1234321) {
+                printf("Failed to return remote addr.\n");
+                goto fail_get;
+        }
+
+        if (dht_unreg(dht.id.data) < 0) {
+                printf("Failed to unregister own id.\n");
+                goto fail_get;
+        }
+
+        freebuf(test_addr);
+
+        dht_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_get:
+        dht_unreg(dht.id.data);
+ fail_reg:
+        dht_fini();
+ fail_init:
+        freebuf(test_addr);
+ fail_buf:
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_query(void)
+{
+        uint8_t *             key;
+        struct dir_dht_config cfg;
+
+        TEST_START();
+
+        sink_init();
+
+        cfg = test_dht_config;
+        cfg.peer = generate_cookie();
+
+        if (dht_init(&cfg)) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        key = generate_id();
+        if (key == NULL) {
+                printf("Failed to generate key.\n");
+                goto fail_key;
+        }
+
+        if (dht_query(key) != INVALID_ADDR) {
+                printf("Unexpectedly got an address without contacts.\n");
+                goto fail_get;
+        }
+
+        if (sink.len != 0) {
+                printf("Packet sent without contacts!\n");
+                goto fail_test;
+        }
+
+        free(key);
+
+        dht_fini();
+
+        sink_fini();
+
+        TEST_SUCCESS();
+
+        return TEST_RC_SUCCESS;
+
+ fail_test:
+        sink_clear();
+ fail_get:
+        free(key);
+ fail_key:
+        dht_fini();
+ fail_init:
+        sink_fini();
+        TEST_FAIL();
+        return TEST_RC_FAIL;
+}
+
+static int test_dht_query_contacts(void)
+{
+        dht_msg_t *           msg;
+        uint8_t *             key;
+        struct dir_dht_config cfg;
+
+        TEST_START();
+
+        sink_init();
+
+        cfg = test_dht_config;
+        cfg.peer = generate_cookie();
+
+        if (dht_init(&cfg)) {
+                printf("Failed to create dht.\n");
+                goto fail_init;
+        }
+
+        if (fill_dht_with_contacts(10) < 0) {
+                printf("Failed to fill with contacts!\n");
+                goto fail_contacts;
+        }
+
+        key = generate_id();
+        if (key == NULL) {
+                printf("Failed to generate key.\n");
+                goto fail_contacts;
+        }
+
+        if (dht_query(key) != INVALID_ADDR) {
+                printf("Unexpectedly got an address for a random id.\n");
+                goto fail_query;
+        }
+
+        msg = sink_read();
+        if (msg == NULL) {
printf("Failed to read message.!\n"); +                goto fail_read; +        } + +        if (dht_kv_validate_msg(msg) < 0) { +                printf("Failed to validate dht message.\n"); +                goto fail_msg; +        } + +        if (msg->code != DHT_FIND_VALUE_REQ) { +                printf("Failed to validate dht message.\n"); +                goto fail_msg; +        } + +        dht_msg__free_unpacked(msg, NULL); + +        free(key); + +        sink_clear(); + +        dht_fini(); + +        sink_fini(); + +        TEST_SUCCESS(); + +        return TEST_RC_SUCCESS; + fail_msg: +        dht_msg__free_unpacked(msg, NULL); + fail_read: +        sink_clear(); + fail_query: +        free(key); + fail_contacts: +        dht_fini(); + fail_init: +        sink_fini(); +        return TEST_RC_FAIL; +} + +int dht_test(int     argc, +             char ** argv) +{ +        int rc = 0; + +        (void) argc; +        (void) argv; + +        rc |= test_dht_init_fini(); +        rc |= test_dht_start_stop(); +        rc |= test_val_entry_create_destroy(); +        rc |= test_dht_entry_create_destroy(); +        rc |= test_dht_entry_update_get_val(); +        rc |= test_dht_entry_update_get_lval(); +        rc |= test_dht_kv_contact_create_destroy(); +        rc |= test_dht_kv_contact_list(); +        rc |= test_dht_kv_update_bucket(); +        rc |= test_dht_kv_get_values(); +        rc |= test_dht_kv_find_node_req_msg(); +        rc |= test_dht_kv_find_node_rsp_msg(); +        rc |= test_dht_kv_find_node_rsp_msg_contacts(); +        rc |= test_dht_kv_query_contacts_req_rsp(); +        rc |= test_dht_kv_find_value_req_msg(); +        rc |= test_dht_kv_find_value_rsp_msg(); +        rc |= test_dht_kv_find_value_rsp_msg_contacts(); +        rc |= test_dht_kv_find_value_rsp_msg_values(); +        rc |= test_dht_kv_store_msg(); +        rc |= test_dht_req_create_destroy(); +        rc |= test_dht_reg_unreg(); +        rc |= test_dht_reg_unreg_contacts(); +        rc |= test_dht_reg_query_local(); +        rc |= test_dht_query(); +        rc |= test_dht_query_contacts(); + +        return rc; +} diff --git a/src/ipcpd/unicast/dt.c b/src/ipcpd/unicast/dt.c index 0f504daa..e2679ffe 100644 --- a/src/ipcpd/unicast/dt.c +++ b/src/ipcpd/unicast/dt.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Data Transfer Component   * @@ -41,6 +41,7 @@  #include <ouroboros/fccntl.h>  #endif +#include "addr-auth.h"  #include "common/comp.h"  #include "common/connmgr.h"  #include "ca.h" @@ -59,7 +60,7 @@  #include <assert.h>  #define QOS_BLOCK_LEN   672 -#define RIB_FILE_STRLEN (189 + QOS_BLOCK_LEN * QOS_CUBE_MAX) +#define RIB_FILE_STRLEN (169 + RIB_TM_STRLEN + QOS_BLOCK_LEN * QOS_CUBE_MAX)  #define RIB_NAME_STRLEN 256  #ifndef CLOCK_REALTIME_COARSE @@ -144,6 +145,8 @@ static void dt_pci_shrink(struct shm_du_buff * sdb)  struct {          struct psched *    psched; +        uint64_t           addr; +          struct pff *       pff[QOS_CUBE_MAX];          struct routing_i * routing[QOS_CUBE_MAX];  #ifdef IPCP_FLOW_STATS @@ -186,7 +189,7 @@ static int dt_rib_read(const char * path,          char        str[QOS_BLOCK_LEN + 1];          char        addrstr[20];          char *      entry; -        char        tmstr[20]; +        char        tmstr[RIB_TM_STRLEN];          size_t      rxqlen = 0;          size_t      txqlen = 0;          struct tm * tm; @@ -209,13 +212,13 @@ static int dt_rib_read(const char * path,                  return 0;          
} -        if (dt.stat[fd].addr == ipcpi.dt_addr) +        if (dt.stat[fd].addr == dt.addr)                  sprintf(addrstr, "%s", dt.comps[fd].name);          else -                sprintf(addrstr, "%" PRIu64, dt.stat[fd].addr); +                sprintf(addrstr, ADDR_FMT32, ADDR_VAL32(&dt.stat[fd].addr)); -        tm = localtime(&dt.stat[fd].stamp); -        strftime(tmstr, sizeof(tmstr), "%F %T", tm); +        tm = gmtime(&dt.stat[fd].stamp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);          if (fd >= PROG_RES_FDS) {                  fccntl(fd, FLOWGRXQLEN, &rxqlen); @@ -223,11 +226,11 @@ static int dt_rib_read(const char * path,          }          sprintf(buf, -                "Flow established at:      %20s\n" +                "Flow established at:      %.*s\n"                  "Endpoint address:         %20s\n"                  "Queued packets (rx):      %20zu\n"                  "Queued packets (tx):      %20zu\n\n", -                tmstr, addrstr, rxqlen, txqlen); +                RIB_TM_STRLEN - 1, tmstr, addrstr, rxqlen, txqlen);          for (i = 0; i < QOS_CUBE_MAX; ++i) {                  sprintf(str,                          "Qos cube %3d:\n" @@ -285,48 +288,45 @@ static int dt_rib_readdir(char *** buf)          pthread_rwlock_rdlock(&dt.lock);          if (dt.n_flows < 1) { -                pthread_rwlock_unlock(&dt.lock); -                return 0; +                *buf = NULL; +                goto no_flows;          }          *buf = malloc(sizeof(**buf) * dt.n_flows); -        if (*buf == NULL) { -                pthread_rwlock_unlock(&dt.lock); -                return -ENOMEM; -        } +        if (*buf == NULL) +                goto fail_entries;          for (i = 0; i < PROG_MAX_FLOWS; ++i) {                  pthread_mutex_lock(&dt.stat[i].lock);                  if (dt.stat[i].stamp == 0) {                          pthread_mutex_unlock(&dt.stat[i].lock); -                        /* Optimization: skip unused res_fds. 
*/ -                        if (i < PROG_RES_FDS) -                                i = PROG_RES_FDS; -                        continue; +                        break;                  } +                pthread_mutex_unlock(&dt.stat[i].lock); +                  sprintf(entry, "%zu", i);                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        while (idx-- > 0) -                                free((*buf)[idx]); -                        free(buf); -                        pthread_mutex_unlock(&dt.stat[i].lock); -                        pthread_rwlock_unlock(&dt.lock); -                        return -ENOMEM; -                } +                if ((*buf)[idx] == NULL) +                        goto fail_entry;                  strcpy((*buf)[idx++], entry); -                pthread_mutex_unlock(&dt.stat[i].lock);          } -        assert((size_t) idx == dt.n_flows); - + no_flows:          pthread_rwlock_unlock(&dt.lock);          return idx; + + fail_entry: +        while (idx-- > 0) +                free((*buf)[idx]); +        free(*buf); + fail_entries: +        pthread_rwlock_unlock(&dt.lock); +        return -ENOMEM;  #else          (void) buf;          return 0; @@ -399,6 +399,7 @@ static void handle_event(void *       self,                           const void * o)  {          struct conn * c; +        int           fd;          (void) self; @@ -406,19 +407,20 @@ static void handle_event(void *       self,          switch (event) {          case NOTIFY_DT_CONN_ADD: +                fd = c->flow_info.fd;  #ifdef IPCP_FLOW_STATS -                stat_used(c->flow_info.fd, c->conn_info.addr); +                stat_used(fd, c->conn_info.addr);  #endif -                psched_add(dt.psched, c->flow_info.fd); -                log_dbg("Added fd %d to packet scheduler.", c->flow_info.fd); +                psched_add(dt.psched, fd); +                log_dbg("Added fd %d to packet scheduler.", fd);                  break;          case NOTIFY_DT_CONN_DEL: +                fd = c->flow_info.fd;  #ifdef IPCP_FLOW_STATS -                stat_used(c->flow_info.fd, INVALID_ADDR); +                stat_used(fd, INVALID_ADDR);  #endif -                psched_del(dt.psched, c->flow_info.fd); -                log_dbg("Removed fd %d from " -                        "packet scheduler.", c->flow_info.fd); +                psched_del(dt.psched, fd); +                log_dbg("Removed fd %d from packet scheduler.", fd);                  break;          default:                  break; @@ -435,7 +437,7 @@ static void packet_handler(int                  fd,          uint8_t *     head;          size_t        len; -        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +        len = shm_du_buff_len(sdb);  #ifndef IPCP_FLOW_STATS          (void)        fd; @@ -452,7 +454,7 @@ static void packet_handler(int                  fd,          head = shm_du_buff_head(sdb);          dt_pci_des(head, &dt_pci); -        if (dt_pci.dst_addr != ipcpi.dt_addr) { +        if (dt_pci.dst_addr != dt.addr) {                  if (dt_pci.ttl == 0) {                          log_dbg("TTL was zero.");                          ipcp_sdb_release(sdb); @@ -563,33 +565,36 @@ static void * dt_conn_handle(void * o)          return 0;  } -int dt_init(enum pol_routing pr, -            uint8_t          addr_size, -            uint8_t          eid_size, -            uint8_t          max_ttl) +int dt_init(struct dt_config cfg)  {          int              i;          int 
             j;          char             dtstr[RIB_NAME_STRLEN + 1]; -        int              pp; +        enum pol_pff     pp;          struct conn_info info;          memset(&info, 0, sizeof(info)); +        dt.addr = addr_auth_address(); +        if (dt.addr == INVALID_ADDR) { +                log_err("Failed to get address"); +                return -1; +        } +          strcpy(info.comp_name, DT_COMP);          strcpy(info.protocol, DT_PROTO);          info.pref_version = 1;          info.pref_syntax  = PROTO_FIXED; -        info.addr         = ipcpi.dt_addr; +        info.addr         = dt.addr; -        if (eid_size != 8) { /* only support 64 bits from now */ +        if (cfg.eid_size != 8) { /* only support 64 bits from now */                  log_warn("Invalid EID size. Only 64 bit is supported."); -                eid_size = 8; +                cfg.eid_size = 8;          } -        dt_pci_info.addr_size = addr_size; -        dt_pci_info.eid_size  = eid_size; -        dt_pci_info.max_ttl   = max_ttl; +        dt_pci_info.addr_size = cfg.addr_size; +        dt_pci_info.eid_size  = cfg.eid_size; +        dt_pci_info.max_ttl   = cfg.max_ttl;          dt_pci_info.qc_o      = dt_pci_info.addr_size;          dt_pci_info.ttl_o     = dt_pci_info.qc_o + QOS_LEN; @@ -597,18 +602,12 @@ int dt_init(enum pol_routing pr,          dt_pci_info.eid_o     = dt_pci_info.ecn_o + ECN_LEN;          dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size; -        if (notifier_reg(handle_event, NULL)) { -                log_err("Failed to register with notifier."); -                goto fail_notifier_reg; -        } -          if (connmgr_comp_init(COMPID_DT, &info)) {                  log_err("Failed to register with connmgr.");                  goto fail_connmgr_comp_init;          } -        pp = routing_init(pr); -        if (pp < 0) { +        if (routing_init(&cfg.routing, &pp) < 0) {                  log_err("Failed to init routing.");                  goto fail_routing;          } @@ -645,6 +644,7 @@ int dt_init(enum pol_routing pr,          for (i = 0; i < PROG_MAX_FLOWS; ++i)                  if (pthread_mutex_init(&dt.stat[i].lock, NULL)) { +                        log_err("Failed to init mutex for flow %d.", i);                          for (j = 0; j < i; ++j)                                  pthread_mutex_destroy(&dt.stat[j].lock);                          goto fail_stat_lock; @@ -652,9 +652,11 @@ int dt_init(enum pol_routing pr,          dt.n_flows = 0;  #endif -        sprintf(dtstr, "%s.%" PRIu64, DT, ipcpi.dt_addr); -        if (rib_reg(dtstr, &r_ops)) +        sprintf(dtstr, "%s." 
ADDR_FMT32, DT, ADDR_VAL32(&dt.addr));
+        if (rib_reg(dtstr, &r_ops)) {
+                log_err("Failed to register RIB.");
                 goto fail_rib_reg;
+        }
 
         return 0;
 
@@ -678,8 +680,6 @@
  fail_routing:
         connmgr_comp_fini(COMPID_DT);
  fail_connmgr_comp_init:
-        notifier_unreg(&handle_event);
- fail_notifier_reg:
         return -1;
 }
 
@@ -688,7 +688,7 @@ void dt_fini(void)
         char dtstr[RIB_NAME_STRLEN + 1];
         int i;
 
-        sprintf(dtstr, "%s.%" PRIu64, DT, ipcpi.dt_addr);
+        sprintf(dtstr, "%s." ADDR_FMT32, DT, ADDR_VAL32(&dt.addr));
         rib_unreg(dtstr);
 #ifdef IPCP_FLOW_STATS
         for (i = 0; i < PROG_MAX_FLOWS; ++i)
@@ -707,31 +707,53 @@ void dt_fini(void)
         routing_fini();
 
         connmgr_comp_fini(COMPID_DT);
-
-        notifier_unreg(&handle_event);
 }
 
 int dt_start(void)
 {
-        dt.psched = psched_create(packet_handler);
+        dt.psched = psched_create(packet_handler, ipcp_flow_read);
         if (dt.psched == NULL) {
                 log_err("Failed to create N-1 packet scheduler.");
-                return -1;
+                goto fail_psched;
+        }
+
+        if (notifier_reg(handle_event, NULL)) {
+                log_err("Failed to register with notifier.");
+                goto fail_notifier_reg;
         }
 
         if (pthread_create(&dt.listener, NULL, dt_conn_handle, NULL)) {
                 log_err("Failed to create listener thread.");
-                psched_destroy(dt.psched);
-                return -1;
+                goto fail_listener;
+        }
+
+        if (routing_start() < 0) {
+                log_err("Failed to start routing.");
+                goto fail_routing;
         }
 
         return 0;
+
+ fail_routing:
+        pthread_cancel(dt.listener);
+        pthread_join(dt.listener, NULL);
+ fail_listener:
+        notifier_unreg(&handle_event);
+ fail_notifier_reg:
+        psched_destroy(dt.psched);
+ fail_psched:
+        return -1;
 }
 
 void dt_stop(void)
 {
+        routing_stop();
+
         pthread_cancel(dt.listener);
         pthread_join(dt.listener, NULL);
+
+        notifier_unreg(&handle_event);
+
         psched_destroy(dt.psched);
 }
 
@@ -741,13 +763,13 @@ int dt_reg_comp(void * comp,
 {
         int eid;
 
-        assert(func);
+        assert(func != NULL);
 
         pthread_rwlock_wrlock(&dt.lock);
 
         eid = bmp_allocate(dt.res_fds);
         if (!bmp_is_id_valid(dt.res_fds, eid)) {
-                log_warn("Reserved EIDs depleted.");
+                log_err("Cannot allocate EID.");
                 pthread_rwlock_unlock(&dt.lock);
                 return -EBADF;
         }
@@ -762,11 +784,28 @@
         pthread_rwlock_unlock(&dt.lock);
 
 #ifdef IPCP_FLOW_STATS
-        stat_used(eid, ipcpi.dt_addr);
+        stat_used(eid, dt.addr);
 #endif
         return eid;
 }
 
+void dt_unreg_comp(int eid)
+{
+        assert(eid >= 0 && eid < PROG_RES_FDS);
+
+        pthread_rwlock_wrlock(&dt.lock);
+
+        assert(dt.comps[eid].post_packet != NULL);
+
+        dt.comps[eid].post_packet = NULL;
+        dt.comps[eid].comp        = NULL;
+        dt.comps[eid].name        = NULL;
+
+        pthread_rwlock_unlock(&dt.lock);
+
+        return;
+}
+
 int dt_write_packet(uint64_t             dst_addr,
                     qoscube_t            qc,
                     uint64_t             eid,
@@ -779,9 +818,9 @@ int dt_write_packet(uint64_t             dst_addr,
         size_t        len;
 
assert(sdb); -        assert(dst_addr != ipcpi.dt_addr); +        assert(dst_addr != dt.addr); -        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +        len = shm_du_buff_len(sdb);  #ifdef IPCP_FLOW_STATS          if (eid < PROG_RES_FDS) { @@ -795,7 +834,8 @@ int dt_write_packet(uint64_t             dst_addr,  #endif          fd = pff_nhop(dt.pff[qc], dst_addr);          if (fd < 0) { -                log_dbg("Could not get nhop for addr %" PRIu64 ".", dst_addr); +                log_dbg("Could not get nhop for " ADDR_FMT32 ".", +                        ADDR_VAL32(&dst_addr));  #ifdef IPCP_FLOW_STATS                  if (eid < PROG_RES_FDS) {                          pthread_mutex_lock(&dt.stat[eid].lock); @@ -815,7 +855,7 @@ int dt_write_packet(uint64_t             dst_addr,                  goto fail_write;          } -        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +        len = shm_du_buff_len(sdb);          dt_pci.dst_addr = dst_addr;          dt_pci.qc       = qc; diff --git a/src/ipcpd/unicast/dt.h b/src/ipcpd/unicast/dt.h index e1abbe28..2c5b7978 100644 --- a/src/ipcpd/unicast/dt.h +++ b/src/ipcpd/unicast/dt.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Data Transfer component   * @@ -31,11 +31,7 @@  #define DT_PROTO     "dtp"  #define INVALID_ADDR 0 -int  dt_init(enum pol_routing pr, -             uint8_t          addr_size, -             uint8_t          eid_size, -             uint8_t          max_ttl -); +int  dt_init(struct dt_config cfg);  void dt_fini(void); @@ -43,9 +39,11 @@ int  dt_start(void);  void dt_stop(void); -int  dt_reg_comp(void * comp, +int  dt_reg_comp(void *  comp,                   void (* func)(void * comp, struct shm_du_buff * sdb), -                 char * name); +                 char *  name); + +void dt_unreg_comp(int eid);  int  dt_write_packet(uint64_t             dst_addr,                       qoscube_t            qc, diff --git a/src/ipcpd/unicast/enroll.c b/src/ipcpd/unicast/enroll.c deleted file mode 100644 index 500a3895..00000000 --- a/src/ipcpd/unicast/enroll.c +++ /dev/null @@ -1,3 +0,0 @@ -#define BUILD_IPCP_UNICAST - -#include "common/enroll.c" diff --git a/src/ipcpd/unicast/fa.c b/src/ipcpd/unicast/fa.c index 6e6d52f0..ac168bd9 100644 --- a/src/ipcpd/unicast/fa.c +++ b/src/ipcpd/unicast/fa.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Flow allocator of the IPC Process   * @@ -31,6 +31,7 @@  #define FA               "flow-allocator"  #define OUROBOROS_PREFIX FA +#include <ouroboros/endian.h>  #include <ouroboros/logs.h>  #include <ouroboros/fqueue.h>  #include <ouroboros/errno.h> @@ -40,6 +41,7 @@  #include <ouroboros/random.h>  #include <ouroboros/pthread.h> +#include "addr-auth.h"  #include "dir.h"  #include "fa.h"  #include "psched.h" @@ -55,7 +57,7 @@  #define CLOCK_REALTIME_COARSE CLOCK_REALTIME  #endif -#define TIMEOUT 10000 /* nanoseconds */ +#define TIMEOUT (10 * MILLION) /* nanoseconds */  #define FLOW_REQ    0  #define FLOW_REPLY  1 @@ -68,18 +70,17 @@ struct fa_msg {          uint64_t s_addr;          uint64_t r_eid;          uint64_t s_eid; -        uint8_t  code; -        int8_t   response; -        uint16_t ece; -        /* QoS parameters from spec, aligned */ -        uint8_t  availability; -        uint8_t  in_order; -        uint32_t delay;          uint64_t bandwidth; +        int32_t  response; +        uint32_t delay;          uint32_t loss;          uint32_t 
ber;          uint32_t max_gap; -        uint16_t cypher_s; +        uint32_t timeout; +        uint16_t ece; +        uint8_t  code; +        uint8_t  availability; +        uint8_t  in_order;  } __attribute__((packed));  struct cmd { @@ -133,7 +134,7 @@ static int fa_rib_read(const char * path,          char             r_addrstr[21];          char             s_eidstr[21];          char             r_eidstr[21]; -        char             tmstr[20]; +        char             tmstr[RIB_TM_STRLEN];          char             castr[1024];          char *           entry;          struct tm *      tm; @@ -143,7 +144,7 @@ static int fa_rib_read(const char * path,          fd = atoi(entry); -        if (fd < 0 || fd > PROG_MAX_FLOWS) +        if (fd < 0 || fd >= PROG_MAX_FLOWS)                  return -1;          if (len < 1536) @@ -164,8 +165,8 @@ static int fa_rib_read(const char * path,          sprintf(s_eidstr, "%" PRIu64, flow->s_eid);          sprintf(r_eidstr, "%" PRIu64, flow->r_eid); -        tm = localtime(&flow->stamp); -        strftime(tmstr, sizeof(tmstr), "%F %T", tm); +        tm = gmtime(&flow->stamp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm);          ca_print_stats(flow->ctx, castr, 1024); @@ -215,15 +216,13 @@ static int fa_rib_readdir(char *** buf)          pthread_rwlock_rdlock(&fa.flows_lock);          if (fa.n_flows < 1) { -                pthread_rwlock_unlock(&fa.flows_lock); -                return 0; +                *buf = NULL; +                goto no_flows;          }          *buf = malloc(sizeof(**buf) * fa.n_flows); -        if (*buf == NULL) { -                pthread_rwlock_unlock(&fa.flows_lock); -                return -ENOMEM; -        } +        if (*buf == NULL) +                goto fail_entries;          for (i = 0; i < PROG_MAX_FLOWS; ++i) {                  struct fa_flow * flow; @@ -235,22 +234,25 @@ static int fa_rib_readdir(char *** buf)                  sprintf(entry, "%zu", i);                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        while (idx-- > 0) -                                free((*buf)[idx]); -                        free(buf); -                        pthread_rwlock_unlock(&fa.flows_lock); -                        return -ENOMEM; -                } +                if ((*buf)[idx] == NULL) +                        goto fail_entry;                  strcpy((*buf)[idx++], entry);          }          assert((size_t) idx == fa.n_flows); - + no_flows:          pthread_rwlock_unlock(&fa.flows_lock);          return idx; + + fail_entry: +        while (idx-- > 0) +                free((*buf)[idx]); +        free(*buf); + fail_entries: +        pthread_rwlock_unlock(&fa.flows_lock); +        return -ENOMEM;  #else          (void) buf;          return 0; @@ -303,7 +305,7 @@ static int eid_to_fd(uint64_t eid)          fd = eid & 0xFFFFFFFF; -        if (fd < 0 || fd > PROG_MAX_FLOWS) +        if (fd < 0 || fd >= PROG_MAX_FLOWS)                  return -1;          flow = &fa.flows[fd]; @@ -340,7 +342,7 @@ static void packet_handler(int                  fd,          pthread_rwlock_wrlock(&fa.flows_lock); -        len = shm_du_buff_tail(sdb) - shm_du_buff_head(sdb); +        len = shm_du_buff_len(sdb);  #ifdef IPCP_FLOW_STATS          ++flow->p_snd; @@ -357,7 +359,7 @@ static void packet_handler(int                  fd,          if (dt_write_packet(r_addr, qc, r_eid, sdb)) {                  ipcp_sdb_release(sdb); -                log_warn("Failed to forward 
packet."); +                log_dbg("Failed to forward packet.");  #ifdef IPCP_FLOW_STATS                  pthread_rwlock_wrlock(&fa.flows_lock);                  ++flow->p_snd_f; @@ -435,167 +437,194 @@ static void fa_post_packet(void *               comp,          pthread_mutex_unlock(&fa.mtx);  } -static void * fa_handle_packet(void * o) +static size_t fa_wait_for_fa_msg(struct fa_msg * msg)  { -        struct timespec ts  = {0, TIMEOUT * 1000}; +        struct cmd * cmd; +        size_t       len; -        (void) o; +        pthread_mutex_lock(&fa.mtx); -        while (true) { -                struct timespec  abstime; -                int              fd; -                uint8_t          buf[MSGBUFSZ]; -                struct fa_msg *  msg; -                qosspec_t        qs; -                struct cmd *     cmd; -                size_t           len; -                size_t           msg_len; -                struct fa_flow * flow; +        pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx); -                pthread_mutex_lock(&fa.mtx); +        while (list_is_empty(&fa.cmds)) +                pthread_cond_wait(&fa.cond, &fa.mtx); -                pthread_cleanup_push(__cleanup_mutex_unlock, &fa.mtx); +        cmd = list_last_entry(&fa.cmds, struct cmd, next); +        list_del(&cmd->next); -                while (list_is_empty(&fa.cmds)) -                        pthread_cond_wait(&fa.cond, &fa.mtx); +        pthread_cleanup_pop(true); -                cmd = list_last_entry(&fa.cmds, struct cmd, next); -                list_del(&cmd->next); +        len = shm_du_buff_len(cmd->sdb); +        if (len > MSGBUFSZ || len < sizeof(*msg)) { +                log_warn("Invalid flow allocation message (len: %zd).", len); +                free(cmd); +                return 0; /* No valid message */ +        } -                pthread_cleanup_pop(true); +        memcpy(msg, shm_du_buff_head(cmd->sdb), len); -                len = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb); +        ipcp_sdb_release(cmd->sdb); -                if (len > MSGBUFSZ) { -                        log_err("Message over buffer size."); -                        free(cmd); -                        continue; -                } +        free(cmd); -                msg = (struct fa_msg *) buf; +        return len; +} -                /* Depending on the message call the function in ipcp-dev.h */ +static int fa_handle_flow_req(struct fa_msg * msg, +                              size_t          len) +{ +        size_t           msg_len; +        int              fd; +        qosspec_t        qs; +        struct fa_flow * flow; +        uint8_t *        dst; +        buffer_t         data;  /* Piggbacked data on flow alloc request. 
*/ -                memcpy(msg, shm_du_buff_head(cmd->sdb), len); +        msg_len = sizeof(*msg) + ipcp_dir_hash_len(); +        if (len < msg_len) { +                log_err("Invalid flow allocation request."); +                return -EPERM; +        } -                ipcp_sdb_release(cmd->sdb); +        dst       = (uint8_t *)(msg + 1); +        data.data = (uint8_t *) msg + msg_len; +        data.len  = len - msg_len; + +        qs.delay        = ntoh32(msg->delay); +        qs.bandwidth    = ntoh64(msg->bandwidth); +        qs.availability = msg->availability; +        qs.loss         = ntoh32(msg->loss); +        qs.ber          = ntoh32(msg->ber); +        qs.in_order     = msg->in_order; +        qs.max_gap      = ntoh32(msg->max_gap); +        qs.timeout      = ntoh32(msg->timeout); + +        fd = ipcp_wait_flow_req_arr(dst, qs, IPCP_UNICAST_MPL, &data); +        if (fd < 0) +                return fd; -                free(cmd); +        flow = &fa.flows[fd]; -                switch (msg->code) { -                case FLOW_REQ: -                        msg_len = sizeof(*msg) + ipcp_dir_hash_len(); +        pthread_rwlock_wrlock(&fa.flows_lock); -                        assert(len >= msg_len); +        fa_flow_init(flow); -                        clock_gettime(PTHREAD_COND_CLOCK, &abstime); +        flow->s_eid  = gen_eid(fd); +        flow->r_eid  = ntoh64(msg->s_eid); +        flow->r_addr = ntoh64(msg->s_addr); -                        pthread_mutex_lock(&ipcpi.alloc_lock); +        pthread_rwlock_unlock(&fa.flows_lock); -                        while (ipcpi.alloc_id != -1 && -                               ipcp_get_state() == IPCP_OPERATIONAL) { -                                ts_add(&abstime, &ts, &abstime); -                                pthread_cond_timedwait(&ipcpi.alloc_cond, -                                                       &ipcpi.alloc_lock, -                                                       &abstime); -                        } +        return fd; +} -                        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                                pthread_mutex_unlock(&ipcpi.alloc_lock); -                                log_dbg("Won't allocate over non-operational" -                                        "IPCP."); -                                continue; -                        } +static int fa_handle_flow_reply(struct fa_msg * msg, +                                size_t          len) +{ +        int              fd; +        struct fa_flow * flow; +        buffer_t         data;  /* Piggybacked data on flow alloc reply. 
*/ +        time_t           mpl = IPCP_UNICAST_MPL; +        int              response; -                        assert(ipcpi.alloc_id == -1); +        assert(len >= sizeof(*msg)); -                        qs.delay        = ntoh32(msg->delay); -                        qs.bandwidth    = ntoh64(msg->bandwidth); -                        qs.availability = msg->availability; -                        qs.loss         = ntoh32(msg->loss); -                        qs.ber          = ntoh32(msg->ber); -                        qs.in_order     = msg->in_order; -                        qs.max_gap      = ntoh32(msg->max_gap); -                        qs.cypher_s     = ntoh16(msg->cypher_s); +        data.data = (uint8_t *) msg + sizeof(*msg); +        data.len  = len - sizeof(*msg); -                        fd = ipcp_flow_req_arr((uint8_t *) (msg + 1), -                                               ipcp_dir_hash_len(), -                                               qs, -                                               buf + msg_len, -                                               len - msg_len); -                        if (fd < 0) { -                                pthread_mutex_unlock(&ipcpi.alloc_lock); -                                log_err("Failed to get fd for flow."); -                                continue; -                        } +        pthread_rwlock_wrlock(&fa.flows_lock); -                        flow = &fa.flows[fd]; +        fd = eid_to_fd(ntoh64(msg->r_eid)); +        if (fd < 0) { +                pthread_rwlock_unlock(&fa.flows_lock); +                log_err("Flow reply for unknown EID %" PRIu64 ".", +                        ntoh64(msg->r_eid)); +                return -ENOTALLOC; +        } -                        pthread_rwlock_wrlock(&fa.flows_lock); +        flow = &fa.flows[fd]; -                        fa_flow_init(flow); +        flow->r_eid = ntoh64(msg->s_eid); +        response = ntoh32(msg->response); -                        flow->s_eid  = gen_eid(fd); -                        flow->r_eid  = ntoh64(msg->s_eid); -                        flow->r_addr = ntoh64(msg->s_addr); +        log_dbg("IPCP received msg response %d for flow on fd %d.", +                response, fd); -                        pthread_rwlock_unlock(&fa.flows_lock); +        if (response < 0) +                fa_flow_fini(flow); +        else +                psched_add(fa.psched, fd); -                        ipcpi.alloc_id = fd; -                        pthread_cond_broadcast(&ipcpi.alloc_cond); +        pthread_rwlock_unlock(&fa.flows_lock); -                        pthread_mutex_unlock(&ipcpi.alloc_lock); +        if (ipcp_flow_alloc_reply(fd, response, mpl, &data) < 0) { +                log_err("Failed to reply for flow allocation on fd %d.", fd); +                return -EIRMD; +        } -                        break; -                case FLOW_REPLY: -                        assert(len >= sizeof(*msg)); +        return 0; +} -                        pthread_rwlock_wrlock(&fa.flows_lock); +static int fa_handle_flow_update(struct fa_msg * msg, +                                 size_t          len) +{ +        struct fa_flow * flow; +        int              fd; -                        fd = eid_to_fd(ntoh64(msg->r_eid)); -                        if (fd < 0) { -                                pthread_rwlock_unlock(&fa.flows_lock); -                                break; -                        } +        (void) len; +        assert(len >= sizeof(*msg)); -                        flow = 
&fa.flows[fd]; +        fd = eid_to_fd(ntoh64(msg->r_eid)); +        if (fd < 0) { +                pthread_rwlock_unlock(&fa.flows_lock); +                log_err("Flow update for unknown EID %" PRIu64 ".", +                        ntoh64(msg->r_eid)); +                return -EPERM; +        } -                        flow->r_eid = ntoh64(msg->s_eid); +        flow = &fa.flows[fd]; +#ifdef IPCP_FLOW_STATS +        flow->u_rcv++; +#endif +        ca_ctx_update_ece(flow->ctx, ntoh16(msg->ece)); -                        if (msg->response < 0) -                                fa_flow_fini(flow); -                        else -                                psched_add(fa.psched, fd); +        pthread_rwlock_unlock(&fa.flows_lock); -                        pthread_rwlock_unlock(&fa.flows_lock); -                        ipcp_flow_alloc_reply(fd, -                                              msg->response, -                                              buf + sizeof(*msg), -                                              len - sizeof(*msg)); -                        break; -                case FLOW_UPDATE: -                        assert(len >= sizeof(*msg)); +        return 0; +} -                        pthread_rwlock_wrlock(&fa.flows_lock); +static void * fa_handle_packet(void * o) +{ +        (void) o; -                        fd = eid_to_fd(ntoh64(msg->r_eid)); -                        if (fd < 0) { -                                pthread_rwlock_unlock(&fa.flows_lock); -                                break; -                        } +        while (true) { +                uint8_t          buf[MSGBUFSZ]; +                struct fa_msg *  msg; +                size_t           len; -                        flow = &fa.flows[fd]; -#ifdef IPCP_FLOW_STATS -                        flow->u_rcv++; -#endif -                        ca_ctx_update_ece(flow->ctx, ntoh16(msg->ece)); +                msg = (struct fa_msg *) buf; -                        pthread_rwlock_unlock(&fa.flows_lock); +                len = fa_wait_for_fa_msg(msg); +                if (len == 0) +                        continue; +                switch (msg->code) { +                case FLOW_REQ: +                        if (fa_handle_flow_req(msg, len) < 0) +                                log_err("Error handling flow alloc request."); +                        break; +                case FLOW_REPLY: +                        if (fa_handle_flow_reply(msg, len) < 0) +                                log_err("Error handling flow reply."); +                        break; +                case FLOW_UPDATE: +                        if (fa_handle_flow_update(msg, len) < 0) +                                log_err("Error handling flow update.");                          break;                  default: -                        log_err("Got an unknown flow allocation message."); +                        log_warn("Received unknown flow allocation message.");                          break;                  }          } @@ -622,19 +651,21 @@ int fa_init(void)          if (pthread_cond_init(&fa.cond, &cattr))                  goto fail_cond; -        pthread_condattr_destroy(&cattr); - -        list_head_init(&fa.cmds); -          if (rib_reg(FA, &r_ops))                  goto fail_rib_reg;          fa.eid = dt_reg_comp(&fa, &fa_post_packet, FA);          if ((int) fa.eid < 0) -                goto fail_rib_reg; +                goto fail_dt_reg; + +        list_head_init(&fa.cmds); + +      
  pthread_condattr_destroy(&cattr);          return 0; + fail_dt_reg: +        rib_unreg(FA);   fail_rib_reg:          pthread_cond_destroy(&fa.cond);   fail_cond: @@ -644,7 +675,6 @@ int fa_init(void)   fail_mtx:          pthread_rwlock_destroy(&fa.flows_lock);   fail_rwlock: -        log_err("Failed to initialize flow allocator.");          return -1;  } @@ -663,7 +693,7 @@ int fa_start(void)          int                 pol;          int                 max; -        fa.psched = psched_create(packet_handler); +        fa.psched = psched_create(packet_handler, np1_flow_read);          if (fa.psched == NULL) {                  log_err("Failed to start packet scheduler.");                  goto fail_psched; @@ -700,7 +730,6 @@ int fa_start(void)   fail_thread:          psched_destroy(fa.psched);   fail_psched: -        log_err("Failed to start flow allocator.");          return -1;  } @@ -712,11 +741,10 @@ void fa_stop(void)          psched_destroy(fa.psched);  } -int fa_alloc(int             fd, -             const uint8_t * dst, -             qosspec_t       qs, -             const void *    data, -             size_t          dlen) +int fa_alloc(int              fd, +             const uint8_t *  dst, +             qosspec_t        qs, +             const buffer_t * data)  {          struct fa_msg *      msg;          struct shm_du_buff * sdb; @@ -732,7 +760,7 @@ int fa_alloc(int             fd,          len = sizeof(*msg) + ipcp_dir_hash_len(); -        if (ipcp_sdb_reserve(&sdb, len + dlen)) +        if (ipcp_sdb_reserve(&sdb, len + data->len))                  return -1;          msg = (struct fa_msg *) shm_du_buff_head(sdb); @@ -742,7 +770,7 @@ int fa_alloc(int             fd,          msg->code         = FLOW_REQ;          msg->s_eid        = hton64(eid); -        msg->s_addr       = hton64(ipcpi.dt_addr); +        msg->s_addr       = hton64(addr_auth_address());          msg->delay        = hton32(qs.delay);          msg->bandwidth    = hton64(qs.bandwidth);          msg->availability = qs.availability; @@ -750,12 +778,14 @@ int fa_alloc(int             fd,          msg->ber          = hton32(qs.ber);          msg->in_order     = qs.in_order;          msg->max_gap      = hton32(qs.max_gap); -        msg->cypher_s     = hton16(qs.cypher_s); +        msg->timeout      = hton32(qs.timeout);          memcpy(msg + 1, dst, ipcp_dir_hash_len()); -        memcpy(shm_du_buff_head(sdb) + len, data, dlen); +        if (data->len > 0) +                memcpy(shm_du_buff_head(sdb) + len, data->data, data->len);          if (dt_write_packet(addr, qc, fa.eid, sdb)) { +                log_err("Failed to send flow allocation request packet.");                  ipcp_sdb_release(sdb);                  return -1;          } @@ -773,75 +803,66 @@ int fa_alloc(int             fd,          return 0;  } -int fa_alloc_resp(int          fd, -                  int          response, -                  const void * data, -                  size_t       len) +int fa_alloc_resp(int              fd, +                  int              response, +                  const buffer_t * data)  { -        struct timespec      ts = {0, TIMEOUT * 1000}; -        struct timespec      abstime;          struct fa_msg *      msg;          struct shm_du_buff * sdb;          struct fa_flow *     flow;          qoscube_t            qc = QOS_CUBE_BE; -        clock_gettime(PTHREAD_COND_CLOCK, &abstime); -          flow = &fa.flows[fd]; -        pthread_mutex_lock(&ipcpi.alloc_lock); - -        while (ipcpi.alloc_id != fd && 
ipcp_get_state() == IPCP_OPERATIONAL) { -                ts_add(&abstime, &ts, &abstime); -                pthread_cond_timedwait(&ipcpi.alloc_cond, -                                       &ipcpi.alloc_lock, -                                       &abstime); +        if (ipcp_wait_flow_resp(fd) < 0) { +                log_err("Failed to wait for flow response."); +                goto fail_alloc_resp;          } -        if (ipcp_get_state() != IPCP_OPERATIONAL) { -                pthread_mutex_unlock(&ipcpi.alloc_lock); -                return -1; -        } - -        ipcpi.alloc_id = -1; -        pthread_cond_broadcast(&ipcpi.alloc_cond); - -        pthread_mutex_unlock(&ipcpi.alloc_lock); - -        if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + len)) { -                fa_flow_fini(flow); -                return -1; +        if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + data->len)) { +                log_err("Failed to reserve sdb (%zu bytes).", +                        sizeof(*msg) + data->len); +                goto fail_reserve;          }          msg = (struct fa_msg *) shm_du_buff_head(sdb);          memset(msg, 0, sizeof(*msg)); -        pthread_rwlock_wrlock(&fa.flows_lock); -          msg->code     = FLOW_REPLY; +        msg->response = hton32(response); +        if (data->len > 0) +                memcpy(msg + 1, data->data, data->len); + +        pthread_rwlock_rdlock(&fa.flows_lock); +          msg->r_eid    = hton64(flow->r_eid);          msg->s_eid    = hton64(flow->s_eid); -        msg->response = response; -        memcpy(msg + 1, data, len); +        pthread_rwlock_unlock(&fa.flows_lock); + +        if (dt_write_packet(flow->r_addr, qc, fa.eid, sdb)) { +                log_err("Failed to send flow allocation response packet."); +                goto fail_packet; +        }          if (response < 0) { +                pthread_rwlock_wrlock(&fa.flows_lock);                  fa_flow_fini(flow); -                ipcp_sdb_release(sdb); +                pthread_rwlock_unlock(&fa.flows_lock);          } else {                  psched_add(fa.psched, fd);          } -        if (dt_write_packet(flow->r_addr, qc, fa.eid, sdb)) { -                fa_flow_fini(flow); -                pthread_rwlock_unlock(&fa.flows_lock); -                ipcp_sdb_release(sdb); -                return -1; -        } +        return 0; + fail_packet: +        ipcp_sdb_release(sdb); + fail_reserve: +        pthread_rwlock_wrlock(&fa.flows_lock); +        fa_flow_fini(flow);          pthread_rwlock_unlock(&fa.flows_lock); - -        return 0; + fail_alloc_resp: +        return -1;  }  int fa_dealloc(int fd) @@ -857,7 +878,7 @@ int fa_dealloc(int fd)          pthread_rwlock_unlock(&fa.flows_lock); -        flow_dealloc(fd); +        ipcp_flow_dealloc(fd);          return 0;  } @@ -872,6 +893,7 @@ static int fa_update_remote(int      fd,          uint64_t             r_addr;          if (ipcp_sdb_reserve(&sdb, sizeof(*msg))) { +                log_err("Failed to reserve sdb (%zu bytes).", sizeof(*msg));                  return -1;          } @@ -895,6 +917,7 @@ static int fa_update_remote(int      fd,          if (dt_write_packet(r_addr, qc, fa.eid, sdb)) { +                log_err("Failed to send flow update packet.");                  ipcp_sdb_release(sdb);                  return -1;          } @@ -912,13 +935,14 @@ void  fa_np1_rcv(uint64_t             eid,          int              fd;          size_t           len; -        len = 
shm_du_buff_len(sdb);          pthread_rwlock_wrlock(&fa.flows_lock);          fd = eid_to_fd(eid);          if (fd < 0) {                  pthread_rwlock_unlock(&fa.flows_lock); +                log_dbg("Received packet for unknown EID %" PRIu64 ".", eid);                  ipcp_sdb_release(sdb);                  return;          } @@ -934,6 +958,7 @@ void  fa_np1_rcv(uint64_t             eid,          pthread_rwlock_unlock(&fa.flows_lock);          if (ipcp_flow_write(fd, sdb) < 0) { +                log_dbg("Failed to write to flow %d.", fd);                  ipcp_sdb_release(sdb);  #ifdef IPCP_FLOW_STATS                  pthread_rwlock_wrlock(&fa.flows_lock); diff --git a/src/ipcpd/unicast/fa.h b/src/ipcpd/unicast/fa.h index 376fdb20..1e716966 100644 --- a/src/ipcpd/unicast/fa.h +++ b/src/ipcpd/unicast/fa.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Flow allocator of the IPC Process   * @@ -34,16 +34,14 @@ int  fa_start(void);  void fa_stop(void); -int  fa_alloc(int             fd, -              const uint8_t * dst, -              qosspec_t       qs, -              const void *    data, -              size_t          len); +int  fa_alloc(int              fd, +              const uint8_t *  dst, +              qosspec_t        qs, +              const buffer_t * data); -int  fa_alloc_resp(int          fd, -                   int          response, -                   const void * data, -                   size_t       len); +int  fa_alloc_resp(int              fd, +                   int              response, +                   const buffer_t * data);  int  fa_dealloc(int fd); diff --git a/src/ipcpd/unicast/kademlia.proto b/src/ipcpd/unicast/kademlia.proto deleted file mode 100644 index 58f5e787..00000000 --- a/src/ipcpd/unicast/kademlia.proto +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2021 - * - * KAD protocol - * - *    Dimitri Staessens <dimitri@ouroboros.rocks> - *    Sander Vrijders   <sander@ouroboros.rocks> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public License - * version 2.1 as published by the Free Software Foundation. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. - */ - -syntax = "proto2"; - -message kad_contact_msg { -        required bytes  id   = 1; -        required uint64 addr = 2; -}; - -message kad_msg { -        required uint32 code              =  1; -        required uint32 cookie            =  2; -        required uint64 s_addr            =  3; -        optional bytes  s_id              =  4; -        optional bytes  key               =  5; -        repeated uint64 addrs             =  6; -        repeated kad_contact_msg contacts =  7; -        // enrolment parameters -        optional uint32 alpha             =  8; -        optional uint32 b                 =  9; -        optional uint32 k                 = 10; -        optional uint32 t_expire          = 11; -        optional uint32 t_refresh         = 12; -        optional uint32 t_replicate       = 13; -};
\ No newline at end of file diff --git a/src/ipcpd/unicast/main.c b/src/ipcpd/unicast/main.c index 018dd1c6..7989d3e1 100644 --- a/src/ipcpd/unicast/main.c +++ b/src/ipcpd/unicast/main.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Unicast IPC Process   * @@ -32,16 +32,16 @@  #define THIS_TYPE IPCP_UNICAST  #include <ouroboros/errno.h> -#include <ouroboros/hash.h>  #include <ouroboros/ipcp-dev.h>  #include <ouroboros/logs.h>  #include <ouroboros/notifier.h> +#include <ouroboros/random.h>  #include <ouroboros/rib.h> -#include <ouroboros/time_utils.h> +#include <ouroboros/time.h>  #include "common/connmgr.h"  #include "common/enroll.h" -#include "addr_auth.h" +#include "addr-auth.h"  #include "ca.h"  #include "dir.h"  #include "dt.h" @@ -55,237 +55,203 @@  #include <assert.h>  #include <inttypes.h> -struct ipcp ipcpi; - -static int initialize_components(const struct ipcp_config * conf) +static int initialize_components(struct ipcp_config * conf)  { -        ipcpi.layer_name = strdup(conf->layer_info.layer_name); -        if (ipcpi.layer_name == NULL) { -                log_err("Failed to set layer name."); -                goto fail_layer_name; -        } - -        ipcpi.dir_hash_algo = conf->layer_info.dir_hash_algo; -          assert(ipcp_dir_hash_len() != 0); -        if (addr_auth_init(conf->addr_auth_type, -                           &conf->addr_size)) { +        if (addr_auth_init(conf->unicast.addr_auth_type, +                           &conf->unicast.dt.addr_size)) {                  log_err("Failed to init address authority.");                  goto fail_addr_auth;          } -        ipcpi.dt_addr = addr_auth_address(); -        if (ipcpi.dt_addr == 0) { -                log_err("Failed to get a valid address."); -                goto fail_addr_auth; -        } - -        log_dbg("IPCP got address %" PRIu64 ".", ipcpi.dt_addr); +        log_info("IPCP got address %" PRIu64 ".", addr_auth_address()); -        if (ca_init(conf->cong_avoid)) { +        if (ca_init(conf->unicast.cong_avoid)) {                  log_err("Failed to initialize congestion avoidance.");                  goto fail_ca;          } -        if (dt_init(conf->routing_type, -                    conf->addr_size, -                    conf->eid_size, -                    conf->max_ttl)) { +        if (dt_init(conf->unicast.dt)) {                  log_err("Failed to initialize data transfer component.");                  goto fail_dt;          } -        if (fa_init()) { -                log_err("Failed to initialize flow allocator component."); -                goto fail_fa; -        } +        ipcp_set_dir_hash_algo((enum hash_algo) conf->layer_info.dir_hash_algo); -        if (dir_init()) { +        if (dir_init(&conf->unicast.dir)) {                  log_err("Failed to initialize directory.");                  goto fail_dir;          } +        if (fa_init()) { +                log_err("Failed to initialize flow allocator component."); +                goto fail_fa; +        } +          ipcp_set_state(IPCP_INIT);          return 0; - fail_dir: -        fa_fini();   fail_fa: +        dir_fini(); + fail_dir:          dt_fini();   fail_dt:          ca_fini();   fail_ca:          addr_auth_fini();   fail_addr_auth: -        free(ipcpi.layer_name); - fail_layer_name:          return -1;  }  static void finalize_components(void)  { -        dir_fini(); -          fa_fini(); +        dir_fini(); +          dt_fini();          ca_fini();          
addr_auth_fini(); - -        free(ipcpi.layer_name);  }  static int start_components(void)  { -        assert(ipcp_get_state() == IPCP_INIT); +        if (connmgr_start() < 0) { +                log_err("Failed to start AP connection manager."); +                goto fail_connmgr_start; +        } -        ipcp_set_state(IPCP_OPERATIONAL); +        if (dt_start() < 0) { +                log_err("Failed to start data transfer."); +                goto fail_dt_start; +        } -        if (fa_start()) { +        if (fa_start() < 0) {                  log_err("Failed to start flow allocator.");                  goto fail_fa_start;          } -        if (enroll_start()) { +        if (enroll_start() < 0) {                  log_err("Failed to start enrollment.");                  goto fail_enroll_start;          } -        if (connmgr_start()) { -                log_err("Failed to start AP connection manager."); -                goto fail_connmgr_start; +        if (dir_start() < 0) { +                log_err("Failed to start directory."); +                goto fail_dir_start;          }          return 0; - fail_connmgr_start: + fail_dir_start:          enroll_stop();   fail_enroll_start:          fa_stop();   fail_fa_start: +        dt_stop(); + fail_dt_start: +        connmgr_stop(); + fail_connmgr_start:          ipcp_set_state(IPCP_INIT);          return -1;  }  static void stop_components(void)  { -        assert(ipcp_get_state() == IPCP_OPERATIONAL || -               ipcp_get_state() == IPCP_SHUTDOWN); - -        connmgr_stop(); +        dir_stop();          enroll_stop();          fa_stop(); -        ipcp_set_state(IPCP_INIT); -} +        dt_stop(); -static int bootstrap_components(void) -{ -        if (dir_bootstrap()) { -                log_err("Failed to bootstrap directory."); -                dt_stop(); -                return -1; -        } +        connmgr_stop(); -        return 0; +        ipcp_set_state(IPCP_BOOT);  }  static int unicast_ipcp_enroll(const char *        dst,                                 struct layer_info * info)  { -        struct conn conn; +        struct ipcp_config * conf; +        struct conn          conn; +        uint8_t              id[ENROLL_ID_LEN]; -        if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn)) { -                log_err("Failed to get connection."); -                goto fail_er_flow; +        if (random_buffer(id, ENROLL_ID_LEN) < 0) { +                log_err("Failed to generate enrollment ID."); +                goto fail_id;          } -        /* Get boot state from peer. */ -        if (enroll_boot(&conn)) { -                log_err("Failed to get boot information."); -                goto fail_enroll_boot; +        log_info_id(id, "Requesting enrollment."); + +        if (connmgr_alloc(COMPID_ENROLL, dst, NULL, &conn) < 0) { +                log_err_id(id, "Failed to get connection."); +                goto fail_id;          } -        if (initialize_components(enroll_get_conf())) { -                log_err("Failed to initialize IPCP components."); +        /* Get boot state from peer. 
*/ +        if (enroll_boot(&conn, id) < 0) { +                log_err_id(id, "Failed to get boot information.");                  goto fail_enroll_boot;          } -        if (dt_start()) { -                log_err("Failed to initialize IPCP components."); -                goto fail_dt_start; +        conf = enroll_get_conf(); + +        *info = conf->layer_info; + +        if (initialize_components(conf) < 0) { +                log_err_id(id, "Failed to initialize components."); +                goto fail_enroll_boot;          } -        if (start_components()) { -                log_err("Failed to start components."); +        if (start_components() < 0) { +                log_err_id(id, "Failed to start components.");                  goto fail_start_comp;          } -        if (enroll_done(&conn, 0)) -                log_warn("Failed to confirm enrollment with peer."); +        if (enroll_ack(&conn, id, 0) < 0) +                log_err_id(id, "Failed to confirm enrollment."); -        if (connmgr_dealloc(COMPID_ENROLL, &conn)) -                log_warn("Failed to deallocate enrollment flow."); +        if (connmgr_dealloc(COMPID_ENROLL, &conn) < 0) +                log_warn_id(id, "Failed to dealloc enrollment flow."); -        log_info("Enrolled with %s.", dst); - -        info->dir_hash_algo = ipcpi.dir_hash_algo; -        strcpy(info->layer_name, ipcpi.layer_name); +        log_info_id(id, "Enrolled with %s.", dst);          return 0;   fail_start_comp: -        dt_stop(); - fail_dt_start:          finalize_components();   fail_enroll_boot:          connmgr_dealloc(COMPID_ENROLL, &conn); - fail_er_flow: + fail_id:          return -1;  } -static int unicast_ipcp_bootstrap(const struct ipcp_config * conf) +static int unicast_ipcp_bootstrap(struct ipcp_config * conf)  {          assert(conf);          assert(conf->type == THIS_TYPE); -        enroll_bootstrap(conf); - -        if (initialize_components(conf)) { +        if (initialize_components(conf) < 0) {                  log_err("Failed to init IPCP components.");                  goto fail_init;          } -        if (dt_start()) { -                log_err("Failed to initialize IPCP components."); -                goto fail_dt_start; -        }; +        enroll_bootstrap(conf); -        if (start_components()) { +        if (start_components() < 0) {                  log_err("Failed to init IPCP components.");                  goto fail_start;          } -        if (bootstrap_components()) { -                log_err("Failed to bootstrap IPCP components."); -                goto fail_bootstrap; -        } - -        log_dbg("Bootstrapped in layer %s.", conf->layer_info.layer_name); -          return 0; - fail_bootstrap: -        stop_components();   fail_start: -        dt_stop(); - fail_dt_start:          finalize_components();   fail_init:          return -1; @@ -318,38 +284,34 @@ int main(int    argc,                  goto fail_init;          } -        if (notifier_init()) { +        if (notifier_init() < 0) {                  log_err("Failed to initialize notifier component.");                  goto fail_notifier_init;          } -        if (connmgr_init()) { +        if (connmgr_init() < 0) {                  log_err("Failed to initialize connection manager.");                  goto fail_connmgr_init;          } -        if (enroll_init()) { +        if (enroll_init() < 0) {                  log_err("Failed to initialize enrollment component.");                  goto fail_enroll_init;          } -        if (ipcp_boot() 
< 0) { -                log_err("Failed to boot IPCP."); -                goto fail_boot; -        } - -        if (ipcp_create_r(0)) { -                log_err("Failed to notify IRMd we are initialized."); -                ipcp_set_state(IPCP_NULL); -                goto fail_create_r; +        if (ipcp_start() < 0) { +                log_err("Failed to start IPCP."); +                goto fail_start;          } -        ipcp_shutdown(); +        ipcp_sigwait();          if (ipcp_get_state() == IPCP_SHUTDOWN) { -                dt_stop();                  stop_components(); +                ipcp_stop();                  finalize_components(); +        } else { +                ipcp_stop();          }          enroll_fini(); @@ -362,17 +324,14 @@ int main(int    argc,          exit(EXIT_SUCCESS); - fail_create_r: -        ipcp_shutdown(); - fail_boot: + fail_start:          enroll_fini();   fail_enroll_init:          connmgr_fini();   fail_connmgr_init:          notifier_fini();   fail_notifier_init: -       ipcp_fini(); +        ipcp_fini();   fail_init: -        ipcp_create_r(-1);          exit(EXIT_FAILURE);  } diff --git a/src/ipcpd/unicast/pff.c b/src/ipcpd/unicast/pff.c index 03682184..9b2aa2b4 100644 --- a/src/ipcpd/unicast/pff.c +++ b/src/ipcpd/unicast/pff.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * PDU Forwarding Function   * @@ -26,14 +26,11 @@  #include <ouroboros/logs.h>  #include "pff.h" -#include "pol-pff-ops.h" -#include "pol/alternate_pff.h" -#include "pol/multipath_pff.h" -#include "pol/simple_pff.h" +#include "pff/pol.h"  struct pff { -        struct pol_pff_ops * ops; -        struct pff_i *       pff_i; +        struct pff_ops * ops; +        struct pff_i *   pff_i;  };  struct pff * pff_create(enum pol_pff pol) @@ -62,8 +59,10 @@ struct pff * pff_create(enum pol_pff pol)          }          pff->pff_i = pff->ops->create(); -        if (pff->pff_i == NULL) +        if (pff->pff_i == NULL) { +                log_err("Failed to create PFF instance.");                  goto err; +        }          return pff;   err: diff --git a/src/ipcpd/unicast/pff.h b/src/ipcpd/unicast/pff.h index 3ac9d5fb..f44e5531 100644 --- a/src/ipcpd/unicast/pff.h +++ b/src/ipcpd/unicast/pff.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * PDU Forwarding Function   * diff --git a/src/ipcpd/unicast/pol/alternate_pff.c b/src/ipcpd/unicast/pff/alternate.c index 18d3dfed..85e85914 100644 --- a/src/ipcpd/unicast/pol/alternate_pff.c +++ b/src/ipcpd/unicast/pff/alternate.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Policy for PFF with alternate next hops   * @@ -28,7 +28,7 @@  #include <ouroboros/list.h>  #include "pft.h" -#include "alternate_pff.h" +#include "alternate.h"  #include <string.h>  #include <assert.h> @@ -54,7 +54,7 @@ struct pff_i {          pthread_rwlock_t lock;  }; -struct pol_pff_ops alternate_pff_ops = { +struct pff_ops alternate_pff_ops = {          .create            = alternate_pff_create,          .destroy           = alternate_pff_destroy,          .lock              = alternate_pff_lock, diff --git a/src/ipcpd/unicast/pol/alternate_pff.h b/src/ipcpd/unicast/pff/alternate.h index 9c7cc08f..96207e74 100644 --- a/src/ipcpd/unicast/pol/alternate_pff.h +++ b/src/ipcpd/unicast/pff/alternate.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 
2016 - 2024   *   * Policy for PFF with alternate next hops   * @@ -23,7 +23,7 @@  #ifndef OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H  #define OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H -#include "pol-pff-ops.h" +#include "ops.h"  struct pff_i * alternate_pff_create(void); @@ -56,6 +56,6 @@ int            alternate_flow_state_change(struct pff_i * pff_i,                                             int            fd,                                             bool           up); -extern struct pol_pff_ops alternate_pff_ops; +extern struct pff_ops alternate_pff_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_ALTERNATE_PFF_H */ diff --git a/src/ipcpd/unicast/pol/multipath_pff.c b/src/ipcpd/unicast/pff/multipath.c index 0d759ec4..cbab0f5f 100644 --- a/src/ipcpd/unicast/pol/multipath_pff.c +++ b/src/ipcpd/unicast/pff/multipath.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Policy for PFF supporting multipath routing   * @@ -28,7 +28,7 @@  #include <ouroboros/errno.h>  #include "pft.h" -#include "multipath_pff.h" +#include "multipath.h"  #include <string.h>  #include <assert.h> @@ -39,7 +39,7 @@ struct pff_i {          pthread_rwlock_t lock;  }; -struct pol_pff_ops multipath_pff_ops = { +struct pff_ops multipath_pff_ops = {          .create            = multipath_pff_create,          .destroy           = multipath_pff_destroy,          .lock              = multipath_pff_lock, @@ -58,21 +58,23 @@ struct pff_i * multipath_pff_create(void)          tmp = malloc(sizeof(*tmp));          if (tmp == NULL) -                return NULL; +                goto fail_malloc; -        if (pthread_rwlock_init(&tmp->lock, NULL)) { -                free(tmp); -                return NULL; -        } +        if (pthread_rwlock_init(&tmp->lock, NULL)) +                goto fail_rwlock;          tmp->pft = pft_create(PFT_SIZE, false); -        if (tmp->pft == NULL) { -                pthread_rwlock_destroy(&tmp->lock); -                free(tmp); -                return NULL; -        } +        if (tmp->pft == NULL) +                goto fail_pft;          return tmp; + + fail_pft: +        pthread_rwlock_destroy(&tmp->lock); + fail_rwlock: +        free(tmp); + fail_malloc: +        return NULL;  }  void multipath_pff_destroy(struct pff_i * pff_i) @@ -80,8 +82,8 @@ void multipath_pff_destroy(struct pff_i * pff_i)          assert(pff_i);          pft_destroy(pff_i->pft); -          pthread_rwlock_destroy(&pff_i->lock); +          free(pff_i);  } @@ -177,7 +179,7 @@ int multipath_pff_nhop(struct pff_i * pff_i,          assert(pff_i); -        pthread_rwlock_rdlock(&pff_i->lock); +        pthread_rwlock_wrlock(&pff_i->lock);          if (pft_lookup(pff_i->pft, addr, &fds, &len)) {                  pthread_rwlock_unlock(&pff_i->lock); @@ -189,7 +191,7 @@ int multipath_pff_nhop(struct pff_i * pff_i,          assert(len > 0);          /* Rotate fds left. 
*/ -        memcpy(fds, fds + 1, (len - 1) * sizeof(*fds)); +        memmove(fds, fds + 1, (len - 1) * sizeof(*fds));          fds[len - 1] = fd;          pthread_rwlock_unlock(&pff_i->lock); diff --git a/src/ipcpd/unicast/pol/multipath_pff.h b/src/ipcpd/unicast/pff/multipath.h index 8168995e..0eb03476 100644 --- a/src/ipcpd/unicast/pol/multipath_pff.h +++ b/src/ipcpd/unicast/pff/multipath.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Policy for PFF supporting multipath routing   * @@ -24,7 +24,7 @@  #ifndef OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H  #define OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H -#include "pol-pff-ops.h" +#include "ops.h"  struct pff_i * multipath_pff_create(void); @@ -53,6 +53,6 @@ void multipath_pff_flush(struct pff_i * pff_i);  int            multipath_pff_nhop(struct pff_i * pff_i,                                    uint64_t       addr); -extern struct pol_pff_ops multipath_pff_ops; +extern struct pff_ops multipath_pff_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_MULTIPATH_PFF_H */ diff --git a/src/ipcpd/unicast/pol-pff-ops.h b/src/ipcpd/unicast/pff/ops.h index 85615a1f..16a31273 100644 --- a/src/ipcpd/unicast/pol-pff-ops.h +++ b/src/ipcpd/unicast/pff/ops.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Pff policy ops   * @@ -20,14 +20,14 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   */ -#ifndef OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H -#define OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H +#ifndef OUROBOROS_IPCPD_UNICAST_PFF_OPS_H +#define OUROBOROS_IPCPD_UNICAST_PFF_OPS_H  #include <stdbool.h>  struct pff_i; -struct pol_pff_ops { +struct pff_ops {          struct pff_i * (* create)(void);          void           (* destroy)(struct pff_i * pff_i); @@ -60,4 +60,4 @@ struct pol_pff_ops {                                               bool           up);  }; -#endif /* OUROBOROS_IPCPD_UNICAST_POL_PFF_OPS_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_PFF_OPS_H */ diff --git a/src/ipcpd/unicast/pol/pft.c b/src/ipcpd/unicast/pff/pft.c index e42b4a98..8c436113 100644 --- a/src/ipcpd/unicast/pol/pft.c +++ b/src/ipcpd/unicast/pff/pft.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Packet forwarding table (PFT) with chaining on collisions   * @@ -115,19 +115,11 @@ void pft_flush(struct pft * pft)  static uint64_t hash(uint64_t key)  { -        void *   res; -        uint64_t ret; -        uint8_t  keys[4]; +        uint64_t res[2]; -        memcpy(keys, &key, 4); +        mem_hash(HASH_MD5, res, (uint8_t *) &key, sizeof(key)); -        mem_hash(HASH_MD5, &res, keys, 4); - -        ret = (* (uint64_t *) res); - -        free(res); - -        return ret; +        return res[0];  }  static uint64_t calc_key(struct pft * pft, diff --git a/src/ipcpd/unicast/pol/pft.h b/src/ipcpd/unicast/pff/pft.h index 011ad414..711dabcb 100644 --- a/src/ipcpd/unicast/pol/pft.h +++ b/src/ipcpd/unicast/pff/pft.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Packet forwarding table (PFT) with chaining on collisions   * diff --git a/src/ipcpd/unicast/pff/pol.h b/src/ipcpd/unicast/pff/pol.h new file mode 100644 index 00000000..245b03c4 --- /dev/null +++ b/src/ipcpd/unicast/pff/pol.h @@ -0,0 +1,25 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * PDU Forwarding Function policies + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    
Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#include "alternate.h" +#include "multipath.h" +#include "simple.h" diff --git a/src/ipcpd/unicast/pol/simple_pff.c b/src/ipcpd/unicast/pff/simple.c index 13944aed..5f95e3ce 100644 --- a/src/ipcpd/unicast/pol/simple_pff.c +++ b/src/ipcpd/unicast/pff/simple.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Simple PDU Forwarding Function   * @@ -27,7 +27,7 @@  #include <ouroboros/errno.h>  #include "pft.h" -#include "simple_pff.h" +#include "simple.h"  #include <assert.h>  #include <pthread.h> @@ -37,7 +37,7 @@ struct pff_i {          pthread_rwlock_t lock;  }; -struct pol_pff_ops simple_pff_ops = { +struct pff_ops simple_pff_ops = {          .create            = simple_pff_create,          .destroy           = simple_pff_destroy,          .lock              = simple_pff_lock, diff --git a/src/ipcpd/unicast/pol/simple_pff.h b/src/ipcpd/unicast/pff/simple.h index 2b22c130..0966a186 100644 --- a/src/ipcpd/unicast/pol/simple_pff.h +++ b/src/ipcpd/unicast/pff/simple.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Simple policy for PFF   * @@ -23,7 +23,7 @@  #ifndef OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H  #define OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H -#include "pol-pff-ops.h" +#include "ops.h"  struct pff_i * simple_pff_create(void); @@ -52,6 +52,6 @@ void           simple_pff_flush(struct pff_i * pff_i);  int            simple_pff_nhop(struct pff_i * pff_i,                                 uint64_t       addr); -extern struct pol_pff_ops simple_pff_ops; +extern struct pff_ops simple_pff_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_SIMPLE_PFF_H */ diff --git a/src/ipcpd/unicast/tests/CMakeLists.txt b/src/ipcpd/unicast/pff/tests/CMakeLists.txt index 482711d5..65705714 100644 --- a/src/ipcpd/unicast/tests/CMakeLists.txt +++ b/src/ipcpd/unicast/pff/tests/CMakeLists.txt @@ -17,19 +17,20 @@ get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)  create_test_sourcelist(${PARENT_DIR}_tests test_suite.c    # Add new tests here -  dht_test.c +  pft_test.c    ) -protobuf_generate_c(KAD_PROTO_SRCS KAD_PROTO_HDRS ../kademlia.proto) - -add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests} -  ${KAD_PROTO_SRCS}) +add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests})  target_link_libraries(${PARENT_DIR}_test ouroboros-common)  add_dependencies(check ${PARENT_DIR}_test)  set(tests_to_run ${${PARENT_DIR}_tests}) -remove(tests_to_run test_suite.c) +if(CMAKE_VERSION VERSION_LESS "3.29.0") +  remove(tests_to_run test_suite.c) +else () +  list(POP_FRONT tests_to_run) +endif()  foreach (test ${tests_to_run})    get_filename_component(test_name ${test} NAME_WE) diff --git a/src/ipcpd/unicast/pol/tests/pft_test.c b/src/ipcpd/unicast/pff/tests/pft_test.c index c48267eb..18287fb8 
100644 --- a/src/ipcpd/unicast/pol/tests/pft_test.c +++ b/src/ipcpd/unicast/pff/tests/pft_test.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Test of the hash table   * diff --git a/src/ipcpd/unicast/psched.c b/src/ipcpd/unicast/psched.c index 33ac5afe..7e12148b 100644 --- a/src/ipcpd/unicast/psched.c +++ b/src/ipcpd/unicast/psched.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Packet scheduler component   * @@ -50,6 +50,7 @@ static int qos_prio [] = {  struct psched {          fset_t *         set[QOS_CUBE_MAX];          next_packet_fn_t callback; +        read_fn_t        read;          pthread_t        readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];  }; @@ -101,7 +102,7 @@ static void * packet_reader(void * o)                                  notifier_event(NOTIFY_DT_FLOW_UP, &fd);                                  break;                          case FLOW_PKT: -                                if (ipcp_flow_read(fd, &sdb)) +                                if (sched->read(fd, &sdb) < 0)                                          continue;                                  sched->callback(fd, qc, sdb); @@ -117,7 +118,8 @@ static void * packet_reader(void * o)          return (void *) 0;  } -struct psched * psched_create(next_packet_fn_t callback) +struct psched * psched_create(next_packet_fn_t callback, +                              read_fn_t        read)  {          struct psched *       psched;          struct sched_info *   infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; @@ -131,6 +133,7 @@ struct psched * psched_create(next_packet_fn_t callback)                  goto fail_malloc;          psched->callback = callback; +        psched->read     = read;          for (i = 0; i < QOS_CUBE_MAX; ++i) {                  psched->set[i] = fset_create(); diff --git a/src/ipcpd/unicast/psched.h b/src/ipcpd/unicast/psched.h index 1f22b34b..831f8084 100644 --- a/src/ipcpd/unicast/psched.h +++ b/src/ipcpd/unicast/psched.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Packet scheduler component   * @@ -30,7 +30,11 @@ typedef void (* next_packet_fn_t)(int                  fd,                                    qoscube_t            qc,                                    struct shm_du_buff * sdb); -struct psched * psched_create(next_packet_fn_t callback); +typedef int (* read_fn_t)(int                   fd, +                          struct shm_du_buff ** sdb); + +struct psched * psched_create(next_packet_fn_t callback, +                              read_fn_t        read);  void            psched_destroy(struct psched * psched); diff --git a/src/ipcpd/unicast/routing.c b/src/ipcpd/unicast/routing.c index 1b13ae0e..2ad7b234 100644 --- a/src/ipcpd/unicast/routing.c +++ b/src/ipcpd/unicast/routing.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Routing component of the IPCP   * @@ -26,35 +26,30 @@  #include "pff.h"  #include "routing.h" -#include "pol/link_state.h" +#include "routing/pol.h" -struct pol_routing_ops * r_ops; +struct routing_ops * r_ops; -int routing_init(enum pol_routing pr) +int routing_init(struct routing_config * conf, +                 enum pol_pff *          pff_type)  { -        enum pol_pff pff_type; +        void * cfg; -        switch (pr) { +        switch (conf->pol) {          case ROUTING_LINK_STATE: -                pff_type = 
PFF_SIMPLE; -                r_ops = &link_state_ops; -                break; -        case ROUTING_LINK_STATE_LFA: -                pff_type = PFF_ALTERNATE; -                r_ops = &link_state_ops; -                break; -        case ROUTING_LINK_STATE_ECMP: -                pff_type=PFF_MULTIPATH;                  r_ops = &link_state_ops; +                cfg = &conf->ls;                  break;          default:                  return -ENOTSUP;          } -        if (r_ops->init(pr)) -                return -1; +        return r_ops->init(cfg, pff_type); +} -        return pff_type; +int routing_start(void) +{ +        return r_ops->start();  }  struct routing_i * routing_i_create(struct pff * pff) @@ -67,6 +62,11 @@ void routing_i_destroy(struct routing_i * instance)          return r_ops->routing_i_destroy(instance);  } +void routing_stop(void) +{ +        r_ops->stop(); +} +  void routing_fini(void)  {          r_ops->fini(); diff --git a/src/ipcpd/unicast/routing.h b/src/ipcpd/unicast/routing.h index 2eaaeb68..e14960b5 100644 --- a/src/ipcpd/unicast/routing.h +++ b/src/ipcpd/unicast/routing.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Routing component of the IPCP   * @@ -30,10 +30,15 @@  #include <stdint.h> -int                routing_init(enum pol_routing pr); +int                routing_init(struct routing_config * conf, +                                enum pol_pff *          pff_type);  void               routing_fini(void); +int                routing_start(void); + +void               routing_stop(void); +  struct routing_i * routing_i_create(struct pff * pff);  void               routing_i_destroy(struct routing_i * instance); diff --git a/src/ipcpd/unicast/pol/graph.c b/src/ipcpd/unicast/routing/graph.c index 6ea5c507..32442dad 100644 --- a/src/ipcpd/unicast/pol/graph.c +++ b/src/ipcpd/unicast/routing/graph.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Undirected graph structure   * @@ -57,8 +57,11 @@ struct edge {  };  struct graph { -        size_t           nr_vertices; -        struct list_head vertices; +        struct { +                struct list_head list; +                size_t len; +        } vertices; +          pthread_mutex_t  lock;  }; @@ -67,7 +70,7 @@ static struct edge * find_edge_by_addr(struct vertex * vertex,  {          struct list_head * p; -        assert(vertex); +        assert(vertex != NULL);          list_for_each(p, &vertex->edges) {                  struct edge * e = list_entry(p, struct edge, next); @@ -85,7 +88,7 @@ static struct vertex * find_vertex_by_addr(struct graph * graph,          assert(graph); -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  struct vertex * e = list_entry(p, struct vertex, next);                  if (e->addr == addr)                          return e; @@ -99,8 +102,8 @@ static struct edge * add_edge(struct vertex * vertex,  {          struct edge * edge; -        assert(vertex); -        assert(nb); +        assert(vertex != NULL); +        assert(nb != NULL);          edge = malloc(sizeof(*edge));          if (edge == NULL) @@ -139,7 +142,7 @@ static struct vertex * add_vertex(struct graph * graph,          vertex->addr = addr;          /* Keep them ordered on address. 
*/ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  struct vertex * v = list_entry(p, struct vertex, next);                  if (v->addr > addr)                          break; @@ -151,13 +154,13 @@ static struct vertex * add_vertex(struct graph * graph,          list_add_tail(&vertex->next, p);          /* Increase the index of the vertices to the right. */ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &vertex->next) {                  struct vertex * v = list_entry(p, struct vertex, next);                  if (v->addr > addr)                          v->index++;          } -        graph->nr_vertices++; +        ++graph->vertices.len;          return vertex;  } @@ -168,13 +171,13 @@ static void del_vertex(struct graph *  graph,          struct list_head * p;          struct list_head * h; -        assert(graph); -        assert(vertex); +        assert(graph != NULL); +        assert(vertex != NULL);          list_del(&vertex->next);          /* Decrease the index of the vertices to the right. */ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  struct vertex * v = list_entry(p, struct vertex, next);                  if (v->addr > vertex->addr)                          v->index--; @@ -187,7 +190,7 @@ static void del_vertex(struct graph *  graph,          free(vertex); -        graph->nr_vertices--; +        --graph->vertices.len;  }  struct graph * graph_create(void) @@ -203,8 +206,8 @@ struct graph * graph_create(void)                  return NULL;          } -        graph->nr_vertices = 0; -        list_head_init(&graph->vertices); +        graph->vertices.len = 0; +        list_head_init(&graph->vertices.list);          return graph;  } @@ -218,7 +221,7 @@ void graph_destroy(struct graph * graph)          pthread_mutex_lock(&graph->lock); -        list_for_each_safe(p, n, &graph->vertices) { +        list_for_each_safe(p, n, &graph->vertices.list) {                  struct vertex * e = list_entry(p, struct vertex, next);                  del_vertex(graph, e);          } @@ -227,6 +230,8 @@ void graph_destroy(struct graph * graph)          pthread_mutex_destroy(&graph->lock); +        assert(graph->vertices.len == 0); +          free(graph);  } @@ -240,63 +245,35 @@ int graph_update_edge(struct graph * graph,          struct vertex * nb;          struct edge *   nb_e; -        assert(graph); +        assert(graph != NULL);          pthread_mutex_lock(&graph->lock);          v = find_vertex_by_addr(graph, s_addr); -        if (v == NULL) { -                v = add_vertex(graph, s_addr); -                if (v == NULL) { -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add vertex."); -                        return -ENOMEM; -                } +        if (v == NULL && ((v = add_vertex(graph, s_addr)) == NULL)) { +                log_err("Failed to add src vertex."); +                goto fail_add_s;          }          nb = find_vertex_by_addr(graph, d_addr); -        if (nb == NULL) { -                nb = add_vertex(graph, d_addr); -                if (nb == NULL) { -                        if (list_is_empty(&v->edges)) -                                del_vertex(graph, v); -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add vertex."); -                        return -ENOMEM; -                } +        if (nb == NULL && 
((nb = add_vertex(graph, d_addr)) == NULL)) { +                log_err("Failed to add dst vertex."); +                goto fail_add_d;          }          e = find_edge_by_addr(v, d_addr); -        if (e == NULL) { -                e = add_edge(v, nb); -                if (e == NULL) { -                        if (list_is_empty(&v->edges)) -                                del_vertex(graph, v); -                        if (list_is_empty(&nb->edges)) -                                del_vertex(graph, nb); -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add edge."); -                        return -ENOMEM; -                } +        if (e == NULL && ((e = add_edge(v, nb)) == NULL)) { +                log_err("Failed to add edge to dst."); +                goto fail_add_edge_d;          }          e->announced++;          e->qs = qs;          nb_e = find_edge_by_addr(nb, s_addr); -        if (nb_e == NULL) { -                nb_e = add_edge(nb, v); -                if (nb_e == NULL) { -                        if (--e->announced == 0) -                                del_edge(e); -                        if (list_is_empty(&v->edges)) -                                del_vertex(graph, v); -                        if (list_is_empty(&nb->edges)) -                                del_vertex(graph, nb); -                        pthread_mutex_unlock(&graph->lock); -                        log_err("Failed to add edge."); -                        return -ENOMEM; -                } +        if (nb_e == NULL && ((nb_e = add_edge(nb, v)) == NULL)) { +                log_err("Failed to add edge to src."); +                goto fail_add_edge_s;          }          nb_e->announced++; @@ -305,6 +282,19 @@ int graph_update_edge(struct graph * graph,          pthread_mutex_unlock(&graph->lock);          return 0; + fail_add_edge_s: +        if (--e->announced == 0) +                del_edge(e); + fail_add_edge_d: +        if (list_is_empty(&nb->edges)) +                del_vertex(graph, nb); + fail_add_d: +        if (list_is_empty(&v->edges)) +                del_vertex(graph, v); + fail_add_s: +        pthread_mutex_unlock(&graph->lock); +        return -ENOMEM; +  }  int graph_del_edge(struct graph * graph, @@ -322,30 +312,26 @@ int graph_del_edge(struct graph * graph,          v = find_vertex_by_addr(graph, s_addr);          if (v == NULL) { -                pthread_mutex_unlock(&graph->lock); -                log_err("No such source vertex."); -                return -1; +                log_err("Failed to find src vertex."); +                goto fail;          }          nb = find_vertex_by_addr(graph, d_addr);          if (nb == NULL) { -                pthread_mutex_unlock(&graph->lock);                  log_err("No such destination vertex."); -                return -1; +                goto fail;          }          e = find_edge_by_addr(v, d_addr);          if (e == NULL) { -                pthread_mutex_unlock(&graph->lock);                  log_err("No such source edge."); -                return -1; +                goto fail;          }          nb_e = find_edge_by_addr(nb, s_addr);          if (nb_e == NULL) { -                pthread_mutex_unlock(&graph->lock);                  log_err("No such destination edge."); -                return -1; +                goto fail;          }          if (--e->announced == 0) @@ -362,6 +348,10 @@ int graph_del_edge(struct graph * graph,          pthread_mutex_unlock(&graph->lock);          return 0; 
+ + fail: +        pthread_mutex_unlock(&graph->lock); +        return -1;  }  static int get_min_vertex(struct graph *   graph, @@ -381,7 +371,7 @@ static int get_min_vertex(struct graph *   graph,          *v = NULL; -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  if (!used[i] && dist[i] < min) {                          min = dist[i];                          index = i; @@ -413,24 +403,24 @@ static int dijkstra(struct graph *    graph,          assert(nhops);          assert(dist); -        *nhops = malloc(sizeof(**nhops) * graph->nr_vertices); +        *nhops = malloc(sizeof(**nhops) * graph->vertices.len);          if (*nhops == NULL)                  goto fail_pnhops; -        *dist = malloc(sizeof(**dist) * graph->nr_vertices); +        *dist = malloc(sizeof(**dist) * graph->vertices.len);          if (*dist == NULL)                  goto fail_pdist; -        used = malloc(sizeof(*used) * graph->nr_vertices); +        used = malloc(sizeof(*used) * graph->vertices.len);          if (used == NULL)                  goto fail_used;          /* Init the data structures */ -        memset(used, 0, sizeof(*used) * graph->nr_vertices); -        memset(*nhops, 0, sizeof(**nhops) * graph->nr_vertices); -        memset(*dist, 0, sizeof(**dist) * graph->nr_vertices); +        memset(used, 0, sizeof(*used) * graph->vertices.len); +        memset(*nhops, 0, sizeof(**nhops) * graph->vertices.len); +        memset(*dist, 0, sizeof(**dist) * graph->vertices.len); -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  (*dist)[i++]  = (v->addr == src) ? 0 : INT_MAX;          } @@ -527,7 +517,7 @@ static int graph_routing_table_simple(struct graph *     graph,          assert(dist);          /* We need at least 2 vertices for a table */ -        if (graph->nr_vertices < 2) +        if (graph->vertices.len < 2)                  goto fail_vertices;          if (dijkstra(graph, s_addr, &nhops, dist)) @@ -536,7 +526,7 @@ static int graph_routing_table_simple(struct graph *     graph,          list_head_init(table);          /* Now construct the routing table from the nhops. */ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  /* This is the src */ @@ -634,7 +624,7 @@ static int graph_routing_table_lfa(struct graph *     graph,                  addrs[j] = -1;          } -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  if (v->addr != s_addr) @@ -660,7 +650,7 @@ static int graph_routing_table_lfa(struct graph *     graph,          }          /* Loop though all nodes to see if we have a LFA for them. 
*/ -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  if (v->addr == s_addr) @@ -717,14 +707,14 @@ static int graph_routing_table_ecmp(struct graph *     graph,          assert(graph);          assert(dist); -        if (graph-> nr_vertices < 2) +        if (graph->vertices.len < 2)                  goto fail_vertices; -        forwarding = malloc(sizeof(*forwarding) * graph->nr_vertices); +        forwarding = malloc(sizeof(*forwarding) * graph->vertices.len);          if (forwarding == NULL)                  goto fail_vertices; -        for (i = 0; i < graph->nr_vertices; ++i) +        for (i = 0; i < graph->vertices.len; ++i)                  list_head_init(&forwarding[i]);          if (dijkstra(graph, s_addr, &nhops, dist)) @@ -745,7 +735,7 @@ static int graph_routing_table_ecmp(struct graph *     graph,                  free(nhops); -                list_for_each(h, &graph->vertices) { +                list_for_each(h, &graph->vertices.list) {                          v = list_entry(h, struct vertex, next);                          if (tmp_dist[v->index] + 1 == (*dist)[v->index]) {                                  n = malloc(sizeof(*n)); @@ -763,7 +753,7 @@ static int graph_routing_table_ecmp(struct graph *     graph,          list_head_init(table);          i = 0; -        list_for_each(p, &graph->vertices) { +        list_for_each(p, &graph->vertices.list) {                  v = list_entry(p, struct vertex, next);                  if (v->addr == s_addr) {                          ++i; diff --git a/src/ipcpd/unicast/pol/graph.h b/src/ipcpd/unicast/routing/graph.h index 632cc5a0..8190cc6c 100644 --- a/src/ipcpd/unicast/pol/graph.h +++ b/src/ipcpd/unicast/routing/graph.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Undirected graph structure   * diff --git a/src/ipcpd/unicast/pol/link_state.c b/src/ipcpd/unicast/routing/link-state.c index 08d39372..e5edf539 100644 --- a/src/ipcpd/unicast/pol/link_state.c +++ b/src/ipcpd/unicast/routing/link-state.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Link state routing policy   * @@ -42,11 +42,12 @@  #include <ouroboros/rib.h>  #include <ouroboros/utils.h> +#include "addr-auth.h"  #include "common/comp.h"  #include "common/connmgr.h"  #include "graph.h"  #include "ipcp.h" -#include "link_state.h" +#include "link-state.h"  #include "pff.h"  #include <assert.h> @@ -54,9 +55,6 @@  #include <inttypes.h>  #include <string.h> -#define RECALC_TIME    4 -#define LS_UPDATE_TIME 15 -#define LS_TIMEO       60  #define LS_ENTRY_SIZE  104  #define LSDB           "lsdb" @@ -64,6 +62,12 @@  #define CLOCK_REALTIME_COARSE CLOCK_REALTIME  #endif +#define LINK_FMT ADDR_FMT32 "--" ADDR_FMT32 +#define LINK_VAL(src, dst) ADDR_VAL32(&src), ADDR_VAL32(&dst) + +#define LSU_FMT "LSU ["ADDR_FMT32 " -- " ADDR_FMT32 " seq: %09" PRIu64 "]" +#define LSU_VAL(src, dst, seqno) ADDR_VAL32(&src), ADDR_VAL32(&dst), seqno +  struct lsa {          uint64_t d_addr;          uint64_t s_addr; @@ -106,30 +110,45 @@ struct nb {  };  struct { -        struct list_head  nbs; -        size_t            nbs_len; +        uint64_t          addr; + +        enum routing_algo routing_algo; + +        struct ls_config  conf; +          fset_t *          mgmt_set; -        struct list_head  db; -        size_t            db_len; +        struct 
graph * graph; + +        struct { +                struct { +                        struct list_head list; +                        size_t           len; +                } nbs; + +                struct { +                        struct list_head list; +                        size_t           len; +                } db; -        pthread_rwlock_t  db_lock; +                pthread_rwlock_t lock; +        }; -        struct graph *    graph; +        struct { +                struct list_head list; +                pthread_mutex_t  mtx; +        } instances;          pthread_t         lsupdate;          pthread_t         lsreader;          pthread_t         listener; - -        struct list_head  routing_instances; -        pthread_mutex_t   routing_i_lock; - -        enum routing_algo routing_algo;  } ls; -struct pol_routing_ops link_state_ops = { -        .init              = link_state_init, +struct routing_ops link_state_ops = { +        .init              = (int (*)(void *, enum pol_pff *)) link_state_init,          .fini              = link_state_fini, +        .start             = link_state_start, +        .stop              = link_state_stop,          .routing_i_create  = link_state_routing_i_create,          .routing_i_destroy = link_state_routing_i_destroy  }; @@ -138,7 +157,7 @@ static int str_adj(struct adjacency * adj,                     char *             buf,                     size_t             len)  { -        char        tmbuf[64]; +        char        tmstr[RIB_TM_STRLEN];          char        srcbuf[64];          char        dstbuf[64];          char        seqnobuf[64]; @@ -149,15 +168,16 @@ static int str_adj(struct adjacency * adj,          if (len < LS_ENTRY_SIZE)                  return -1; -        tm = localtime(&adj->stamp); -        strftime(tmbuf, sizeof(tmbuf), "%F %T", tm); /* 19 chars */ +        tm = gmtime(&adj->stamp); +        strftime(tmstr, sizeof(tmstr), RIB_TM_FORMAT, tm); -        sprintf(srcbuf, "%" PRIu64, adj->src); -        sprintf(dstbuf, "%" PRIu64, adj->dst); +        sprintf(srcbuf, ADDR_FMT32, ADDR_VAL32(&adj->src)); +        sprintf(dstbuf, ADDR_FMT32, ADDR_VAL32(&adj->dst));          sprintf(seqnobuf, "%" PRIu64, adj->seqno); -        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\nupd: %20s\n", -                srcbuf, dstbuf, seqnobuf, tmbuf); +        sprintf(buf, "src: %20s\ndst: %20s\nseqno: %18s\n" +                "upd: %s\n", +                srcbuf, dstbuf, seqnobuf, tmstr);          return LS_ENTRY_SIZE;  } @@ -169,9 +189,9 @@ static struct adjacency * get_adj(const char * path)          assert(path); -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next); -                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst); +                sprintf(entry, LINK_FMT, LINK_VAL(a->src, a->dst));                  if (strcmp(entry, path) == 0)                          return a;          } @@ -194,7 +214,7 @@ static int lsdb_rib_getattr(const char *      path,          clock_gettime(CLOCK_REALTIME_COARSE, &now); -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock);          adj = get_adj(entry);          if (adj != NULL) { @@ -205,7 +225,7 @@ static int lsdb_rib_getattr(const char *      path,                  attr->size  = 0;          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return 0;  } @@ -223,9 +243,9 @@ static int lsdb_rib_read(const 
char * path,          entry = strstr(path, RIB_SEPARATOR) + 1;          assert(entry); -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        if (ls.db_len + ls.nbs_len == 0) +        if (ls.db.len + ls.nbs.len == 0)                  goto fail;          a = get_adj(entry); @@ -236,11 +256,11 @@ static int lsdb_rib_read(const char * path,          if (size < 0)                  goto fail; -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return size;   fail: -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -1;  } @@ -250,60 +270,52 @@ static int lsdb_rib_readdir(char *** buf)          char               entry[RIB_PATH_LEN + 1];          ssize_t            idx = 0; -        assert(buf); +        assert(buf != NULL); -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        if (ls.db_len + ls.nbs_len == 0) { -                pthread_rwlock_unlock(&ls.db_lock); -                return 0; +        if (ls.db.len + ls.nbs.len == 0) { +                *buf = NULL; +                goto no_entries;          } -        *buf = malloc(sizeof(**buf) * (ls.db_len + ls.nbs_len)); -        if (*buf == NULL) { -                pthread_rwlock_unlock(&ls.db_lock); -                return -ENOMEM; -        } -        list_for_each(p, &ls.nbs) { +        *buf = malloc(sizeof(**buf) * (ls.db.len + ls.nbs.len)); +        if (*buf == NULL) +                goto fail_entries; + +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                char * str = (nb->type == NB_DT ? "dt." : "mgmt."); -                sprintf(entry, "%s%" PRIu64, str, nb->addr); +                char * str = (nb->type == NB_DT ? 
".dt " : ".mgmt "); +                sprintf(entry, "%s" ADDR_FMT32 , str, ADDR_VAL32(&nb->addr));                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        while (idx-- > 0) -                                free((*buf)[idx]); -                        free(buf); -                        pthread_rwlock_unlock(&ls.db_lock); -                        return -ENOMEM; -                } - -                strcpy((*buf)[idx], entry); +                if ((*buf)[idx] == NULL) +                        goto fail_entry; -                idx++; +                strcpy((*buf)[idx++], entry);          } -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next); -                sprintf(entry, "%" PRIu64 ".%" PRIu64, a->src, a->dst); +                sprintf(entry,  LINK_FMT, LINK_VAL(a->src, a->dst));                  (*buf)[idx] = malloc(strlen(entry) + 1); -                if ((*buf)[idx] == NULL) { -                        ssize_t j; -                        for (j = 0; j < idx; ++j) -                                free(*buf[j]); -                        free(buf); -                        pthread_rwlock_unlock(&ls.db_lock); -                        return -ENOMEM; -                } - -                strcpy((*buf)[idx], entry); +                if ((*buf)[idx] == NULL) +                        goto fail_entry; -                idx++; +                strcpy((*buf)[idx++], entry);          } - -        pthread_rwlock_unlock(&ls.db_lock); + no_entries: +        pthread_rwlock_unlock(&ls.lock);          return idx; + + fail_entry: +        while (idx-- > 0) +                free((*buf)[idx]); +        free(*buf); + fail_entries: +        pthread_rwlock_unlock(&ls.lock); +        return -ENOMEM;  }  static struct rib_ops r_ops = { @@ -319,28 +331,28 @@ static int lsdb_add_nb(uint64_t     addr,          struct list_head * p;          struct nb *        nb; -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * el = list_entry(p, struct nb, next); -                if (el->addr == addr && el->type == type) { -                        log_dbg("Already know %s neighbor %" PRIu64 ".", -                                type == NB_DT ? "dt" : "mgmt", addr); -                        if (el->fd != fd) { -                                log_warn("Existing neighbor assigned new fd."); -                                el->fd = fd; -                        } -                        pthread_rwlock_unlock(&ls.db_lock); -                        return -EPERM; -                } -                  if (addr > el->addr)                          break; +                if (el->addr != addr || el->type != type) +                        continue; + +                log_dbg("Already know %s neighbor " ADDR_FMT32 ".", +                        type == NB_DT ? 
"dt" : "mgmt", ADDR_VAL32(&addr)); +                if (el->fd != fd) { +                        log_warn("Existing neighbor assigned new fd."); +                        el->fd = fd; +                } +                pthread_rwlock_unlock(&ls.lock); +                return -EPERM;          }          nb = malloc(sizeof(*nb));          if (nb == NULL) { -                pthread_rwlock_unlock(&ls.db_lock); +                pthread_rwlock_unlock(&ls.lock);                  return -ENOMEM;          } @@ -350,12 +362,12 @@ static int lsdb_add_nb(uint64_t     addr,          list_add_tail(&nb->next, p); -        ++ls.nbs_len; +        ++ls.nbs.len; -        log_dbg("Type %s neighbor %" PRIu64 " added.", -                nb->type == NB_DT ? "dt" : "mgmt", addr); +        log_dbg("Type %s neighbor " ADDR_FMT32 " added.", +                nb->type == NB_DT ? "dt" : "mgmt", ADDR_VAL32(&addr)); -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return 0;  } @@ -366,22 +378,23 @@ static int lsdb_del_nb(uint64_t addr,          struct list_head * p;          struct list_head * h; -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each_safe(p, h, &ls.nbs) { +        list_for_each_safe(p, h, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                if (nb->addr == addr && nb->fd == fd) { -                        list_del(&nb->next); -                        --ls.nbs_len; -                        pthread_rwlock_unlock(&ls.db_lock); -                        log_dbg("Type %s neighbor %" PRIu64 " deleted.", -                                nb->type == NB_DT ? "dt" : "mgmt", addr); -                        free(nb); -                        return 0; -                } +                if (nb->addr != addr || nb->fd != fd) +                        continue; + +                list_del(&nb->next); +                --ls.nbs.len; +                pthread_rwlock_unlock(&ls.lock); +                log_dbg("Type %s neighbor " ADDR_FMT32 " deleted.", +                        nb->type == NB_DT ? 
"dt" : "mgmt", ADDR_VAL32(&addr)); +                free(nb); +                return 0;          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -EPERM;  } @@ -391,18 +404,18 @@ static int nbr_to_fd(uint64_t addr)          struct list_head * p;          int                fd; -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next);                  if (nb->addr == addr && nb->type == NB_DT) {                          fd = nb->fd; -                        pthread_rwlock_unlock(&ls.db_lock); +                        pthread_rwlock_unlock(&ls.lock);                          return fd;                  }          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -1;  } @@ -417,8 +430,7 @@ static void calculate_pff(struct routing_i * instance)          assert(instance); -        if (graph_routing_table(ls.graph, ls.routing_algo, -                                ipcpi.dt_addr, &table)) +        if (graph_routing_table(ls.graph, ls.routing_algo, ls.addr, &table))                  return;          pff_lock(instance->pff); @@ -453,8 +465,8 @@ static void set_pff_modified(bool calc)  {          struct list_head * p; -        pthread_mutex_lock(&ls.routing_i_lock); -        list_for_each(p, &ls.routing_instances) { +        pthread_mutex_lock(&ls.instances.mtx); +        list_for_each(p, &ls.instances.list) {                  struct routing_i * inst =                          list_entry(p, struct routing_i, next);                  pthread_mutex_lock(&inst->lock); @@ -463,7 +475,7 @@ static void set_pff_modified(bool calc)                  if (calc)                          calculate_pff(inst);          } -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);  }  static int lsdb_add_link(uint64_t    src, @@ -480,9 +492,9 @@ static int lsdb_add_link(uint64_t    src,          clock_gettime(CLOCK_REALTIME_COARSE, &now); -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next);                  if (a->dst == dst && a->src == src) {                          if (a->seqno < seqno) { @@ -490,7 +502,7 @@ static int lsdb_add_link(uint64_t    src,                                  a->seqno = seqno;                                  ret = 0;                          } -                        pthread_rwlock_unlock(&ls.db_lock); +                        pthread_rwlock_unlock(&ls.lock);                          return ret;                  } @@ -500,7 +512,7 @@ static int lsdb_add_link(uint64_t    src,          adj = malloc(sizeof(*adj));          if (adj == NULL) { -                pthread_rwlock_unlock(&ls.db_lock); +                pthread_rwlock_unlock(&ls.lock);                  return -ENOMEM;          } @@ -511,12 +523,12 @@ static int lsdb_add_link(uint64_t    src,          list_add_tail(&adj->next, p); -        ls.db_len++; +        ls.db.len++;          if (graph_update_edge(ls.graph, src, dst, *qs))                  log_warn("Failed to add edge to graph."); -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          set_pff_modified(true); @@ -529,25 +541,25 @@ 
static int lsdb_del_link(uint64_t src,          struct list_head * p;          struct list_head * h; -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each_safe(p, h, &ls.db) { +        list_for_each_safe(p, h, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next);                  if (a->dst == dst && a->src == src) {                          list_del(&a->next);                          if (graph_del_edge(ls.graph, src, dst))                                  log_warn("Failed to delete edge from graph."); -                        ls.db_len--; +                        ls.db.len--; -                        pthread_rwlock_unlock(&ls.db_lock); +                        pthread_rwlock_unlock(&ls.lock);                          set_pff_modified(false);                          free(a);                          return 0;                  }          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          return -EPERM;  } @@ -570,7 +582,7 @@ static void * periodic_recalc_pff(void * o)                  if (modified)                          calculate_pff(inst); -                sleep(RECALC_TIME); +                sleep(ls.conf.t_recalc);          }          return (void *) 0; @@ -587,10 +599,20 @@ static void send_lsm(uint64_t src,          lsm.s_addr = hton64(src);          lsm.seqno  = hton64(seqno); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                if (nb->type == NB_MGMT) -                        flow_write(nb->fd, &lsm, sizeof(lsm)); +                if (nb->type != NB_MGMT) +                        continue; + +                if (flow_write(nb->fd, &lsm, sizeof(lsm)) < 0) +                        log_err("Failed to send LSM to " ADDR_FMT32, +                                ADDR_VAL32(&nb->addr)); +#ifdef DEBUG_PROTO_LS +                else +                        log_proto(LSU_FMT " --> " ADDR_FMT32, +                                LSU_VAL(src, dst, seqno), +                                ADDR_VAL32(&nb->addr)); +#endif          }  } @@ -604,9 +626,9 @@ static void lsdb_replicate(int fd)          list_head_init(©);          /* Lock the lsdb, copy the lsms and send outside of lock. 
*/ -        pthread_rwlock_rdlock(&ls.db_lock); +        pthread_rwlock_rdlock(&ls.lock); -        list_for_each(p, &ls.db) { +        list_for_each(p, &ls.db.list) {                  struct adjacency * adj;                  struct adjacency * cpy;                  adj = list_entry(p, struct adjacency, next); @@ -623,7 +645,7 @@ static void lsdb_replicate(int fd)                  list_add_tail(&cpy->next, ©);          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock);          list_for_each_safe(p, h, ©) {                  struct lsa         lsm; @@ -649,17 +671,17 @@ static void * lsupdate(void * o)          while (true) {                  clock_gettime(CLOCK_REALTIME_COARSE, &now); -                pthread_rwlock_wrlock(&ls.db_lock); +                pthread_rwlock_wrlock(&ls.lock); -                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); +                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock); -                list_for_each_safe(p, h, &ls.db) { +                list_for_each_safe(p, h, &ls.db.list) {                          struct adjacency * adj;                          adj = list_entry(p, struct adjacency, next); -                        if (now.tv_sec - adj->stamp > LS_TIMEO) { +                        if (now.tv_sec > adj->stamp + ls.conf.t_timeo) {                                  list_del(&adj->next); -                                log_dbg("%" PRIu64 " - %" PRIu64" timed out.", -                                        adj->src, adj->dst); +                                log_dbg(LINK_FMT " timed out.", +                                        LINK_VAL(adj->src, adj->dst));                                  if (graph_del_edge(ls.graph, adj->src,                                                     adj->dst))                                          log_err("Failed to del edge."); @@ -667,7 +689,7 @@ static void * lsupdate(void * o)                                  continue;                          } -                        if (adj->src == ipcpi.dt_addr) { +                        if (adj->src == ls.addr) {                                  adj->seqno++;                                  send_lsm(adj->src, adj->dst, adj->seqno);                                  adj->stamp = now.tv_sec; @@ -676,7 +698,7 @@ static void * lsupdate(void * o)                  pthread_cleanup_pop(true); -                sleep(LS_UPDATE_TIME); +                sleep(ls.conf.t_update);          }          return (void *) 0; @@ -708,15 +730,36 @@ static void forward_lsm(uint8_t * buf,                          int       in_fd)  {          struct list_head * p; +#ifdef DEBUG_PROTO_LS +        struct lsa lsm; -        pthread_rwlock_rdlock(&ls.db_lock); +        assert(buf); +        assert(len >= sizeof(struct lsa)); + +        memcpy(&lsm, buf, sizeof(lsm)); + +        lsm.s_addr = ntoh64(lsm.s_addr); +        lsm.d_addr = ntoh64(lsm.d_addr); +        lsm.seqno  = ntoh64(lsm.seqno); +#endif +        pthread_rwlock_rdlock(&ls.lock); -        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); +        pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock); -        list_for_each(p, &ls.nbs) { +        list_for_each(p, &ls.nbs.list) {                  struct nb * nb = list_entry(p, struct nb, next); -                if (nb->type == NB_MGMT && nb->fd != in_fd) -                        flow_write(nb->fd, buf, len); +                if (nb->type != NB_MGMT || nb->fd == in_fd) +                        continue; + +                
if (flow_write(nb->fd, buf, len) < 0) +                        log_err("Failed to forward LSM to " ADDR_FMT32, +                                ADDR_VAL32(&nb->addr)); +#ifdef DEBUG_PROTO_LS +                else +                        log_proto(LSU_FMT " --> " ADDR_FMT32 " [forwarded]", +                                LSU_VAL(lsm.s_addr, lsm.d_addr, lsm.seqno), +                                ADDR_VAL32(&nb->addr)); +#endif          }          pthread_cleanup_pop(true); @@ -729,13 +772,13 @@ static void cleanup_fqueue(void * fq)  static void * lsreader(void * o)  { -        fqueue_t *   fq; -        int          ret; -        uint8_t      buf[sizeof(struct lsa)]; -        int          fd; -        qosspec_t    qs; -        struct lsa * msg; -        size_t       len; +        fqueue_t * fq; +        int        ret; +        uint8_t    buf[sizeof(struct lsa)]; +        int        fd; +        qosspec_t  qs; +        struct lsa msg; +        size_t     len;          (void) o; @@ -758,15 +801,22 @@ static void * lsreader(void * o)                          if (fqueue_type(fq) != FLOW_PKT)                                  continue; -                        len = flow_read(fd, buf, sizeof(*msg)); -                        if (len <= 0 || len != sizeof(*msg)) +                        len = flow_read(fd, buf, sizeof(msg)); +                        if (len <= 0 || len != sizeof(msg))                                  continue; -                        msg = (struct lsa *) buf; - -                        if (lsdb_add_link(ntoh64(msg->s_addr), -                                          ntoh64(msg->d_addr), -                                          ntoh64(msg->seqno), +                        memcpy(&msg, buf, sizeof(msg)); +                        msg.s_addr = ntoh64(msg.s_addr); +                        msg.d_addr = ntoh64(msg.d_addr); +                        msg.seqno  = ntoh64(msg.seqno); +#ifdef DEBUG_PROTO_LS +                        log_proto(LSU_FMT " <-- " ADDR_FMT32, +                                  LSU_VAL(msg.s_addr, msg.d_addr, msg.seqno), +                                  ADDR_VAL32(&ls.addr)); +#endif +                        if (lsdb_add_link(msg.s_addr, +                                          msg.d_addr, +                                          msg.seqno,                                            &qs))                                  continue; @@ -787,14 +837,14 @@ static void flow_event(int  fd,          log_dbg("Notifying routing instances of flow event."); -        pthread_mutex_lock(&ls.routing_i_lock); +        pthread_mutex_lock(&ls.instances.mtx); -        list_for_each(p, &ls.routing_instances) { +        list_for_each(p, &ls.instances.list) {                  struct routing_i * ri = list_entry(p, struct routing_i, next);                  pff_flow_state_change(ri->pff, fd, up);          } -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);  }  static void handle_event(void *       self, @@ -816,17 +866,17 @@ static void handle_event(void *       self,          switch (event) {          case NOTIFY_DT_CONN_ADD: -                pthread_rwlock_rdlock(&ls.db_lock); +                pthread_rwlock_rdlock(&ls.lock); -                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.db_lock); +                pthread_cleanup_push(__cleanup_rwlock_unlock, &ls.lock); -                send_lsm(ipcpi.dt_addr, c->conn_info.addr, 0); +                send_lsm(ls.addr, c->conn_info.addr, 0);                  
pthread_cleanup_pop(true);                  if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT))                          log_dbg("Failed to add neighbor to LSDB."); -                if (lsdb_add_link(ipcpi.dt_addr, c->conn_info.addr, 0, &qs)) +                if (lsdb_add_link(ls.addr, c->conn_info.addr, 0, &qs))                          log_dbg("Failed to add new adjacency to LSDB.");                  break;          case NOTIFY_DT_CONN_DEL: @@ -835,7 +885,7 @@ static void handle_event(void *       self,                  if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))                          log_dbg("Failed to delete neighbor from LSDB."); -                if (lsdb_del_link(ipcpi.dt_addr, c->conn_info.addr)) +                if (lsdb_del_link(ls.addr, c->conn_info.addr))                          log_dbg("Local link was not in LSDB.");                  break;          case NOTIFY_DT_CONN_QOS: @@ -886,11 +936,11 @@ struct routing_i * link_state_routing_i_create(struct pff * pff)                             periodic_recalc_pff, tmp))                  goto fail_pthread_create_lsupdate; -        pthread_mutex_lock(&ls.routing_i_lock); +        pthread_mutex_lock(&ls.instances.mtx); -        list_add(&tmp->next, &ls.routing_instances); +        list_add(&tmp->next, &ls.instances.list); -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);          return tmp; @@ -906,11 +956,11 @@ void link_state_routing_i_destroy(struct routing_i * instance)  {          assert(instance); -        pthread_mutex_lock(&ls.routing_i_lock); +        pthread_mutex_lock(&ls.instances.mtx);          list_del(&instance->next); -        pthread_mutex_unlock(&ls.routing_i_lock); +        pthread_mutex_unlock(&ls.instances.mtx);          pthread_cancel(instance->calculator); @@ -921,96 +971,146 @@ void link_state_routing_i_destroy(struct routing_i * instance)          free(instance);  } -int link_state_init(enum pol_routing pr) +int link_state_start(void) +{ +        if (notifier_reg(handle_event, NULL)) { +                log_err("Failed to register link-state with notifier."); +                goto fail_notifier_reg; +        } + +        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) { +                log_err("Failed to create lsupdate thread."); +                goto fail_pthread_create_lsupdate; +        } + +        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) { +                log_err("Failed to create lsreader thread."); +                goto fail_pthread_create_lsreader; +        } + +        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) { +                log_err("Failed to create listener thread."); +                goto fail_pthread_create_listener; +        } + +        return 0; + + fail_pthread_create_listener: +        pthread_cancel(ls.lsreader); +        pthread_join(ls.lsreader, NULL); + fail_pthread_create_lsreader: +        pthread_cancel(ls.lsupdate); +        pthread_join(ls.lsupdate, NULL); + fail_pthread_create_lsupdate: +        notifier_unreg(handle_event); + fail_notifier_reg: +        return -1; +} + +void link_state_stop(void) +{ +        pthread_cancel(ls.listener); +        pthread_cancel(ls.lsreader); +        pthread_cancel(ls.lsupdate); + +        pthread_join(ls.listener, NULL); +        pthread_join(ls.lsreader, NULL); +        pthread_join(ls.lsupdate, NULL); + +        notifier_unreg(handle_event); +} + + +int link_state_init(struct ls_config * conf, +                    enum pol_pff *  
   pff_type)  {          struct conn_info info; +        assert(conf != NULL); +        assert(pff_type != NULL); +          memset(&info, 0, sizeof(info)); +        ls.addr = addr_auth_address(); +          strcpy(info.comp_name, LS_COMP);          strcpy(info.protocol, LS_PROTO);          info.pref_version = 1;          info.pref_syntax  = PROTO_GPB; -        info.addr         = ipcpi.dt_addr; +        info.addr         = ls.addr; -        switch (pr) { -        case ROUTING_LINK_STATE: -                log_dbg("Using link state routing policy."); +        ls.conf = *conf; + +        switch (conf->pol) { +        case LS_SIMPLE: +                *pff_type = PFF_SIMPLE;                  ls.routing_algo = ROUTING_SIMPLE; +                log_dbg("Using Link State Routing policy.");                  break; -        case ROUTING_LINK_STATE_LFA: -                log_dbg("Using Loop-Free Alternates policy."); +        case LS_LFA:                  ls.routing_algo = ROUTING_LFA; +                *pff_type = PFF_ALTERNATE; +                log_dbg("Using Loop-Free Alternates policy.");                  break; -        case ROUTING_LINK_STATE_ECMP: -                log_dbg("Using Equal-Cost Multipath policy."); +        case LS_ECMP:                  ls.routing_algo = ROUTING_ECMP; +                *pff_type = PFF_MULTIPATH; +                log_dbg("Using Equal-Cost Multipath policy.");                  break;          default:                  goto fail_graph;          } +        log_dbg("LS update interval: %ld seconds.", ls.conf.t_update); +        log_dbg("LS link timeout   : %ld seconds.", ls.conf.t_timeo); +        log_dbg("LS recalc interval: %ld seconds.", ls.conf.t_recalc); +          ls.graph = graph_create();          if (ls.graph == NULL)                  goto fail_graph; -        if (notifier_reg(handle_event, NULL)) -                goto fail_notifier_reg; - -        if (pthread_rwlock_init(&ls.db_lock, NULL)) -                goto fail_db_lock_init; +        if (pthread_rwlock_init(&ls.lock, NULL)) { +                log_err("Failed to init lock."); +                goto fail_lock_init; +        } -        if (pthread_mutex_init(&ls.routing_i_lock, NULL)) +        if (pthread_mutex_init(&ls.instances.mtx, NULL)) { +                log_err("Failed to init instances mutex.");                  goto fail_routing_i_lock_init; +        } -        if (connmgr_comp_init(COMPID_MGMT, &info)) +        if (connmgr_comp_init(COMPID_MGMT, &info)) { +                log_err("Failed to init connmgr.");                  goto fail_connmgr_comp_init; +        }          ls.mgmt_set = fset_create(); -        if (ls.mgmt_set == NULL) +        if (ls.mgmt_set == NULL) { +                log_err("Failed to create fset.");                  goto fail_fset_create; +        } -        list_head_init(&ls.db); -        list_head_init(&ls.nbs); -        list_head_init(&ls.routing_instances); - -        if (pthread_create(&ls.lsupdate, NULL, lsupdate, NULL)) -                goto fail_pthread_create_lsupdate; - -        if (pthread_create(&ls.lsreader, NULL, lsreader, NULL)) -                goto fail_pthread_create_lsreader; - -        if (pthread_create(&ls.listener, NULL, ls_conn_handle, NULL)) -                goto fail_pthread_create_listener; +        list_head_init(&ls.db.list); +        list_head_init(&ls.nbs.list); +        list_head_init(&ls.instances.list);          if (rib_reg(LSDB, &r_ops))                  goto fail_rib_reg; -        ls.db_len  = 0; -        ls.nbs_len = 0; +        ls.db.len  = 
0; +        ls.nbs.len = 0;          return 0;   fail_rib_reg: -        pthread_cancel(ls.listener); -        pthread_join(ls.listener, NULL); - fail_pthread_create_listener: -        pthread_cancel(ls.lsreader); -        pthread_join(ls.lsreader, NULL); - fail_pthread_create_lsreader: -        pthread_cancel(ls.lsupdate); -        pthread_join(ls.lsupdate, NULL); - fail_pthread_create_lsupdate:          fset_destroy(ls.mgmt_set);   fail_fset_create:          connmgr_comp_fini(COMPID_MGMT);   fail_connmgr_comp_init: -        pthread_mutex_destroy(&ls.routing_i_lock); +        pthread_mutex_destroy(&ls.instances.mtx);   fail_routing_i_lock_init: -        pthread_rwlock_destroy(&ls.db_lock); - fail_db_lock_init: -        notifier_unreg(handle_event); - fail_notifier_reg: +        pthread_rwlock_destroy(&ls.lock); + fail_lock_init:          graph_destroy(ls.graph);   fail_graph:          return -1; @@ -1023,33 +1123,23 @@ void link_state_fini(void)          rib_unreg(LSDB); -        notifier_unreg(handle_event); - -        pthread_cancel(ls.listener); -        pthread_cancel(ls.lsreader); -        pthread_cancel(ls.lsupdate); - -        pthread_join(ls.listener, NULL); -        pthread_join(ls.lsreader, NULL); -        pthread_join(ls.lsupdate, NULL); -          fset_destroy(ls.mgmt_set);          connmgr_comp_fini(COMPID_MGMT);          graph_destroy(ls.graph); -        pthread_rwlock_wrlock(&ls.db_lock); +        pthread_rwlock_wrlock(&ls.lock); -        list_for_each_safe(p, h, &ls.db) { +        list_for_each_safe(p, h, &ls.db.list) {                  struct adjacency * a = list_entry(p, struct adjacency, next);                  list_del(&a->next);                  free(a);          } -        pthread_rwlock_unlock(&ls.db_lock); +        pthread_rwlock_unlock(&ls.lock); -        pthread_rwlock_destroy(&ls.db_lock); +        pthread_rwlock_destroy(&ls.lock); -        pthread_mutex_destroy(&ls.routing_i_lock); +        pthread_mutex_destroy(&ls.instances.mtx);  } diff --git a/src/ipcpd/unicast/pol/link_state.h b/src/ipcpd/unicast/routing/link-state.h index 05b0ae5d..69eb6781 100644 --- a/src/ipcpd/unicast/pol/link_state.h +++ b/src/ipcpd/unicast/routing/link-state.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Link state routing policy   * @@ -26,16 +26,21 @@  #define LS_COMP  "Management"  #define LS_PROTO "LSP" -#include "pol-routing-ops.h" +#include "ops.h" -int                link_state_init(enum pol_routing pr); +int                link_state_init(struct ls_config * ls, +                                   enum pol_pff *     pff_type);  void               link_state_fini(void); +int                link_state_start(void); + +void               link_state_stop(void); +  struct routing_i * link_state_routing_i_create(struct pff * pff);  void               link_state_routing_i_destroy(struct routing_i * instance); -extern struct pol_routing_ops link_state_ops; +extern struct routing_ops link_state_ops;  #endif /* OUROBOROS_IPCPD_UNICAST_POL_LINK_STATE_H */ diff --git a/src/ipcpd/unicast/pol-routing-ops.h b/src/ipcpd/unicast/routing/ops.h index cea88582..4bf75c80 100644 --- a/src/ipcpd/unicast/pol-routing-ops.h +++ b/src/ipcpd/unicast/routing/ops.h @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Routing policy ops   * @@ -20,19 +20,24 @@   * Foundation, Inc., http://www.fsf.org/about/contact/.   
*/ -#ifndef OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H -#define OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H +#ifndef OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H +#define OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H  #include "pff.h" -struct pol_routing_ops { -        int                (* init)(enum pol_routing pr); +struct routing_ops { +        int                (* init)(void *         conf, +                                    enum pol_pff * pff_type);          void               (* fini)(void); +        int                (* start)(void); + +        void               (* stop)(void); +          struct routing_i * (* routing_i_create)(struct pff * pff);          void               (* routing_i_destroy)(struct routing_i * instance);  }; -#endif /* OUROBOROS_IPCPD_UNICAST_POL_ROUTING_OPS_H */ +#endif /* OUROBOROS_IPCPD_UNICAST_ROUTING_OPS_H */ diff --git a/src/ipcpd/unicast/routing/pol.h b/src/ipcpd/unicast/routing/pol.h new file mode 100644 index 00000000..b6a6f150 --- /dev/null +++ b/src/ipcpd/unicast/routing/pol.h @@ -0,0 +1,23 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2024 + * + * Routing policies + * + *    Dimitri Staessens <dimitri@ouroboros.rocks> + *    Sander Vrijders   <sander@ouroboros.rocks> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#include "link-state.h" diff --git a/src/ipcpd/unicast/pol/tests/CMakeLists.txt b/src/ipcpd/unicast/routing/tests/CMakeLists.txt index 34d80e8d..9d24bf03 100644 --- a/src/ipcpd/unicast/pol/tests/CMakeLists.txt +++ b/src/ipcpd/unicast/routing/tests/CMakeLists.txt @@ -18,7 +18,6 @@ get_filename_component(PARENT_DIR ${PARENT_PATH} NAME)  create_test_sourcelist(${PARENT_DIR}_tests test_suite.c    # Add new tests here    graph_test.c -  pft_test.c    )  add_executable(${PARENT_DIR}_test EXCLUDE_FROM_ALL ${${PARENT_DIR}_tests}) @@ -27,7 +26,11 @@ target_link_libraries(${PARENT_DIR}_test ouroboros-common)  add_dependencies(check ${PARENT_DIR}_test)  set(tests_to_run ${${PARENT_DIR}_tests}) -remove(tests_to_run test_suite.c) +if(CMAKE_VERSION VERSION_LESS "3.29.0") +  remove(tests_to_run test_suite.c) +else () +  list(POP_FRONT tests_to_run) +endif()  foreach (test ${tests_to_run})    get_filename_component(test_name ${test} NAME_WE) diff --git a/src/ipcpd/unicast/pol/tests/graph_test.c b/src/ipcpd/unicast/routing/tests/graph_test.c index 217c7eab..d805640c 100644 --- a/src/ipcpd/unicast/pol/tests/graph_test.c +++ b/src/ipcpd/unicast/routing/tests/graph_test.c @@ -1,5 +1,5 @@  /* - * Ouroboros - Copyright (C) 2016 - 2021 + * Ouroboros - Copyright (C) 2016 - 2024   *   * Test of the graph structure   * diff --git a/src/ipcpd/unicast/tests/dht_test.c b/src/ipcpd/unicast/tests/dht_test.c deleted file mode 100644 index 552af75c..00000000 --- a/src/ipcpd/unicast/tests/dht_test.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2021 - * - * Unit tests of the DHT - * - *    Dimitri Staessens <dimitri@ouroboros.rocks> - *    Sander Vrijders   <sander@ouroboros.rocks> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. 
- */ - -#define __DHT_TEST__ - -#include "dht.c" - -#include <pthread.h> -#include <time.h> -#include <stdlib.h> -#include <stdio.h> - -#define KEY_LEN  32 - -#define EXP      86400 -#define CONTACTS 1000 - -int dht_test(int     argc, -             char ** argv) -{ -        struct dht * dht; -        uint64_t     addr = 0x0D1F; -        uint8_t      key[KEY_LEN]; -        size_t       i; - -        (void) argc; -        (void) argv; - -        dht = dht_create(addr); -        if (dht == NULL) { -                printf("Failed to create dht.\n"); -                return -1; -        } - -        dht_destroy(dht); - -        dht = dht_create(addr); -        if (dht == NULL) { -                printf("Failed to re-create dht.\n"); -                return -1; -        } - -        if (dht_bootstrap(dht, KEY_LEN, EXP)) { -                printf("Failed to bootstrap dht.\n"); -                dht_destroy(dht); -                return -1; -        } - -        dht_destroy(dht); - -        dht = dht_create(addr); -        if (dht == NULL) { -                printf("Failed to re-create dht.\n"); -                return -1; -        } - -        if (dht_bootstrap(dht, KEY_LEN, EXP)) { -                printf("Failed to bootstrap dht.\n"); -                dht_destroy(dht); -                return -1; -        } - -        for (i = 0; i < CONTACTS; ++i) { -                uint64_t addr; -                random_buffer(&addr, sizeof(addr)); -                random_buffer(key, KEY_LEN); -                pthread_rwlock_wrlock(&dht->lock); -                if (dht_update_bucket(dht, key, addr)) { -                        pthread_rwlock_unlock(&dht->lock); -                        printf("Failed to update bucket.\n"); -                        dht_destroy(dht); -                        return -1; -                } -                pthread_rwlock_unlock(&dht->lock); -        } - -        dht_destroy(dht); - -        return 0; -}  | 
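Editor's note: the reworked graph_update_edge() above replaces nested error handling with a single goto-based unwind ladder that releases resources in reverse order of acquisition. Below is a minimal, self-contained sketch of that pattern for reference; it is not part of the commit, and every name in it (acquire, release, do_work) is illustrative.

/* Sketch: goto-based error unwinding in reverse acquisition order. */
#include <stdlib.h>

struct res { int id; };

static struct res * acquire(int id)
{
        struct res * r = malloc(sizeof(*r));
        if (r != NULL)
                r->id = id;
        return r;
}

static void release(struct res * r)
{
        free(r);
}

static int do_work(void)
{
        struct res * a;
        struct res * b;

        a = acquire(1);
        if (a == NULL)
                goto fail_a;   /* nothing acquired yet, nothing to undo */

        b = acquire(2);
        if (b == NULL)
                goto fail_b;   /* must undo the acquisition of a */

        /* ... use a and b ... */

        release(b);
        release(a);

        return 0;

 fail_b:
        release(a);
 fail_a:
        return -1;
}

int main(void)
{
        return do_work();
}

Each failure label undoes exactly what was acquired before it, so the function has one success path and one shared exit per failure point; this is the same shape as the fail_add_s / fail_add_d / fail_add_edge_* labels in graph.c above.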
