summaryrefslogtreecommitdiff
path: root/src/ipcpd/unicast/routing/link-state.c
diff options
context:
space:
mode:
authorDimitri Staessens <dimitri@ouroboros.rocks>2026-01-20 22:25:41 +0100
committerSander Vrijders <sander@ouroboros.rocks>2026-01-26 07:50:33 +0100
commit0ca48453a067c7862f0bb6b85f152da826f59af7 (patch)
tree5daf26d84777ec6ad1c266601b66e59f9dcc88ca /src/ipcpd/unicast/routing/link-state.c
parent1775201647a10923b9f73addf2304c3124350836 (diff)
downloadouroboros-0ca48453a067c7862f0bb6b85f152da826f59af7.tar.gz
ouroboros-0ca48453a067c7862f0bb6b85f152da826f59af7.zip
lib: Replace rdrbuff with a proper slab allocator
This is a first step towards the Secure Shared Memory (SSM) infrastructure for Ouroboros, which will allow proper resource separation for non-privileged processes. This replaces the rdrbuff (random-deletion ring buffer) PoC allocator with a sharded slab allocator for the packet buffer pool to avoid the head-of-line blocking behaviour of the rdrb and reduce lock contention in multi-process scenarios. Each size class contains multiple independent shards, allowing parallel allocations without blocking. - Configurable shard count per size class (default: 4, set via SSM_POOL_SHARDS in CMake). The configured number of blocks are spread over the number of shards. As an example: SSM_POOL_512_BLOCKS = 768 blocks total. These 768 blocks are shared among 4 shards (not 768 × 4 = 3072 blocks) - Lazy block distribution: all blocks initially reside in shard 0 and naturally migrate to process-local shards upon first allocation and subsequent free operations - Fallback with work stealing: processes attempt allocation from their local shard (pid % SSM_POOL_SHARDS) first, then steal from other shards if local is exhausted, eliminating fragmentation while maintaining low contention - Round-robin condvar signaling: blocking allocations cycle through all shard condition variables to ensure fairness - Blocks freed to allocator's shard: uses allocator_pid to determine target shard, enabling natural load balancing as process allocation patterns stabilize over time Maintains existing robust mutex semantics including EOWNERDEAD handling for dead process recovery. Internal structures exposed in ssm.h for testing purposes. Adds some tests (pool_test, pool_sharding_test.c, etc.) verifying lazy distribution, migration, fallback stealing, and multiprocess behavior. Updates the ring buffer (rbuff) to use relaxed/acquire/release ordering on atomic indices.
The ring buffer requires the (robust) mutex to ensure cross-structure synchronization between pool buffer writes and ring buffer index publication. Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks> Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
Diffstat (limited to 'src/ipcpd/unicast/routing/link-state.c')
-rw-r--r--src/ipcpd/unicast/routing/link-state.c64
1 files changed, 32 insertions, 32 deletions
diff --git a/src/ipcpd/unicast/routing/link-state.c b/src/ipcpd/unicast/routing/link-state.c
index e5edf539..95a104bb 100644
--- a/src/ipcpd/unicast/routing/link-state.c
+++ b/src/ipcpd/unicast/routing/link-state.c
@@ -56,7 +56,7 @@
#include <string.h>
#define LS_ENTRY_SIZE 104
-#define LSDB "lsdb"
+#define Lspb "lspb"
#ifndef CLOCK_REALTIME_COARSE
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
@@ -199,7 +199,7 @@ static struct adjacency * get_adj(const char * path)
return NULL;
}
-static int lsdb_rib_getattr(const char * path,
+static int lspb_rib_getattr(const char * path,
struct rib_attr * attr)
{
struct adjacency * adj;
@@ -230,7 +230,7 @@ static int lsdb_rib_getattr(const char * path,
return 0;
}
-static int lsdb_rib_read(const char * path,
+static int lspb_rib_read(const char * path,
char * buf,
size_t len)
{
@@ -264,7 +264,7 @@ static int lsdb_rib_read(const char * path,
return -1;
}
-static int lsdb_rib_readdir(char *** buf)
+static int lspb_rib_readdir(char *** buf)
{
struct list_head * p;
char entry[RIB_PATH_LEN + 1];
@@ -319,12 +319,12 @@ static int lsdb_rib_readdir(char *** buf)
}
static struct rib_ops r_ops = {
- .read = lsdb_rib_read,
- .readdir = lsdb_rib_readdir,
- .getattr = lsdb_rib_getattr
+ .read = lspb_rib_read,
+ .readdir = lspb_rib_readdir,
+ .getattr = lspb_rib_getattr
};
-static int lsdb_add_nb(uint64_t addr,
+static int lspb_add_nb(uint64_t addr,
int fd,
enum nb_type type)
{
@@ -372,7 +372,7 @@ static int lsdb_add_nb(uint64_t addr,
return 0;
}
-static int lsdb_del_nb(uint64_t addr,
+static int lspb_del_nb(uint64_t addr,
int fd)
{
struct list_head * p;
@@ -478,7 +478,7 @@ static void set_pff_modified(bool calc)
pthread_mutex_unlock(&ls.instances.mtx);
}
-static int lsdb_add_link(uint64_t src,
+static int lspb_add_link(uint64_t src,
uint64_t dst,
uint64_t seqno,
qosspec_t * qs)
@@ -535,7 +535,7 @@ static int lsdb_add_link(uint64_t src,
return 0;
}
-static int lsdb_del_link(uint64_t src,
+static int lspb_del_link(uint64_t src,
uint64_t dst)
{
struct list_head * p;
@@ -616,8 +616,8 @@ static void send_lsm(uint64_t src,
}
}
-/* replicate the lsdb to a mgmt neighbor */
-static void lsdb_replicate(int fd)
+/* replicate the lspb to a mgmt neighbor */
+static void lspb_replicate(int fd)
{
struct list_head * p;
struct list_head * h;
@@ -625,7 +625,7 @@ static void lsdb_replicate(int fd)
list_head_init(&copy);
- /* Lock the lsdb, copy the lsms and send outside of lock. */
+ /* Lock the lspb, copy the lsms and send outside of lock. */
pthread_rwlock_rdlock(&ls.lock);
list_for_each(p, &ls.db.list) {
@@ -634,7 +634,7 @@ static void lsdb_replicate(int fd)
adj = list_entry(p, struct adjacency, next);
cpy = malloc(sizeof(*cpy));
if (cpy == NULL) {
- log_warn("Failed to replicate full lsdb.");
+ log_warn("Failed to replicate full lspb.");
break;
}
@@ -814,7 +814,7 @@ static void * lsreader(void * o)
LSU_VAL(msg.s_addr, msg.d_addr, msg.seqno),
ADDR_VAL32(&ls.addr));
#endif
- if (lsdb_add_link(msg.s_addr,
+ if (lspb_add_link(msg.s_addr,
msg.d_addr,
msg.seqno,
&qs))
@@ -873,20 +873,20 @@ static void handle_event(void * self,
send_lsm(ls.addr, c->conn_info.addr, 0);
pthread_cleanup_pop(true);
- if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT))
- log_dbg("Failed to add neighbor to LSDB.");
+ if (lspb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_DT))
+ log_dbg("Failed to add neighbor to Lspb.");
- if (lsdb_add_link(ls.addr, c->conn_info.addr, 0, &qs))
- log_dbg("Failed to add new adjacency to LSDB.");
+ if (lspb_add_link(ls.addr, c->conn_info.addr, 0, &qs))
+ log_dbg("Failed to add new adjacency to Lspb.");
break;
case NOTIFY_DT_CONN_DEL:
flow_event(c->flow_info.fd, false);
- if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))
- log_dbg("Failed to delete neighbor from LSDB.");
+ if (lspb_del_nb(c->conn_info.addr, c->flow_info.fd))
+ log_dbg("Failed to delete neighbor from Lspb.");
- if (lsdb_del_link(ls.addr, c->conn_info.addr))
- log_dbg("Local link was not in LSDB.");
+ if (lspb_del_link(ls.addr, c->conn_info.addr))
+ log_dbg("Local link was not in Lspb.");
break;
case NOTIFY_DT_CONN_QOS:
log_dbg("QoS changes currently unsupported.");
@@ -901,15 +901,15 @@ static void handle_event(void * self,
fccntl(c->flow_info.fd, FLOWGFLAGS, &flags);
fccntl(c->flow_info.fd, FLOWSFLAGS, flags | FLOWFRNOPART);
fset_add(ls.mgmt_set, c->flow_info.fd);
- if (lsdb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_MGMT))
- log_warn("Failed to add mgmt neighbor to LSDB.");
- /* replicate the entire lsdb */
- lsdb_replicate(c->flow_info.fd);
+ if (lspb_add_nb(c->conn_info.addr, c->flow_info.fd, NB_MGMT))
+ log_warn("Failed to add mgmt neighbor to Lspb.");
+ /* replicate the entire lspb */
+ lspb_replicate(c->flow_info.fd);
break;
case NOTIFY_MGMT_CONN_DEL:
fset_del(ls.mgmt_set, c->flow_info.fd);
- if (lsdb_del_nb(c->conn_info.addr, c->flow_info.fd))
- log_warn("Failed to delete mgmt neighbor from LSDB.");
+ if (lspb_del_nb(c->conn_info.addr, c->flow_info.fd))
+ log_warn("Failed to delete mgmt neighbor from Lspb.");
break;
default:
break;
@@ -1094,7 +1094,7 @@ int link_state_init(struct ls_config * conf,
list_head_init(&ls.nbs.list);
list_head_init(&ls.instances.list);
- if (rib_reg(LSDB, &r_ops))
+ if (rib_reg(Lspb, &r_ops))
goto fail_rib_reg;
ls.db.len = 0;
@@ -1121,7 +1121,7 @@ void link_state_fini(void)
struct list_head * p;
struct list_head * h;
- rib_unreg(LSDB);
+ rib_unreg(Lspb);
fset_destroy(ls.mgmt_set);