path: root/src/irmd/reg
author     Dimitri Staessens <dimitri@ouroboros.rocks>    2026-01-20 22:25:41 +0100
committer  Sander Vrijders <sander@ouroboros.rocks>       2026-01-26 07:50:33 +0100
commit     0ca48453a067c7862f0bb6b85f152da826f59af7 (patch)
tree       5daf26d84777ec6ad1c266601b66e59f9dcc88ca /src/irmd/reg
parent     1775201647a10923b9f73addf2304c3124350836 (diff)
lib: Replace rdrbuff with a proper slab allocator
This is a first step towards the Secure Shared Memory (SSM) infrastructure for Ouroboros, which will allow proper resource separation for non-privileged processes.

This replaces the rdrbuff (random-deletion ring buffer) PoC allocator with a sharded slab allocator for the packet buffer pool, to avoid the head-of-line blocking behaviour of the rdrb and to reduce lock contention in multi-process scenarios. Each size class contains multiple independent shards, allowing parallel allocations without blocking.

- Configurable shard count per size class (default: 4, set via SSM_POOL_SHARDS in CMake). The configured number of blocks is spread over the shards. For example, SSM_POOL_512_BLOCKS = 768 means 768 blocks in total, shared among the 4 shards (not 768 × 4 = 3072 blocks).

- Lazy block distribution: all blocks initially reside in shard 0 and naturally migrate to process-local shards upon first allocation and subsequent free operations.

- Fallback with work stealing: processes first attempt allocation from their local shard (pid % SSM_POOL_SHARDS), then steal from other shards if the local one is exhausted, eliminating fragmentation while maintaining low contention.

- Round-robin condvar signaling: blocking allocations cycle through all shard condition variables to ensure fairness.

- Blocks freed to the allocator's shard: uses allocator_pid to determine the target shard, enabling natural load balancing as process allocation patterns stabilize over time.

Maintains the existing robust mutex semantics, including EOWNERDEAD handling for dead-process recovery. Internal structures are exposed in ssm.h for testing purposes.

Adds some tests (pool_test, pool_sharding_test, etc.) verifying lazy distribution, migration, fallback stealing, and multi-process behaviour.

Updates the ring buffer (rbuff) to use relaxed/acquire/release ordering on atomic indices. The ring buffer still requires the (robust) mutex to ensure cross-structure synchronization between pool buffer writes and ring buffer index publication.

Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks>
Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
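Below is a minimal sketch of the allocation and free paths described in the commit message. SSM_POOL_SHARDS and allocator_pid follow the message; the block and shard layouts and the helper functions are illustrative assumptions, not the actual Ouroboros ssm code (the real pool lives in shared memory and uses robust, process-shared mutexes plus condition variables for the blocking path).

#include <pthread.h>
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

#define SSM_POOL_SHARDS 4                /* default shard count (CMake option) */

struct ssm_block {                       /* illustrative block header */
        struct ssm_block * next;
        pid_t              allocator_pid;
};

struct ssm_shard {                       /* one shard of a size class */
        pthread_mutex_t    mtx;          /* robust + process-shared in the real pool */
        struct ssm_block * free;
};

struct ssm_pool_sketch {
        struct ssm_shard   shards[SSM_POOL_SHARDS];
};

static struct ssm_block * shard_pop(struct ssm_shard * s)
{
        struct ssm_block * b;

        pthread_mutex_lock(&s->mtx);     /* real code also handles EOWNERDEAD */
        b = s->free;
        if (b != NULL)
                s->free = b->next;
        pthread_mutex_unlock(&s->mtx);

        return b;
}

/* Local shard first (pid % SSM_POOL_SHARDS), then steal from the others. */
struct ssm_block * pool_alloc(struct ssm_pool_sketch * p)
{
        size_t             home = (size_t) getpid() % SSM_POOL_SHARDS;
        size_t             i;
        struct ssm_block * b;

        for (i = 0; i < SSM_POOL_SHARDS; ++i) {
                b = shard_pop(&p->shards[(home + i) % SSM_POOL_SHARDS]);
                if (b != NULL) {
                        b->allocator_pid = getpid();
                        return b;
                }
        }

        return NULL;                     /* a blocking variant cycles the shard condvars */
}

/* Freed blocks go back to the allocator's shard, so they migrate towards
   the processes that actually use them. */
void pool_free(struct ssm_pool_sketch * p, struct ssm_block * b)
{
        struct ssm_shard * s =
                &p->shards[(size_t) b->allocator_pid % SSM_POOL_SHARDS];

        pthread_mutex_lock(&s->mtx);
        b->next = s->free;
        s->free = b;
        pthread_mutex_unlock(&s->mtx);
}

With all blocks initially placed on shard 0, the first allocations by other processes go through the stealing loop; since frees target the allocator's shard, blocks then settle near their users, which is the lazy distribution the message describes.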
Diffstat (limited to 'src/irmd/reg')
-rw-r--r--  src/irmd/reg/flow.c  14
-rw-r--r--  src/irmd/reg/flow.h   6
-rw-r--r--  src/irmd/reg/proc.c   4
-rw-r--r--  src/irmd/reg/proc.h   4
4 files changed, 14 insertions, 14 deletions
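The commit message also mentions switching the rbuff's atomic indices to relaxed/acquire/release ordering. The fragment below illustrates that ordering for a single-producer/single-consumer ring of buffer indices; it is a generic sketch, not the Ouroboros rbuff, which additionally holds a robust mutex to order pool buffer writes against index publication.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define RB_SIZE 64                       /* illustrative; power of two so masking wraps */

struct rbuff_sketch {
        _Atomic size_t head;             /* next slot to write */
        _Atomic size_t tail;             /* next slot to read  */
        size_t         slot[RB_SIZE];    /* pool block indices */
};

/* Producer: write the slot first, then publish the new head with release. */
bool rb_push(struct rbuff_sketch * rb, size_t idx)
{
        size_t head = atomic_load_explicit(&rb->head, memory_order_relaxed);
        size_t tail = atomic_load_explicit(&rb->tail, memory_order_acquire);

        if (head - tail == RB_SIZE)      /* full */
                return false;

        rb->slot[head & (RB_SIZE - 1)] = idx;
        atomic_store_explicit(&rb->head, head + 1, memory_order_release);

        return true;
}

/* Consumer: acquire the head so the slot write is visible before the read. */
bool rb_pop(struct rbuff_sketch * rb, size_t * idx)
{
        size_t tail = atomic_load_explicit(&rb->tail, memory_order_relaxed);
        size_t head = atomic_load_explicit(&rb->head, memory_order_acquire);

        if (head == tail)                /* empty */
                return false;

        *idx = rb->slot[tail & (RB_SIZE - 1)];
        atomic_store_explicit(&rb->tail, tail + 1, memory_order_release);

        return true;
}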
diff --git a/src/irmd/reg/flow.c b/src/irmd/reg/flow.c
index d6f6437f..02dc9c99 100644
--- a/src/irmd/reg/flow.c
+++ b/src/irmd/reg/flow.c
@@ -66,11 +66,11 @@ struct reg_flow * reg_flow_create(const struct flow_info * info)
static void destroy_rbuffs(struct reg_flow * flow)
{
if (flow->n_rb != NULL)
- shm_rbuff_destroy(flow->n_rb);
+ ssm_rbuff_destroy(flow->n_rb);
flow->n_rb = NULL;
if (flow->n_1_rb != NULL)
- shm_rbuff_destroy(flow->n_1_rb);
+ ssm_rbuff_destroy(flow->n_1_rb);
flow->n_1_rb = NULL;
}
@@ -103,28 +103,28 @@ static int create_rbuffs(struct reg_flow * flow,
assert(flow != NULL);
assert(info != NULL);
- flow->n_rb = shm_rbuff_create(info->n_pid, info->id);
+ flow->n_rb = ssm_rbuff_create(info->n_pid, info->id);
if (flow->n_rb == NULL)
goto fail_n_rb;
- if (shm_rbuff_mlock(flow->n_rb) < 0)
+ if (ssm_rbuff_mlock(flow->n_rb) < 0)
log_warn("Failed to mlock n_rb for flow %d.", info->id);
assert(flow->info.n_1_pid == 0);
assert(flow->n_1_rb == NULL);
flow->info.n_1_pid = info->n_1_pid;
- flow->n_1_rb = shm_rbuff_create(info->n_1_pid, info->id);
+ flow->n_1_rb = ssm_rbuff_create(info->n_1_pid, info->id);
if (flow->n_1_rb == NULL)
goto fail_n_1_rb;
- if (shm_rbuff_mlock(flow->n_1_rb) < 0)
+ if (ssm_rbuff_mlock(flow->n_1_rb) < 0)
log_warn("Failed to mlock n_1_rb for flow %d.", info->id);
return 0;
fail_n_1_rb:
- shm_rbuff_destroy(flow->n_rb);
+ ssm_rbuff_destroy(flow->n_rb);
fail_n_rb:
return -ENOMEM;
}
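The ssm_rbuff instances created above keep, per the commit message, the existing robust mutex semantics, including EOWNERDEAD handling for dead-process recovery. A minimal, generic POSIX sketch of that pattern (not Ouroboros code):

#include <errno.h>
#include <pthread.h>

/* Initialise a process-shared, robust mutex (e.g. living in shared memory). */
int robust_mutex_init(pthread_mutex_t * mtx)
{
        pthread_mutexattr_t attr;
        int                 ret;

        if (pthread_mutexattr_init(&attr) != 0)
                return -1;

        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);

        ret = pthread_mutex_init(mtx, &attr);
        pthread_mutexattr_destroy(&attr);

        return ret == 0 ? 0 : -1;
}

/* Lock, recovering the mutex if its previous owner died while holding it. */
int robust_lock(pthread_mutex_t * mtx)
{
        int ret = pthread_mutex_lock(mtx);

        if (ret == EOWNERDEAD) {
                /* The dead owner may have left the protected state
                   half-updated; a real allocator would repair it here
                   before continuing. */
                if (pthread_mutex_consistent(mtx) != 0)
                        return -1;
                return 0;
        }

        return ret == 0 ? 0 : -1;
}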
diff --git a/src/irmd/reg/flow.h b/src/irmd/reg/flow.h
index d1e4811c..b671d486 100644
--- a/src/irmd/reg/flow.h
+++ b/src/irmd/reg/flow.h
@@ -28,7 +28,7 @@
#include <ouroboros/name.h>
#include <ouroboros/pthread.h>
#include <ouroboros/qos.h>
-#include <ouroboros/shm_rbuff.h>
+#include <ouroboros/ssm_rbuff.h>
#include <ouroboros/utils.h>
#include <sys/types.h>
@@ -45,8 +45,8 @@ struct reg_flow {
char name[NAME_SIZE + 1];
- struct shm_rbuff * n_rb;
- struct shm_rbuff * n_1_rb;
+ struct ssm_rbuff * n_rb;
+ struct ssm_rbuff * n_1_rb;
};
struct reg_flow * reg_flow_create(const struct flow_info * info);
diff --git a/src/irmd/reg/proc.c b/src/irmd/reg/proc.c
index 9bbdf0eb..541731b2 100644
--- a/src/irmd/reg/proc.c
+++ b/src/irmd/reg/proc.c
@@ -75,7 +75,7 @@ struct reg_proc * reg_proc_create(const struct proc_info * info)
goto fail_malloc;
}
- proc->set = shm_flow_set_create(info->pid);
+ proc->set = ssm_flow_set_create(info->pid);
if (proc->set == NULL) {
log_err("Failed to create flow set for %d.", info->pid);
goto fail_set;
@@ -99,7 +99,7 @@ void reg_proc_destroy(struct reg_proc * proc)
{
assert(proc != NULL);
- shm_flow_set_destroy(proc->set);
+ ssm_flow_set_destroy(proc->set);
__reg_proc_clear_names(proc);
diff --git a/src/irmd/reg/proc.h b/src/irmd/reg/proc.h
index 499ecc72..a790e5c9 100644
--- a/src/irmd/reg/proc.h
+++ b/src/irmd/reg/proc.h
@@ -25,7 +25,7 @@
#include <ouroboros/list.h>
#include <ouroboros/proc.h>
-#include <ouroboros/shm_flow_set.h>
+#include <ouroboros/ssm_flow_set.h>
struct reg_proc {
struct list_head next;
@@ -35,7 +35,7 @@ struct reg_proc {
struct list_head names; /* process accepts flows for names */
size_t n_names; /* number of names */
- struct shm_flow_set * set;
+ struct ssm_flow_set * set;
};
struct reg_proc * reg_proc_create(const struct proc_info * info);