| | | |
|---|---|---|
| author | Dimitri Staessens <dimitri@ouroboros.rocks> | 2026-01-20 22:25:41 +0100 |
| committer | Sander Vrijders <sander@ouroboros.rocks> | 2026-01-26 07:50:33 +0100 |
| commit | 0ca48453a067c7862f0bb6b85f152da826f59af7 (patch) | |
| tree | 5daf26d84777ec6ad1c266601b66e59f9dcc88ca /src/ipcpd/eth/eth.c | |
| parent | 1775201647a10923b9f73addf2304c3124350836 (diff) | |
lib: Replace rdrbuff with a proper slab allocator
This is a first step towards the Secure Shared Memory (SSM)
infrastructure for Ouroboros, which will allow proper resource
separation for non-privileged processes.
This replaces the rdrbuff (random-deletion ring buffer) PoC allocator
with a sharded slab allocator for the packet buffer pool to avoid the
head-of-line blocking behaviour of the rdrb and reduce lock contention
in multi-process scenarios. Each size class contains multiple
independent shards, allowing parallel allocations without blocking.
- Configurable shard count per size class (default: 4, set via
  SSM_POOL_SHARDS in CMake). The configured number of blocks is
  spread over the shards. As an example:
  SSM_POOL_512_BLOCKS = 768 blocks total
  These 768 blocks are shared among the 4 shards
  (not 768 × 4 = 3072 blocks)
- Lazy block distribution: all blocks initially reside in shard 0
and naturally migrate to process-local shards upon first
allocation and subsequent free operations
- Fallback with work stealing: processes attempt allocation from
  their local shard (pid % SSM_POOL_SHARDS) first, then steal
  from other shards if the local shard is exhausted, eliminating
  fragmentation while keeping contention low (see the sketch
  after this list)
- Round-robin condvar signaling: blocking allocations cycle
through all shard condition variables to ensure fairness
- Blocks freed to allocator's shard: uses allocator_pid to
determine target shard, enabling natural load balancing as
process allocation patterns stabilize over time
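To make the list above concrete, here is a minimal single-file sketch
of the alloc/free paths it describes. The names (struct ssm_shard,
class_alloc, class_free) are hypothetical and do not reflect the real
ssm.h layout; robust-mutex recovery and blocking on the condvars are
elided:

```c
/* Hypothetical sketch of one sharded size class; not the ssm.h layout. */
#include <pthread.h>
#include <stddef.h>
#include <sys/types.h>

#define SSM_POOL_SHARDS 4             /* default, set via CMake       */

struct block {
        struct block *  next;
        pid_t           allocator_pid; /* remembers who allocated it  */
};

struct ssm_shard {
        pthread_mutex_t mtx;           /* robust, process-shared      */
        pthread_cond_t  cv;            /* waiters cycle these fairly  */
        struct block *  free;          /* all blocks start in shard 0 */
};

struct size_class {
        struct ssm_shard shards[SSM_POOL_SHARDS];
};

/* Allocate: try the local shard (pid % SSM_POOL_SHARDS) first,
 * then steal from the other shards. */
static struct block * class_alloc(struct size_class * c,
                                  pid_t               pid)
{
        size_t local = (size_t) pid % SSM_POOL_SHARDS;
        size_t i;

        for (i = 0; i < SSM_POOL_SHARDS; ++i) {
                struct ssm_shard * s;
                struct block *     b;

                s = &c->shards[(local + i) % SSM_POOL_SHARDS];

                pthread_mutex_lock(&s->mtx); /* robust handling elided */
                b = s->free;
                if (b != NULL) {
                        s->free = b->next;
                        b->allocator_pid = pid;
                }
                pthread_mutex_unlock(&s->mtx);

                if (b != NULL)
                        return b; /* i > 0 means the block was stolen */
        }

        return NULL; /* caller may block, cycling the shard condvars */
}

/* Free to the allocator's shard, so blocks migrate to the shards of
 * the processes that actually use them. */
static void class_free(struct size_class * c,
                       struct block *      b)
{
        struct ssm_shard * s;

        s = &c->shards[(size_t) b->allocator_pid % SSM_POOL_SHARDS];

        pthread_mutex_lock(&s->mtx);
        b->next = s->free;
        s->free = b;
        pthread_cond_signal(&s->cv);
        pthread_mutex_unlock(&s->mtx);
}
```

With SSM_POOL_512_BLOCKS = 768, all 768 blocks would start on shard
0's free list; as processes allocate and later free them, class_free()
moves each block to its allocator's shard, which is the lazy
distribution and migration described above.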
Maintains existing robust mutex semantics, including EOWNERDEAD
handling for dead-process recovery. Internal structures are exposed in
ssm.h for testing purposes. Adds tests (pool_test,
pool_sharding_test, etc.) verifying lazy distribution, migration,
fallback stealing, and multi-process behaviour.
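For reference, the EOWNERDEAD handling mentioned here is the standard
POSIX robust-mutex pattern; a hedged sketch (robust_mutex_init and
robust_lock are illustrative helpers, not the actual ssm code):

```c
#include <errno.h>
#include <pthread.h>

/* Initialise a process-shared robust mutex (standard POSIX calls). */
static int robust_mutex_init(pthread_mutex_t * mtx)
{
        pthread_mutexattr_t attr;
        int                 ret;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);

        ret = pthread_mutex_init(mtx, &attr);

        pthread_mutexattr_destroy(&attr);

        return ret;
}

/* Lock, recovering if the previous owner died while holding it. */
static int robust_lock(pthread_mutex_t * mtx)
{
        int ret = pthread_mutex_lock(mtx);

        if (ret == EOWNERDEAD) {
                /* Repair whatever state the dead process may have
                 * left half-updated (e.g. fix up the shard free
                 * list), then mark the mutex usable again. */
                pthread_mutex_consistent(mtx);
                return 0;
        }

        return ret;
}
```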
Updates the ring buffer (rbuff) to use relaxed/acquire/release
ordering on atomic indices. The ring buffer requires the (robust)
mutex to ensure cross-structure synchronization between pool buffer
writes and ring buffer index publication.
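As a rough illustration of that ordering, a single-producer/
single-consumer ring using C11 atomics; the real rbuff layout differs,
and in Ouroboros these index operations additionally sit behind the
robust mutex so they also order against the pool buffer writes:

```c
#include <stdatomic.h>
#include <stddef.h>

#define RBUFF_SIZE 1024 /* illustrative; a power of two */

struct rbuff {
        atomic_size_t head;              /* written by the producer */
        atomic_size_t tail;              /* written by the consumer */
        size_t        slots[RBUFF_SIZE]; /* pool block indices      */
};

/* Producer: the release store on head publishes the slot write. */
static int rb_push(struct rbuff * rb, size_t idx)
{
        size_t h = atomic_load_explicit(&rb->head, memory_order_relaxed);
        size_t t = atomic_load_explicit(&rb->tail, memory_order_acquire);

        if (h - t == RBUFF_SIZE)
                return -1; /* full */

        rb->slots[h % RBUFF_SIZE] = idx;
        atomic_store_explicit(&rb->head, h + 1, memory_order_release);

        return 0;
}

/* Consumer: the acquire load on head makes the slot write visible. */
static int rb_pop(struct rbuff * rb, size_t * idx)
{
        size_t t = atomic_load_explicit(&rb->tail, memory_order_relaxed);
        size_t h = atomic_load_explicit(&rb->head, memory_order_acquire);

        if (t == h)
                return -1; /* empty */

        *idx = rb->slots[t % RBUFF_SIZE];
        atomic_store_explicit(&rb->tail, t + 1, memory_order_release);

        return 0;
}
```

Acquire/release on the indices orders the slot contents themselves,
but not writes to a different shared structure (the packet buffers in
the pool), which is why the robust mutex remains necessary.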
Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks>
Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
Diffstat (limited to 'src/ipcpd/eth/eth.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | src/ipcpd/eth/eth.c | 56 |

1 file changed, 23 insertions, 33 deletions
```diff
diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c
index 29c5ff4f..f36e0b13 100644
--- a/src/ipcpd/eth/eth.c
+++ b/src/ipcpd/eth/eth.c
@@ -839,7 +839,7 @@ static void * eth_ipcp_packet_reader(void * o)
 #if defined(HAVE_NETMAP)
         struct nm_pkthdr hdr;
 #else
-        struct shm_du_buff * sdb;
+        struct ssm_pk_buff * spb;
         fd_set fds;
         int frame_len;
 #endif
@@ -871,21 +871,21 @@ static void * eth_ipcp_packet_reader(void * o)
                 if (select(eth_data.bpf + 1, &fds, NULL, NULL, NULL))
                         continue;
                 assert(FD_ISSET(eth_data.bpf, &fds));
-                if (ipcp_sdb_reserve(&sdb, BPF_LEN))
+                if (ipcp_spb_reserve(&spb, BPF_LEN))
                         continue;
-                buf = shm_du_buff_head(sdb);
+                buf = ssm_pk_buff_head(spb);
                 frame_len = read(eth_data.bpf, buf, BPF_BLEN);
 #elif defined(HAVE_RAW_SOCKETS)
                 FD_SET(eth_data.s_fd, &fds);
                 if (select(eth_data.s_fd + 1, &fds, NULL, NULL, NULL) < 0)
                         continue;
                 assert(FD_ISSET(eth_data.s_fd, &fds));
-                if (ipcp_sdb_reserve(&sdb, ETH_MTU))
+                if (ipcp_spb_reserve(&spb, ETH_MTU))
                         continue;
-                buf = shm_du_buff_head_alloc(sdb, ETH_HEADER_TOT_SIZE);
+                buf = ssm_pk_buff_head_alloc(spb, ETH_HEADER_TOT_SIZE);
                 if (buf == NULL) {
                         log_dbg("Failed to allocate header.");
-                        ipcp_sdb_release(sdb);
+                        ipcp_spb_release(spb);
                         continue;
                 }
                 frame_len = recv(eth_data.s_fd, buf,
@@ -893,7 +893,7 @@ static void * eth_ipcp_packet_reader(void * o)
 #endif
                 if (frame_len <= 0) {
                         log_dbg("Failed to receive frame.");
-                        ipcp_sdb_release(sdb);
+                        ipcp_spb_release(spb);
                         continue;
                 }
 #endif
@@ -935,7 +935,7 @@ static void * eth_ipcp_packet_reader(void * o)
                 if (ssap == MGMT_SAP && dsap == MGMT_SAP) {
 #endif
-                        ipcp_sdb_release(sdb); /* No need for the N+1 buffer. */
+                        ipcp_spb_release(spb); /* No need for the N+1 buffer. */
 
                         if (length > MGMT_FRAME_SIZE) {
                                 log_warn("Management frame size %u exceeds %u.",
@@ -981,22 +981,22 @@ static void * eth_ipcp_packet_reader(void * o)
                         pthread_rwlock_unlock(&eth_data.flows_lock);
 
 #ifndef HAVE_NETMAP
-                        shm_du_buff_head_release(sdb, ETH_HEADER_TOT_SIZE);
-                        shm_du_buff_truncate(sdb, length);
+                        ssm_pk_buff_head_release(spb, ETH_HEADER_TOT_SIZE);
+                        ssm_pk_buff_truncate(spb, length);
 #else
-                        if (ipcp_sdb_reserve(&sdb, length))
+                        if (ipcp_spb_reserve(&spb, length))
                                 continue;
-                        buf = shm_du_buff_head(sdb);
+                        buf = ssm_pk_buff_head(spb);
                         memcpy(buf, &e_frame->payload, length);
 #endif
-                        if (np1_flow_write(fd, sdb) < 0)
-                                ipcp_sdb_release(sdb);
+                        if (np1_flow_write(fd, spb) < 0)
+                                ipcp_spb_release(spb);
 
                         continue;
 
 fail_frame:
 #ifndef HAVE_NETMAP
-                ipcp_sdb_release(sdb);
+                ipcp_spb_release(spb);
 #endif
         }
 }
@@ -1012,7 +1012,7 @@ static void cleanup_writer(void * o)
 static void * eth_ipcp_packet_writer(void * o)
 {
         int fd;
-        struct shm_du_buff * sdb;
+        struct ssm_pk_buff * spb;
         size_t len;
 #if defined(BUILD_ETH_DIX)
         uint16_t deid;
@@ -1040,17 +1040,17 @@ static void * eth_ipcp_packet_writer(void * o)
                 if (fqueue_type(fq) != FLOW_PKT)
                         continue;
 
-                if (np1_flow_read(fd, &sdb)) {
+                if (np1_flow_read(fd, &spb)) {
                         log_dbg("Bad read from fd %d.", fd);
                         continue;
                 }
 
-                len = shm_du_buff_len(sdb);
+                len = ssm_pk_buff_len(spb);
 
-                if (shm_du_buff_head_alloc(sdb, ETH_HEADER_TOT_SIZE)
+                if (ssm_pk_buff_head_alloc(spb, ETH_HEADER_TOT_SIZE)
                     == NULL) {
                         log_dbg("Failed to allocate header.");
-                        ipcp_sdb_release(sdb);
+                        ipcp_spb_release(spb);
                         continue;
                 }
@@ -1073,10 +1073,10 @@ static void * eth_ipcp_packet_writer(void * o)
 #elif defined(BUILD_ETH_LLC)
                             dsap, ssap,
 #endif
-                            shm_du_buff_head(sdb),
+                            ssm_pk_buff_head(spb),
                             len))
                         log_dbg("Failed to send frame.");
 
-                ipcp_sdb_release(sdb);
+                ipcp_spb_release(spb);
         }
 }
@@ -1342,14 +1342,7 @@ static int eth_set_mtu(struct ifreq * ifr)
                         IPCP_ETH_LO_MTU);
                 eth_data.mtu = IPCP_ETH_LO_MTU;
         }
-#ifndef SHM_RDRB_MULTI_BLOCK
-        maxsz = SHM_RDRB_BLOCK_SIZE - 5 * sizeof(size_t) -
-                (DU_BUFF_HEADSPACE + DU_BUFF_TAILSPACE);
-        if ((size_t) eth_data.mtu > maxsz ) {
-                log_dbg("Layer MTU truncated to shm block size.");
-                eth_data.mtu = maxsz;
-        }
-#endif
+
         log_dbg("Layer MTU is %d.", eth_data.mtu);
 
         return 0;
@@ -1503,9 +1496,6 @@ static int eth_ipcp_bootstrap(struct ipcp_config * conf)
         char ifn[IFNAMSIZ];
 #endif /* HAVE_NETMAP */
 
-#ifndef SHM_RDRB_MULTI_BLOCK
-        size_t maxsz;
-#endif
 
         assert(conf);
         assert(conf->type == THIS_TYPE);
```
