author    Dimitri Staessens <dimitri@ouroboros.rocks>  2026-01-26 22:02:50 +0100
committer Sander Vrijders <sander@ouroboros.rocks>     2026-02-02 08:15:15 +0100
commit    b1687570df3e080c961cdcc0d59b708cfbdf955e (patch)
tree      caf93583ab36ab2b62b95fcfbea4b63e29857e0d
parent    37e3dbdd8206e4f0f03fab13ff3f38aa932be065 (diff)
download  ouroboros-b1687570df3e080c961cdcc0d59b708cfbdf955e.tar.gz
          ouroboros-b1687570df3e080c961cdcc0d59b708cfbdf955e.zip
lib: Add per-user packet pools
The IRMd will now check the user UID and GID for privileged access,
preventing unprivileged users from disrupting all IPC (e.g. by calling
shm_open() on the single shared pool and corrupting its metadata).

Non-privileged users are now limited to a PUP (per-user pool) for
sending and receiving packets. The PUP is still created by the IRMd,
but owned by the user (uid) with 600 permissions. This adds no extra
copies for local IPC between processes of the same user (i.e. over the
local IPCP), but packets between processes owned by different users,
or destined for the network (other IPCPs), incur one copy when
crossing the PUP / PUP or the PUP / GSPP boundary.

Privileged users and users in the ouroboros group still have direct
access to the GSPP (global shared packet pool) for packet transfer,
which avoids additional copies when processing packets between
processes owned by different users and to the network. This aligns the
security model with the UNIX trust domains defined by UID and GID, by
leveraging file permissions on the pools in shared memory.

┌─────────────┬───────────┬───────────┬────────┐
│ Source Pool │ Dest Pool │ Operation │ Copies │
├─────────────┼───────────┼───────────┼────────┤
│ GSPP        │ GSPP      │ Zero-copy │ 0      │
│ PUP.uid     │ PUP.uid   │ Zero-copy │ 0      │
│ PUP.uid1    │ PUP.uid2  │ memcpy()  │ 1      │
│ PUP.uid     │ GSPP      │ memcpy()  │ 1      │
│ GSPP        │ PUP.uid   │ memcpy()  │ 1      │
└─────────────┴───────────┴───────────┴────────┘

This also renames the struct ai ("application instance") in dev.c to
struct proc (process).

Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks>
Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
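A minimal sketch of the copy rule in the table above, with hypothetical
helper names (pass_by_ref(), copy_between_pools()) standing in for the
real rbuff/pool calls; the actual logic lives in local_flow_transfer()
and the np1_flow_read()/np1_flow_write() paths below:

    /* Sketch only, not the committed implementation. */
    struct ssm_pool;                /* opaque, <ouroboros/ssm_pool.h> */
    struct ssm_pk_buff;             /* opaque packet buffer           */

    extern int pass_by_ref(struct ssm_pk_buff * spb);
    extern int copy_between_pools(struct ssm_pool *     dst,
                                  struct ssm_pk_buff ** spb);

    static int transfer_pkt(struct ssm_pool *    src,
                            struct ssm_pool *    dst,
                            struct ssm_pk_buff * spb)
    {
            if (src == dst)      /* GSPP -> GSPP, or same user's PUP */
                    return pass_by_ref(spb);         /* zero-copy    */

            /* PUP.uid1 -> PUP.uid2, PUP -> GSPP or GSPP -> PUP: the
             * packet is reallocated in the destination pool and its
             * contents are memcpy()'d across the boundary. */
            return copy_between_pools(dst, &spb);
    }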
-rw-r--r--  cmake/ipcp/broadcast.cmake               |    6
-rw-r--r--  cmake/ipcp/eth.cmake                     |    6
-rw-r--r--  cmake/ipcp/ipcp.cmake                    |    2
-rw-r--r--  cmake/ipcp/local.cmake                   |   17
-rw-r--r--  cmake/ipcp/udp.cmake                     |    6
-rw-r--r--  cmake/ipcp/unicast.cmake                 |    6
-rw-r--r--  cmake/irmd.cmake                         |    1
-rw-r--r--  cmake/lib/ssm.cmake                      |  156
-rw-r--r--  include/ouroboros/flow.h                 |    2
-rw-r--r--  include/ouroboros/ipcp-dev.h             |    6
-rw-r--r--  include/ouroboros/local-dev.h            |    8
-rw-r--r--  include/ouroboros/proc.h                 |    5
-rw-r--r--  include/ouroboros/serdes-irm.h           |    5
-rw-r--r--  include/ouroboros/ssm_pool.h             |   10
-rw-r--r--  include/ouroboros/utils.h                |    6
-rw-r--r--  src/ipcpd/eth/eth.c                      |    5
-rw-r--r--  src/ipcpd/ipcp.c                         |   56
-rw-r--r--  src/ipcpd/local/main.c                   |   34
-rw-r--r--  src/ipcpd/np1.h                          |   41
-rw-r--r--  src/ipcpd/udp/udp.c                      |    5
-rw-r--r--  src/ipcpd/unicast/fa.c                   |   11
-rw-r--r--  src/irmd/config.h.in                     |    2
-rw-r--r--  src/irmd/ipcp.c                          |    6
-rw-r--r--  src/irmd/main.c                          |   87
-rw-r--r--  src/irmd/reg/flow.c                      |    1
-rw-r--r--  src/irmd/reg/pool.c                      |  101
-rw-r--r--  src/irmd/reg/pool.h                      |   48
-rw-r--r--  src/irmd/reg/proc.c                      |   10
-rw-r--r--  src/irmd/reg/proc.h                      |    2
-rw-r--r--  src/irmd/reg/reg.c                       |  105
-rw-r--r--  src/irmd/reg/reg.h                       |    9
-rw-r--r--  src/irmd/reg/tests/proc_test.c           |   17
-rw-r--r--  src/irmd/reg/tests/reg_test.c            |   33
-rw-r--r--  src/lib/dev.c                            |  689
-rw-r--r--  src/lib/frct.c                           |   18
-rw-r--r--  src/lib/pb/ipcp.proto                    |    1
-rw-r--r--  src/lib/pb/irm.proto                     |    4
-rw-r--r--  src/lib/pb/model.proto                   |   13
-rw-r--r--  src/lib/protobuf.c                       |   15
-rw-r--r--  src/lib/serdes-irm.c                     |    8
-rw-r--r--  src/lib/ssm/pool.c                       |  231
-rw-r--r--  src/lib/ssm/ssm.h.in                     |   31
-rw-r--r--  src/lib/ssm/tests/pool_sharding_test.c   |   23
-rw-r--r--  src/lib/ssm/tests/pool_test.c            |   32
-rw-r--r--  src/lib/timerwheel.c                     |   10
-rw-r--r--  src/lib/utils.c                          |   64
-rw-r--r--  src/tools/oping/oping.c                  |   11
-rw-r--r--  src/tools/oping/oping_client.c           |  149
-rw-r--r--  src/tools/oping/oping_server.c           |    5
49 files changed, 1488 insertions(+), 631 deletions(-)
diff --git a/cmake/ipcp/broadcast.cmake b/cmake/ipcp/broadcast.cmake
index 20610a25..4f43d001 100644
--- a/cmake/ipcp/broadcast.cmake
+++ b/cmake/ipcp/broadcast.cmake
@@ -19,4 +19,10 @@ add_executable(${IPCP_BROADCAST_TARGET}
target_include_directories(${IPCP_BROADCAST_TARGET} PRIVATE ${IPCP_INCLUDE_DIRS})
target_link_libraries(${IPCP_BROADCAST_TARGET} PUBLIC ouroboros-dev)
+
+include(utils/AddCompileFlags)
+if (CMAKE_BUILD_TYPE MATCHES "Debug*")
+ add_compile_flags(${IPCP_BROADCAST_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+endif ()
+
install(TARGETS ${IPCP_BROADCAST_TARGET} RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
diff --git a/cmake/ipcp/eth.cmake b/cmake/ipcp/eth.cmake
index c14a1d6e..0960c8a5 100644
--- a/cmake/ipcp/eth.cmake
+++ b/cmake/ipcp/eth.cmake
@@ -38,5 +38,11 @@ if (HAVE_ETH)
target_link_libraries(${target} PUBLIC ouroboros-dev)
endforeach()
+ include(utils/AddCompileFlags)
+ if (CMAKE_BUILD_TYPE MATCHES "Debug*")
+ add_compile_flags(${IPCP_ETH_LLC_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+ add_compile_flags(${IPCP_ETH_DIX_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+ endif ()
+
install(TARGETS ${IPCP_ETH_LLC_TARGET} ${IPCP_ETH_DIX_TARGET} RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
endif ()
diff --git a/cmake/ipcp/ipcp.cmake b/cmake/ipcp/ipcp.cmake
index f821f481..006e76b0 100644
--- a/cmake/ipcp/ipcp.cmake
+++ b/cmake/ipcp/ipcp.cmake
@@ -23,7 +23,7 @@ set(BUILD_CONTAINER FALSE CACHE BOOL
"Disable thread priority setting for container compatibility")
if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
- set(IPCP_LINUX_TIMERSLACK_NS 1000 CACHE STRING
+ set(IPCP_LINUX_TIMERSLACK_NS 100 CACHE STRING
"Slack value for high resolution timers on Linux systems.")
endif ()
diff --git a/cmake/ipcp/local.cmake b/cmake/ipcp/local.cmake
index 9e320aad..7b604ba1 100644
--- a/cmake/ipcp/local.cmake
+++ b/cmake/ipcp/local.cmake
@@ -5,7 +5,20 @@ set(IPCP_LOCAL_TARGET ipcpd-local CACHE INTERNAL "")
set(IPCP_LOCAL_MPL 100 CACHE STRING
"Default maximum packet lifetime for the Local IPCP, in ms")
+set(IPCP_LOCAL_POLLING FALSE CACHE BOOL
+ "Enable active polling in the Local IPCP for low-latency mode")
+
add_executable(${IPCP_LOCAL_TARGET} "${LOCAL_SOURCE_DIR}/main.c" ${IPCP_SOURCES})
target_include_directories(${IPCP_LOCAL_TARGET} PRIVATE ${IPCP_INCLUDE_DIRS})
target_link_libraries(${IPCP_LOCAL_TARGET} PUBLIC ouroboros-dev)
+
+include(utils/AddCompileFlags)
+if (CMAKE_BUILD_TYPE MATCHES "Debug*")
+ add_compile_flags(${IPCP_LOCAL_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+endif ()
+
+if (IPCP_LOCAL_POLLING)
+ add_compile_flags(${IPCP_LOCAL_TARGET} -DCONFIG_IPCP_LOCAL_POLLING)
+endif ()
+
install(TARGETS ${IPCP_LOCAL_TARGET} RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
diff --git a/cmake/ipcp/udp.cmake b/cmake/ipcp/udp.cmake
index 7195dfa1..360330c5 100644
--- a/cmake/ipcp/udp.cmake
+++ b/cmake/ipcp/udp.cmake
@@ -48,4 +48,10 @@ foreach(target ${IPCP_UDP4_TARGET} ${IPCP_UDP6_TARGET})
target_link_libraries(${target} PUBLIC ouroboros-dev)
endforeach()
+include(utils/AddCompileFlags)
+if (CMAKE_BUILD_TYPE MATCHES "Debug*")
+ add_compile_flags(${IPCP_UDP4_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+ add_compile_flags(${IPCP_UDP6_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+endif ()
+
install(TARGETS ${IPCP_UDP4_TARGET} ${IPCP_UDP6_TARGET} RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
diff --git a/cmake/ipcp/unicast.cmake b/cmake/ipcp/unicast.cmake
index 2a2f7a3f..de237500 100644
--- a/cmake/ipcp/unicast.cmake
+++ b/cmake/ipcp/unicast.cmake
@@ -47,4 +47,10 @@ add_executable(${IPCP_UNICAST_TARGET}
target_include_directories(${IPCP_UNICAST_TARGET} PRIVATE ${IPCP_INCLUDE_DIRS})
target_include_directories(${IPCP_UNICAST_TARGET} PRIVATE "${UNICAST_SOURCE_DIR}")
target_link_libraries(${IPCP_UNICAST_TARGET} PUBLIC ouroboros-dev)
+
+include(utils/AddCompileFlags)
+if (CMAKE_BUILD_TYPE MATCHES "Debug*")
+ add_compile_flags(${IPCP_UNICAST_TARGET} -DCONFIG_OUROBOROS_DEBUG)
+endif ()
+
install(TARGETS ${IPCP_UNICAST_TARGET} RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
diff --git a/cmake/irmd.cmake b/cmake/irmd.cmake
index f02f37d6..1d6341d3 100644
--- a/cmake/irmd.cmake
+++ b/cmake/irmd.cmake
@@ -94,6 +94,7 @@ set(IRMD_SOURCES
"${IRMD_SOURCE_DIR}/oap/cli.c"
"${IRMD_SOURCE_DIR}/reg/flow.c"
"${IRMD_SOURCE_DIR}/reg/ipcp.c"
+ "${IRMD_SOURCE_DIR}/reg/pool.c"
"${IRMD_SOURCE_DIR}/reg/proc.c"
"${IRMD_SOURCE_DIR}/reg/prog.c"
"${IRMD_SOURCE_DIR}/reg/name.c"
diff --git a/cmake/lib/ssm.cmake b/cmake/lib/ssm.cmake
index 71790a4a..8a2971e6 100644
--- a/cmake/lib/ssm.cmake
+++ b/cmake/lib/ssm.cmake
@@ -3,87 +3,135 @@
# secure shared memory pool allocator
# Shared memory pool naming configuration
-set(SSM_PREFIX "o7s.ssm" CACHE STRING
+set(SSM_PREFIX "ouroboros" CACHE STRING
"Prefix for secure shared memory pools")
-set(SSM_GSMP_SUFFIX ".gsmp" CACHE STRING
- "Suffix for Group Shared Memory Pool")
-set(SSM_PPP_SUFFIX ".ppp" CACHE STRING
- "Suffix for Process Private Pool")
+# Pool naming
+set(SSM_GSPP_NAME "/${SSM_PREFIX}.gspp" CACHE INTERNAL
+ "Name for the Global Shared Packet Pool")
+set(SSM_PUP_NAME_FMT "/${SSM_PREFIX}.pup.%d" CACHE INTERNAL
+ "Format string for Per-User Pool names (uid as argument)")
+
+# Legacy naming (for compatibility)
set(SSM_POOL_NAME "/${SHM_PREFIX}.pool" CACHE INTERNAL
- "Name for the main POSIX shared memory pool")
+ "Name for the main POSIX shared memory pool")
set(SSM_POOL_BLOCKS 16384 CACHE STRING
"Number of blocks in SSM packet pool, must be a power of 2")
set(SSM_PK_BUFF_HEADSPACE 256 CACHE STRING
- "Bytes of headspace to reserve for future headers")
+ "Bytes of headspace to reserve for future headers")
set(SSM_PK_BUFF_TAILSPACE 32 CACHE STRING
- "Bytes of tailspace to reserve for future tails")
+ "Bytes of tailspace to reserve for future tails")
set(SSM_RBUFF_SIZE 1024 CACHE STRING
"Number of blocks in rbuff buffer, must be a power of 2")
set(SSM_RBUFF_PREFIX "/${SHM_PREFIX}.rbuff." CACHE INTERNAL
- "Prefix for rbuff POSIX shared memory filenames")
+ "Prefix for rbuff POSIX shared memory filenames")
set(SSM_FLOW_SET_PREFIX "/${SHM_PREFIX}.set." CACHE INTERNAL
- "Prefix for the POSIX shared memory flow set")
-
-# Pool blocks per size class
-# This determines how many blocks of each size are preallocated in the pool
-# Higher values reduce allocation failures but increase memory usage
-set(SSM_POOL_256_BLOCKS 1024 CACHE STRING
- "Number of 256B blocks in pool")
-set(SSM_POOL_512_BLOCKS 768 CACHE STRING
- "Number of 512B blocks in pool")
-set(SSM_POOL_1K_BLOCKS 512 CACHE STRING
- "Number of 1KB blocks in pool")
-set(SSM_POOL_2K_BLOCKS 384 CACHE STRING
- "Number of 2KB blocks in pool")
-set(SSM_POOL_4K_BLOCKS 256 CACHE STRING
- "Number of 4KB blocks in pool")
-set(SSM_POOL_16K_BLOCKS 128 CACHE STRING
- "Number of 16KB blocks in pool")
-set(SSM_POOL_64K_BLOCKS 64 CACHE STRING
- "Number of 64KB blocks in pool")
-set(SSM_POOL_256K_BLOCKS 32 CACHE STRING
- "Number of 256KB blocks in pool")
-set(SSM_POOL_1M_BLOCKS 16 CACHE STRING
- "Number of 1MB blocks in pool")
+ "Prefix for the POSIX shared memory flow set")
# Number of shards per size class for reducing contention
set(SSM_POOL_SHARDS 4 CACHE STRING
"Number of allocator shards per size class")
+# Global Shared Packet Pool (GSPP) - for privileged processes
+# Shared by all processes in 'ouroboros' group (~60 MB total)
+set(SSM_GSPP_256_BLOCKS 1024 CACHE STRING
+ "GSPP: Number of 256B blocks")
+set(SSM_GSPP_512_BLOCKS 768 CACHE STRING
+ "GSPP: Number of 512B blocks")
+set(SSM_GSPP_1K_BLOCKS 512 CACHE STRING
+ "GSPP: Number of 1KB blocks")
+set(SSM_GSPP_2K_BLOCKS 384 CACHE STRING
+ "GSPP: Number of 2KB blocks")
+set(SSM_GSPP_4K_BLOCKS 256 CACHE STRING
+ "GSPP: Number of 4KB blocks")
+set(SSM_GSPP_16K_BLOCKS 128 CACHE STRING
+ "GSPP: Number of 16KB blocks")
+set(SSM_GSPP_64K_BLOCKS 64 CACHE STRING
+ "GSPP: Number of 64KB blocks")
+set(SSM_GSPP_256K_BLOCKS 32 CACHE STRING
+ "GSPP: Number of 256KB blocks")
+set(SSM_GSPP_1M_BLOCKS 16 CACHE STRING
+ "GSPP: Number of 1MB blocks")
+
+# Per-User Pool (PUP) - for unprivileged applications
+# Each unprivileged app gets its own smaller pool (~7.5 MB total)
+set(SSM_PUP_256_BLOCKS 128 CACHE STRING
+ "PUP: Number of 256B blocks")
+set(SSM_PUP_512_BLOCKS 96 CACHE STRING
+ "PUP: Number of 512B blocks")
+set(SSM_PUP_1K_BLOCKS 64 CACHE STRING
+ "PUP: Number of 1KB blocks")
+set(SSM_PUP_2K_BLOCKS 48 CACHE STRING
+ "PUP: Number of 2KB blocks")
+set(SSM_PUP_4K_BLOCKS 32 CACHE STRING
+ "PUP: Number of 4KB blocks")
+set(SSM_PUP_16K_BLOCKS 16 CACHE STRING
+ "PUP: Number of 16KB blocks")
+set(SSM_PUP_64K_BLOCKS 8 CACHE STRING
+ "PUP: Number of 64KB blocks")
+set(SSM_PUP_256K_BLOCKS 2 CACHE STRING
+ "PUP: Number of 256KB blocks")
+set(SSM_PUP_1M_BLOCKS 0 CACHE STRING
+ "PUP: Number of 1MB blocks")
+
# SSM packet buffer overhead - computed at compile time via sizeof()
# Defined in config.h.in as sizeof(_ssm_memory_block) + sizeof(_ssm_pk_buff)
-# This makes it portable across platforms with different pid_t sizes and padding
+# This makes it portable across platforms with different pid_t sizes
+
+# GSPP total size calculation
+math(EXPR SSM_GSPP_TOTAL_SIZE
+ "(1 << 8) * ${SSM_GSPP_256_BLOCKS} + \
+ (1 << 9) * ${SSM_GSPP_512_BLOCKS} + \
+ (1 << 10) * ${SSM_GSPP_1K_BLOCKS} + \
+ (1 << 11) * ${SSM_GSPP_2K_BLOCKS} + \
+ (1 << 12) * ${SSM_GSPP_4K_BLOCKS} + \
+ (1 << 14) * ${SSM_GSPP_16K_BLOCKS} + \
+ (1 << 16) * ${SSM_GSPP_64K_BLOCKS} + \
+ (1 << 18) * ${SSM_GSPP_256K_BLOCKS} + \
+ (1 << 20) * ${SSM_GSPP_1M_BLOCKS}")
+
+set(SSM_GSPP_TOTAL_SIZE ${SSM_GSPP_TOTAL_SIZE} CACHE INTERNAL
+ "GSPP total size in bytes")
+
+# PUP total size calculation
+math(EXPR SSM_PUP_TOTAL_SIZE
+ "(1 << 8) * ${SSM_PUP_256_BLOCKS} + \
+ (1 << 9) * ${SSM_PUP_512_BLOCKS} + \
+ (1 << 10) * ${SSM_PUP_1K_BLOCKS} + \
+ (1 << 11) * ${SSM_PUP_2K_BLOCKS} + \
+ (1 << 12) * ${SSM_PUP_4K_BLOCKS} + \
+ (1 << 14) * ${SSM_PUP_16K_BLOCKS} + \
+ (1 << 16) * ${SSM_PUP_64K_BLOCKS} + \
+ (1 << 18) * ${SSM_PUP_256K_BLOCKS} + \
+ (1 << 20) * ${SSM_PUP_1M_BLOCKS}")
-# Total shared memory pool size calculation
-math(EXPR SSM_POOL_TOTAL_SIZE
- "(1 << 8) * ${SSM_POOL_256_BLOCKS} + \
- (1 << 9) * ${SSM_POOL_512_BLOCKS} + \
- (1 << 10) * ${SSM_POOL_1K_BLOCKS} + \
- (1 << 11) * ${SSM_POOL_2K_BLOCKS} + \
- (1 << 12) * ${SSM_POOL_4K_BLOCKS} + \
- (1 << 14) * ${SSM_POOL_16K_BLOCKS} + \
- (1 << 16) * ${SSM_POOL_64K_BLOCKS} + \
- (1 << 18) * ${SSM_POOL_256K_BLOCKS} + \
- (1 << 20) * ${SSM_POOL_1M_BLOCKS}")
+set(SSM_PUP_TOTAL_SIZE ${SSM_PUP_TOTAL_SIZE} CACHE INTERNAL
+ "PUP total size in bytes")
-set(SSM_POOL_TOTAL_SIZE ${SSM_POOL_TOTAL_SIZE} CACHE INTERNAL
+# Legacy total size (same as GSPP)
+set(SSM_POOL_TOTAL_SIZE ${SSM_GSPP_TOTAL_SIZE} CACHE INTERNAL
"Total shared memory pool size in bytes")
include(utils/HumanReadable)
-format_bytes_human_readable(${SSM_POOL_TOTAL_SIZE} SSM_POOL_SIZE_DISPLAY)
+format_bytes_human_readable(${SSM_GSPP_TOTAL_SIZE} SSM_GSPP_SIZE_DISPLAY)
+format_bytes_human_readable(${SSM_PUP_TOTAL_SIZE} SSM_PUP_SIZE_DISPLAY)
# Display configuration summary
message(STATUS "Secure Shared Memory Pool Configuration:")
message(STATUS " Pool prefix: ${SSM_PREFIX}")
message(STATUS " Size classes: "
- "256B, 512B, 1KiB, 2KiB, 4KiB, 16KiB, 64KiB, 256KiB, 1MiB")
+ "256B, 512B, 1KiB, 2KiB, 4KiB, 16KiB, 64KiB, 256KiB, 1MiB")
message(STATUS " Max allocation: 1 MB")
-message(STATUS " Total pool size: ${SSM_POOL_SIZE_DISPLAY} "
- "(${SSM_POOL_TOTAL_SIZE} bytes)")
message(STATUS " Shards per class: ${SSM_POOL_SHARDS}")
-message(STATUS " Blocks per class: ${SSM_POOL_256_BLOCKS}, "
- "${SSM_POOL_512_BLOCKS}, ${SSM_POOL_1K_BLOCKS}, "
- "${SSM_POOL_2K_BLOCKS}, ${SSM_POOL_4K_BLOCKS}, "
- "${SSM_POOL_16K_BLOCKS}, ${SSM_POOL_64K_BLOCKS}, "
- "${SSM_POOL_256K_BLOCKS}, ${SSM_POOL_1M_BLOCKS}")
+message(STATUS " GSPP (privileged): ${SSM_GSPP_SIZE_DISPLAY} "
+ "(${SSM_GSPP_TOTAL_SIZE} bytes)")
+message(STATUS " Blocks: ${SSM_GSPP_256_BLOCKS}, ${SSM_GSPP_512_BLOCKS}, "
+ "${SSM_GSPP_1K_BLOCKS}, ${SSM_GSPP_2K_BLOCKS}, ${SSM_GSPP_4K_BLOCKS}, "
+ "${SSM_GSPP_16K_BLOCKS}, ${SSM_GSPP_64K_BLOCKS}, ${SSM_GSPP_256K_BLOCKS}, "
+ "${SSM_GSPP_1M_BLOCKS}")
+message(STATUS " PUP (unprivileged): ${SSM_PUP_SIZE_DISPLAY} "
+ "(${SSM_PUP_TOTAL_SIZE} bytes)")
+message(STATUS " Blocks: ${SSM_PUP_256_BLOCKS}, ${SSM_PUP_512_BLOCKS}, "
+ "${SSM_PUP_1K_BLOCKS}, ${SSM_PUP_2K_BLOCKS}, ${SSM_PUP_4K_BLOCKS}, "
+ "${SSM_PUP_16K_BLOCKS}, ${SSM_PUP_64K_BLOCKS}, ${SSM_PUP_256K_BLOCKS}, "
+ "${SSM_PUP_1M_BLOCKS}")
diff --git a/include/ouroboros/flow.h b/include/ouroboros/flow.h
index f9aa0d83..6b3dcde4 100644
--- a/include/ouroboros/flow.h
+++ b/include/ouroboros/flow.h
@@ -46,6 +46,8 @@ struct flow_info {
pid_t n_pid;
pid_t n_1_pid;
+ uid_t uid; /* 0 = privileged (GSPP), > 0 = PUP uid */
+
time_t mpl;
struct qos_spec qs;
diff --git a/include/ouroboros/ipcp-dev.h b/include/ouroboros/ipcp-dev.h
index 118f1101..37c8064f 100644
--- a/include/ouroboros/ipcp-dev.h
+++ b/include/ouroboros/ipcp-dev.h
@@ -47,10 +47,12 @@ int ipcp_flow_write(int fd,
struct ssm_pk_buff * spb);
int np1_flow_read(int fd,
- struct ssm_pk_buff ** spb);
+ struct ssm_pk_buff ** spb,
+ struct ssm_pool * pool);
int np1_flow_write(int fd,
- struct ssm_pk_buff * spb);
+ struct ssm_pk_buff * spb,
+ struct ssm_pool * pool);
int ipcp_flow_dealloc(int fd);
diff --git a/include/ouroboros/local-dev.h b/include/ouroboros/local-dev.h
index da62e31c..cd0298d3 100644
--- a/include/ouroboros/local-dev.h
+++ b/include/ouroboros/local-dev.h
@@ -23,9 +23,11 @@
#ifndef OUROBOROS_LIB_LOCAL_DEV_H
#define OUROBOROS_LIB_LOCAL_DEV_H
-ssize_t local_flow_read(int fd);
+#include <ouroboros/ssm_pool.h>
-int local_flow_write(int fd,
- size_t idx);
+int local_flow_transfer(int src_fd,
+ int dst_fd,
+ struct ssm_pool * src_pool,
+ struct ssm_pool * dst_pool);
#endif /* OUROBOROS_LIB_LOCAL_DEV_H */
diff --git a/include/ouroboros/proc.h b/include/ouroboros/proc.h
index 80c67227..0e27362e 100644
--- a/include/ouroboros/proc.h
+++ b/include/ouroboros/proc.h
@@ -31,8 +31,9 @@
/* Processes */
struct proc_info {
pid_t pid;
- char prog[PROG_NAME_SIZE + 1]; /* program instantiated */
-
+ char prog[PROG_NAME_SIZE + 1];
+ uid_t uid;
+ gid_t gid;
};
/* Programs */
diff --git a/include/ouroboros/serdes-irm.h b/include/ouroboros/serdes-irm.h
index 246db23d..bd04fc57 100644
--- a/include/ouroboros/serdes-irm.h
+++ b/include/ouroboros/serdes-irm.h
@@ -26,6 +26,7 @@
#include <ouroboros/crypt.h>
#include <ouroboros/flow.h>
#include <ouroboros/ipcp.h>
+#include <ouroboros/proc.h>
#include <ouroboros/time.h>
#include <ouroboros/utils.h>
@@ -69,8 +70,8 @@ int ipcp_flow_dealloc__irm_req_ser(buffer_t * buf,
int ipcp_create_r__irm_req_ser(buffer_t * buf,
const struct ipcp_info * ipcp);
-int proc_announce__irm_req_ser(buffer_t * buf,
- const char * prog);
+int proc_announce__irm_req_ser(buffer_t * buf,
+ const struct proc_info * proc);
int proc_exit__irm_req_ser(buffer_t * buf);
diff --git a/include/ouroboros/ssm_pool.h b/include/ouroboros/ssm_pool.h
index 80b22489..4becbdf5 100644
--- a/include/ouroboros/ssm_pool.h
+++ b/include/ouroboros/ssm_pool.h
@@ -32,18 +32,20 @@
struct ssm_pool;
-struct ssm_pool * ssm_pool_create(void);
+/* Pool API: uid = 0 for GSPP (privileged), uid > 0 for PUP (per-user) */
+struct ssm_pool * ssm_pool_create(uid_t uid,
+ gid_t gid);
-struct ssm_pool * ssm_pool_open(void);
+struct ssm_pool * ssm_pool_open(uid_t uid);
void ssm_pool_close(struct ssm_pool * pool);
void ssm_pool_destroy(struct ssm_pool * pool);
-void ssm_pool_purge(void);
-
int ssm_pool_mlock(struct ssm_pool * pool);
+void ssm_pool_gspp_purge(void);
+
/* Alloc count bytes, returns block index, a ptr and pk_buff. */
ssize_t ssm_pool_alloc(struct ssm_pool * pool,
size_t count,
diff --git a/include/ouroboros/utils.h b/include/ouroboros/utils.h
index 5d082d5c..f53361eb 100644
--- a/include/ouroboros/utils.h
+++ b/include/ouroboros/utils.h
@@ -23,9 +23,11 @@
#ifndef OUROBOROS_LIB_UTILS_H
#define OUROBOROS_LIB_UTILS_H
+#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/types.h>
#include <unistd.h>
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
@@ -54,6 +56,10 @@ char * path_strip(const char * src);
char * trim_whitespace(char * str);
+bool is_ouroboros_member_uid(uid_t uid);
+
+bool is_ouroboros_member(void);
+
/* functions for copying and destroying arguments list */
size_t argvlen(const char ** argv);
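The membership helpers declared here are implemented in src/lib/utils.c
(the body is not shown in this diff). A plausible sketch, assuming
membership means the uid is root or belongs to the "ouroboros" group:

    #include <grp.h>
    #include <pwd.h>
    #include <stdbool.h>
    #include <string.h>
    #include <unistd.h>

    /* Sketch only: the real implementation is in src/lib/utils.c. */
    bool is_ouroboros_member_uid(uid_t uid)
    {
            struct group *  gr;
            struct passwd * pw;
            char **         mem;

            if (uid == 0)
                    return true;       /* root is always privileged */

            gr = getgrnam("ouroboros");
            if (gr == NULL)
                    return false;

            pw = getpwuid(uid);
            if (pw == NULL)
                    return false;

            if (pw->pw_gid == gr->gr_gid)  /* primary group matches */
                    return true;

            for (mem = gr->gr_mem; *mem != NULL; ++mem)
                    if (strcmp(*mem, pw->pw_name) == 0)
                            return true;   /* supplementary member */

            return false;
    }

    bool is_ouroboros_member(void)
    {
            return is_ouroboros_member_uid(getuid());
    }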
diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c
index f36e0b13..9d281297 100644
--- a/src/ipcpd/eth/eth.c
+++ b/src/ipcpd/eth/eth.c
@@ -52,6 +52,7 @@
#include <ouroboros/pthread.h>
#include "ipcp.h"
+#include "np1.h"
#include "shim-data.h"
#include <signal.h>
@@ -990,7 +991,7 @@ static void * eth_ipcp_packet_reader(void * o)
buf = ssm_pk_buff_head(spb);
memcpy(buf, &e_frame->payload, length);
#endif
- if (np1_flow_write(fd, spb) < 0)
+ if (np1_flow_write(fd, spb, NP1_GET_POOL(fd)) < 0)
ipcp_spb_release(spb);
continue;
@@ -1040,7 +1041,7 @@ static void * eth_ipcp_packet_writer(void * o)
if (fqueue_type(fq) != FLOW_PKT)
continue;
- if (np1_flow_read(fd, &spb)) {
+ if (np1_flow_read(fd, &spb, NP1_GET_POOL(fd))) {
log_dbg("Bad read from fd %d.", fd);
continue;
}
diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c
index ebb9b1c5..3ea77da9 100644
--- a/src/ipcpd/ipcp.c
+++ b/src/ipcpd/ipcp.c
@@ -52,6 +52,7 @@
#include <ouroboros/utils.h>
#include "ipcp.h"
+#include "np1.h"
#include <signal.h>
#include <string.h>
@@ -131,6 +132,8 @@ struct {
pthread_t acceptor;
} ipcpd;
+struct np1_state np1;
+
struct cmd {
struct list_head next;
@@ -633,9 +636,11 @@ static void do_flow_alloc(pid_t pid,
uint8_t * dst,
qosspec_t qs,
const buffer_t * data,
+ uid_t uid,
ipcp_msg_t * ret_msg)
{
- int fd;
+ int fd;
+ struct ssm_pool * pool = NULL;
log_info("Allocating flow %d for %d to " HASH_FMT32 ".",
flow_id, pid, HASH_VAL32(dst));
@@ -662,6 +667,17 @@ static void do_flow_alloc(pid_t pid,
return;
}
+ if (uid != 0) {
+ pool = ssm_pool_open(uid);
+ if (pool == NULL) {
+ log_err("Failed to open PUP for uid %d.", uid);
+ ret_msg->result = -ENOMEM;
+ return;
+ }
+ }
+
+ NP1_SET_POOL(fd, pool);
+
ret_msg->result = ipcpd.ops->ipcp_flow_alloc(fd, dst, qs, data);
log_info("Finished allocating flow %d to " HASH_FMT32 ".",
@@ -672,9 +688,11 @@ static void do_flow_alloc(pid_t pid,
static void do_flow_join(pid_t pid,
int flow_id,
const uint8_t * dst,
+ uid_t uid,
ipcp_msg_t * ret_msg)
{
- int fd;
+ int fd;
+ struct ssm_pool * pool = NULL;
log_info("Joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
@@ -699,6 +717,17 @@ static void do_flow_join(pid_t pid,
return;
}
+ if (uid != 0) {
+ pool = ssm_pool_open(uid);
+ if (pool == NULL) {
+ log_err("Failed to open PUP for uid %d.", uid);
+ ret_msg->result = -ENOMEM;
+ return;
+ }
+ }
+
+ NP1_SET_POOL(fd, pool);
+
ret_msg->result = ipcpd.ops->ipcp_flow_join(fd, dst);
log_info("Finished joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
@@ -706,10 +735,12 @@ static void do_flow_join(pid_t pid,
static void do_flow_alloc_resp(int resp,
int flow_id,
+ uid_t uid,
const buffer_t * data,
ipcp_msg_t * ret_msg)
{
- int fd = -1;
+ int fd = -1;
+ struct ssm_pool * pool = NULL;
log_info("Responding %d to alloc on flow_id %d.", resp, flow_id);
@@ -737,6 +768,17 @@ static void do_flow_alloc_resp(int resp,
return;
}
+ if (uid != 0) {
+ pool = ssm_pool_open(uid);
+ if (pool == NULL) {
+ log_err("Failed to open PUP for uid %d.", uid);
+ ret_msg->result = -ENOMEM;
+ return;
+ }
+ }
+
+ NP1_SET_POOL(fd, pool);
+
ret_msg->result = ipcpd.ops->ipcp_flow_alloc_resp(fd, resp, data);
log_info("Finished responding %d to allocation request.",
@@ -857,12 +899,12 @@ static void * mainloop(void * o)
qs = qos_spec_msg_to_s(msg->qosspec);
do_flow_alloc(msg->pid, msg->flow_id,
msg->hash.data, qs,
- &data, &ret_msg);
+ &data, msg->uid, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_JOIN:
assert(msg->hash.len == ipcp_dir_hash_len());
do_flow_join(msg->pid, msg->flow_id,
- msg->hash.data, &ret_msg);
+ msg->hash.data, msg->uid, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP:
assert(msg->pk.len > 0 ? msg->pk.data != NULL
@@ -870,7 +912,7 @@ static void * mainloop(void * o)
data.len = msg->pk.len;
data.data = msg->pk.data;
do_flow_alloc_resp(msg->response, msg->flow_id,
- &data, &ret_msg);
+ msg->uid, &data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_DEALLOC:
do_flow_dealloc(msg->flow_id, msg->timeo_sec, &ret_msg);
@@ -1035,6 +1077,8 @@ int ipcp_init(int argc,
ipcpd.alloc_id = -1;
+ memset(&np1, 0, sizeof(np1));
+
pthread_condattr_destroy(&cattr);
ipcp_set_state(IPCP_INIT);
diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c
index e0f3cc5a..5372197f 100644
--- a/src/ipcpd/local/main.c
+++ b/src/ipcpd/local/main.c
@@ -40,6 +40,7 @@
#include <ouroboros/local-dev.h>
#include "ipcp.h"
+#include "np1.h"
#include "shim-data.h"
#include <string.h>
@@ -103,32 +104,41 @@ static void local_data_fini(void){
static void * local_ipcp_packet_loop(void * o)
{
+ int src_fd;
+ int dst_fd;
+ struct timespec * timeout;
+#ifdef CONFIG_IPCP_LOCAL_POLLING
+ struct timespec ts_poll = {0, 0};
+#endif
(void) o;
ipcp_lock_to_core();
- while (true) {
- int fd;
- ssize_t idx;
+#ifdef CONFIG_IPCP_LOCAL_POLLING
+ timeout = &ts_poll; /* Spin poll with zero timeout */
+#else
+ timeout = NULL; /* Block until event */
+#endif
- fevent(local_data.flows, local_data.fq, NULL);
+ while (true) {
+ fevent(local_data.flows, local_data.fq, timeout);
- while ((fd = fqueue_next(local_data.fq)) >= 0) {
+ while ((src_fd = fqueue_next(local_data.fq)) >= 0) {
if (fqueue_type(local_data.fq) != FLOW_PKT)
continue;
- idx = local_flow_read(fd);
- if (idx < 0)
- continue;
-
pthread_rwlock_rdlock(&local_data.lock);
- fd = local_data.in_out[fd];
+ dst_fd = local_data.in_out[src_fd];
pthread_rwlock_unlock(&local_data.lock);
- if (fd != -1)
- local_flow_write(fd, idx);
+ if (dst_fd == -1)
+ continue;
+
+ local_flow_transfer(src_fd, dst_fd,
+ NP1_GET_POOL(src_fd),
+ NP1_GET_POOL(dst_fd));
}
}
diff --git a/src/ipcpd/np1.h b/src/ipcpd/np1.h
new file mode 100644
index 00000000..a3f21200
--- /dev/null
+++ b/src/ipcpd/np1.h
@@ -0,0 +1,41 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2026
+ *
+ * N+1 flow pool tracking for IPCPs
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IPCPD_NP1_H
+#define OUROBOROS_IPCPD_NP1_H
+
+#include "config.h"
+
+#include <ouroboros/ssm_pool.h>
+
+#define NP1_LOAD(ptr) (__atomic_load_n((ptr), __ATOMIC_ACQUIRE))
+#define NP1_STORE(ptr, v) (__atomic_store_n((ptr), (v), __ATOMIC_RELEASE))
+#define NP1_GET_POOL(fd) (NP1_LOAD(&np1.pool[(fd)]))
+#define NP1_SET_POOL(fd, p) (NP1_STORE(&np1.pool[(fd)], (p)))
+
+struct np1_state {
+ struct ssm_pool * pool[SYS_MAX_FLOWS];
+};
+
+extern struct np1_state np1;
+
+#endif /* OUROBOROS_IPCPD_NP1_H */
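The pool pointer for an (N+1)-flow is published once on the allocation
path and read concurrently by the packet threads, hence the
release/acquire atomics. A minimal usage sketch (np1_example() is
illustrative; the real call sites are in ipcp.c and the packet
readers/writers):

    #include "np1.h"   /* assumes the IPCP build context (config.h) */

    /* Sketch: publish the PUP for a new (N+1)-flow, then read it
     * back as the datapath would. NULL means the flow uses the
     * GSPP. The release store / acquire load pair guarantees a
     * reader that sees the pointer also sees the opened pool. */
    static void np1_example(int fd, struct ssm_pool * pup)
    {
            NP1_SET_POOL(fd, pup);               /* release store */

            if (NP1_GET_POOL(fd) == NULL) {      /* acquire load  */
                    /* privileged flow: packets stay in the GSPP */
            }
    }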
diff --git a/src/ipcpd/udp/udp.c b/src/ipcpd/udp/udp.c
index 5e29cb52..5803e674 100644
--- a/src/ipcpd/udp/udp.c
+++ b/src/ipcpd/udp/udp.c
@@ -34,6 +34,7 @@
#include <ouroboros/pthread.h>
#include "ipcp.h"
+#include "np1.h"
#include "shim-data.h"
#include <string.h>
@@ -492,7 +493,7 @@ static void * udp_ipcp_packet_reader(void * o)
head = ssm_pk_buff_head(spb);
memcpy(head, data, n);
- if (np1_flow_write(eid, spb) < 0)
+ if (np1_flow_write(eid, spb, NP1_GET_POOL(eid)) < 0)
ipcp_spb_release(spb);
}
@@ -536,7 +537,7 @@ static void * udp_ipcp_packet_writer(void * o)
if (fqueue_type(fq) != FLOW_PKT)
continue;
- if (np1_flow_read(fd, &spb)) {
+ if (np1_flow_read(fd, &spb, NP1_GET_POOL(fd))) {
log_dbg("Bad read from fd %d.", fd);
continue;
}
diff --git a/src/ipcpd/unicast/fa.c b/src/ipcpd/unicast/fa.c
index 06e4b043..07f5b46c 100644
--- a/src/ipcpd/unicast/fa.c
+++ b/src/ipcpd/unicast/fa.c
@@ -48,6 +48,7 @@
#include "ipcp.h"
#include "dt.h"
#include "ca.h"
+#include "np1.h"
#include <inttypes.h>
#include <stdlib.h>
@@ -687,6 +688,12 @@ void fa_fini(void)
pthread_rwlock_destroy(&fa.flows_lock);
}
+static int np1_flow_read_fa(int fd,
+ struct ssm_pk_buff ** spb)
+{
+ return np1_flow_read(fd, spb, NP1_GET_POOL(fd));
+}
+
int fa_start(void)
{
#ifndef BUILD_CONTAINER
@@ -695,7 +702,7 @@ int fa_start(void)
int max;
#endif
- fa.psched = psched_create(packet_handler, np1_flow_read);
+ fa.psched = psched_create(packet_handler, np1_flow_read_fa);
if (fa.psched == NULL) {
log_err("Failed to start packet scheduler.");
goto fail_psched;
@@ -963,7 +970,7 @@ void fa_np1_rcv(uint64_t eid,
pthread_rwlock_unlock(&fa.flows_lock);
- if (ipcp_flow_write(fd, spb) < 0) {
+ if (np1_flow_write(fd, spb, NP1_GET_POOL(fd)) < 0) {
log_dbg("Failed to write to flow %d.", fd);
ipcp_spb_release(spb);
#ifdef IPCP_FLOW_STATS
diff --git a/src/irmd/config.h.in b/src/irmd/config.h.in
index 43d7f4ee..e1072193 100644
--- a/src/irmd/config.h.in
+++ b/src/irmd/config.h.in
@@ -53,6 +53,8 @@
#define IRMD_MIN_THREADS @IRMD_MIN_THREADS@
#define IRMD_ADD_THREADS @IRMD_ADD_THREADS@
+#define SSM_PID_GSPP 0
+
#cmakedefine HAVE_FUSE
#ifdef HAVE_FUSE
#define FUSE_PREFIX "@FUSE_PREFIX@"
diff --git a/src/irmd/ipcp.c b/src/irmd/ipcp.c
index 6226aeda..d261fc57 100644
--- a/src/irmd/ipcp.c
+++ b/src/irmd/ipcp.c
@@ -421,6 +421,8 @@ int ipcp_flow_join(const struct flow_info * flow,
msg.flow_id = flow->id;
msg.has_pid = true;
msg.pid = flow->n_pid;
+ msg.has_uid = true;
+ msg.uid = flow->uid;
msg.has_hash = true;
msg.hash.data = (uint8_t *) dst.data;
msg.hash.len = dst.len;
@@ -455,6 +457,8 @@ int ipcp_flow_alloc(const struct flow_info * flow,
msg.flow_id = flow->id;
msg.has_pid = true;
msg.pid = flow->n_pid;
+ msg.has_uid = true;
+ msg.uid = flow->uid;
msg.qosspec = qos_spec_s_to_msg(&flow->qs);
msg.has_hash = true;
msg.hash.data = (uint8_t *) dst.data;
@@ -495,6 +499,8 @@ int ipcp_flow_alloc_resp(const struct flow_info * flow,
msg.flow_id = flow->id;
msg.has_pid = true;
msg.pid = flow->n_pid;
+ msg.has_uid = true;
+ msg.uid = flow->uid;
msg.has_response = true;
msg.response = response;
msg.has_pk = response == 0;
diff --git a/src/irmd/main.c b/src/irmd/main.c
index 5b787a24..ccb16017 100644
--- a/src/irmd/main.c
+++ b/src/irmd/main.c
@@ -22,6 +22,7 @@
#if defined(__linux__) || defined(__CYGWIN__)
#define _DEFAULT_SOURCE
+#define _GNU_SOURCE
#else
#define _POSIX_C_SOURCE 200809L
#endif
@@ -39,6 +40,7 @@
#include <ouroboros/list.h>
#include <ouroboros/lockfile.h>
#include <ouroboros/logs.h>
+#include <ouroboros/protobuf.h>
#include <ouroboros/pthread.h>
#include <ouroboros/random.h>
#include <ouroboros/rib.h>
@@ -56,6 +58,8 @@
#include "configfile.h"
#include <dirent.h>
+#include <grp.h>
+#include <pwd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <signal.h>
@@ -770,18 +774,47 @@ static int name_unreg(const char * name,
return -1;
}
+static int get_peer_ids(int fd,
+ uid_t * uid,
+ gid_t * gid)
+{
+#if defined(__linux__)
+ struct ucred ucred;
+ socklen_t len;
+
+ len = sizeof(ucred);
+
+ if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &ucred, &len) < 0)
+ goto fail;
+
+ *uid = ucred.uid;
+ *gid = ucred.gid;
+#else
+ if (getpeereid(fd, uid, gid) < 0)
+ goto fail;
+#endif
+ return 0;
+ fail:
+ return -1;
+}
+
static int proc_announce(const struct proc_info * info)
{
+ if (reg_prepare_pool(info->uid, info->gid) < 0) {
+ log_err("Failed to prepare pool for uid %d.", info->uid);
+ goto fail;
+ }
+
if (reg_create_proc(info) < 0) {
log_err("Failed to add process %d.", info->pid);
- goto fail_proc;
+ goto fail;
}
log_info("Process added: %d (%s).", info->pid, info->prog);
return 0;
- fail_proc:
+ fail:
return -1;
}
@@ -868,6 +901,8 @@ static int flow_accept(struct flow_info * flow,
log_dbg("IPCP %d accepting flow %d for %s.",
flow->n_pid, flow->id, name);
+ flow->uid = reg_get_proc_uid(flow->n_pid);
+
err = oap_srv_process(&info, req_hdr, &resp_hdr, data, sk);
if (err < 0) {
log_err("OAP processing failed for %s.", name);
@@ -879,7 +914,8 @@ static int flow_accept(struct flow_info * flow,
goto fail_resp;
}
- log_info("Flow %d accepted by %d for %s.", flow->id, flow->n_pid, name);
+ log_info("Flow %d accepted by %d for %s (uid %d).",
+ flow->id, flow->n_pid, name, flow->uid);
freebuf(req_hdr);
freebuf(resp_hdr);
@@ -911,14 +947,17 @@ static int flow_join(struct flow_info * flow,
buffer_t pbuf = BUF_INIT; /* nothing to piggyback */
int err;
- log_info("Allocating flow for %d to %s.", flow->n_pid, dst);
-
if (reg_create_flow(flow) < 0) {
log_err("Failed to create flow.");
err = -EBADF;
goto fail_flow;
}
+ flow->uid = reg_get_proc_uid(flow->n_pid);
+
+ log_info("Allocating flow for %d to %s (uid %d).",
+ flow->n_pid, dst, flow->uid);
+
strcpy(layer.name, dst);
if (reg_get_ipcp_by_layer(&ipcp, &layer) < 0) {
log_err("Failed to get IPCP for layer %s.", dst);
@@ -1053,8 +1092,6 @@ static int flow_alloc(const char * dst,
/* piggyback of user data not yet implemented */
assert(data != NULL && BUF_IS_EMPTY(data));
- log_info("Allocating flow for %d to %s.", flow->n_pid, dst);
-
/* Look up name_info for dst */
if (reg_get_name_info(dst, &info) < 0) {
log_err("Failed to get name info for %s.", dst);
@@ -1068,6 +1105,11 @@ static int flow_alloc(const char * dst,
goto fail_flow;
}
+ flow->uid = reg_get_proc_uid(flow->n_pid);
+
+ log_info("Allocating flow for %d to %s (uid %d).",
+ flow->n_pid, dst, flow->uid);
+
if (get_ipcp_by_dst(dst, &flow->n_1_pid, &hash) < 0) {
log_err("Failed to find IPCP for %s.", dst);
err = -EIPCP;
@@ -1332,7 +1374,8 @@ static void __cleanup_irm_msg(void * o)
irm_msg__free_unpacked((irm_msg_t *) o, NULL);
}
-static irm_msg_t * do_command_msg(irm_msg_t * msg)
+static irm_msg_t * do_command_msg(irm_msg_t * msg,
+ int fd)
{
struct ipcp_config conf;
struct ipcp_info ipcp;
@@ -1413,7 +1456,11 @@ static irm_msg_t * do_command_msg(irm_msg_t * msg)
case IRM_MSG_CODE__IRM_PROC_ANNOUNCE:
proc.pid = msg->pid;
strcpy(proc.prog, msg->prog);
- res = proc_announce(&proc);
+ res = get_peer_ids(fd, &proc.uid, &proc.gid);
+ if (res < 0)
+ log_err("Failed to get UID/GID for pid %d.", msg->pid);
+ else
+ res = proc_announce(&proc);
break;
case IRM_MSG_CODE__IRM_PROC_EXIT:
res = proc_exit(msg->pid);
@@ -1598,7 +1645,7 @@ static void * mainloop(void * o)
pthread_cleanup_push(__cleanup_close_ptr, &sfd);
pthread_cleanup_push(__cleanup_irm_msg, msg);
- ret_msg = do_command_msg(msg);
+ ret_msg = do_command_msg(msg, sfd);
pthread_cleanup_pop(true);
pthread_cleanup_pop(false);
@@ -1691,7 +1738,7 @@ static void destroy_mount(char * mnt)
static int ouroboros_reset(void)
{
- ssm_pool_purge();
+ ssm_pool_gspp_purge();
lockfile_destroy(irmd.lf);
return 0;
@@ -1813,6 +1860,8 @@ static int irm_load_store(char * dpath)
static int irm_init(void)
{
struct stat st;
+ struct group * grp;
+ gid_t gid;
pthread_condattr_t cattr;
#ifdef HAVE_FUSE
mode_t mask;
@@ -1898,8 +1947,17 @@ static int irm_init(void)
goto fail_sock_path;
}
- if ((irmd.gspp = ssm_pool_create()) == NULL) {
- log_err("Failed to create pool.");
+ grp = getgrnam("ouroboros");
+ if (grp == NULL) {
+ log_warn("ouroboros group not found, using gid %d.", getgid());
+ gid = getgid();
+ } else {
+ gid = grp->gr_gid;
+ }
+
+ irmd.gspp = ssm_pool_create(getuid(), gid);
+ if (irmd.gspp == NULL) {
+ log_err("Failed to create GSPP.");
goto fail_pool;
}
@@ -2006,8 +2064,7 @@ static void irm_fini(void)
if (unlink(IRM_SOCK_PATH))
log_dbg("Failed to unlink %s.", IRM_SOCK_PATH);
- if (irmd.gspp != NULL)
- ssm_pool_destroy(irmd.gspp);
+ ssm_pool_destroy(irmd.gspp);
if (irmd.lf != NULL)
lockfile_destroy(irmd.lf);
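get_peer_ids() relies on credentials the kernel attaches to the
connected UNIX socket: SO_PEERCRED on Linux, getpeereid() on the BSDs.
A standalone sketch of the Linux branch (print_peer() is illustrative
only):

    #define _GNU_SOURCE            /* struct ucred */
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Sketch: print the peer's credentials for a connected UNIX
     * socket, mirroring the Linux branch of get_peer_ids() above. */
    static int print_peer(int fd)
    {
            struct ucred cr;
            socklen_t    len = sizeof(cr);

            if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cr, &len) < 0)
                    return -1;

            printf("peer pid %d uid %d gid %d\n",
                   (int) cr.pid, (int) cr.uid, (int) cr.gid);

            return 0;
    }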
diff --git a/src/irmd/reg/flow.c b/src/irmd/reg/flow.c
index 02dc9c99..52b03e61 100644
--- a/src/irmd/reg/flow.c
+++ b/src/irmd/reg/flow.c
@@ -178,6 +178,7 @@ int reg_flow_update(struct reg_flow * flow,
}
flow->info.state = info->state;
+ flow->info.uid = info->uid;
*info = flow->info;
diff --git a/src/irmd/reg/pool.c b/src/irmd/reg/pool.c
new file mode 100644
index 00000000..fd983db8
--- /dev/null
+++ b/src/irmd/reg/pool.c
@@ -0,0 +1,101 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2026
+ *
+ * The IPC Resource Manager - Registry - Per-User Pools
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#define _POSIX_C_SOURCE 200809L
+
+#define OUROBOROS_PREFIX "reg/pool"
+
+#include <ouroboros/logs.h>
+#include <ouroboros/ssm_pool.h>
+
+#include "pool.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+struct reg_pool * reg_pool_create(uid_t uid,
+ gid_t gid)
+{
+ struct reg_pool * pool;
+
+ pool = malloc(sizeof(*pool));
+ if (pool == NULL) {
+ log_err("Failed to malloc pool.");
+ goto fail_malloc;
+ }
+
+ pool->ssm = ssm_pool_create(uid, gid);
+ if (pool->ssm == NULL) {
+ log_err("Failed to create PUP for uid %d.", uid);
+ goto fail_ssm;
+ }
+
+ list_head_init(&pool->next);
+ pool->uid = uid;
+ pool->gid = gid;
+ pool->refcount = 1;
+
+ log_dbg("Created PUP for uid %d gid %d.", uid, gid);
+
+ return pool;
+
+ fail_ssm:
+ free(pool);
+ fail_malloc:
+ return NULL;
+}
+
+void reg_pool_destroy(struct reg_pool * pool)
+{
+ assert(pool != NULL);
+ assert(pool->refcount == 0);
+
+ log_dbg("Destroying PUP for uid %d.", pool->uid);
+
+ ssm_pool_destroy(pool->ssm);
+
+ assert(list_is_empty(&pool->next));
+
+ free(pool);
+}
+
+void reg_pool_ref(struct reg_pool * pool)
+{
+ assert(pool != NULL);
+ assert(pool->refcount > 0);
+
+ pool->refcount++;
+
+ log_dbg("PUP uid %d refcount++ -> %zu.", pool->uid, pool->refcount);
+}
+
+int reg_pool_unref(struct reg_pool * pool)
+{
+ assert(pool != NULL);
+ assert(pool->refcount > 0);
+
+ pool->refcount--;
+
+ log_dbg("PUP uid %d refcount-- -> %zu.", pool->uid, pool->refcount);
+
+ return pool->refcount == 0 ? 0 : 1;
+}
diff --git a/src/irmd/reg/pool.h b/src/irmd/reg/pool.h
new file mode 100644
index 00000000..576f491c
--- /dev/null
+++ b/src/irmd/reg/pool.h
@@ -0,0 +1,48 @@
+/*
+ * Ouroboros - Copyright (C) 2016 - 2026
+ *
+ * The IPC Resource Manager - Registry - Per-User Pools
+ *
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., http://www.fsf.org/about/contact/.
+ */
+
+#ifndef OUROBOROS_IRMD_REG_POOL_H
+#define OUROBOROS_IRMD_REG_POOL_H
+
+#include <ouroboros/list.h>
+#include <ouroboros/ssm_pool.h>
+
+#include <sys/types.h>
+
+struct reg_pool {
+ struct list_head next;
+ uid_t uid;
+ gid_t gid;
+ size_t refcount;
+ struct ssm_pool * ssm;
+};
+
+struct reg_pool * reg_pool_create(uid_t uid,
+ gid_t gid);
+
+void reg_pool_destroy(struct reg_pool * pool);
+
+void reg_pool_ref(struct reg_pool * pool);
+
+int reg_pool_unref(struct reg_pool * pool);
+
+#endif /* OUROBOROS_IRMD_REG_POOL_H */
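Note that the refcount in struct reg_pool is not locked in pool.c; all
callers in reg.c serialise access under reg.mtx. A lifecycle sketch
under that assumption (pool_lifecycle_example() and the uid/gid pair
1000/1000 are illustrative):

    #include "pool.h"

    /* Sketch: typical PUP lifecycle as driven from reg.c, which
     * holds reg.mtx around every one of these calls. */
    static void pool_lifecycle_example(void)
    {
            struct reg_pool * pool;

            pool = reg_pool_create(1000, 1000);  /* refcount == 1 */
            if (pool == NULL)
                    return;

            reg_pool_ref(pool);             /* another proc announces */

            (void) reg_pool_unref(pool);    /* one proc exits -> 1    */

            if (reg_pool_unref(pool) == 0)  /* last ref gone -> 0     */
                    reg_pool_destroy(pool);
    }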
diff --git a/src/irmd/reg/proc.c b/src/irmd/reg/proc.c
index 541731b2..b97dcf2d 100644
--- a/src/irmd/reg/proc.c
+++ b/src/irmd/reg/proc.c
@@ -25,6 +25,7 @@
#define OUROBOROS_PREFIX "reg/proc"
#include <ouroboros/logs.h>
+#include <ouroboros/utils.h>
#include "proc.h"
@@ -75,6 +76,8 @@ struct reg_proc * reg_proc_create(const struct proc_info * info)
goto fail_malloc;
}
+ memset(proc, 0, sizeof(*proc));
+
proc->set = ssm_flow_set_create(info->pid);
if (proc->set == NULL) {
log_err("Failed to create flow set for %d.", info->pid);
@@ -181,3 +184,10 @@ bool reg_proc_has_name(const struct reg_proc * proc,
{
return __reg_proc_get_name(proc, name) != NULL;
}
+
+bool reg_proc_is_privileged(const struct reg_proc * proc)
+{
+ assert(proc != NULL);
+
+ return is_ouroboros_member_uid(proc->info.uid);
+}
diff --git a/src/irmd/reg/proc.h b/src/irmd/reg/proc.h
index a790e5c9..be4c1161 100644
--- a/src/irmd/reg/proc.h
+++ b/src/irmd/reg/proc.h
@@ -53,4 +53,6 @@ void reg_proc_del_name(struct reg_proc * proc,
bool reg_proc_has_name(const struct reg_proc * proc,
const char * name);
+bool reg_proc_is_privileged(const struct reg_proc * proc);
+
#endif /* OUROBOROS_IRMD_REG_PROC_H */
diff --git a/src/irmd/reg/reg.c b/src/irmd/reg/reg.c
index fd8285be..e89b492b 100644
--- a/src/irmd/reg/reg.c
+++ b/src/irmd/reg/reg.c
@@ -35,6 +35,7 @@ The IPC Resource Manager - Registry
#include "flow.h"
#include "ipcp.h"
#include "name.h"
+#include "pool.h"
#include "proc.h"
#include "prog.h"
@@ -57,6 +58,9 @@ struct {
struct list_head names; /* registered names known */
size_t n_names; /* number of names */
+ struct list_head pools; /* per-user pools */
+ size_t n_pools; /* number of pools */
+
struct list_head procs; /* processes */
size_t n_procs; /* number of processes */
@@ -235,6 +239,20 @@ static struct list_head * __reg_after_name(const char * name)
return p;
}
+static struct reg_pool * __reg_get_pool(uid_t uid)
+{
+ struct list_head * p;
+
+ list_for_each(p, &reg.pools) {
+ struct reg_pool * entry;
+ entry = list_entry(p, struct reg_pool, next);
+ if (entry->uid == uid)
+ return entry;
+ }
+
+ return NULL;
+}
+
static struct reg_proc * __reg_get_proc(pid_t pid)
{
struct list_head * p;
@@ -540,6 +558,7 @@ int reg_init(void)
list_head_init(&reg.flows);
list_head_init(&reg.ipcps);
list_head_init(&reg.names);
+ list_head_init(&reg.pools);
list_head_init(&reg.procs);
list_head_init(&reg.progs);
list_head_init(&reg.spawned);
@@ -589,6 +608,15 @@ void reg_clear(void)
reg.n_procs--;
}
+ list_for_each_safe(p, h, &reg.pools) {
+ struct reg_pool * entry;
+ entry = list_entry(p, struct reg_pool, next);
+ list_del(&entry->next);
+ entry->refcount = 0; /* Force destroy during cleanup */
+ reg_pool_destroy(entry);
+ reg.n_pools--;
+ }
+
list_for_each_safe(p, h, &reg.flows) {
struct reg_flow * entry;
entry = list_entry(p, struct reg_flow, next);
@@ -621,6 +649,7 @@ void reg_fini(void)
assert(list_is_empty(&reg.spawned));
assert(list_is_empty(&reg.progs));
assert(list_is_empty(&reg.procs));
+ assert(list_is_empty(&reg.pools));
assert(list_is_empty(&reg.names));
assert(list_is_empty(&reg.ipcps));
assert(list_is_empty(&reg.flows));
@@ -628,6 +657,7 @@ void reg_fini(void)
assert(reg.n_spawned == 0);
assert(reg.n_progs == 0);
assert(reg.n_procs == 0);
+ assert(reg.n_pools == 0);
assert(reg.n_names == 0);
assert(reg.n_ipcps == 0);
assert(reg.n_flows == 0);
@@ -1090,6 +1120,35 @@ int reg_list_names(name_info_msg_t *** names)
return -ENOMEM;
}
+int reg_prepare_pool(uid_t uid,
+ gid_t gid)
+{
+ struct reg_pool * pool;
+
+ if (is_ouroboros_member_uid(uid))
+ return 0;
+
+ pthread_mutex_lock(&reg.mtx);
+
+ pool = __reg_get_pool(uid);
+ if (pool == NULL) {
+ pool = reg_pool_create(uid, gid);
+ if (pool == NULL) {
+ log_err("Failed to create pool for uid %d.", uid);
+ pthread_mutex_unlock(&reg.mtx);
+ return -1;
+ }
+ list_add(&pool->next, &reg.pools);
+ reg.n_pools++;
+ }
+
+ reg_pool_ref(pool);
+
+ pthread_mutex_unlock(&reg.mtx);
+
+ return 0;
+}
+
int reg_create_proc(const struct proc_info * info)
{
struct reg_proc * proc;
@@ -1100,13 +1159,13 @@ int reg_create_proc(const struct proc_info * info)
if (__reg_get_proc(info->pid) != NULL) {
log_err("Process %d already exists.", info->pid);
- goto fail_proc;
+ goto fail;
}
proc = reg_proc_create(info);
if (proc == NULL) {
log_err("Failed to create process %d.", info->pid);
- goto fail_proc;
+ goto fail;
}
__reg_proc_update_names(proc);
@@ -1121,7 +1180,7 @@ int reg_create_proc(const struct proc_info * info)
return 0;
- fail_proc:
+ fail:
pthread_mutex_unlock(&reg.mtx);
return -1;
}
@@ -1129,6 +1188,7 @@ int reg_create_proc(const struct proc_info * info)
int reg_destroy_proc(pid_t pid)
{
struct reg_proc * proc;
+ struct reg_pool * pool = NULL;
struct pid_entry * spawn;
struct reg_ipcp * ipcp;
@@ -1136,11 +1196,18 @@ int reg_destroy_proc(pid_t pid)
proc = __reg_get_proc(pid);
if (proc != NULL) {
+ if (!is_ouroboros_member_uid(proc->info.uid))
+ pool = __reg_get_pool(proc->info.uid);
list_del(&proc->next);
reg.n_procs--;
reg_proc_destroy(proc);
__reg_del_proc_from_names(pid);
__reg_cancel_flows_for_proc(pid);
+ if (pool != NULL && reg_pool_unref(pool) == 0) {
+ list_del(&pool->next);
+ reg.n_pools--;
+ reg_pool_destroy(pool);
+ }
}
spawn = __reg_get_spawned(pid);
@@ -1175,6 +1242,38 @@ bool reg_has_proc(pid_t pid)
return ret;
}
+bool reg_is_proc_privileged(pid_t pid)
+{
+ struct reg_proc * proc;
+ bool ret = false;
+
+ pthread_mutex_lock(&reg.mtx);
+
+ proc = __reg_get_proc(pid);
+ if (proc != NULL)
+ ret = reg_proc_is_privileged(proc);
+
+ pthread_mutex_unlock(&reg.mtx);
+
+ return ret;
+}
+
+uid_t reg_get_proc_uid(pid_t pid)
+{
+ struct reg_proc * proc;
+ uid_t ret = 0;
+
+ pthread_mutex_lock(&reg.mtx);
+
+ proc = __reg_get_proc(pid);
+ if (proc != NULL && !is_ouroboros_member_uid(proc->info.uid))
+ ret = proc->info.uid;
+
+ pthread_mutex_unlock(&reg.mtx);
+
+ return ret;
+}
+
void reg_kill_all_proc(int signal)
{
pthread_mutex_lock(&reg.mtx);
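reg_get_proc_uid() folds privilege into uid 0: processes whose uid is
in the ouroboros group report uid 0, so their flows stay on the GSPP,
matching the uid field comment in flow.h. A condensed sketch of the
resulting pool selection, as performed in the IPCP flow-alloc paths
(select_pool() is illustrative):

    #include <ouroboros/ssm_pool.h>

    #include <sys/types.h>

    /* Sketch: uid 0 (privileged) keeps the pool pointer NULL, which
     * the np1 tracking treats as "use the GSPP"; any other uid opens
     * that user's PUP, as in do_flow_alloc() in src/ipcpd/ipcp.c. */
    static struct ssm_pool * select_pool(uid_t uid)
    {
            return uid == 0 ? NULL : ssm_pool_open(uid);
    }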
diff --git a/src/irmd/reg/reg.h b/src/irmd/reg/reg.h
index 7728c80f..77264fde 100644
--- a/src/irmd/reg/reg.h
+++ b/src/irmd/reg/reg.h
@@ -31,6 +31,8 @@
#include <ouroboros/time.h>
#include <ouroboros/utils.h>
+#include "pool.h"
+
int reg_init(void);
void reg_clear(void);
@@ -50,6 +52,13 @@ int reg_destroy_proc(pid_t pid);
bool reg_has_proc(pid_t pid);
+bool reg_is_proc_privileged(pid_t pid);
+
+int reg_prepare_pool(uid_t uid,
+ gid_t gid);
+
+uid_t reg_get_proc_uid(pid_t pid);
+
void reg_kill_all_proc(int signal);
pid_t reg_get_dead_proc(void);
diff --git a/src/irmd/reg/tests/proc_test.c b/src/irmd/reg/tests/proc_test.c
index d53f18ec..c4e689f0 100644
--- a/src/irmd/reg/tests/proc_test.c
+++ b/src/irmd/reg/tests/proc_test.c
@@ -27,13 +27,17 @@
#define TEST_PID 65534
#define TEST_PROG "usr/bin/testprog"
+#define TEST_PROC { \
+ .pid = TEST_PID, \
+ .prog = TEST_PROG, \
+ .uid = getuid(), \
+ .gid = getgid() \
+}
+
static int test_reg_proc_create_destroy(void)
{
struct reg_proc * proc;
- struct proc_info info = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info info = TEST_PROC;
TEST_START();
@@ -56,10 +60,7 @@ static int test_reg_proc_create_destroy(void)
static int test_reg_proc_add_name(void)
{
struct reg_proc * proc;
- struct proc_info info = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info info = TEST_PROC;
char * name = "testname";
diff --git a/src/irmd/reg/tests/reg_test.c b/src/irmd/reg/tests/reg_test.c
index 07d0a198..f7a4de8e 100644
--- a/src/irmd/reg/tests/reg_test.c
+++ b/src/irmd/reg/tests/reg_test.c
@@ -21,6 +21,8 @@
*/
+#include "../pool.c"
+#undef OUROBOROS_PREFIX
#include "../reg.c"
#include <test/test.h>
@@ -35,6 +37,12 @@
#define TEST_DATA "testpbufdata"
#define TEST_DATA2 "testpbufdata2"
#define TEST_LAYER "testlayer"
+#define TEST_PROC_INFO { \
+ .pid = TEST_PID, \
+ .prog = TEST_PROG, \
+ .uid = 0, \
+ .gid = 0 \
+}
#define REG_TEST_FAIL() \
do { TEST_FAIL(); reg_clear(); return TEST_RC_FAIL;} while(0)
@@ -852,10 +860,7 @@ static int test_reg_name(void)
static int test_reg_create_proc(void)
{
- struct proc_info info = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info info = TEST_PROC_INFO;
TEST_START();
@@ -1011,10 +1016,7 @@ static int test_reg_prog(void)
static int test_bind_proc(void)
{
- struct proc_info pinfo = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info pinfo = TEST_PROC_INFO;
struct name_info ninfo = {
.name = TEST_NAME,
@@ -1167,10 +1169,7 @@ static int test_inherit_prog(void)
.name = TEST_PROG
};
- struct proc_info procinfo = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info procinfo = TEST_PROC_INFO;
char * exec[] = { TEST_PROG, NULL};
@@ -1308,10 +1307,7 @@ static void * test_call_flow_accept(void * o)
struct timespec timeo = TIMESPEC_INIT_MS(10);
buffer_t pbuf = BUF_INIT;
- struct proc_info pinfo = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info pinfo = TEST_PROC_INFO;
struct flow_info info = {
.n_pid = pinfo.pid,
@@ -1663,10 +1659,7 @@ static int test_wait_proc_success(void)
struct timespec abstime;
struct timespec timeo = TIMESPEC_INIT_S(10);
pthread_t thr;
- struct proc_info info = {
- .pid = TEST_PID,
- .prog = TEST_PROG
- };
+ struct proc_info info = TEST_PROC_INFO;
TEST_START();
diff --git a/src/lib/dev.c b/src/lib/dev.c
index fb06c496..454dd027 100644
--- a/src/lib/dev.c
+++ b/src/lib/dev.c
@@ -127,7 +127,7 @@ struct fqueue {
};
struct {
- struct ssm_pool * gspp;
+ struct ssm_pool * pool;
struct ssm_flow_set * fqset;
struct bmp * fds;
@@ -146,14 +146,14 @@ struct {
fset_t * frct_set;
pthread_rwlock_t lock;
-} ai;
+} proc;
static void flow_destroy(struct fmap * p)
{
- pthread_mutex_lock(&ai.mtx);
+ pthread_mutex_lock(&proc.mtx);
if (p->state == FLOW_DESTROY) {
- pthread_mutex_unlock(&ai.mtx);
+ pthread_mutex_unlock(&proc.mtx);
return;
}
@@ -162,12 +162,12 @@ static void flow_destroy(struct fmap * p)
else
p->state = FLOW_NULL;
- pthread_cond_signal(&ai.cond);
+ pthread_cond_signal(&proc.cond);
- pthread_cleanup_push(__cleanup_mutex_unlock, &ai.mtx);
+ pthread_cleanup_push(__cleanup_mutex_unlock, &proc.mtx);
while (p->state != FLOW_NULL)
- pthread_cond_wait(&ai.cond, &ai.mtx);
+ pthread_cond_wait(&proc.cond, &proc.mtx);
p->fd = -1;
p->state = FLOW_INIT;
@@ -178,17 +178,17 @@ static void flow_destroy(struct fmap * p)
static void flow_set_state(struct fmap * p,
enum flow_state state)
{
- pthread_mutex_lock(&ai.mtx);
+ pthread_mutex_lock(&proc.mtx);
if (p->state == FLOW_DESTROY) {
- pthread_mutex_unlock(&ai.mtx);
+ pthread_mutex_unlock(&proc.mtx);
return;
}
p->state = state;
- pthread_cond_broadcast(&ai.cond);
+ pthread_cond_broadcast(&proc.cond);
- pthread_mutex_unlock(&ai.mtx);
+ pthread_mutex_unlock(&proc.mtx);
}
static enum flow_state flow_wait_assign(int flow_id)
@@ -196,26 +196,26 @@ static enum flow_state flow_wait_assign(int flow_id)
enum flow_state state;
struct fmap * p;
- p = &ai.id_to_fd[flow_id];
+ p = &proc.id_to_fd[flow_id];
- pthread_mutex_lock(&ai.mtx);
+ pthread_mutex_lock(&proc.mtx);
if (p->state == FLOW_ALLOCATED) {
- pthread_mutex_unlock(&ai.mtx);
+ pthread_mutex_unlock(&proc.mtx);
return FLOW_ALLOCATED;
}
if (p->state == FLOW_INIT)
p->state = FLOW_ALLOC_PENDING;
- pthread_cleanup_push(__cleanup_mutex_unlock, &ai.mtx);
+ pthread_cleanup_push(__cleanup_mutex_unlock, &proc.mtx);
while (p->state == FLOW_ALLOC_PENDING)
- pthread_cond_wait(&ai.cond, &ai.mtx);
+ pthread_cond_wait(&proc.cond, &proc.mtx);
if (p->state == FLOW_DESTROY) {
p->state = FLOW_NULL;
- pthread_cond_broadcast(&ai.cond);
+ pthread_cond_broadcast(&proc.cond);
}
state = p->state;
@@ -227,13 +227,13 @@ static enum flow_state flow_wait_assign(int flow_id)
return state;
}
-static int proc_announce(const char * prog)
+static int proc_announce(const struct proc_info * proc)
{
uint8_t buf[SOCK_BUF_SIZE];
buffer_t msg = {SOCK_BUF_SIZE, buf};
int err;
- if (proc_announce__irm_req_ser(&msg, prog) < 0)
+ if (proc_announce__irm_req_ser(&msg, proc) < 0)
return -ENOMEM;
err = send_recv_msg(&msg);
@@ -342,23 +342,23 @@ static void flow_send_keepalive(struct flow * flow,
ssize_t idx;
uint8_t * ptr;
- idx = ssm_pool_alloc(ai.gspp, 0, &ptr, &spb);
+ idx = ssm_pool_alloc(proc.pool, 0, &ptr, &spb);
if (idx < 0)
return;
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
flow->snd_act = now;
if (ssm_rbuff_write(flow->tx_rb, idx))
- ssm_pool_remove(ai.gspp, idx);
+ ssm_pool_remove(proc.pool, idx);
else
ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
}
-/* Needs rdlock on ai. */
+/* Needs rdlock on proc. */
static void _flow_keepalive(struct flow * flow)
{
struct timespec now;
@@ -382,16 +382,16 @@ static void _flow_keepalive(struct flow * flow)
if (ts_diff_ns(&now, &r_act) > (int64_t) timeo * MILLION) {
ssm_rbuff_set_acl(flow->rx_rb, ACL_FLOWPEER);
- ssm_flow_set_notify(ai.fqset, flow_id, FLOW_PEER);
+ ssm_flow_set_notify(proc.fqset, flow_id, FLOW_PEER);
return;
}
if (ts_diff_ns(&now, &s_act) > (int64_t) timeo * (MILLION >> 2)) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
flow_send_keepalive(flow, now);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
}
}
@@ -400,15 +400,15 @@ static void handle_keepalives(void)
struct list_head * p;
struct list_head * h;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- list_for_each_safe(p, h, &ai.flow_list) {
+ list_for_each_safe(p, h, &proc.flow_list) {
struct flow * flow;
flow = list_entry(p, struct flow, next);
_flow_keepalive(flow);
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
}
static void __cleanup_fqueue_destroy(void * fq)
@@ -429,7 +429,7 @@ void * flow_rx(void * o)
pthread_cleanup_push(__cleanup_fqueue_destroy, fq);
/* fevent will filter all FRCT packets for us */
- while ((ret = fevent(ai.frct_set, fq, &tic)) != 0) {
+ while ((ret = fevent(proc.frct_set, fq, &tic)) != 0) {
if (ret == -ETIMEDOUT) {
handle_keepalives();
continue;
@@ -446,63 +446,63 @@ void * flow_rx(void * o)
static void flow_clear(int fd)
{
- memset(&ai.flows[fd], 0, sizeof(ai.flows[fd]));
+ memset(&proc.flows[fd], 0, sizeof(proc.flows[fd]));
- ai.flows[fd].info.id = -1;
+ proc.flows[fd].info.id = -1;
}
static void __flow_fini(int fd)
{
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
- if (ai.flows[fd].frcti != NULL) {
- ai.n_frcti--;
- if (ai.n_frcti == 0) {
- pthread_cancel(ai.tx);
- pthread_join(ai.tx, NULL);
+ if (proc.flows[fd].frcti != NULL) {
+ proc.n_frcti--;
+ if (proc.n_frcti == 0) {
+ pthread_cancel(proc.tx);
+ pthread_join(proc.tx, NULL);
}
- ssm_flow_set_del(ai.fqset, 0, ai.flows[fd].info.id);
+ ssm_flow_set_del(proc.fqset, 0, proc.flows[fd].info.id);
- frcti_destroy(ai.flows[fd].frcti);
+ frcti_destroy(proc.flows[fd].frcti);
}
- if (ai.flows[fd].info.id != -1) {
- flow_destroy(&ai.id_to_fd[ai.flows[fd].info.id]);
- bmp_release(ai.fds, fd);
+ if (proc.flows[fd].info.id != -1) {
+ flow_destroy(&proc.id_to_fd[proc.flows[fd].info.id]);
+ bmp_release(proc.fds, fd);
}
- if (ai.flows[fd].rx_rb != NULL) {
- ssm_rbuff_set_acl(ai.flows[fd].rx_rb, ACL_FLOWDOWN);
- ssm_rbuff_close(ai.flows[fd].rx_rb);
+ if (proc.flows[fd].rx_rb != NULL) {
+ ssm_rbuff_set_acl(proc.flows[fd].rx_rb, ACL_FLOWDOWN);
+ ssm_rbuff_close(proc.flows[fd].rx_rb);
}
- if (ai.flows[fd].tx_rb != NULL) {
- ssm_rbuff_set_acl(ai.flows[fd].tx_rb, ACL_FLOWDOWN);
- ssm_rbuff_close(ai.flows[fd].tx_rb);
+ if (proc.flows[fd].tx_rb != NULL) {
+ ssm_rbuff_set_acl(proc.flows[fd].tx_rb, ACL_FLOWDOWN);
+ ssm_rbuff_close(proc.flows[fd].tx_rb);
}
- if (ai.flows[fd].set != NULL) {
- ssm_flow_set_notify(ai.flows[fd].set,
- ai.flows[fd].info.id,
+ if (proc.flows[fd].set != NULL) {
+ ssm_flow_set_notify(proc.flows[fd].set,
+ proc.flows[fd].info.id,
FLOW_DEALLOC);
- ssm_flow_set_close(ai.flows[fd].set);
+ ssm_flow_set_close(proc.flows[fd].set);
}
- crypt_destroy_ctx(ai.flows[fd].crypt);
+ crypt_destroy_ctx(proc.flows[fd].crypt);
- list_del(&ai.flows[fd].next);
+ list_del(&proc.flows[fd].next);
flow_clear(fd);
}
static void flow_fini(int fd)
{
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
__flow_fini(fd);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
}
#define IS_ENCRYPTED(crypt) ((crypt)->nid != NID_undef)
@@ -517,15 +517,15 @@ static int flow_init(struct flow_info * info,
clock_gettime(PTHREAD_COND_CLOCK, &now);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
- fd = bmp_allocate(ai.fds);
- if (!bmp_is_id_valid(ai.fds, fd)) {
+ fd = bmp_allocate(proc.fds);
+ if (!bmp_is_id_valid(proc.fds, fd)) {
err = -EBADF;
goto fail_fds;
}
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
flow->info = *info;
@@ -566,27 +566,27 @@ static int flow_init(struct flow_info * info,
if (flow->frcti == NULL)
goto fail_frcti;
- if (ssm_flow_set_add(ai.fqset, 0, info->id))
+ if (ssm_flow_set_add(proc.fqset, 0, info->id))
goto fail_flow_set_add;
- ++ai.n_frcti;
- if (ai.n_frcti == 1 &&
- pthread_create(&ai.tx, NULL, flow_tx, NULL) < 0)
+ ++proc.n_frcti;
+ if (proc.n_frcti == 1 &&
+ pthread_create(&proc.tx, NULL, flow_tx, NULL) < 0)
goto fail_tx_thread;
}
- list_add_tail(&flow->next, &ai.flow_list);
+ list_add_tail(&flow->next, &proc.flow_list);
- ai.id_to_fd[info->id].fd = fd;
+ proc.id_to_fd[info->id].fd = fd;
- flow_set_state(&ai.id_to_fd[info->id], FLOW_ALLOCATED);
+ flow_set_state(&proc.id_to_fd[info->id], FLOW_ALLOCATED);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return fd;
fail_tx_thread:
- ssm_flow_set_del(ai.fqset, 0, info->id);
+ ssm_flow_set_del(proc.fqset, 0, info->id);
fail_flow_set_add:
frcti_destroy(flow->frcti);
fail_frcti:
@@ -598,9 +598,9 @@ static int flow_init(struct flow_info * info,
fail_tx_rb:
ssm_rbuff_close(flow->rx_rb);
fail_rx_rb:
- bmp_release(ai.fds, fd);
+ bmp_release(proc.fds, fd);
fail_fds:
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return err;
}
@@ -618,6 +618,7 @@ static void init(int argc,
char ** argv,
char ** envp)
{
+ struct proc_info info;
char * prog = argv[0];
int i;
#ifdef PROC_FLOW_STATS
@@ -635,7 +636,11 @@ static void init(int argc,
goto fail_prog;
}
- if (proc_announce(prog)) {
+ memset(&info, 0, sizeof(info));
+ info.pid = getpid();
+ strncpy(info.prog, prog, PROG_NAME_SIZE);
+
+ if (proc_announce(&info)) {
fprintf(stderr, "FATAL: Could not announce to IRMd.\n");
goto fail_prog;
}
@@ -650,26 +655,30 @@ static void init(int argc,
gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
}
#endif
- ai.fds = bmp_create(PROG_MAX_FLOWS - PROG_RES_FDS, PROG_RES_FDS);
- if (ai.fds == NULL) {
+ proc.fds = bmp_create(PROG_MAX_FLOWS - PROG_RES_FDS, PROG_RES_FDS);
+ if (proc.fds == NULL) {
fprintf(stderr, "FATAL: Could not create fd bitmap.\n");
goto fail_fds;
}
- ai.fqueues = bmp_create(PROG_MAX_FQUEUES, 0);
- if (ai.fqueues == NULL) {
+ proc.fqueues = bmp_create(PROG_MAX_FQUEUES, 0);
+ if (proc.fqueues == NULL) {
fprintf(stderr, "FATAL: Could not create fqueue bitmap.\n");
goto fail_fqueues;
}
- ai.gspp = ssm_pool_open();
- if (ai.gspp == NULL) {
+ if (is_ouroboros_member_uid(getuid()))
+ proc.pool = ssm_pool_open(0);
+ else
+ proc.pool = ssm_pool_open(getuid());
+
+ if (proc.pool == NULL) {
fprintf(stderr, "FATAL: Could not open packet buffer.\n");
goto fail_rdrb;
}
- ai.flows = malloc(sizeof(*ai.flows) * PROG_MAX_FLOWS);
- if (ai.flows == NULL) {
+ proc.flows = malloc(sizeof(*proc.flows) * PROG_MAX_FLOWS);
+ if (proc.flows == NULL) {
fprintf(stderr, "FATAL: Could not malloc flows.\n");
goto fail_flows;
}
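[Note: the branch above is the whole client-side pool policy: members of the ouroboros group (and root) map the GSPP by passing uid 0, everyone else maps their own PUP. A minimal sketch of that rule, using only helpers introduced in this patch; the wrapper name is hypothetical:]

    /* Sketch: open the packet pool for the calling user.
     * ssm_pool_open(0) maps the GSPP; ssm_pool_open(uid)
     * maps that user's per-user pool (PUP). */
    static struct ssm_pool * open_caller_pool(void)
    {
            uid_t uid = getuid();

            return ssm_pool_open(is_ouroboros_member_uid(uid) ? 0 : uid);
    }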
@@ -677,38 +686,38 @@ static void init(int argc,
for (i = 0; i < PROG_MAX_FLOWS; ++i)
flow_clear(i);
- ai.id_to_fd = malloc(sizeof(*ai.id_to_fd) * SYS_MAX_FLOWS);
- if (ai.id_to_fd == NULL) {
+ proc.id_to_fd = malloc(sizeof(*proc.id_to_fd) * SYS_MAX_FLOWS);
+ if (proc.id_to_fd == NULL) {
fprintf(stderr, "FATAL: Could not malloc id_to_fd.\n");
goto fail_id_to_fd;
}
for (i = 0; i < SYS_MAX_FLOWS; ++i)
- ai.id_to_fd[i].state = FLOW_INIT;
+ proc.id_to_fd[i].state = FLOW_INIT;
- if (pthread_mutex_init(&ai.mtx, NULL)) {
+ if (pthread_mutex_init(&proc.mtx, NULL)) {
fprintf(stderr, "FATAL: Could not init mutex.\n");
goto fail_mtx;
}
- if (pthread_cond_init(&ai.cond, NULL) < 0) {
+ if (pthread_cond_init(&proc.cond, NULL) < 0) {
fprintf(stderr, "FATAL: Could not init condvar.\n");
goto fail_cond;
}
- if (pthread_rwlock_init(&ai.lock, NULL) < 0) {
+ if (pthread_rwlock_init(&proc.lock, NULL) < 0) {
fprintf(stderr, "FATAL: Could not initialize flow lock.\n");
goto fail_flow_lock;
}
- ai.fqset = ssm_flow_set_open(getpid());
- if (ai.fqset == NULL) {
+ proc.fqset = ssm_flow_set_open(getpid());
+ if (proc.fqset == NULL) {
fprintf(stderr, "FATAL: Could not open flow set.\n");
goto fail_fqset;
}
- ai.frct_set = fset_create();
- if (ai.frct_set == NULL || ai.frct_set->idx != 0) {
+ proc.frct_set = fset_create();
+ if (proc.frct_set == NULL || proc.frct_set->idx != 0) {
fprintf(stderr, "FATAL: Could not create FRCT set.\n");
goto fail_frct_set;
}
@@ -732,12 +741,12 @@ static void init(int argc,
}
}
#endif
- if (pthread_create(&ai.rx, NULL, flow_rx, NULL) < 0) {
+ if (pthread_create(&proc.rx, NULL, flow_rx, NULL) < 0) {
fprintf(stderr, "FATAL: Could not start monitor thread.\n");
goto fail_monitor;
}
- list_head_init(&ai.flow_list);
+ list_head_init(&proc.flow_list);
return;
@@ -748,27 +757,27 @@ static void init(int argc,
#endif
timerwheel_fini();
fail_timerwheel:
- fset_destroy(ai.frct_set);
+ fset_destroy(proc.frct_set);
fail_frct_set:
- ssm_flow_set_close(ai.fqset);
+ ssm_flow_set_close(proc.fqset);
fail_fqset:
- pthread_rwlock_destroy(&ai.lock);
+ pthread_rwlock_destroy(&proc.lock);
fail_flow_lock:
- pthread_cond_destroy(&ai.cond);
+ pthread_cond_destroy(&proc.cond);
fail_cond:
- pthread_mutex_destroy(&ai.mtx);
+ pthread_mutex_destroy(&proc.mtx);
fail_mtx:
- free(ai.id_to_fd);
+ free(proc.id_to_fd);
fail_id_to_fd:
- free(ai.flows);
+ free(proc.flows);
fail_flows:
- ssm_pool_close(ai.gspp);
+ ssm_pool_close(proc.pool);
fail_rdrb:
- bmp_destroy(ai.fqueues);
+ bmp_destroy(proc.fqueues);
fail_fqueues:
- bmp_destroy(ai.fds);
+ bmp_destroy(proc.fds);
fail_fds:
- memset(&ai, 0, sizeof(ai));
+ memset(&proc, 0, sizeof(proc));
fail_prog:
exit(EXIT_FAILURE);
}
@@ -777,51 +786,52 @@ static void fini(void)
{
int i;
- if (ai.fds == NULL)
+ if (proc.fds == NULL)
return;
- pthread_cancel(ai.rx);
- pthread_join(ai.rx, NULL);
+ pthread_cancel(proc.rx);
+ pthread_join(proc.rx, NULL);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
for (i = 0; i < PROG_MAX_FLOWS; ++i) {
- if (ai.flows[i].info.id != -1) {
+ struct flow * flow = &proc.flows[i];
+ if (flow->info.id != -1) {
ssize_t idx;
- ssm_rbuff_set_acl(ai.flows[i].rx_rb, ACL_FLOWDOWN);
- while ((idx = ssm_rbuff_read(ai.flows[i].rx_rb)) >= 0)
- ssm_pool_remove(ai.gspp, idx);
+ ssm_rbuff_set_acl(flow->rx_rb, ACL_FLOWDOWN);
+ while ((idx = ssm_rbuff_read(flow->rx_rb)) >= 0)
+ ssm_pool_remove(proc.pool, idx);
__flow_fini(i);
}
}
- pthread_cond_destroy(&ai.cond);
- pthread_mutex_destroy(&ai.mtx);
+ pthread_cond_destroy(&proc.cond);
+ pthread_mutex_destroy(&proc.mtx);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
#ifdef PROC_FLOW_STATS
rib_fini();
#endif
timerwheel_fini();
- fset_destroy(ai.frct_set);
+ fset_destroy(proc.frct_set);
- ssm_flow_set_close(ai.fqset);
+ ssm_flow_set_close(proc.fqset);
- pthread_rwlock_destroy(&ai.lock);
+ pthread_rwlock_destroy(&proc.lock);
- free(ai.flows);
- free(ai.id_to_fd);
+ free(proc.flows);
+ free(proc.id_to_fd);
- ssm_pool_close(ai.gspp);
+ ssm_pool_close(proc.pool);
- bmp_destroy(ai.fds);
- bmp_destroy(ai.fqueues);
+ bmp_destroy(proc.fds);
+ bmp_destroy(proc.fqueues);
proc_exit();
- memset(&ai, 0, sizeof(ai));
+ memset(&proc, 0, sizeof(proc));
}
#if defined(__MACH__) && defined(__APPLE__)
@@ -978,12 +988,12 @@ int flow_dealloc(int fd)
- memset(&info, 0, sizeof(flow));
+ memset(&info, 0, sizeof(info));
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -ENOTALLOC;
}
@@ -992,21 +1002,21 @@ int flow_dealloc(int fd)
flow->rcv_timesout = true;
flow->rcv_timeo = tic;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
flow_read(fd, buf, SOCK_BUF_SIZE);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
timeo.tv_sec = frcti_dealloc(flow->frcti);
while (timeo.tv_sec < 0) { /* keep the flow active for rtx */
ssize_t ret;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
ret = flow_read(fd, pkt, PKT_BUF_LEN);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
timeo.tv_sec = frcti_dealloc(flow->frcti);
@@ -1014,7 +1024,7 @@ int flow_dealloc(int fd)
timeo.tv_sec = -timeo.tv_sec;
}
- pthread_cleanup_push(__cleanup_rwlock_unlock, &ai.lock);
+ pthread_cleanup_push(__cleanup_rwlock_unlock, &proc.lock);
ssm_rbuff_fini(flow->tx_rb);
@@ -1048,21 +1058,21 @@ int ipcp_flow_dealloc(int fd)
if (fd < 0 || fd >= SYS_MAX_FLOWS )
return -EINVAL;
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- memset(&info, 0, sizeof(flow));
+ memset(&info, 0, sizeof(info));
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -ENOTALLOC;
}
info.id = flow->info.id;
info.n_1_pid = flow->info.n_1_pid;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
if (ipcp_flow_dealloc__irm_req_ser(&msg, &info) < 0)
return -ENOMEM;
@@ -1096,14 +1106,14 @@ int fccntl(int fd,
if (fd < 0 || fd >= SYS_MAX_FLOWS)
return -EBADF;
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
va_start(l, cmd);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
va_end(l);
return -ENOTALLOC;
}
@@ -1209,24 +1219,24 @@ int fccntl(int fd,
*cflags = frcti_getflags(flow->frcti);
break;
default:
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
va_end(l);
return -ENOTSUP;
};
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
va_end(l);
return 0;
einval:
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
va_end(l);
return -EINVAL;
eperm:
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
va_end(l);
return -EPERM;
}
@@ -1268,15 +1278,15 @@ static int flow_tx_spb(struct flow * flow,
clock_gettime(PTHREAD_COND_CLOCK, &now);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
flow->snd_act = now;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
idx = ssm_pk_buff_get_idx(spb);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (ssm_pk_buff_len(spb) > 0) {
if (frcti_snd(flow->frcti, spb) < 0)
@@ -1289,7 +1299,7 @@ static int flow_tx_spb(struct flow * flow,
goto enomem;
}
- pthread_cleanup_push(__cleanup_rwlock_unlock, &ai.lock);
+ pthread_cleanup_push(__cleanup_rwlock_unlock, &proc.lock);
if (!block)
ret = ssm_rbuff_write(flow->tx_rb, idx);
@@ -1297,7 +1307,7 @@ static int flow_tx_spb(struct flow * flow,
ret = ssm_rbuff_write_b(flow->tx_rb, idx, abstime);
if (ret < 0)
- ssm_pool_remove(ai.gspp, idx);
+ ssm_pool_remove(proc.pool, idx);
else
ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
@@ -1306,8 +1316,8 @@ static int flow_tx_spb(struct flow * flow,
return 0;
enomem:
- pthread_rwlock_unlock(&ai.lock);
- ssm_pool_remove(ai.gspp, idx);
+ pthread_rwlock_unlock(&proc.lock);
+ ssm_pool_remove(proc.pool, idx);
return -ENOMEM;
}
@@ -1330,14 +1340,14 @@ ssize_t flow_write(int fd,
if (fd < 0 || fd >= PROG_MAX_FLOWS)
return -EBADF;
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
clock_gettime(PTHREAD_COND_CLOCK, &abs);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -ENOTALLOC;
}
@@ -1348,7 +1358,7 @@ ssize_t flow_write(int fd,
flags = flow->oflags;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
if ((flags & FLOWFACCMODE) == FLOWFRDONLY)
return -EPERM;
@@ -1356,12 +1366,12 @@ ssize_t flow_write(int fd,
if (flags & FLOWFWNOBLOCK) {
if (!frcti_is_window_open(flow->frcti))
return -EAGAIN;
- idx = ssm_pool_alloc(ai.gspp, count, &ptr, &spb);
+ idx = ssm_pool_alloc(proc.pool, count, &ptr, &spb);
} else {
ret = frcti_window_wait(flow->frcti, abstime);
if (ret < 0)
return ret;
- idx = ssm_pool_alloc_b(ai.gspp, count, &ptr, &spb, abstime);
+ idx = ssm_pool_alloc_b(proc.pool, count, &ptr, &spb, abstime);
}
if (idx < 0)
@@ -1405,16 +1415,16 @@ static ssize_t flow_rx_spb(struct flow * flow,
clock_gettime(PTHREAD_COND_CLOCK, &now);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
flow->rcv_act = now;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
- *spb = ssm_pool_get(ai.gspp, idx);
+ *spb = ssm_pool_get(proc.pool, idx);
if (invalid_pkt(flow, *spb)) {
- ssm_pool_remove(ai.gspp, idx);
+ ssm_pool_remove(proc.pool, idx);
return -EAGAIN;
}
@@ -1439,19 +1449,19 @@ ssize_t flow_read(int fd,
if (fd < 0 || fd >= PROG_MAX_FLOWS)
return -EBADF;
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
clock_gettime(PTHREAD_COND_CLOCK, &now);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -ENOTALLOC;
}
if (flow->part_idx == DONE_PART) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
flow->part_idx = NO_PART;
return 0;
}
@@ -1467,7 +1477,7 @@ ssize_t flow_read(int fd,
idx = flow->part_idx;
if (idx < 0) {
while ((idx = frcti_queued_pdu(flow->frcti)) < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
idx = flow_rx_spb(flow, &spb, block, abstime);
if (idx < 0) {
@@ -1476,19 +1486,19 @@ ssize_t flow_read(int fd,
if (!block)
return idx;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
continue;
}
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
frcti_rcv(flow->frcti, spb);
}
}
- spb = ssm_pool_get(ai.gspp, idx);
+ spb = ssm_pool_get(proc.pool, idx);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
packet = ssm_pk_buff_head(spb);
@@ -1500,25 +1510,25 @@ ssize_t flow_read(int fd,
memcpy(buf, packet, n);
ipcp_spb_release(spb);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
flow->part_idx = (partrd && n == (ssize_t) count) ?
DONE_PART : NO_PART;
flow->rcv_act = now;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return n;
} else {
if (partrd) {
memcpy(buf, packet, count);
ssm_pk_buff_head_release(spb, n);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
flow->part_idx = idx;
flow->rcv_act = now;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return count;
} else {
ipcp_spb_release(spb);
@@ -1537,20 +1547,20 @@ struct flow_set * fset_create(void)
if (set == NULL)
goto fail_malloc;
- assert(ai.fqueues);
+ assert(proc.fqueues);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
- set->idx = bmp_allocate(ai.fqueues);
- if (!bmp_is_id_valid(ai.fqueues, set->idx))
+ set->idx = bmp_allocate(proc.fqueues);
+ if (!bmp_is_id_valid(proc.fqueues, set->idx))
goto fail_bmp_alloc;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return set;
fail_bmp_alloc:
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
free(set);
fail_malloc:
return NULL;
@@ -1563,11 +1573,11 @@ void fset_destroy(struct flow_set * set)
fset_zero(set);
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
- bmp_release(ai.fqueues, set->idx);
+ bmp_release(proc.fqueues, set->idx);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
free(set);
}
@@ -1595,7 +1605,7 @@ void fset_zero(struct flow_set * set)
if (set == NULL)
return;
- ssm_flow_set_zero(ai.fqset, set->idx);
+ ssm_flow_set_zero(proc.fqset, set->idx);
}
int fset_add(struct flow_set * set,
@@ -1607,9 +1617,9 @@ int fset_add(struct flow_set * set,
if (set == NULL || fd < 0 || fd >= SYS_MAX_FLOWS)
return -EINVAL;
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (flow->info.id < 0) {
ret = -EINVAL;
@@ -1617,21 +1627,21 @@ int fset_add(struct flow_set * set,
}
if (flow->frcti != NULL)
- ssm_flow_set_del(ai.fqset, 0, ai.flows[fd].info.id);
+ ssm_flow_set_del(proc.fqset, 0, flow->info.id);
- ret = ssm_flow_set_add(ai.fqset, set->idx, ai.flows[fd].info.id);
+ ret = ssm_flow_set_add(proc.fqset, set->idx, flow->info.id);
if (ret < 0)
goto fail;
- if (ssm_rbuff_queued(ai.flows[fd].rx_rb))
- ssm_flow_set_notify(ai.fqset, ai.flows[fd].info.id, FLOW_PKT);
+ if (ssm_rbuff_queued(flow->rx_rb))
+ ssm_flow_set_notify(proc.fqset, flow->info.id, FLOW_PKT);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return ret;
fail:
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return ret;
}
@@ -1643,37 +1653,40 @@ void fset_del(struct flow_set * set,
if (set == NULL || fd < 0 || fd >= SYS_MAX_FLOWS)
return;
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (flow->info.id >= 0)
- ssm_flow_set_del(ai.fqset, set->idx, flow->info.id);
+ ssm_flow_set_del(proc.fqset, set->idx, flow->info.id);
if (flow->frcti != NULL)
- ssm_flow_set_add(ai.fqset, 0, ai.flows[fd].info.id);
+ ssm_flow_set_add(proc.fqset, 0, flow->info.id);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
}
bool fset_has(const struct flow_set * set,
int fd)
{
- bool ret;
+ struct flow * flow;
+ bool ret;
if (set == NULL || fd < 0 || fd >= SYS_MAX_FLOWS)
return false;
- pthread_rwlock_rdlock(&ai.lock);
+ flow = &proc.flows[fd];
- if (ai.flows[fd].info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
+
+ if (flow->info.id < 0) {
+ pthread_rwlock_unlock(&proc.lock);
return false;
}
- ret = (ssm_flow_set_has(ai.fqset, set->idx, ai.flows[fd].info.id) == 1);
+ ret = (ssm_flow_set_has(proc.fqset, set->idx, flow->info.id) == 1);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return ret;
}
@@ -1690,44 +1703,44 @@ static int fqueue_filter(struct fqueue * fq)
if (fq->fqueue[fq->next].event != FLOW_PKT)
return 1;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- fd = ai.id_to_fd[fq->fqueue[fq->next].flow_id].fd;
+ fd = proc.id_to_fd[fq->fqueue[fq->next].flow_id].fd;
if (fd < 0) {
++fq->next;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
continue;
}
- frcti = ai.flows[fd].frcti;
+ frcti = proc.flows[fd].frcti;
if (frcti == NULL) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return 1;
}
if (__frcti_pdu_ready(frcti) >= 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return 1;
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
- idx = flow_rx_spb(&ai.flows[fd], &spb, false, NULL);
+ idx = flow_rx_spb(&proc.flows[fd], &spb, false, NULL);
if (idx < 0)
return 0;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- spb = ssm_pool_get(ai.gspp, idx);
+ spb = ssm_pool_get(proc.pool, idx);
__frcti_rcv(frcti, spb);
if (__frcti_pdu_ready(frcti) >= 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return 1;
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
++fq->next;
}
@@ -1749,15 +1762,15 @@ int fqueue_next(struct fqueue * fq)
if (fq->next != 0 && fqueue_filter(fq) == 0)
return -EPERM;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
e = fq->fqueue + fq->next;
- fd = ai.id_to_fd[e->flow_id].fd;
+ fd = proc.id_to_fd[e->flow_id].fd;
++fq->next;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return fd;
}
@@ -1795,7 +1808,7 @@ ssize_t fevent(struct flow_set * set,
}
while (ret == 0) {
- ret = ssm_flow_set_wait(ai.fqset, set->idx, fq->fqueue, t);
+ ret = ssm_flow_set_wait(proc.fqset, set->idx, fq->fqueue, t);
if (ret == -ETIMEDOUT)
return -ETIMEDOUT;
@@ -1842,11 +1855,11 @@ int np1_flow_dealloc(int flow_id,
sleep(timeo);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- fd = ai.id_to_fd[flow_id].fd;
+ fd = proc.id_to_fd[flow_id].fd;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return fd;
}
@@ -1859,11 +1872,11 @@ int np1_flow_resp(int flow_id,
if (resp == 0 && flow_wait_assign(flow_id) != FLOW_ALLOCATED)
return -1;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- fd = ai.id_to_fd[flow_id].fd;
+ fd = proc.id_to_fd[flow_id].fd;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return fd;
}
@@ -1942,11 +1955,11 @@ int ipcp_flow_alloc_reply(int fd,
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- flow.id = ai.flows[fd].info.id;
+ flow.id = proc.flows[fd].info.id;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
flow.mpl = mpl;
@@ -1969,25 +1982,25 @@ int ipcp_flow_read(int fd,
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
assert(spb);
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
assert(flow->info.id >= 0);
while (frcti_queued_pdu(flow->frcti) < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
idx = flow_rx_spb(flow, spb, false, NULL);
if (idx < 0)
return idx;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
frcti_rcv(flow->frcti, *spb);
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return 0;
}
@@ -2001,88 +2014,132 @@ int ipcp_flow_write(int fd,
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
assert(spb);
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- pthread_rwlock_wrlock(&ai.lock);
+ pthread_rwlock_wrlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -ENOTALLOC;
}
if ((flow->oflags & FLOWFACCMODE) == FLOWFRDONLY) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -EPERM;
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
ret = flow_tx_spb(flow, spb, true, NULL);
return ret;
}
+static int pool_copy_spb(struct ssm_pool * src_pool,
+ ssize_t src_idx,
+ struct ssm_pool * dst_pool,
+ struct ssm_pk_buff ** dst_spb)
+{
+ struct ssm_pk_buff * src;
+ uint8_t * ptr;
+ size_t len;
+
+ src = ssm_pool_get(src_pool, src_idx);
+ len = ssm_pk_buff_len(src);
+
+ if (ssm_pool_alloc(dst_pool, len, &ptr, dst_spb) < 0) {
+ ssm_pool_remove(src_pool, src_idx);
+ return -ENOMEM;
+ }
+
+ memcpy(ptr, ssm_pk_buff_head(src), len);
+ ssm_pool_remove(src_pool, src_idx);
+
+ return 0;
+}
+
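[Note: pool_copy_spb() has a strict ownership contract: the source buffer is removed from src_pool on every path, success or failure, so the caller must never release src_idx again. A hedged usage sketch:]

    /* Sketch: hand one packet across a pool boundary; the source
     * index is consumed either way, the caller owns dst on success. */
    struct ssm_pk_buff * dst;

    if (pool_copy_spb(src_pool, idx, dst_pool, &dst) < 0)
            return -ENOMEM;              /* src already removed */

    idx = ssm_pk_buff_get_idx(dst);      /* continue with the copy */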
int np1_flow_read(int fd,
- struct ssm_pk_buff ** spb)
+ struct ssm_pk_buff ** spb,
+ struct ssm_pool * pool)
{
- struct flow * flow;
- ssize_t idx = -1;
+ struct flow * flow;
+ ssize_t idx = -1;
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
assert(spb);
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
assert(flow->info.id >= 0);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
idx = ssm_rbuff_read(flow->rx_rb);
if (idx < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return idx;
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
- *spb = ssm_pool_get(ai.gspp, idx);
+ if (pool == NULL) {
+ *spb = ssm_pool_get(proc.pool, idx);
+ } else {
+ /* Cross-pool copy: PUP -> GSPP */
+ if (pool_copy_spb(pool, idx, proc.pool, spb) < 0)
+ return -ENOMEM;
+ }
return 0;
}
int np1_flow_write(int fd,
- struct ssm_pk_buff * spb)
+ struct ssm_pk_buff * spb,
+ struct ssm_pool * pool)
{
- struct flow * flow;
- int ret;
- ssize_t idx;
+ struct flow * flow;
+ struct ssm_pk_buff * dst;
+ int ret;
+ ssize_t idx;
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
assert(spb);
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -ENOTALLOC;
}
if ((flow->oflags & FLOWFACCMODE) == FLOWFRDONLY) {
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return -EPERM;
}
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
idx = ssm_pk_buff_get_idx(spb);
- ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
- if (ret < 0)
- ssm_pool_remove(ai.gspp, idx);
- else
- ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
+ if (pool == NULL) {
+ ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
+ if (ret < 0)
+ ssm_pool_remove(proc.pool, idx);
+ else
+ ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
+ } else {
+ /* Cross-pool copy: GSPP -> PUP */
+ if (pool_copy_spb(proc.pool, idx, pool, &dst) < 0)
+ return -ENOMEM;
+ idx = ssm_pk_buff_get_idx(dst);
+ ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
+ if (ret < 0)
+ ssm_pool_remove(pool, idx);
+ else
+ ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
+ }
return ret;
}
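[Note: in both np1 paths the pool argument acts as a switch: NULL means the flow shares the caller's own pool and the buffer index is handed over zero-copy, while a non-NULL pool marks a trust-domain boundary and costs exactly one memcpy() via pool_copy_spb(). A sketch of an IPCP-side echo using both calls; the helper name is hypothetical:]

    /* Sketch: echo a packet on an n+1 flow. flow_pool is NULL when
     * the peer shares our pool, or the peer's PUP otherwise (one
     * copy in each direction). */
    static int np1_echo(int fd, struct ssm_pool * flow_pool)
    {
            struct ssm_pk_buff * spb;
            int ret;

            ret = np1_flow_read(fd, &spb, flow_pool);
            if (ret < 0)
                    return ret;

            return np1_flow_write(fd, spb, flow_pool);
    }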
@@ -2090,12 +2147,12 @@ int np1_flow_write(int fd,
int ipcp_spb_reserve(struct ssm_pk_buff ** spb,
size_t len)
{
- return ssm_pool_alloc_b(ai.gspp, len, NULL, spb, NULL) < 0 ? -1 : 0;
+ return ssm_pool_alloc_b(proc.pool, len, NULL, spb, NULL) < 0 ? -1 : 0;
}
void ipcp_spb_release(struct ssm_pk_buff * spb)
{
- ssm_pool_remove(ai.gspp, ssm_pk_buff_get_idx(spb));
+ ssm_pool_remove(proc.pool, ssm_pk_buff_get_idx(spb));
}
int ipcp_flow_fini(int fd)
@@ -2104,23 +2161,23 @@ int ipcp_flow_fini(int fd)
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- if (ai.flows[fd].info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ if (proc.flows[fd].info.id < 0) {
+ pthread_rwlock_unlock(&proc.lock);
return -1;
}
- ssm_rbuff_set_acl(ai.flows[fd].rx_rb, ACL_FLOWDOWN);
- ssm_rbuff_set_acl(ai.flows[fd].tx_rb, ACL_FLOWDOWN);
+ ssm_rbuff_set_acl(proc.flows[fd].rx_rb, ACL_FLOWDOWN);
+ ssm_rbuff_set_acl(proc.flows[fd].tx_rb, ACL_FLOWDOWN);
- ssm_flow_set_notify(ai.flows[fd].set,
- ai.flows[fd].info.id,
+ ssm_flow_set_notify(proc.flows[fd].set,
+ proc.flows[fd].info.id,
FLOW_DEALLOC);
- rx_rb = ai.flows[fd].rx_rb;
+ rx_rb = proc.flows[fd].rx_rb;
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
if (rx_rb != NULL)
ssm_rbuff_fini(rx_rb);
@@ -2134,13 +2191,13 @@ int ipcp_flow_get_qoscube(int fd,
assert(fd >= 0 && fd < SYS_MAX_FLOWS);
assert(cube);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- assert(ai.flows[fd].info.id >= 0);
+ assert(proc.flows[fd].info.id >= 0);
- *cube = qos_spec_to_cube(ai.flows[fd].info.qs);
+ *cube = qos_spec_to_cube(proc.flows[fd].info.qs);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return 0;
}
@@ -2149,56 +2206,76 @@ size_t ipcp_flow_queued(int fd)
{
size_t q;
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
- assert(ai.flows[fd].info.id >= 0);
+ assert(proc.flows[fd].info.id >= 0);
- q = ssm_rbuff_queued(ai.flows[fd].tx_rb);
+ q = ssm_rbuff_queued(proc.flows[fd].tx_rb);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return q;
}
-ssize_t local_flow_read(int fd)
+int local_flow_transfer(int src_fd,
+ int dst_fd,
+ struct ssm_pool * src_pool,
+ struct ssm_pool * dst_pool)
{
- ssize_t ret;
-
- assert(fd >= 0);
+ struct flow * src_flow;
+ struct flow * dst_flow;
+ struct ssm_pk_buff * dst_spb;
+ struct ssm_pool * sp;
+ struct ssm_pool * dp;
+ ssize_t idx;
+ int ret;
- pthread_rwlock_rdlock(&ai.lock);
+ assert(src_fd >= 0);
+ assert(dst_fd >= 0);
- ret = ssm_rbuff_read(ai.flows[fd].rx_rb);
+ src_flow = &proc.flows[src_fd];
+ dst_flow = &proc.flows[dst_fd];
- pthread_rwlock_unlock(&ai.lock);
+ sp = src_pool == NULL ? proc.pool : src_pool;
+ dp = dst_pool == NULL ? proc.pool : dst_pool;
- return ret;
-}
-
-int local_flow_write(int fd,
- size_t idx)
-{
- struct flow * flow;
- int ret;
+ pthread_rwlock_rdlock(&proc.lock);
- assert(fd >= 0);
-
- flow = &ai.flows[fd];
-
- pthread_rwlock_rdlock(&ai.lock);
+ idx = ssm_rbuff_read(src_flow->rx_rb);
+ if (idx < 0) {
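[Note: update_rtt_stats() is Welford's online algorithm: rtt_avg holds the running mean and rtt_m2 the running sum of squared deviations, so the mdev printed at exit is sqrt(rtt_m2 / (rcvd - 1)) without storing individual samples. A quick worked check with three hypothetical samples of 1.0, 2.0 and 3.0 ms: the updates leave avg = 2.0 and m2 = 2.0, giving mdev = sqrt(2.0 / 2) = 1.0 ms, the textbook sample standard deviation of that series. The function relies on client.rcvd having been incremented before the call, as both call sites do.]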
+ pthread_rwlock_unlock(&proc.lock);
+ return idx;
+ }
- if (flow->info.id < 0) {
- pthread_rwlock_unlock(&ai.lock);
+ if (dst_flow->info.id < 0) {
+ pthread_rwlock_unlock(&proc.lock);
+ ssm_pool_remove(sp, idx);
return -ENOTALLOC;
}
- ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
- if (ret == 0)
- ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
- else
- ssm_pool_remove(ai.gspp, idx);
+ pthread_rwlock_unlock(&proc.lock);
+
+ if (sp == dp) {
+ /* Same pool: zero-copy */
+ ret = ssm_rbuff_write_b(dst_flow->tx_rb, idx, NULL);
+ if (ret < 0)
+ ssm_pool_remove(sp, idx);
+ else
+ ssm_flow_set_notify(dst_flow->set,
+ dst_flow->info.id, FLOW_PKT);
+ } else {
+ /* Different pools: single copy */
+ if (pool_copy_spb(sp, idx, dp, &dst_spb) < 0)
+ return -ENOMEM;
- pthread_rwlock_unlock(&ai.lock);
+ idx = ssm_pk_buff_get_idx(dst_spb);
+ ret = ssm_rbuff_write_b(dst_flow->tx_rb, idx, NULL);
+ if (ret < 0)
+ ssm_pool_remove(dp, idx);
+ else
+ ssm_flow_set_notify(dst_flow->set,
+ dst_flow->info.id, FLOW_PKT);
+ }
return ret;
}
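[Note: local_flow_transfer() folds the old local_flow_read()/local_flow_write() pair into one call, so the copy decision lives in a single place: identical pools hand the index over untouched, distinct pools pay the one pool_copy_spb(). A sketch of how a local-IPCP loop might drive it; lookup_peer_fd is a hypothetical stand-in for the IPCP's own flow pairing:]

    /* Sketch: shuttle packets between paired local flows. */
    while (fevent(set, fq, NULL) > 0) {
            int src = fqueue_next(fq);
            int dst = lookup_peer_fd(src);  /* hypothetical pairing */

            if (src < 0 || dst < 0)
                    continue;

            /* pool pointers are NULL when that end uses our pool */
            local_flow_transfer(src, dst, src_pool, dst_pool);
    }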
diff --git a/src/lib/frct.c b/src/lib/frct.c
index 76736931..39a82966 100644
--- a/src/lib/frct.c
+++ b/src/lib/frct.c
@@ -118,11 +118,11 @@ static int frct_rib_read(const char * path,
fd = atoi(path);
- flow = &ai.flows[fd];
+ flow = &proc.flows[fd];
clock_gettime(PTHREAD_COND_CLOCK, &now);
- pthread_rwlock_rdlock(&ai.lock);
+ pthread_rwlock_rdlock(&proc.lock);
frcti = flow->frcti;
@@ -176,7 +176,7 @@ static int frct_rib_read(const char * path,
pthread_rwlock_unlock(&flow->frcti->lock);
- pthread_rwlock_unlock(&ai.lock);
+ pthread_rwlock_unlock(&proc.lock);
return strlen(buf);
}
@@ -244,9 +244,9 @@ static void __send_frct_pkt(int fd,
/* Raw calls needed to bypass frcti. */
#ifdef RXM_BLOCKING
- idx = ssm_pool_alloc_b(ai.gspp, sizeof(*pci), NULL, &spb, NULL);
+ idx = ssm_pool_alloc_b(proc.pool, sizeof(*pci), NULL, &spb, NULL);
#else
- idx = ssm_pool_alloc(ai.gspp, sizeof(*pci), NULL, &spb);
+ idx = ssm_pool_alloc(proc.pool, sizeof(*pci), NULL, &spb);
#endif
if (idx < 0)
return;
@@ -259,7 +259,7 @@ static void __send_frct_pkt(int fd,
pci->flags = flags;
pci->ackno = hton32(ackno);
- f = &ai.flows[fd];
+ f = &proc.flows[fd];
if (spb_encrypt(f, spb) < 0)
goto fail;
@@ -398,7 +398,7 @@ static struct frcti * frcti_create(int fd,
frcti->n_out = 0;
frcti->n_rqo = 0;
#endif
- if (ai.flows[fd].info.qs.loss == 0) {
+ if (proc.flows[fd].info.qs.loss == 0) {
frcti->snd_cr.cflags |= FRCTFRTX | FRCTFLINGER;
frcti->rcv_cr.cflags |= FRCTFRTX;
}
@@ -841,7 +841,7 @@ static void __frcti_rcv(struct frcti * frcti,
__send_frct_pkt(fd, FRCT_FC, 0, rwe);
- ssm_pool_remove(ai.gspp, idx);
+ ssm_pool_remove(proc.pool, idx);
return;
}
@@ -928,7 +928,7 @@ static void __frcti_rcv(struct frcti * frcti,
drop_packet:
pthread_rwlock_unlock(&frcti->lock);
- ssm_pool_remove(ai.gspp, idx);
+ ssm_pool_remove(proc.pool, idx);
send_frct_pkt(frcti);
return;
}
diff --git a/src/lib/pb/ipcp.proto b/src/lib/pb/ipcp.proto
index c2c7f48b..deddf6af 100644
--- a/src/lib/pb/ipcp.proto
+++ b/src/lib/pb/ipcp.proto
@@ -56,4 +56,5 @@ message ipcp_msg {
optional uint32 timeo_sec = 12;
optional sint32 mpl = 13;
optional int32 result = 14;
+ optional uint32 uid = 15; /* 0 = GSPP, >0 = PUP uid */
}
diff --git a/src/lib/pb/irm.proto b/src/lib/pb/irm.proto
index 5d0ee611..1845c694 100644
--- a/src/lib/pb/irm.proto
+++ b/src/lib/pb/irm.proto
@@ -94,6 +94,6 @@ message irm_msg {
optional uint32 timeo_sec = 23;
optional uint32 timeo_nsec = 24;
optional sint32 result = 25;
- optional bytes sym_key = 26; /* symmetric encryption key */
- optional sint32 cipher_nid = 27; /* cipher NID */
+ optional bytes sym_key = 26; /* symmetric encryption key */
+ optional sint32 cipher_nid = 27; /* cipher NID */
}
diff --git a/src/lib/pb/model.proto b/src/lib/pb/model.proto
index 7b06e434..82700a9a 100644
--- a/src/lib/pb/model.proto
+++ b/src/lib/pb/model.proto
@@ -34,12 +34,13 @@ message qosspec_msg {
}
message flow_info_msg {
- required uint32 id = 1;
- required uint32 n_pid = 2;
- required uint32 n_1_pid = 3;
- required uint32 mpl = 4;
- required uint32 state = 5;
- required qosspec_msg qos = 6;
+ required uint32 id = 1;
+ required uint32 n_pid = 2;
+ required uint32 n_1_pid = 3;
+ required uint32 mpl = 4;
+ required uint32 state = 5;
+ required qosspec_msg qos = 6;
+ required uint32 uid = 7;
}
message name_info_msg {
diff --git a/src/lib/protobuf.c b/src/lib/protobuf.c
index bd6c179e..4617323a 100644
--- a/src/lib/protobuf.c
+++ b/src/lib/protobuf.c
@@ -24,6 +24,7 @@
#include <ouroboros/protobuf.h>
#include <ouroboros/crypt.h>
+#include <ouroboros/proc.h>
#include <stdlib.h>
#include <string.h>
@@ -74,12 +75,13 @@ flow_info_msg_t * flow_info_s_to_msg(const struct flow_info * s)
flow_info_msg__init(msg);
- msg->id = s->id;
- msg->n_pid = s->n_pid;
- msg->n_1_pid = s->n_1_pid;
- msg->mpl = s->mpl;
- msg->state = s->state;
- msg->qos = qos_spec_s_to_msg(&s->qs);
+ msg->id = s->id;
+ msg->n_pid = s->n_pid;
+ msg->n_1_pid = s->n_1_pid;
+ msg->mpl = s->mpl;
+ msg->state = s->state;
+ msg->uid = s->uid;
+ msg->qos = qos_spec_s_to_msg(&s->qs);
if (msg->qos == NULL)
goto fail_msg;
@@ -104,6 +106,7 @@ struct flow_info flow_info_msg_to_s(const flow_info_msg_t * msg)
s.n_1_pid = msg->n_1_pid;
s.mpl = msg->mpl;
s.state = msg->state;
+ s.uid = msg->uid;
s.qs = qos_spec_msg_to_s(msg->qos);
return s;
diff --git a/src/lib/serdes-irm.c b/src/lib/serdes-irm.c
index 9e829632..8cbe20b1 100644
--- a/src/lib/serdes-irm.c
+++ b/src/lib/serdes-irm.c
@@ -289,8 +289,8 @@ int ipcp_create_r__irm_req_ser(buffer_t * buf,
return -ENOMEM;
}
-int proc_announce__irm_req_ser(buffer_t * buf,
- const char * prog)
+int proc_announce__irm_req_ser(buffer_t * buf,
+ const struct proc_info * proc)
{
irm_msg_t * msg;
size_t len;
@@ -303,8 +303,8 @@ int proc_announce__irm_req_ser(buffer_t * buf,
msg->code = IRM_MSG_CODE__IRM_PROC_ANNOUNCE;
msg->has_pid = true;
- msg->pid = getpid();
- msg->prog = strdup(prog);
+ msg->pid = proc->pid;
+ msg->prog = strdup(proc->prog);
if (msg->prog == NULL)
goto fail_msg;
diff --git a/src/lib/ssm/pool.c b/src/lib/ssm/pool.c
index b8cfe3a1..e938f644 100644
--- a/src/lib/ssm/pool.c
+++ b/src/lib/ssm/pool.c
@@ -41,17 +41,30 @@
#include <sys/mman.h>
#include <sys/stat.h>
-/* Size class configuration from CMake */
-static const struct ssm_size_class_cfg ssm_sc_cfg[SSM_POOL_MAX_CLASSES] = {
- { (1 << 8), SSM_POOL_256_BLOCKS },
- { (1 << 9), SSM_POOL_512_BLOCKS },
- { (1 << 10), SSM_POOL_1K_BLOCKS },
- { (1 << 11), SSM_POOL_2K_BLOCKS },
- { (1 << 12), SSM_POOL_4K_BLOCKS },
- { (1 << 14), SSM_POOL_16K_BLOCKS },
- { (1 << 16), SSM_POOL_64K_BLOCKS },
- { (1 << 18), SSM_POOL_256K_BLOCKS },
- { (1 << 20), SSM_POOL_1M_BLOCKS },
+/* Global Shared Packet Pool (GSPP) configuration */
+static const struct ssm_size_class_cfg ssm_gspp_cfg[SSM_POOL_MAX_CLASSES] = {
+ { (1 << 8), SSM_GSPP_256_BLOCKS },
+ { (1 << 9), SSM_GSPP_512_BLOCKS },
+ { (1 << 10), SSM_GSPP_1K_BLOCKS },
+ { (1 << 11), SSM_GSPP_2K_BLOCKS },
+ { (1 << 12), SSM_GSPP_4K_BLOCKS },
+ { (1 << 14), SSM_GSPP_16K_BLOCKS },
+ { (1 << 16), SSM_GSPP_64K_BLOCKS },
+ { (1 << 18), SSM_GSPP_256K_BLOCKS },
+ { (1 << 20), SSM_GSPP_1M_BLOCKS },
+};
+
+/* Per-User Pool (PUP) configuration */
+static const struct ssm_size_class_cfg ssm_pup_cfg[SSM_POOL_MAX_CLASSES] = {
+ { (1 << 8), SSM_PUP_256_BLOCKS },
+ { (1 << 9), SSM_PUP_512_BLOCKS },
+ { (1 << 10), SSM_PUP_1K_BLOCKS },
+ { (1 << 11), SSM_PUP_2K_BLOCKS },
+ { (1 << 12), SSM_PUP_4K_BLOCKS },
+ { (1 << 14), SSM_PUP_16K_BLOCKS },
+ { (1 << 16), SSM_PUP_64K_BLOCKS },
+ { (1 << 18), SSM_PUP_256K_BLOCKS },
+ { (1 << 20), SSM_PUP_1M_BLOCKS },
};
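[Note: each table entry pairs a power-of-two object size with a CMake-configured block count; the per-class region is size x blocks and the pool's data area is the sum over all classes. A sketch of that arithmetic (the real totals are the SSM_*_TOTAL_SIZE CMake values; the helper name and the numbers in the comment are hypothetical):]

    /* Sketch: footprint implied by a size-class table. E.g. a 4 KiB
     * class with 1024 blocks contributes (1 << 12) * 1024 = 4 MiB. */
    static size_t pool_data_size(const struct ssm_size_class_cfg * cfg)
    {
            size_t total = 0;
            int i;

            for (i = 0; i < SSM_POOL_MAX_CLASSES; i++)
                    total += cfg[i].size * cfg[i].blocks;

            return total;
    }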
#define PTR_TO_OFFSET(pool_base, ptr) \
@@ -83,16 +96,23 @@ static const struct ssm_size_class_cfg ssm_sc_cfg[SSM_POOL_MAX_CLASSES] = {
#define FETCH_SUB(ptr, val) \
(__atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST))
-#define CAS(ptr, expected, desired) \
- (__atomic_compare_exchange_n(ptr, expected, desired, false, \
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
-
#define SSM_FILE_SIZE (SSM_POOL_TOTAL_SIZE + sizeof(struct _ssm_pool_hdr))
+#define SSM_GSPP_FILE_SIZE (SSM_GSPP_TOTAL_SIZE + sizeof(struct _ssm_pool_hdr))
+#define SSM_PUP_FILE_SIZE (SSM_PUP_TOTAL_SIZE + sizeof(struct _ssm_pool_hdr))
+
+#define IS_GSPP(uid) ((uid) == SSM_GSPP_UID)
+#define GET_POOL_TOTAL_SIZE(uid) (IS_GSPP(uid) ? SSM_GSPP_TOTAL_SIZE \
+ : SSM_PUP_TOTAL_SIZE)
+#define GET_POOL_FILE_SIZE(uid) (IS_GSPP(uid) ? SSM_GSPP_FILE_SIZE \
+ : SSM_PUP_FILE_SIZE)
+#define GET_POOL_CFG(uid) (IS_GSPP(uid) ? ssm_gspp_cfg : ssm_pup_cfg)
struct ssm_pool {
- uint8_t * shm_base; /* start of blocks */
- struct _ssm_pool_hdr * hdr; /* shared memory header */
- void * pool_base; /* base of the memory pool */
+ uint8_t * shm_base; /* start of blocks */
+ struct _ssm_pool_hdr * hdr; /* shared memory header */
+ void * pool_base; /* base of the memory pool */
+ uid_t uid; /* user owner (0 = GSPP) */
+ size_t total_size; /* total data size */
};
static __inline__
@@ -143,17 +163,23 @@ static __inline__ void list_add_head(struct _ssm_list_head * head,
STORE(&head->count, LOAD(&head->count) + 1);
}
-static __inline__ int select_size_class(size_t len)
+static __inline__ int select_size_class(struct ssm_pool * pool,
+ size_t len)
{
size_t sz;
int i;
+ assert(pool != NULL);
+
/* Total space needed: header + headspace + data + tailspace */
sz = sizeof(struct ssm_pk_buff) + SSM_PK_BUFF_HEADSPACE + len
+ SSM_PK_BUFF_TAILSPACE;
for (i = 0; i < SSM_POOL_MAX_CLASSES; i++) {
- if (ssm_sc_cfg[i].blocks > 0 && sz <= ssm_sc_cfg[i].size)
+ struct _ssm_size_class * sc;
+
+ sc = &pool->hdr->size_classes[i];
+ if (sc->object_size > 0 && sz <= sc->object_size)
return i;
}
@@ -183,15 +209,16 @@ static __inline__ int find_size_class_for_offset(struct ssm_pool * pool,
static void init_size_classes(struct ssm_pool * pool)
{
- struct _ssm_size_class * sc;
- struct _ssm_shard * shard;
- pthread_mutexattr_t mattr;
- pthread_condattr_t cattr;
- uint8_t * region;
- size_t offset;
- int c;
- int s;
- size_t i;
+ const struct ssm_size_class_cfg * cfg;
+ struct _ssm_size_class * sc;
+ struct _ssm_shard * shard;
+ pthread_mutexattr_t mattr;
+ pthread_condattr_t cattr;
+ uint8_t * region;
+ size_t offset;
+ int c; /* class iterator */
+ int s; /* shard iterator */
+ size_t i;
assert(pool != NULL);
@@ -199,6 +226,8 @@ static void init_size_classes(struct ssm_pool * pool)
if (LOAD(&pool->hdr->initialized) != 0)
return;
+ cfg = GET_POOL_CFG(pool->uid);
+
pthread_mutexattr_init(&mattr);
pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
#ifdef HAVE_ROBUST_MUTEX
@@ -214,15 +243,15 @@ static void init_size_classes(struct ssm_pool * pool)
offset = 0;
for (c = 0; c < SSM_POOL_MAX_CLASSES; c++) {
- if (ssm_sc_cfg[c].blocks == 0)
+ if (cfg[c].blocks == 0)
continue;
sc = &pool->hdr->size_classes[c];
- sc->object_size = ssm_sc_cfg[c].size;
+ sc->object_size = cfg[c].size;
sc->pool_start = offset;
- sc->pool_size = ssm_sc_cfg[c].size * ssm_sc_cfg[c].blocks;
- sc->object_count = ssm_sc_cfg[c].blocks;
+ sc->pool_size = cfg[c].size * cfg[c].blocks;
+ sc->object_count = cfg[c].blocks;
/* Initialize all shards */
for (s = 0; s < SSM_POOL_SHARDS; s++) {
@@ -444,23 +473,26 @@ static ssize_t alloc_from_sc_b(struct ssm_pool * pool,
return init_block(pool, sc, shard, blk, len, ptr, spb);
}
-/* Global Shared Packet Pool */
-static char * gspp_filename(void)
+/* Generate pool filename: uid=0 for GSPP, uid>0 for PUP */
+static char * pool_filename(uid_t uid)
{
char * str;
char * test_suffix;
+ char base[64];
+
+ if (IS_GSPP(uid))
+ snprintf(base, sizeof(base), "%s", SSM_GSPP_NAME);
+ else
+ snprintf(base, sizeof(base), SSM_PUP_NAME_FMT, (int) uid);
test_suffix = getenv("OUROBOROS_TEST_POOL_SUFFIX");
if (test_suffix != NULL) {
- str = malloc(strlen(SSM_POOL_NAME) + strlen(test_suffix) + 1);
+ str = malloc(strlen(base) + strlen(test_suffix) + 1);
if (str == NULL)
return NULL;
- sprintf(str, "%s%s", SSM_POOL_NAME, test_suffix);
+ sprintf(str, "%s%s", base, test_suffix);
} else {
- str = malloc(strlen(SSM_POOL_NAME) + 1);
- if (str == NULL)
- return NULL;
- sprintf(str, "%s", SSM_POOL_NAME);
+ str = strdup(base);
}
return str;
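[Note: SSM_GSPP_NAME and SSM_PUP_NAME_FMT are CMake substitutions, so the literal names below are hypothetical, but the shape of the output is:]

    /* Hypothetical, assuming SSM_GSPP_NAME = "/ouroboros.pool" and
     * SSM_PUP_NAME_FMT = "/ouroboros.pool.%d": */
    pool_filename(0);     /* -> "/ouroboros.pool"       (GSPP) */
    pool_filename(1000);  /* -> "/ouroboros.pool.1000"  (PUP)  */
    /* with OUROBOROS_TEST_POOL_SUFFIX=".t" in the environment: */
    pool_filename(1000);  /* -> "/ouroboros.pool.1000.t"       */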
@@ -468,9 +500,13 @@ static char * gspp_filename(void)
void ssm_pool_close(struct ssm_pool * pool)
{
+ size_t file_size;
+
assert(pool != NULL);
- munmap(pool->shm_base, SSM_FILE_SIZE);
+ file_size = GET_POOL_FILE_SIZE(pool->uid);
+
+ munmap(pool->shm_base, file_size);
free(pool);
}
@@ -481,15 +517,19 @@ void ssm_pool_destroy(struct ssm_pool * pool)
assert(pool != NULL);
if (getpid() != pool->hdr->pid && kill(pool->hdr->pid, 0) == 0) {
+ ssm_pool_close(pool);
- free(pool);
return;
}
- ssm_pool_close(pool);
-
- fn = gspp_filename();
- if (fn == NULL)
+ fn = pool_filename(pool->uid);
+ if (fn == NULL) {
+ ssm_pool_close(pool);
return;
+ }
+
+ ssm_pool_close(pool);
shm_unlink(fn);
free(fn);
@@ -497,72 +537,89 @@ void ssm_pool_destroy(struct ssm_pool * pool)
#define MM_FLAGS (PROT_READ | PROT_WRITE)
-static struct ssm_pool * pool_create(int flags)
+static struct ssm_pool * __pool_create(const char * name,
+ int flags,
+ uid_t uid,
+ mode_t mode)
{
struct ssm_pool * pool;
int fd;
uint8_t * shm_base;
- char * fn;
+ size_t file_size;
+ size_t total_size;
- fn = gspp_filename();
- if (fn == NULL)
- goto fail_fn;
+ file_size = GET_POOL_FILE_SIZE(uid);
+ total_size = GET_POOL_TOTAL_SIZE(uid);
- pool = malloc(sizeof *pool);
+ pool = malloc(sizeof(*pool));
if (pool == NULL)
- goto fail_rdrb;
+ goto fail_pool;
- fd = shm_open(fn, flags, 0666);
+ fd = shm_open(name, flags, mode);
if (fd == -1)
goto fail_open;
- if ((flags & O_CREAT) && ftruncate(fd, SSM_FILE_SIZE) < 0)
+ if ((flags & O_CREAT) && ftruncate(fd, (off_t) file_size) < 0)
goto fail_truncate;
- shm_base = mmap(NULL, SSM_FILE_SIZE, MM_FLAGS, MAP_SHARED, fd, 0);
+ shm_base = mmap(NULL, file_size, MM_FLAGS, MAP_SHARED, fd, 0);
if (shm_base == MAP_FAILED)
goto fail_truncate;
- pool->shm_base = shm_base;
- pool->pool_base = shm_base;
- pool->hdr = (struct _ssm_pool_hdr *) (shm_base + SSM_POOL_TOTAL_SIZE);
+ pool->shm_base = shm_base;
+ pool->pool_base = shm_base;
+ pool->hdr = (struct _ssm_pool_hdr *) (shm_base + total_size);
+ pool->uid = uid;
+ pool->total_size = total_size;
if (flags & O_CREAT)
pool->hdr->mapped_addr = shm_base;
close(fd);
- free(fn);
-
return pool;
fail_truncate:
close(fd);
if (flags & O_CREAT)
- shm_unlink(fn);
+ shm_unlink(name);
fail_open:
free(pool);
- fail_rdrb:
- free(fn);
- fail_fn:
+ fail_pool:
return NULL;
}
-struct ssm_pool * ssm_pool_create(void)
+struct ssm_pool * ssm_pool_create(uid_t uid,
+ gid_t gid)
{
struct ssm_pool * pool;
+ char * fn;
mode_t mask;
+ mode_t mode;
pthread_mutexattr_t mattr;
pthread_condattr_t cattr;
+ int fd;
+ fn = pool_filename(uid);
+ if (fn == NULL)
+ goto fail_fn;
+
+ mode = IS_GSPP(uid) ? 0660 : 0600;
mask = umask(0);
- pool = pool_create(O_CREAT | O_EXCL | O_RDWR);
+ pool = __pool_create(fn, O_CREAT | O_EXCL | O_RDWR, uid, mode);
umask(mask);
if (pool == NULL)
- goto fail_rdrb;
+ goto fail_pool;
+
+ fd = shm_open(fn, O_RDWR, 0);
+ if (fd >= 0) {
+ fchown(fd, uid, gid);
+ fchmod(fd, mode);
+ close(fd);
+ }
if (pthread_mutexattr_init(&mattr))
goto fail_mattr;
@@ -585,13 +642,13 @@ struct ssm_pool * ssm_pool_create(void)
goto fail_healthy;
pool->hdr->pid = getpid();
- /* Will be set by init_size_classes */
STORE(&pool->hdr->initialized, 0);
init_size_classes(pool);
pthread_mutexattr_destroy(&mattr);
pthread_condattr_destroy(&cattr);
+ free(fn);
return pool;
@@ -602,27 +659,37 @@ struct ssm_pool * ssm_pool_create(void)
fail_mutex:
pthread_mutexattr_destroy(&mattr);
fail_mattr:
- ssm_pool_destroy(pool);
- fail_rdrb:
+ ssm_pool_close(pool);
+ shm_unlink(fn);
+ fail_pool:
+ free(fn);
+ fail_fn:
return NULL;
}
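[Note: the shm_open()/fchown()/fchmod() sequence after creation is what anchors the UNIX trust model: the IRMd creates every pool, then hands a PUP to its user with mode 0600 while the GSPP stays 0660 for the ouroboros group. Seen from a foreign unprivileged process, that reduces to a plain permission failure at open time; a hedged sketch, with a hypothetical segment name:]

    /* Sketch: another user's PUP is unreachable at shm_open(). */
    int fd = shm_open("/ouroboros.pool.1000", O_RDWR, 0);
    if (fd == -1 && errno == EACCES)
            ;  /* neither owner nor root: no access to that PUP */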
-struct ssm_pool * ssm_pool_open(void)
+struct ssm_pool * ssm_pool_open(uid_t uid)
{
struct ssm_pool * pool;
+ char * fn;
+
+ fn = pool_filename(uid);
+ if (fn == NULL)
+ return NULL;
- pool = pool_create(O_RDWR);
+ pool = __pool_create(fn, O_RDWR, uid, 0);
if (pool != NULL)
init_size_classes(pool);
+ free(fn);
+
return pool;
}
-void ssm_pool_purge(void)
+void ssm_pool_gspp_purge(void)
{
char * fn;
- fn = gspp_filename();
+ fn = pool_filename(SSM_GSPP_UID);
if (fn == NULL)
return;
@@ -632,9 +699,13 @@ void ssm_pool_purge(void)
int ssm_pool_mlock(struct ssm_pool * pool)
{
+ size_t file_size;
+
assert(pool != NULL);
- return mlock(pool->shm_base, SSM_FILE_SIZE);
+ file_size = GET_POOL_FILE_SIZE(pool->uid);
+
+ return mlock(pool->shm_base, file_size);
}
ssize_t ssm_pool_alloc(struct ssm_pool * pool,
@@ -647,7 +718,7 @@ ssize_t ssm_pool_alloc(struct ssm_pool * pool,
assert(pool != NULL);
assert(spb != NULL);
- idx = select_size_class(count);
+ idx = select_size_class(pool, count);
if (idx >= 0)
return alloc_from_sc(pool, idx, count, ptr, spb);
@@ -665,7 +736,7 @@ ssize_t ssm_pool_alloc_b(struct ssm_pool * pool,
assert(pool != NULL);
assert(spb != NULL);
- idx = select_size_class(count);
+ idx = select_size_class(pool, count);
if (idx >= 0)
return alloc_from_sc_b(pool, idx, count, ptr, spb, abstime);
@@ -697,7 +768,7 @@ struct ssm_pk_buff * ssm_pool_get(struct ssm_pool * pool,
assert(pool != NULL);
- if (off == 0 || off >= (size_t) SSM_POOL_TOTAL_SIZE)
+ if (off == 0 || off >= pool->total_size)
return NULL;
blk = OFFSET_TO_PTR(pool->pool_base, off);
@@ -722,7 +793,7 @@ int ssm_pool_remove(struct ssm_pool * pool,
assert(pool != NULL);
- if (off == 0 || off >= SSM_POOL_TOTAL_SIZE)
+ if (off == 0 || off >= pool->total_size)
return -EINVAL;
blk = OFFSET_TO_PTR(pool->pool_base, off);
diff --git a/src/lib/ssm/ssm.h.in b/src/lib/ssm/ssm.h.in
index d14cd49c..b9246c8b 100644
--- a/src/lib/ssm/ssm.h.in
+++ b/src/lib/ssm/ssm.h.in
@@ -30,8 +30,9 @@
/* Pool naming configuration */
#define SSM_PREFIX "@SSM_PREFIX@"
-#define SSM_GSMP_SUFFIX "@SSM_GSMP_SUFFIX@"
-#define SSM_PPP_SUFFIX "@SSM_PPP_SUFFIX@"
+#define SSM_GSPP_NAME "@SSM_GSPP_NAME@"
+#define SSM_PUP_NAME_FMT "@SSM_PUP_NAME_FMT@"
+#define SSM_GSPP_UID 0
/* Legacy SSM constants */
#define SSM_RBUFF_PREFIX "@SSM_RBUFF_PREFIX@"
@@ -44,7 +45,31 @@
#define SSM_PK_BUFF_HEADSPACE @SSM_PK_BUFF_HEADSPACE@
#define SSM_PK_BUFF_TAILSPACE @SSM_PK_BUFF_TAILSPACE@
-/* Pool blocks per size class */
+/* Global Shared Packet Pool (GSPP) - for privileged processes */
+#define SSM_GSPP_256_BLOCKS @SSM_GSPP_256_BLOCKS@
+#define SSM_GSPP_512_BLOCKS @SSM_GSPP_512_BLOCKS@
+#define SSM_GSPP_1K_BLOCKS @SSM_GSPP_1K_BLOCKS@
+#define SSM_GSPP_2K_BLOCKS @SSM_GSPP_2K_BLOCKS@
+#define SSM_GSPP_4K_BLOCKS @SSM_GSPP_4K_BLOCKS@
+#define SSM_GSPP_16K_BLOCKS @SSM_GSPP_16K_BLOCKS@
+#define SSM_GSPP_64K_BLOCKS @SSM_GSPP_64K_BLOCKS@
+#define SSM_GSPP_256K_BLOCKS @SSM_GSPP_256K_BLOCKS@
+#define SSM_GSPP_1M_BLOCKS @SSM_GSPP_1M_BLOCKS@
+#define SSM_GSPP_TOTAL_SIZE @SSM_GSPP_TOTAL_SIZE@
+
+/* Per-User Pool (PUP) - for unprivileged applications */
+#define SSM_PUP_256_BLOCKS @SSM_PUP_256_BLOCKS@
+#define SSM_PUP_512_BLOCKS @SSM_PUP_512_BLOCKS@
+#define SSM_PUP_1K_BLOCKS @SSM_PUP_1K_BLOCKS@
+#define SSM_PUP_2K_BLOCKS @SSM_PUP_2K_BLOCKS@
+#define SSM_PUP_4K_BLOCKS @SSM_PUP_4K_BLOCKS@
+#define SSM_PUP_16K_BLOCKS @SSM_PUP_16K_BLOCKS@
+#define SSM_PUP_64K_BLOCKS @SSM_PUP_64K_BLOCKS@
+#define SSM_PUP_256K_BLOCKS @SSM_PUP_256K_BLOCKS@
+#define SSM_PUP_1M_BLOCKS @SSM_PUP_1M_BLOCKS@
+#define SSM_PUP_TOTAL_SIZE @SSM_PUP_TOTAL_SIZE@
+
+/* Legacy pool blocks (same as GSPP for compatibility) */
#define SSM_POOL_256_BLOCKS @SSM_POOL_256_BLOCKS@
#define SSM_POOL_512_BLOCKS @SSM_POOL_512_BLOCKS@
#define SSM_POOL_1K_BLOCKS @SSM_POOL_1K_BLOCKS@
diff --git a/src/lib/ssm/tests/pool_sharding_test.c b/src/lib/ssm/tests/pool_sharding_test.c
index 72ae1cb7..46eecd8d 100644
--- a/src/lib/ssm/tests/pool_sharding_test.c
+++ b/src/lib/ssm/tests/pool_sharding_test.c
@@ -54,6 +54,7 @@ static struct _ssm_pool_hdr * get_pool_hdr(struct ssm_pool * pool)
*/
struct _ssm_pool_hdr ** hdr_ptr =
(struct _ssm_pool_hdr **)((uint8_t *)pool + sizeof(void *));
+
return *hdr_ptr;
}
@@ -67,9 +68,7 @@ static int test_lazy_distribution(void)
TEST_START();
- ssm_pool_purge();
-
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL) {
printf("Failed to create pool.\n");
goto fail;
@@ -142,9 +141,7 @@ static int test_shard_migration(void)
TEST_START();
- ssm_pool_purge();
-
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL) {
printf("Failed to create pool.\n");
goto fail;
@@ -216,9 +213,7 @@ static int test_fallback_stealing(void)
TEST_START();
- ssm_pool_purge();
-
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL) {
printf("Failed to create pool.\n");
goto fail;
@@ -331,9 +326,7 @@ static int test_multiprocess_sharding(void)
TEST_START();
- ssm_pool_purge();
-
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL) {
printf("Failed to create pool.\n");
goto fail;
@@ -355,7 +348,7 @@ static int test_multiprocess_sharding(void)
ssize_t off;
int my_shard;
- child_pool = ssm_pool_open();
+ child_pool = ssm_pool_open(0);
if (child_pool == NULL)
exit(EXIT_FAILURE);
@@ -449,9 +442,7 @@ static int test_exhaustion_with_fallback(void)
TEST_START();
- ssm_pool_purge();
-
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL) {
printf("Failed to create pool.\n");
goto fail;
diff --git a/src/lib/ssm/tests/pool_test.c b/src/lib/ssm/tests/pool_test.c
index e298d9ab..53f7f541 100644
--- a/src/lib/ssm/tests/pool_test.c
+++ b/src/lib/ssm/tests/pool_test.c
@@ -61,7 +61,7 @@ static int test_ssm_pool_basic_allocation(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -119,7 +119,7 @@ static int test_ssm_pool_multiple_allocations(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -212,7 +212,7 @@ static int test_ssm_pool_no_fallback_for_large(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -248,7 +248,7 @@ static int test_ssm_pool_blocking_vs_nonblocking(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -295,7 +295,7 @@ static int test_ssm_pool_stress_test(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -392,7 +392,7 @@ static int test_ssm_pool_open_initializes_ssm(void)
TEST_START();
- creator = ssm_pool_create();
+ creator = ssm_pool_create(0, getgid());
if (creator == NULL)
goto fail_create;
@@ -403,7 +403,7 @@ static int test_ssm_pool_open_initializes_ssm(void)
}
ssm_pool_remove(creator, ret);
- opener = ssm_pool_open();
+ opener = ssm_pool_open(0);
if (opener == NULL) {
printf("Open failed.\n");
goto fail_creator;
@@ -439,7 +439,7 @@ static int test_ssm_pool_bounds_checking(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -502,7 +502,7 @@ static int test_ssm_pool_inter_process_communication(void)
len = strlen(msg) + 1;
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -606,7 +606,7 @@ static int test_ssm_pool_read_operation(void)
len = strlen(data) + 1;
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -656,7 +656,7 @@ static int test_ssm_pool_mlock_operation(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -690,7 +690,7 @@ static int test_ssm_pk_buff_operations(void)
dlen = strlen(data);
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -798,7 +798,7 @@ static int test_ssm_pool_size_class_boundaries(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -859,7 +859,7 @@ static int test_ssm_pool_exhaustion(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -937,7 +937,7 @@ static int test_ssm_pool_reclaim_orphans(void)
TEST_START();
- pool = ssm_pool_create();
+ pool = ssm_pool_create(0, getgid());
if (pool == NULL)
goto fail_create;
@@ -1017,8 +1017,6 @@ int pool_test(int argc,
(void) argc;
(void) argv;
- ssm_pool_purge();
-
ret |= test_ssm_pool_basic_allocation();
ret |= test_ssm_pool_multiple_allocations();
ret |= test_ssm_pool_no_fallback_for_large();
diff --git a/src/lib/timerwheel.c b/src/lib/timerwheel.c
index eaa684e5..ed235047 100644
--- a/src/lib/timerwheel.c
+++ b/src/lib/timerwheel.c
@@ -173,7 +173,7 @@ static void timerwheel_move(void)
snd_cr = &r->frcti->snd_cr;
rcv_cr = &r->frcti->rcv_cr;
- f = &ai.flows[r->fd];
+ f = &proc.flows[r->fd];
#ifndef RXM_BUFFER_ON_HEAP
ssm_pk_buff_ack(r->spb);
#endif
@@ -226,7 +226,7 @@ static void timerwheel_move(void)
#ifdef RXM_BLOCKING
if (ipcp_spb_reserve(&spb, r->len) < 0)
#else
- if (ssm_pool_alloc(ai.gspp, r->len, NULL,
+ if (ssm_pool_alloc(proc.pool, r->len, NULL,
&spb) < 0)
#endif
goto reschedule; /* rdrbuff full */
@@ -288,7 +288,7 @@ static void timerwheel_move(void)
list_del(&a->next);
- f = &ai.flows[a->fd];
+ f = &proc.flows[a->fd];
rw.map[j & (ACKQ_SLOTS - 1)][a->fd] = false;
@@ -341,7 +341,7 @@ static int timerwheel_rxm(struct frcti * frcti,
slot = r->t0 >> RXMQ_RES;
r->fd = frcti->fd;
- r->flow_id = ai.flows[r->fd].info.id;
+ r->flow_id = proc.flows[r->fd].info.id;
pthread_rwlock_unlock(&r->frcti->lock);
@@ -394,7 +394,7 @@ static int timerwheel_delayed_ack(int fd,
a->fd = fd;
a->frcti = frcti;
- a->flow_id = ai.flows[fd].info.id;
+ a->flow_id = proc.flows[fd].info.id;
pthread_mutex_lock(&rw.lock);
diff --git a/src/lib/utils.c b/src/lib/utils.c
index 74f8ce4f..cfddec62 100644
--- a/src/lib/utils.c
+++ b/src/lib/utils.c
@@ -20,11 +20,15 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
-#define _POSIX_C_SOURCE 200809L
+#define _DEFAULT_SOURCE
+
+#include "config.h"
#include <ouroboros/utils.h>
#include <ctype.h>
+#include <grp.h>
+#include <pwd.h>
#include <stdlib.h>
#include <string.h>
@@ -138,5 +142,63 @@ char ** argvdup(char ** argv)
}
argv_dup[argc] = NULL;
+
return argv_dup;
}
+
+bool is_ouroboros_member_uid(uid_t uid)
+{
+ struct group * grp;
+ struct passwd * pw;
+ gid_t gid;
+ gid_t * groups = NULL;
+ int ngroups;
+ int i;
+
+ /* Root is always privileged */
+ if (uid == 0)
+ return true;
+
+ grp = getgrnam("ouroboros");
+ if (grp == NULL)
+ return false;
+
+ gid = grp->gr_gid;
+
+ pw = getpwuid(uid);
+ if (pw == NULL)
+ return false;
+
+ if (pw->pw_gid == gid)
+ return true;
+
+ ngroups = 0;
+ getgrouplist(pw->pw_name, pw->pw_gid, NULL, &ngroups);
+ if (ngroups <= 0)
+ return false;
+
+ groups = malloc(ngroups * sizeof(*groups));
+ if (groups == NULL)
+ return false;
+
+ if (getgrouplist(pw->pw_name, pw->pw_gid, groups, &ngroups) < 0) {
+ free(groups);
+ return false;
+ }
+
+ for (i = 0; i < ngroups; i++) {
+ if (groups[i] == gid) {
+ free(groups);
+ return true;
+ }
+ }
+
+ free(groups);
+
+ return false;
+}
+
+bool is_ouroboros_member(void)
+{
+ return is_ouroboros_member_uid(getuid());
+}
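[Note: is_ouroboros_member_uid() uses the usual two-call getgrouplist() idiom, as the code above does: the first call with a NULL array only reports the required length through ngroups, the second fills the allocated array. Condensed, assuming glibc semantics:]

    /* Sketch: size, then fill, the supplementary group list. */
    int ngroups = 0;
    gid_t * groups;

    getgrouplist(name, primary_gid, NULL, &ngroups);  /* sets ngroups */
    groups = malloc(ngroups * sizeof(*groups));
    if (groups != NULL)
            getgrouplist(name, primary_gid, groups, &ngroups);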
diff --git a/src/tools/oping/oping.c b/src/tools/oping/oping.c
index 87c1ee18..47b86c68 100644
--- a/src/tools/oping/oping.c
+++ b/src/tools/oping/oping.c
@@ -75,12 +75,14 @@
"\n" \
" -c, --count Number of packets\n" \
" -d, --duration Duration of the test (default 1s)\n" \
+" -f, --flood Send back-to-back without waiting\n" \
" -i, --interval Interval (default 1000ms)\n" \
" -n, --server-name Name of the oping server\n" \
" -q, --qos QoS (raw, best, video, voice, data)\n" \
" -s, --size Payload size (B, default 64)\n" \
" -Q, --quiet Only print final statistics\n" \
" -D, --timeofday Print time of day before each line\n" \
+" --poll Server uses polling (lower latency)\n" \
"\n" \
" --help Display this help text and exit\n" \
@@ -90,6 +92,7 @@ struct {
uint32_t count;
int size;
bool timestamp;
+ bool flood;
qosspec_t qs;
/* stats */
@@ -114,6 +117,7 @@ struct {
pthread_mutex_t lock;
bool quiet;
+ bool poll;
pthread_t cleaner_pt;
pthread_t accept_pt;
@@ -172,9 +176,11 @@ int main(int argc,
client.size = 64;
client.count = INT_MAX;
client.timestamp = false;
+ client.flood = false;
client.qs = qos_raw;
client.quiet = false;
server.quiet = false;
+ server.poll = false;
while (argc > 0) {
if ((strcmp(*argv, "-i") == 0 ||
@@ -212,6 +218,9 @@ int main(int argc,
} else if (strcmp(*argv, "-l") == 0 ||
strcmp(*argv, "--listen") == 0) {
serv = true;
+ } else if (strcmp(*argv, "-f") == 0 ||
+ strcmp(*argv, "--flood") == 0) {
+ client.flood = true;
} else if (strcmp(*argv, "-D") == 0 ||
strcmp(*argv, "--timeofday") == 0) {
client.timestamp = true;
@@ -219,6 +228,8 @@ int main(int argc,
strcmp(*argv, "--quiet") == 0) {
client.quiet = true;
server.quiet = true;
+ } else if (strcmp(*argv, "--poll") == 0) {
+ server.poll = true;
} else {
goto fail;
}
diff --git a/src/tools/oping/oping_client.c b/src/tools/oping/oping_client.c
index 5a9e03dc..1513eac8 100644
--- a/src/tools/oping/oping_client.c
+++ b/src/tools/oping/oping_client.c
@@ -53,6 +53,20 @@ void shutdown_client(int signo, siginfo_t * info, void * c)
}
}
+static void update_rtt_stats(double ms)
+{
+ double d;
+
+ if (ms < client.rtt_min)
+ client.rtt_min = ms;
+ if (ms > client.rtt_max)
+ client.rtt_max = ms;
+
+ d = (ms - client.rtt_avg);
+ client.rtt_avg += d / client.rcvd;
+ client.rtt_m2 += d * (ms - client.rtt_avg);
+}
+
void * reader(void * o)
{
struct timespec timeout = {client.interval / 1000 + 2, 0};
@@ -64,7 +78,6 @@ void * reader(void * o)
int fd = *((int *) o);
int msg_len = 0;
double ms = 0;
- double d = 0;
uint32_t exp_id = 0;
fccntl(fd, FLOWSRCVTIMEO, &timeout);
@@ -122,14 +135,7 @@ void * reader(void * o)
id < exp_id ? " [out-of-order]" : "");
}
- if (ms < client.rtt_min)
- client.rtt_min = ms;
- if (ms > client.rtt_max)
- client.rtt_max = ms;
-
- d = (ms - client.rtt_avg);
- client.rtt_avg += d / client.rcvd;
- client.rtt_m2 += d * (ms - client.rtt_avg);
+ update_rtt_stats(ms);
if (id >= exp_id)
exp_id = id + 1;
@@ -204,13 +210,104 @@ static void client_fini(void)
return;
}
+static void print_stats(struct timespec * tic,
+ struct timespec * toc)
+{
+ printf("\n");
+ printf("--- %s ping statistics ---\n", client.s_apn);
+ printf("%d packets transmitted, ", client.sent);
+ printf("%d received, ", client.rcvd);
+ printf("%zd out-of-order, ", client.ooo);
+ printf("%.0lf%% packet loss, ", client.sent == 0 ? 0 :
+ ceil(100 - (100 * (client.rcvd / (float) client.sent))));
+ printf("time: %.3f ms\n", ts_diff_us(toc, tic) / 1000.0);
+
+ if (client.rcvd > 0) {
+ printf("rtt min/avg/max/mdev = %.3f/%.3f/%.3f/",
+ client.rtt_min,
+ client.rtt_avg,
+ client.rtt_max);
+ if (client.rcvd > 1)
+ printf("%.3f ms\n",
+ sqrt(client.rtt_m2 / (client.rcvd - 1)));
+ else
+ printf("NaN ms\n");
+ }
+}
+
+static int flood_ping(int fd)
+{
+ char buf[OPING_BUF_SIZE];
+ struct oping_msg * msg = (struct oping_msg *) buf;
+ struct timespec sent;
+ struct timespec rcvd;
+ double ms;
+
+ memset(buf, 0, client.size);
+
+ if (!client.quiet)
+ printf("Pinging %s with %d bytes of data (%u packets):\n\n",
+ client.s_apn, client.size, client.count);
+
+ while (!stop && client.sent < client.count) {
+ clock_gettime(CLOCK_MONOTONIC, &sent);
+
+ msg->type = htonl(ECHO_REQUEST);
+ msg->id = htonl(client.sent);
+ msg->tv_sec = sent.tv_sec;
+ msg->tv_nsec = sent.tv_nsec;
+
+ if (flow_write(fd, buf, client.size) < 0) {
+ printf("Failed to send packet.\n");
+ break;
+ }
+
+ ++client.sent;
+
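+ /* Blocking read: no FLOWSRCVTIMEO is set, so a lost reply stalls the flood. */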
+ if (flow_read(fd, buf, OPING_BUF_SIZE) < 0) {
+ printf("Failed to read packet.\n");
+ break;
+ }
+
+ clock_gettime(CLOCK_MONOTONIC, &rcvd);
+
+ if (ntohl(msg->type) != ECHO_REPLY)
+ continue;
+
+ ++client.rcvd;
+
+ sent.tv_sec = msg->tv_sec;
+ sent.tv_nsec = msg->tv_nsec;
+ ms = ts_diff_us(&rcvd, &sent) / 1000.0;
+
+ update_rtt_stats(ms);
+
+ if (!client.quiet)
+ printf("%d bytes from %s: seq=%d time=%.3f ms\n",
+ client.size, client.s_apn,
+ ntohl(msg->id), ms);
+ }
+
+ return 0;
+}
+
+static int threaded_ping(int fd)
+{
+ pthread_create(&client.reader_pt, NULL, reader, &fd);
+ pthread_create(&client.writer_pt, NULL, writer, &fd);
+
+ pthread_join(client.writer_pt, NULL);
+ pthread_join(client.reader_pt, NULL);
+
+ return 0;
+}
+
static int client_main(void)
{
struct sigaction sig_act;
-
struct timespec tic;
struct timespec toc;
-
int fd;
memset(&sig_act, 0, sizeof sig_act);
@@ -241,37 +337,16 @@ static int client_main(void)
clock_gettime(CLOCK_REALTIME, &tic);
- pthread_create(&client.reader_pt, NULL, reader, &fd);
- pthread_create(&client.writer_pt, NULL, writer, &fd);
-
- pthread_join(client.writer_pt, NULL);
- pthread_join(client.reader_pt, NULL);
+ if (client.flood)
+ flood_ping(fd);
+ else
+ threaded_ping(fd);
clock_gettime(CLOCK_REALTIME, &toc);
- printf("\n");
- printf("--- %s ping statistics ---\n", client.s_apn);
- printf("%d packets transmitted, ", client.sent);
- printf("%d received, ", client.rcvd);
- printf("%zd out-of-order, ", client.ooo);
- printf("%.0lf%% packet loss, ", client.sent == 0 ? 0 :
- ceil(100 - (100 * (client.rcvd / (float) client.sent))));
- printf("time: %.3f ms\n", ts_diff_us(&toc, &tic) / 1000.0);
-
- if (client.rcvd > 0) {
- printf("rtt min/avg/max/mdev = %.3f/%.3f/%.3f/",
- client.rtt_min,
- client.rtt_avg,
- client.rtt_max);
- if (client.rcvd > 1)
- printf("%.3f ms\n",
- sqrt(client.rtt_m2 / (client.rcvd - 1)));
- else
- printf("NaN ms\n");
- }
+ print_stats(&tic, &toc);
flow_dealloc(fd);
-
client_fini();
return 0;
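
update_rtt_stats() above is Welford's online mean/variance recurrence: the
caller increments client.rcvd first, d = ms - rtt_avg updates the running
mean, and rtt_m2 accumulates the sum of squared deviations, so print_stats()
reports mdev = sqrt(rtt_m2 / (n - 1)), the sample standard deviation. A
self-contained sketch of the same recurrence:

    #include <math.h>
    #include <stdio.h>

    /* Welford's recurrence, as in update_rtt_stats(): one pass, O(1) state. */
    int main(void)
    {
            const double x[] = { 1.0, 2.0, 4.0 };   /* example RTTs in ms */
            double mean = 0.0;
            double m2 = 0.0;
            size_t n = 0;
            size_t i;

            for (i = 0; i < sizeof(x) / sizeof(*x); i++) {
                    double d = x[i] - mean;   /* deviation from the old mean */
                    mean += d / ++n;          /* running mean                */
                    m2 += d * (x[i] - mean);  /* sum of squared deviations   */
            }

            printf("avg %.3f mdev %.3f\n", mean, sqrt(m2 / (n - 1)));

            return 0;
    }
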
diff --git a/src/tools/oping/oping_server.c b/src/tools/oping/oping_server.c
index c1d5e6e5..13ab8f47 100644
--- a/src/tools/oping/oping_server.c
+++ b/src/tools/oping/oping_server.c
@@ -89,12 +89,16 @@ void * server_thread(void *o)
struct oping_msg * msg = (struct oping_msg *) buf;
struct timespec now = {0, 0};
struct timespec timeout = {0, 100 * MILLION};
+ struct timespec poll_timeout = {0, 0};
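+ /* A zero timeout makes fevent() return -ETIMEDOUT at once: busy-polling. */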
int fd;
(void) o;
while (true) {
- if (fevent(server.flows, server.fq, &timeout) == -ETIMEDOUT)
+ if (fevent(server.flows, server.fq,
+ server.poll ? &poll_timeout : &timeout)
+ == -ETIMEDOUT)
continue;
while ((fd = fqueue_next(server.fq)) >= 0) {