Diffstat (limited to 'src/lib')
-rw-r--r--  src/lib/CMakeLists.txt                    160
-rw-r--r--  src/lib/crypt.c                             4
-rw-r--r--  src/lib/dev.c                             689
-rw-r--r--  src/lib/frct.c                             18
-rw-r--r--  src/lib/pb/ipcp.proto                       1
-rw-r--r--  src/lib/pb/irm.proto                        4
-rw-r--r--  src/lib/pb/model.proto                     13
-rw-r--r--  src/lib/protobuf.c                         15
-rw-r--r--  src/lib/serdes-irm.c                        8
-rw-r--r--  src/lib/ssm/pool.c                        231
-rw-r--r--  src/lib/ssm/ssm.h.in                       31
-rw-r--r--  src/lib/ssm/tests/CMakeLists.txt           15
-rw-r--r--  src/lib/ssm/tests/pool_sharding_test.c     23
-rw-r--r--  src/lib/ssm/tests/pool_test.c              32
-rw-r--r--  src/lib/tests/CMakeLists.txt               18
-rw-r--r--  src/lib/timerwheel.c                       10
-rw-r--r--  src/lib/utils.c                            64
17 files changed, 851 insertions, 485 deletions
diff --git a/src/lib/CMakeLists.txt b/src/lib/CMakeLists.txt
new file mode 100644
index 00000000..c4306b00
--- /dev/null
+++ b/src/lib/CMakeLists.txt
@@ -0,0 +1,160 @@
+# Ouroboros libraries build configuration
+# Configuration options are in cmake/config/lib.cmake
+
+protobuf_generate_c(MODEL_PROTO_SRCS MODEL_PROTO_HDRS
+    "${CMAKE_CURRENT_SOURCE_DIR}/pb/model.proto")
+protobuf_generate_c(IPCP_CONFIG_PROTO_SRCS IPCP_CONFIG_PROTO_HDRS
+    "${CMAKE_CURRENT_SOURCE_DIR}/pb/ipcp_config.proto")
+protobuf_generate_c(ENROLL_PROTO_SRCS ENROLL_PROTO_HDRS
+    "${CMAKE_CURRENT_SOURCE_DIR}/pb/enroll.proto")
+protobuf_generate_c(CEP_PROTO_SRCS CEP_PROTO_HDRS
+    "${CMAKE_CURRENT_SOURCE_DIR}/pb/cep.proto")
+protobuf_generate_c(IRM_PROTO_SRCS IRM_PROTO_HDRS
+    "${CMAKE_CURRENT_SOURCE_DIR}/pb/irm.proto")
+protobuf_generate_c(IPCP_PROTO_SRCS IPCP_PROTO_HDRS
+    "${CMAKE_CURRENT_SOURCE_DIR}/pb/ipcp.proto")
+
+set(SOURCE_FILES_COMMON
+    bitmap.c
+    btree.c
+    crc32.c
+    crypt.c
+    hash.c
+    list.c
+    lockfile.c
+    logs.c
+    md5.c
+    notifier.c
+    protobuf.c
+    qoscube.c
+    random.c
+    rib.c
+    serdes-irm.c
+    serdes-oep.c
+    sha3.c
+    ssm/flow_set.c
+    ssm/rbuff.c
+    ssm/pool.c
+    sockets.c
+    tpm.c
+    utils.c
+)
+
+if(HAVE_OPENSSL)
+    list(APPEND SOURCE_FILES_COMMON crypt/openssl.c)
+endif()
+
+add_library(ouroboros-common SHARED
+    ${SOURCE_FILES_COMMON}
+    ${IRM_PROTO_SRCS}
+    ${IPCP_PROTO_SRCS}
+    ${IPCP_CONFIG_PROTO_SRCS}
+    ${MODEL_PROTO_SRCS}
+    ${ENROLL_PROTO_SRCS})
+
+set_target_properties(ouroboros-common PROPERTIES
+    VERSION ${PACKAGE_VERSION}
+    SOVERSION ${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR})
+
+ouroboros_target_debug_definitions(ouroboros-common)
+
+target_include_directories(ouroboros-common
+    PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/include>
+    $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+    $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
+    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    PRIVATE
+    ${SYS_RND_HDR}
+    ${APPLE_INCLUDE_DIRS})
+
+target_link_libraries(ouroboros-common
+    PRIVATE
+    ${LIBRT_LIBRARIES}
+    Threads::Threads
+    PUBLIC
+    ProtobufC::ProtobufC)
+
+if(HAVE_OPENSSL)
+    target_link_libraries(ouroboros-common PUBLIC OpenSSL::Crypto)
+endif()
+
+if(HAVE_LIBGCRYPT)
+    target_link_libraries(ouroboros-common PUBLIC Gcrypt::Gcrypt)
+endif()
+
+if(HAVE_FUSE)
+    target_link_libraries(ouroboros-common PRIVATE Fuse::Fuse)
+endif()
+
+install(TARGETS ouroboros-common
+    EXPORT OuroborosTargets
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+
+set(SOURCE_FILES_DEV
+    cep.c
+    dev.c
+)
+
+add_library(ouroboros-dev SHARED
+    ${SOURCE_FILES_DEV}
+    ${CEP_PROTO_SRCS})
+
+set_target_properties(ouroboros-dev PROPERTIES
+    VERSION ${PACKAGE_VERSION}
+    SOVERSION ${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR})
+
+ouroboros_target_debug_definitions(ouroboros-dev)
+
+target_include_directories(ouroboros-dev
+    PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/include>
+    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    PRIVATE
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_CURRENT_BINARY_DIR}
+    ${CMAKE_BINARY_DIR}
+    ${SYS_RND_HDR})
+
+target_link_libraries(ouroboros-dev PUBLIC ouroboros-common)
+
+install(TARGETS ouroboros-dev
+    EXPORT OuroborosTargets
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+
+add_library(ouroboros-irm SHARED irm.c)
+
+set_target_properties(ouroboros-irm PROPERTIES
+    VERSION ${PACKAGE_VERSION}
+    SOVERSION ${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR})
+
+ouroboros_target_debug_definitions(ouroboros-irm)
+
+target_include_directories(ouroboros-irm
+    PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}/include>
+    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    PRIVATE
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_CURRENT_BINARY_DIR}
+    ${CMAKE_BINARY_DIR}
+    ${SYS_RND_HDR})
+
+target_link_libraries(ouroboros-irm PUBLIC ouroboros-common)
+
+install(TARGETS ouroboros-irm
+    EXPORT OuroborosTargets
+    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+
+configure_file("${CMAKE_CURRENT_SOURCE_DIR}/config.h.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/config.h" @ONLY)
+
+configure_file("${CMAKE_CURRENT_SOURCE_DIR}/ssm/ssm.h.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/ssm.h" @ONLY)
+
+if(BUILD_TESTS)
+    add_subdirectory(tests)
+    add_subdirectory(ssm/tests)
+endif()
diff --git a/src/lib/crypt.c b/src/lib/crypt.c
index 38dd9f29..8c29cbb3 100644
--- a/src/lib/crypt.c
+++ b/src/lib/crypt.c
@@ -20,6 +20,10 @@
  * Foundation, Inc., http://www.fsf.org/about/contact/.
  */
 
+#if defined(__linux__) || defined(__CYGWIN__)
+#define _DEFAULT_SOURCE
+#endif
+
 #include <config.h>
 
 #include <ouroboros/errno.h>
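The crypt.c hunk defines _DEFAULT_SOURCE before any header is pulled in; on glibc a feature-test macro only has effect if it precedes the first system include. A minimal sketch of the pattern (the specific glibc symbols crypt.c needs are not shown in this hunk; htobe64() is just one example that appears under _DEFAULT_SOURCE):

/* Sketch only: the macro must come before the first system header,
 * otherwise glibc's feature set is already locked in. */
#if defined(__linux__) || defined(__CYGWIN__)
#define _DEFAULT_SOURCE
#endif

#include <endian.h>   /* htobe64() only becomes visible with the macro set */
#include <stdint.h>

static uint64_t to_wire(uint64_t host)
{
        return htobe64(host);
}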
diff --git a/src/lib/dev.c b/src/lib/dev.c
index fb06c496..454dd027 100644
--- a/src/lib/dev.c
+++ b/src/lib/dev.c
@@ -127,7 +127,7 @@ struct fqueue {
 };
 
 struct {
-        struct ssm_pool *     gspp;
+        struct ssm_pool *     pool;
         struct ssm_flow_set * fqset;
 
         struct bmp *          fds;
@@ -146,14 +146,14 @@ struct {
         fset_t *              frct_set;
 
         pthread_rwlock_t      lock;
-} ai;
+} proc;
 
 static void flow_destroy(struct fmap * p)
 {
-        pthread_mutex_lock(&ai.mtx);
+        pthread_mutex_lock(&proc.mtx);
 
         if (p->state == FLOW_DESTROY) {
-                pthread_mutex_unlock(&ai.mtx);
+                pthread_mutex_unlock(&proc.mtx);
                 return;
         }
@@ -162,12 +162,12 @@ static void flow_destroy(struct fmap * p)
         else
                 p->state = FLOW_NULL;
 
-        pthread_cond_signal(&ai.cond);
+        pthread_cond_signal(&proc.cond);
 
-        pthread_cleanup_push(__cleanup_mutex_unlock, &ai.mtx);
+        pthread_cleanup_push(__cleanup_mutex_unlock, &proc.mtx);
 
         while (p->state != FLOW_NULL)
-                pthread_cond_wait(&ai.cond, &ai.mtx);
+                pthread_cond_wait(&proc.cond, &proc.mtx);
 
         p->fd = -1;
         p->state = FLOW_INIT;
@@ -178,17 +178,17 @@ static void flow_destroy(struct fmap * p)
 
 static void flow_set_state(struct fmap * p, enum flow_state state)
 {
-        pthread_mutex_lock(&ai.mtx);
+        pthread_mutex_lock(&proc.mtx);
 
         if (p->state == FLOW_DESTROY) {
-                pthread_mutex_unlock(&ai.mtx);
+                pthread_mutex_unlock(&proc.mtx);
                 return;
         }
 
         p->state = state;
 
-        pthread_cond_broadcast(&ai.cond);
+        pthread_cond_broadcast(&proc.cond);
 
-        pthread_mutex_unlock(&ai.mtx);
+        pthread_mutex_unlock(&proc.mtx);
 }
 
 static enum flow_state flow_wait_assign(int flow_id)
@@ -196,26 +196,26 @@ static enum flow_state flow_wait_assign(int flow_id)
         enum flow_state state;
         struct fmap *   p;
 
-        p = &ai.id_to_fd[flow_id];
+        p = &proc.id_to_fd[flow_id];
 
-        pthread_mutex_lock(&ai.mtx);
+        pthread_mutex_lock(&proc.mtx);
 
         if (p->state == FLOW_ALLOCATED) {
-                pthread_mutex_unlock(&ai.mtx);
+                pthread_mutex_unlock(&proc.mtx);
                 return FLOW_ALLOCATED;
         }
 
         if (p->state == FLOW_INIT)
                 p->state = FLOW_ALLOC_PENDING;
 
-        pthread_cleanup_push(__cleanup_mutex_unlock, &ai.mtx);
+        pthread_cleanup_push(__cleanup_mutex_unlock, &proc.mtx);
 
         while (p->state == FLOW_ALLOC_PENDING)
-                pthread_cond_wait(&ai.cond, &ai.mtx);
+                pthread_cond_wait(&proc.cond, &proc.mtx);
 
         if (p->state == FLOW_DESTROY) {
                 p->state = FLOW_NULL;
-                pthread_cond_broadcast(&ai.cond);
+                pthread_cond_broadcast(&proc.cond);
         }
 
         state = p->state;
@@ -227,13 +227,13 @@ static enum flow_state flow_wait_assign(int flow_id)
         return state;
 }
 
-static int proc_announce(const char * prog)
+static int proc_announce(const struct proc_info * proc)
 {
         uint8_t  buf[SOCK_BUF_SIZE];
         buffer_t msg = {SOCK_BUF_SIZE, buf};
         int      err;
 
-        if (proc_announce__irm_req_ser(&msg, prog) < 0)
+        if (proc_announce__irm_req_ser(&msg, proc) < 0)
                 return -ENOMEM;
 
         err = send_recv_msg(&msg);
@@ -342,23 +342,23 @@ static void flow_send_keepalive(struct flow * flow,
         ssize_t   idx;
         uint8_t * ptr;
 
-        idx = ssm_pool_alloc(ai.gspp, 0, &ptr, &spb);
+        idx = ssm_pool_alloc(proc.pool, 0, &ptr, &spb);
         if (idx < 0)
                 return;
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         flow->snd_act = now;
 
         if (ssm_rbuff_write(flow->tx_rb, idx))
-                ssm_pool_remove(ai.gspp, idx);
+                ssm_pool_remove(proc.pool, idx);
         else
                 ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 }
 
-/* Needs rdlock on ai. */
+/* Needs rdlock on proc. */
 static void _flow_keepalive(struct flow * flow)
 {
         struct timespec now;
@@ -382,16 +382,16 @@ static void _flow_keepalive(struct flow * flow)
 
         if (ts_diff_ns(&now, &r_act) > (int64_t) timeo * MILLION) {
                 ssm_rbuff_set_acl(flow->rx_rb, ACL_FLOWPEER);
-                ssm_flow_set_notify(ai.fqset, flow_id, FLOW_PEER);
+                ssm_flow_set_notify(proc.fqset, flow_id, FLOW_PEER);
                 return;
         }
 
         if (ts_diff_ns(&now, &s_act) > (int64_t) timeo * (MILLION >> 2)) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
 
                 flow_send_keepalive(flow, now);
 
-                pthread_rwlock_rdlock(&ai.lock);
+                pthread_rwlock_rdlock(&proc.lock);
         }
 }
@@ -400,15 +400,15 @@ static void handle_keepalives(void)
         struct list_head * p;
         struct list_head * h;
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        list_for_each_safe(p, h, &ai.flow_list) {
+        list_for_each_safe(p, h, &proc.flow_list) {
                 struct flow * flow;
                 flow = list_entry(p, struct flow, next);
                 _flow_keepalive(flow);
         }
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 }
 
 static void __cleanup_fqueue_destroy(void * fq)
@@ -429,7 +429,7 @@ void * flow_rx(void * o)
         pthread_cleanup_push(__cleanup_fqueue_destroy, fq);
 
         /* fevent will filter all FRCT packets for us */
-        while ((ret = fevent(ai.frct_set, fq, &tic)) != 0) {
+        while ((ret = fevent(proc.frct_set, fq, &tic)) != 0) {
                 if (ret == -ETIMEDOUT) {
                         handle_keepalives();
                         continue;
@@ -446,63 +446,63 @@ static void flow_clear(int fd)
 {
-        memset(&ai.flows[fd], 0, sizeof(ai.flows[fd]));
+        memset(&proc.flows[fd], 0, sizeof(proc.flows[fd]));
 
-        ai.flows[fd].info.id = -1;
+        proc.flows[fd].info.id = -1;
 }
 
 static void __flow_fini(int fd)
 {
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
 
-        if (ai.flows[fd].frcti != NULL) {
-                ai.n_frcti--;
-                if (ai.n_frcti == 0) {
-                        pthread_cancel(ai.tx);
-                        pthread_join(ai.tx, NULL);
+        if (proc.flows[fd].frcti != NULL) {
+                proc.n_frcti--;
+                if (proc.n_frcti == 0) {
+                        pthread_cancel(proc.tx);
+                        pthread_join(proc.tx, NULL);
                 }
 
-                ssm_flow_set_del(ai.fqset, 0, ai.flows[fd].info.id);
+                ssm_flow_set_del(proc.fqset, 0, proc.flows[fd].info.id);
 
-                frcti_destroy(ai.flows[fd].frcti);
+                frcti_destroy(proc.flows[fd].frcti);
         }
 
-        if (ai.flows[fd].info.id != -1) {
-                flow_destroy(&ai.id_to_fd[ai.flows[fd].info.id]);
-                bmp_release(ai.fds, fd);
+        if (proc.flows[fd].info.id != -1) {
+                flow_destroy(&proc.id_to_fd[proc.flows[fd].info.id]);
+                bmp_release(proc.fds, fd);
         }
 
-        if (ai.flows[fd].rx_rb != NULL) {
-                ssm_rbuff_set_acl(ai.flows[fd].rx_rb, ACL_FLOWDOWN);
-                ssm_rbuff_close(ai.flows[fd].rx_rb);
+        if (proc.flows[fd].rx_rb != NULL) {
+                ssm_rbuff_set_acl(proc.flows[fd].rx_rb, ACL_FLOWDOWN);
+                ssm_rbuff_close(proc.flows[fd].rx_rb);
         }
-        if (ai.flows[fd].tx_rb != NULL) {
-                ssm_rbuff_set_acl(ai.flows[fd].tx_rb, ACL_FLOWDOWN);
-                ssm_rbuff_close(ai.flows[fd].tx_rb);
+        if (proc.flows[fd].tx_rb != NULL) {
+                ssm_rbuff_set_acl(proc.flows[fd].tx_rb, ACL_FLOWDOWN);
+                ssm_rbuff_close(proc.flows[fd].tx_rb);
         }
 
-        if (ai.flows[fd].set != NULL) {
-                ssm_flow_set_notify(ai.flows[fd].set,
-                                    ai.flows[fd].info.id,
+        if (proc.flows[fd].set != NULL) {
+                ssm_flow_set_notify(proc.flows[fd].set,
+                                    proc.flows[fd].info.id,
                                     FLOW_DEALLOC);
-                ssm_flow_set_close(ai.flows[fd].set);
+                ssm_flow_set_close(proc.flows[fd].set);
         }
 
-        crypt_destroy_ctx(ai.flows[fd].crypt);
+        crypt_destroy_ctx(proc.flows[fd].crypt);
 
-        list_del(&ai.flows[fd].next);
+        list_del(&proc.flows[fd].next);
 
         flow_clear(fd);
 }
 
 static void flow_fini(int fd)
 {
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         __flow_fini(fd);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 }
 
 #define IS_ENCRYPTED(crypt) ((crypt)->nid != NID_undef)
@@ -517,15 +517,15 @@ static int flow_init(struct flow_info * info,
 
         clock_gettime(PTHREAD_COND_CLOCK, &now);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
-        fd = bmp_allocate(ai.fds);
-        if (!bmp_is_id_valid(ai.fds, fd)) {
+        fd = bmp_allocate(proc.fds);
+        if (!bmp_is_id_valid(proc.fds, fd)) {
                 err = -EBADF;
                 goto fail_fds;
         }
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         flow->info = *info;
@@ -566,27 +566,27 @@ static int flow_init(struct flow_info * info,
                 if (flow->frcti == NULL)
                         goto fail_frcti;
 
-                if (ssm_flow_set_add(ai.fqset, 0, info->id))
+                if (ssm_flow_set_add(proc.fqset, 0, info->id))
                         goto fail_flow_set_add;
 
-                ++ai.n_frcti;
-                if (ai.n_frcti == 1 &&
-                    pthread_create(&ai.tx, NULL, flow_tx, NULL) < 0)
+                ++proc.n_frcti;
+                if (proc.n_frcti == 1 &&
+                    pthread_create(&proc.tx, NULL, flow_tx, NULL) < 0)
                         goto fail_tx_thread;
         }
 
-        list_add_tail(&flow->next, &ai.flow_list);
+        list_add_tail(&flow->next, &proc.flow_list);
 
-        ai.id_to_fd[info->id].fd = fd;
+        proc.id_to_fd[info->id].fd = fd;
 
-        flow_set_state(&ai.id_to_fd[info->id], FLOW_ALLOCATED);
+        flow_set_state(&proc.id_to_fd[info->id], FLOW_ALLOCATED);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return fd;
 
 fail_tx_thread:
-        ssm_flow_set_del(ai.fqset, 0, info->id);
+        ssm_flow_set_del(proc.fqset, 0, info->id);
 fail_flow_set_add:
         frcti_destroy(flow->frcti);
 fail_frcti:
@@ -598,9 +598,9 @@ static int flow_init(struct flow_info * info,
 fail_tx_rb:
         ssm_rbuff_close(flow->rx_rb);
 fail_rx_rb:
-        bmp_release(ai.fds, fd);
+        bmp_release(proc.fds, fd);
 fail_fds:
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
         return err;
 }
@@ -618,6 +618,7 @@ static void init(int argc,
                  char ** argv,
                  char ** envp)
 {
+        struct proc_info info;
         char * prog = argv[0];
         int    i;
 #ifdef PROC_FLOW_STATS
@@ -635,7 +636,11 @@ static void init(int argc,
                 goto fail_prog;
         }
 
-        if (proc_announce(prog)) {
+        memset(&info, 0, sizeof(info));
+        info.pid = getpid();
+        strncpy(info.prog, prog, PROG_NAME_SIZE);
+
+        if (proc_announce(&info)) {
                 fprintf(stderr, "FATAL: Could not announce to IRMd.\n");
                 goto fail_prog;
         }
@@ -650,26 +655,30 @@ static void init(int argc,
                 gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
         }
 #endif
-        ai.fds = bmp_create(PROG_MAX_FLOWS - PROG_RES_FDS, PROG_RES_FDS);
-        if (ai.fds == NULL) {
+        proc.fds = bmp_create(PROG_MAX_FLOWS - PROG_RES_FDS, PROG_RES_FDS);
+        if (proc.fds == NULL) {
                 fprintf(stderr, "FATAL: Could not create fd bitmap.\n");
                 goto fail_fds;
         }
 
-        ai.fqueues = bmp_create(PROG_MAX_FQUEUES, 0);
-        if (ai.fqueues == NULL) {
+        proc.fqueues = bmp_create(PROG_MAX_FQUEUES, 0);
+        if (proc.fqueues == NULL) {
                 fprintf(stderr, "FATAL: Could not create fqueue bitmap.\n");
                 goto fail_fqueues;
         }
 
-        ai.gspp = ssm_pool_open();
-        if (ai.gspp == NULL) {
+        if (is_ouroboros_member_uid(getuid()))
+                proc.pool = ssm_pool_open(0);
+        else
+                proc.pool = ssm_pool_open(getuid());
+
+        if (proc.pool == NULL) {
                 fprintf(stderr, "FATAL: Could not open packet buffer.\n");
                 goto fail_rdrb;
         }
 
-        ai.flows = malloc(sizeof(*ai.flows) * PROG_MAX_FLOWS);
-        if (ai.flows == NULL) {
+        proc.flows = malloc(sizeof(*proc.flows) * PROG_MAX_FLOWS);
+        if (proc.flows == NULL) {
                 fprintf(stderr, "FATAL: Could not malloc flows.\n");
                 goto fail_flows;
         }
@@ -677,38 +686,38 @@ static void init(int argc,
         for (i = 0; i < PROG_MAX_FLOWS; ++i)
                 flow_clear(i);
 
-        ai.id_to_fd = malloc(sizeof(*ai.id_to_fd) * SYS_MAX_FLOWS);
-        if (ai.id_to_fd == NULL) {
+        proc.id_to_fd = malloc(sizeof(*proc.id_to_fd) * SYS_MAX_FLOWS);
+        if (proc.id_to_fd == NULL) {
                 fprintf(stderr, "FATAL: Could not malloc id_to_fd.\n");
                 goto fail_id_to_fd;
         }
 
         for (i = 0; i < SYS_MAX_FLOWS; ++i)
-                ai.id_to_fd[i].state = FLOW_INIT;
+                proc.id_to_fd[i].state = FLOW_INIT;
 
-        if (pthread_mutex_init(&ai.mtx, NULL)) {
+        if (pthread_mutex_init(&proc.mtx, NULL)) {
                 fprintf(stderr, "FATAL: Could not init mutex.\n");
                 goto fail_mtx;
         }
 
-        if (pthread_cond_init(&ai.cond, NULL) < 0) {
+        if (pthread_cond_init(&proc.cond, NULL) < 0) {
                 fprintf(stderr, "FATAL: Could not init condvar.\n");
                 goto fail_cond;
         }
 
-        if (pthread_rwlock_init(&ai.lock, NULL) < 0) {
+        if (pthread_rwlock_init(&proc.lock, NULL) < 0) {
                 fprintf(stderr, "FATAL: Could not initialize flow lock.\n");
                 goto fail_flow_lock;
         }
 
-        ai.fqset = ssm_flow_set_open(getpid());
-        if (ai.fqset == NULL) {
+        proc.fqset = ssm_flow_set_open(getpid());
+        if (proc.fqset == NULL) {
                 fprintf(stderr, "FATAL: Could not open flow set.\n");
                 goto fail_fqset;
         }
 
-        ai.frct_set = fset_create();
-        if (ai.frct_set == NULL || ai.frct_set->idx != 0) {
+        proc.frct_set = fset_create();
+        if (proc.frct_set == NULL || proc.frct_set->idx != 0) {
                 fprintf(stderr, "FATAL: Could not create FRCT set.\n");
                 goto fail_frct_set;
         }
@@ -732,12 +741,12 @@ static void init(int argc,
                 }
         }
 #endif
-        if (pthread_create(&ai.rx, NULL, flow_rx, NULL) < 0) {
+        if (pthread_create(&proc.rx, NULL, flow_rx, NULL) < 0) {
                 fprintf(stderr, "FATAL: Could not start monitor thread.\n");
                 goto fail_monitor;
         }
 
-        list_head_init(&ai.flow_list);
+        list_head_init(&proc.flow_list);
 
         return;
@@ -748,27 +757,27 @@ static void init(int argc,
 #endif
         timerwheel_fini();
 fail_timerwheel:
-        fset_destroy(ai.frct_set);
+        fset_destroy(proc.frct_set);
 fail_frct_set:
-        ssm_flow_set_close(ai.fqset);
+        ssm_flow_set_close(proc.fqset);
 fail_fqset:
-        pthread_rwlock_destroy(&ai.lock);
+        pthread_rwlock_destroy(&proc.lock);
 fail_flow_lock:
-        pthread_cond_destroy(&ai.cond);
+        pthread_cond_destroy(&proc.cond);
 fail_cond:
-        pthread_mutex_destroy(&ai.mtx);
+        pthread_mutex_destroy(&proc.mtx);
 fail_mtx:
-        free(ai.id_to_fd);
+        free(proc.id_to_fd);
 fail_id_to_fd:
-        free(ai.flows);
+        free(proc.flows);
 fail_flows:
-        ssm_pool_close(ai.gspp);
+        ssm_pool_close(proc.pool);
 fail_rdrb:
-        bmp_destroy(ai.fqueues);
+        bmp_destroy(proc.fqueues);
 fail_fqueues:
-        bmp_destroy(ai.fds);
+        bmp_destroy(proc.fds);
 fail_fds:
-        memset(&ai, 0, sizeof(ai));
+        memset(&proc, 0, sizeof(proc));
 fail_prog:
         exit(EXIT_FAILURE);
 }
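The init() hunk above decides which packet pool a process attaches to: members of the ouroboros group map the Global Shared Packet Pool (keyed on uid 0), everyone else a Per-User Pool keyed on their own uid. A hedged sketch of that decision in isolation — is_ouroboros_member_uid() is the helper the hunk itself calls; its declaration here is only for illustration:

#include <stdbool.h>
#include <unistd.h>

/* Declared elsewhere in the tree; shown here only so the sketch compiles. */
bool is_ouroboros_member_uid(uid_t uid);
struct ssm_pool * ssm_pool_open(uid_t uid);

static struct ssm_pool * attach_pool(void)
{
        uid_t uid = getuid();

        /* GSPP is keyed on uid 0; a PUP is keyed on the caller's uid. */
        return is_ouroboros_member_uid(uid) ? ssm_pool_open(0)
                                            : ssm_pool_open(uid);
}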
@@ -777,51 +786,52 @@ static void fini(void)
 {
         int i;
 
-        if (ai.fds == NULL)
+        if (proc.fds == NULL)
                 return;
 
-        pthread_cancel(ai.rx);
-        pthread_join(ai.rx, NULL);
+        pthread_cancel(proc.rx);
+        pthread_join(proc.rx, NULL);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         for (i = 0; i < PROG_MAX_FLOWS; ++i) {
-                if (ai.flows[i].info.id != -1) {
+                struct flow * flow = &proc.flows[i];
+                if (flow->info.id != -1) {
                         ssize_t idx;
-                        ssm_rbuff_set_acl(ai.flows[i].rx_rb, ACL_FLOWDOWN);
-                        while ((idx = ssm_rbuff_read(ai.flows[i].rx_rb)) >= 0)
-                                ssm_pool_remove(ai.gspp, idx);
+                        ssm_rbuff_set_acl(flow->rx_rb, ACL_FLOWDOWN);
+                        while ((idx = ssm_rbuff_read(flow->rx_rb)) >= 0)
+                                ssm_pool_remove(proc.pool, idx);
                         __flow_fini(i);
                 }
         }
 
-        pthread_cond_destroy(&ai.cond);
-        pthread_mutex_destroy(&ai.mtx);
+        pthread_cond_destroy(&proc.cond);
+        pthread_mutex_destroy(&proc.mtx);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
 #ifdef PROC_FLOW_STATS
         rib_fini();
 #endif
         timerwheel_fini();
 
-        fset_destroy(ai.frct_set);
+        fset_destroy(proc.frct_set);
 
-        ssm_flow_set_close(ai.fqset);
+        ssm_flow_set_close(proc.fqset);
 
-        pthread_rwlock_destroy(&ai.lock);
+        pthread_rwlock_destroy(&proc.lock);
 
-        free(ai.flows);
-        free(ai.id_to_fd);
+        free(proc.flows);
+        free(proc.id_to_fd);
 
-        ssm_pool_close(ai.gspp);
+        ssm_pool_close(proc.pool);
 
-        bmp_destroy(ai.fds);
-        bmp_destroy(ai.fqueues);
+        bmp_destroy(proc.fds);
+        bmp_destroy(proc.fqueues);
 
         proc_exit();
 
-        memset(&ai, 0, sizeof(ai));
+        memset(&proc, 0, sizeof(proc));
 }
 
 #if defined(__MACH__) && defined(__APPLE__)
@@ -978,12 +988,12 @@ int flow_dealloc(int fd)
 
         memset(&info, 0, sizeof(flow));
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -ENOTALLOC;
         }
@@ -992,21 +1002,21 @@ int flow_dealloc(int fd)
         flow->rcv_timesout = true;
         flow->rcv_timeo    = tic;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         flow_read(fd, buf, SOCK_BUF_SIZE);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         timeo.tv_sec = frcti_dealloc(flow->frcti);
         while (timeo.tv_sec < 0) { /* keep the flow active for rtx */
                 ssize_t ret;
 
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
 
                 ret = flow_read(fd, pkt, PKT_BUF_LEN);
 
-                pthread_rwlock_rdlock(&ai.lock);
+                pthread_rwlock_rdlock(&proc.lock);
 
                 timeo.tv_sec = frcti_dealloc(flow->frcti);
@@ -1014,7 +1024,7 @@ int flow_dealloc(int fd)
                 timeo.tv_sec = -timeo.tv_sec;
         }
 
-        pthread_cleanup_push(__cleanup_rwlock_unlock, &ai.lock);
+        pthread_cleanup_push(__cleanup_rwlock_unlock, &proc.lock);
 
         ssm_rbuff_fini(flow->tx_rb);
@@ -1048,21 +1058,21 @@ int ipcp_flow_dealloc(int fd)
         if (fd < 0 || fd >= SYS_MAX_FLOWS )
                 return -EINVAL;
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         memset(&info, 0, sizeof(flow));
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -ENOTALLOC;
         }
 
         info.id      = flow->info.id;
         info.n_1_pid = flow->info.n_1_pid;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         if (ipcp_flow_dealloc__irm_req_ser(&msg, &info) < 0)
                 return -ENOMEM;
@@ -1096,14 +1106,14 @@ int fccntl(int fd,
         if (fd < 0 || fd >= SYS_MAX_FLOWS)
                 return -EBADF;
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         va_start(l, cmd);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 va_end(l);
                 return -ENOTALLOC;
         }
@@ -1209,24 +1219,24 @@ int fccntl(int fd,
                 *cflags = frcti_getflags(flow->frcti);
                 break;
         default:
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 va_end(l);
                 return -ENOTSUP;
         };
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         va_end(l);
 
         return 0;
 
 einval:
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
         va_end(l);
         return -EINVAL;
 eperm:
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
         va_end(l);
         return -EPERM;
 }
@@ -1268,15 +1278,15 @@ static int flow_tx_spb(struct flow * flow,
 
         clock_gettime(PTHREAD_COND_CLOCK, &now);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         flow->snd_act = now;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         idx = ssm_pk_buff_get_idx(spb);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (ssm_pk_buff_len(spb) > 0) {
                 if (frcti_snd(flow->frcti, spb) < 0)
@@ -1289,7 +1299,7 @@ static int flow_tx_spb(struct flow * flow,
                 goto enomem;
         }
 
-        pthread_cleanup_push(__cleanup_rwlock_unlock, &ai.lock);
+        pthread_cleanup_push(__cleanup_rwlock_unlock, &proc.lock);
 
         if (!block)
                 ret = ssm_rbuff_write(flow->tx_rb, idx);
@@ -1297,7 +1307,7 @@ static int flow_tx_spb(struct flow * flow,
                 ret = ssm_rbuff_write_b(flow->tx_rb, idx, abstime);
 
         if (ret < 0)
-                ssm_pool_remove(ai.gspp, idx);
+                ssm_pool_remove(proc.pool, idx);
         else
                 ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
@@ -1306,8 +1316,8 @@ static int flow_tx_spb(struct flow * flow,
         return 0;
 
 enomem:
-        pthread_rwlock_unlock(&ai.lock);
-        ssm_pool_remove(ai.gspp, idx);
+        pthread_rwlock_unlock(&proc.lock);
+        ssm_pool_remove(proc.pool, idx);
         return -ENOMEM;
 }
@@ -1330,14 +1340,14 @@ ssize_t flow_write(int fd,
         if (fd < 0 || fd >= PROG_MAX_FLOWS)
                 return -EBADF;
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         clock_gettime(PTHREAD_COND_CLOCK, &abs);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -ENOTALLOC;
         }
@@ -1348,7 +1358,7 @@ ssize_t flow_write(int fd,
 
         flags = flow->oflags;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         if ((flags & FLOWFACCMODE) == FLOWFRDONLY)
                 return -EPERM;
@@ -1356,12 +1366,12 @@ ssize_t flow_write(int fd,
         if (flags & FLOWFWNOBLOCK) {
                 if (!frcti_is_window_open(flow->frcti))
                         return -EAGAIN;
-                idx = ssm_pool_alloc(ai.gspp, count, &ptr, &spb);
+                idx = ssm_pool_alloc(proc.pool, count, &ptr, &spb);
         } else {
                 ret = frcti_window_wait(flow->frcti, abstime);
                 if (ret < 0)
                         return ret;
-                idx = ssm_pool_alloc_b(ai.gspp, count, &ptr, &spb, abstime);
+                idx = ssm_pool_alloc_b(proc.pool, count, &ptr, &spb, abstime);
         }
 
         if (idx < 0)
@@ -1405,16 +1415,16 @@ static ssize_t flow_rx_spb(struct flow * flow,
 
         clock_gettime(PTHREAD_COND_CLOCK, &now);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         flow->rcv_act = now;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
-        *spb = ssm_pool_get(ai.gspp, idx);
+        *spb = ssm_pool_get(proc.pool, idx);
 
         if (invalid_pkt(flow, *spb)) {
-                ssm_pool_remove(ai.gspp, idx);
+                ssm_pool_remove(proc.pool, idx);
                 return -EAGAIN;
         }
@@ -1439,19 +1449,19 @@ ssize_t flow_read(int fd,
         if (fd < 0 || fd >= PROG_MAX_FLOWS)
                 return -EBADF;
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         clock_gettime(PTHREAD_COND_CLOCK, &now);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -ENOTALLOC;
         }
 
         if (flow->part_idx == DONE_PART) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 flow->part_idx = NO_PART;
                 return 0;
         }
@@ -1467,7 +1477,7 @@ ssize_t flow_read(int fd,
         idx = flow->part_idx;
         if (idx < 0) {
                 while ((idx = frcti_queued_pdu(flow->frcti)) < 0) {
-                        pthread_rwlock_unlock(&ai.lock);
+                        pthread_rwlock_unlock(&proc.lock);
 
                         idx = flow_rx_spb(flow, &spb, block, abstime);
                         if (idx < 0) {
@@ -1476,19 +1486,19 @@ ssize_t flow_read(int fd,
                                 if (!block)
                                         return idx;
 
-                                pthread_rwlock_rdlock(&ai.lock);
+                                pthread_rwlock_rdlock(&proc.lock);
                                 continue;
                         }
 
-                        pthread_rwlock_rdlock(&ai.lock);
+                        pthread_rwlock_rdlock(&proc.lock);
 
                         frcti_rcv(flow->frcti, spb);
                 }
         }
 
-        spb = ssm_pool_get(ai.gspp, idx);
+        spb = ssm_pool_get(proc.pool, idx);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         packet = ssm_pk_buff_head(spb);
@@ -1500,25 +1510,25 @@ ssize_t flow_read(int fd,
                 memcpy(buf, packet, n);
                 ipcp_spb_release(spb);
 
-                pthread_rwlock_wrlock(&ai.lock);
+                pthread_rwlock_wrlock(&proc.lock);
 
                 flow->part_idx = (partrd && n == (ssize_t) count) ?
                         DONE_PART : NO_PART;
 
                 flow->rcv_act = now;
 
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
 
                 return n;
         } else {
                 if (partrd) {
                         memcpy(buf, packet, count);
                         ssm_pk_buff_head_release(spb, n);
 
-                        pthread_rwlock_wrlock(&ai.lock);
+                        pthread_rwlock_wrlock(&proc.lock);
 
                         flow->part_idx = idx;
 
                         flow->rcv_act = now;
 
-                        pthread_rwlock_unlock(&ai.lock);
+                        pthread_rwlock_unlock(&proc.lock);
 
                         return count;
                 } else {
                         ipcp_spb_release(spb);
@@ -1537,20 +1547,20 @@ struct flow_set * fset_create(void)
         if (set == NULL)
                 goto fail_malloc;
 
-        assert(ai.fqueues);
+        assert(proc.fqueues);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
-        set->idx = bmp_allocate(ai.fqueues);
-        if (!bmp_is_id_valid(ai.fqueues, set->idx))
+        set->idx = bmp_allocate(proc.fqueues);
+        if (!bmp_is_id_valid(proc.fqueues, set->idx))
                 goto fail_bmp_alloc;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return set;
 
 fail_bmp_alloc:
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
         free(set);
 fail_malloc:
         return NULL;
@@ -1563,11 +1573,11 @@ void fset_destroy(struct flow_set * set)
 
         fset_zero(set);
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
-        bmp_release(ai.fqueues, set->idx);
+        bmp_release(proc.fqueues, set->idx);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         free(set);
 }
@@ -1595,7 +1605,7 @@ void fset_zero(struct flow_set * set)
         if (set == NULL)
                 return;
 
-        ssm_flow_set_zero(ai.fqset, set->idx);
+        ssm_flow_set_zero(proc.fqset, set->idx);
 }
 
 int fset_add(struct flow_set * set,
@@ -1607,9 +1617,9 @@ int fset_add(struct flow_set * set,
         if (set == NULL || fd < 0 || fd >= SYS_MAX_FLOWS)
                 return -EINVAL;
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (flow->info.id < 0) {
                 ret = -EINVAL;
@@ -1617,21 +1627,21 @@ int fset_add(struct flow_set * set,
         }
 
         if (flow->frcti != NULL)
-                ssm_flow_set_del(ai.fqset, 0, ai.flows[fd].info.id);
+                ssm_flow_set_del(proc.fqset, 0, flow->info.id);
 
-        ret = ssm_flow_set_add(ai.fqset, set->idx, ai.flows[fd].info.id);
+        ret = ssm_flow_set_add(proc.fqset, set->idx, flow->info.id);
         if (ret < 0)
                 goto fail;
 
-        if (ssm_rbuff_queued(ai.flows[fd].rx_rb))
-                ssm_flow_set_notify(ai.fqset, ai.flows[fd].info.id, FLOW_PKT);
+        if (ssm_rbuff_queued(flow->rx_rb))
+                ssm_flow_set_notify(proc.fqset, flow->info.id, FLOW_PKT);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return ret;
 
 fail:
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
         return ret;
 }
@@ -1643,37 +1653,40 @@ void fset_del(struct flow_set * set,
         if (set == NULL || fd < 0 || fd >= SYS_MAX_FLOWS)
                 return;
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (flow->info.id >= 0)
-                ssm_flow_set_del(ai.fqset, set->idx, flow->info.id);
+                ssm_flow_set_del(proc.fqset, set->idx, flow->info.id);
 
         if (flow->frcti != NULL)
-                ssm_flow_set_add(ai.fqset, 0, ai.flows[fd].info.id);
+                ssm_flow_set_add(proc.fqset, 0, proc.flows[fd].info.id);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 }
 
 bool fset_has(const struct flow_set * set,
               int                     fd)
 {
-        bool ret;
+        struct flow * flow;
+        bool          ret;
 
         if (set == NULL || fd < 0 || fd >= SYS_MAX_FLOWS)
                 return false;
 
-        pthread_rwlock_rdlock(&ai.lock);
+        flow = &proc.flows[fd];
 
-        if (ai.flows[fd].info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
+
+        if (flow->info.id < 0) {
+                pthread_rwlock_unlock(&proc.lock);
                 return false;
         }
 
-        ret = (ssm_flow_set_has(ai.fqset, set->idx, ai.flows[fd].info.id) == 1);
+        ret = (ssm_flow_set_has(proc.fqset, set->idx, flow->info.id) == 1);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return ret;
 }
@@ -1690,44 +1703,44 @@ static int fqueue_filter(struct fqueue * fq)
                 if (fq->fqueue[fq->next].event != FLOW_PKT)
                         return 1;
 
-                pthread_rwlock_rdlock(&ai.lock);
+                pthread_rwlock_rdlock(&proc.lock);
 
-                fd = ai.id_to_fd[fq->fqueue[fq->next].flow_id].fd;
+                fd = proc.id_to_fd[fq->fqueue[fq->next].flow_id].fd;
                 if (fd < 0) {
                         ++fq->next;
-                        pthread_rwlock_unlock(&ai.lock);
+                        pthread_rwlock_unlock(&proc.lock);
                         continue;
                 }
 
-                frcti = ai.flows[fd].frcti;
+                frcti = proc.flows[fd].frcti;
                 if (frcti == NULL) {
-                        pthread_rwlock_unlock(&ai.lock);
+                        pthread_rwlock_unlock(&proc.lock);
                         return 1;
                 }
 
                 if (__frcti_pdu_ready(frcti) >= 0) {
-                        pthread_rwlock_unlock(&ai.lock);
+                        pthread_rwlock_unlock(&proc.lock);
                         return 1;
                 }
 
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
 
-                idx = flow_rx_spb(&ai.flows[fd], &spb, false, NULL);
+                idx = flow_rx_spb(&proc.flows[fd], &spb, false, NULL);
                 if (idx < 0)
                         return 0;
 
-                pthread_rwlock_rdlock(&ai.lock);
+                pthread_rwlock_rdlock(&proc.lock);
 
-                spb = ssm_pool_get(ai.gspp, idx);
+                spb = ssm_pool_get(proc.pool, idx);
 
                 __frcti_rcv(frcti, spb);
 
                 if (__frcti_pdu_ready(frcti) >= 0) {
-                        pthread_rwlock_unlock(&ai.lock);
+                        pthread_rwlock_unlock(&proc.lock);
                         return 1;
                 }
 
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
 
                 ++fq->next;
         }
@@ -1749,15 +1762,15 @@ int fqueue_next(struct fqueue * fq)
         if (fq->next != 0 && fqueue_filter(fq) == 0)
                 return -EPERM;
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         e = fq->fqueue + fq->next;
 
-        fd = ai.id_to_fd[e->flow_id].fd;
+        fd = proc.id_to_fd[e->flow_id].fd;
 
         ++fq->next;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return fd;
 }
@@ -1795,7 +1808,7 @@ ssize_t fevent(struct flow_set * set,
         }
 
         while (ret == 0) {
-                ret = ssm_flow_set_wait(ai.fqset, set->idx, fq->fqueue, t);
+                ret = ssm_flow_set_wait(proc.fqset, set->idx, fq->fqueue, t);
                 if (ret == -ETIMEDOUT)
                         return -ETIMEDOUT;
@@ -1842,11 +1855,11 @@ int np1_flow_dealloc(int flow_id,
 
         sleep(timeo);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        fd = ai.id_to_fd[flow_id].fd;
+        fd = proc.id_to_fd[flow_id].fd;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return fd;
 }
@@ -1859,11 +1872,11 @@ int np1_flow_resp(int flow_id,
         if (resp == 0 && flow_wait_assign(flow_id) != FLOW_ALLOCATED)
                 return -1;
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        fd = ai.id_to_fd[flow_id].fd;
+        fd = proc.id_to_fd[flow_id].fd;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return fd;
 }
@@ -1942,11 +1955,11 @@ int ipcp_flow_alloc_reply(int fd,
 
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        flow.id = ai.flows[fd].info.id;
+        flow.id = proc.flows[fd].info.id;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         flow.mpl = mpl;
@@ -1969,25 +1982,25 @@ int ipcp_flow_read(int fd,
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
         assert(spb);
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         assert(flow->info.id >= 0);
 
         while (frcti_queued_pdu(flow->frcti) < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
 
                 idx = flow_rx_spb(flow, spb, false, NULL);
                 if (idx < 0)
                         return idx;
 
-                pthread_rwlock_rdlock(&ai.lock);
+                pthread_rwlock_rdlock(&proc.lock);
 
                 frcti_rcv(flow->frcti, *spb);
         }
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return 0;
 }
@@ -2001,88 +2014,132 @@ int ipcp_flow_write(int fd,
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
         assert(spb);
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
-        pthread_rwlock_wrlock(&ai.lock);
+        pthread_rwlock_wrlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -ENOTALLOC;
         }
 
         if ((flow->oflags & FLOWFACCMODE) == FLOWFRDONLY) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -EPERM;
         }
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         ret = flow_tx_spb(flow, spb, true, NULL);
 
         return ret;
 }
 
+static int pool_copy_spb(struct ssm_pool *     src_pool,
+                         ssize_t               src_idx,
+                         struct ssm_pool *     dst_pool,
+                         struct ssm_pk_buff ** dst_spb)
+{
+        struct ssm_pk_buff * src;
+        uint8_t *            ptr;
+        size_t               len;
+
+        src = ssm_pool_get(src_pool, src_idx);
+        len = ssm_pk_buff_len(src);
+
+        if (ssm_pool_alloc(dst_pool, len, &ptr, dst_spb) < 0) {
+                ssm_pool_remove(src_pool, src_idx);
+                return -ENOMEM;
+        }
+
+        memcpy(ptr, ssm_pk_buff_head(src), len);
+        ssm_pool_remove(src_pool, src_idx);
+
+        return 0;
+}
+
 int np1_flow_read(int fd,
-                  struct ssm_pk_buff ** spb)
+                  struct ssm_pk_buff ** spb,
+                  struct ssm_pool *     pool)
 {
-        struct flow *  flow;
-        ssize_t        idx = -1;
+        struct flow * flow;
+        ssize_t       idx = -1;
 
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
         assert(spb);
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         assert(flow->info.id >= 0);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         idx = ssm_rbuff_read(flow->rx_rb);
         if (idx < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return idx;
         }
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
-        *spb = ssm_pool_get(ai.gspp, idx);
+        if (pool == NULL) {
+                *spb = ssm_pool_get(proc.pool, idx);
+        } else {
+                /* Cross-pool copy: PUP -> GSPP */
+                if (pool_copy_spb(pool, idx, proc.pool, spb) < 0)
+                        return -ENOMEM;
+        }
 
         return 0;
 }
 
 int np1_flow_write(int fd,
-                   struct ssm_pk_buff * spb)
+                   struct ssm_pk_buff * spb,
+                   struct ssm_pool *    pool)
 {
-        struct flow * flow;
-        int           ret;
-        ssize_t       idx;
+        struct flow *        flow;
+        struct ssm_pk_buff * dst;
+        int                  ret;
+        ssize_t              idx;
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
         assert(spb);
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         if (flow->info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -ENOTALLOC;
         }
 
         if ((flow->oflags & FLOWFACCMODE) == FLOWFRDONLY) {
-                pthread_rwlock_unlock(&ai.lock);
+                pthread_rwlock_unlock(&proc.lock);
                 return -EPERM;
         }
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         idx = ssm_pk_buff_get_idx(spb);
 
-        ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
-        if (ret < 0)
-                ssm_pool_remove(ai.gspp, idx);
-        else
-                ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
+        if (pool == NULL) {
+                ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
+                if (ret < 0)
+                        ssm_pool_remove(proc.pool, idx);
+                else
+                        ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
+        } else {
+                /* Cross-pool copy: GSPP -> PUP */
+                if (pool_copy_spb(proc.pool, idx, pool, &dst) < 0)
+                        return -ENOMEM;
+                idx = ssm_pk_buff_get_idx(dst);
+                ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL);
+                if (ret < 0)
+                        ssm_pool_remove(pool, idx);
+                else
+                        ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT);
+        }
 
         return ret;
 }
@@ -2090,12 +2147,12 @@ int np1_flow_write(int fd,
 int ipcp_spb_reserve(struct ssm_pk_buff ** spb,
                      size_t                len)
 {
-        return ssm_pool_alloc_b(ai.gspp, len, NULL, spb, NULL) < 0 ? -1 : 0;
+        return ssm_pool_alloc_b(proc.pool, len, NULL, spb, NULL) < 0 ? -1 : 0;
 }
 
 void ipcp_spb_release(struct ssm_pk_buff * spb)
 {
-        ssm_pool_remove(ai.gspp, ssm_pk_buff_get_idx(spb));
+        ssm_pool_remove(proc.pool, ssm_pk_buff_get_idx(spb));
 }
 
 int ipcp_flow_fini(int fd)
@@ -2104,23 +2161,23 @@ int ipcp_flow_fini(int fd)
 
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        if (ai.flows[fd].info.id < 0) {
-                pthread_rwlock_unlock(&ai.lock);
+        if (proc.flows[fd].info.id < 0) {
+                pthread_rwlock_unlock(&proc.lock);
                 return -1;
         }
 
-        ssm_rbuff_set_acl(ai.flows[fd].rx_rb, ACL_FLOWDOWN);
-        ssm_rbuff_set_acl(ai.flows[fd].tx_rb, ACL_FLOWDOWN);
+        ssm_rbuff_set_acl(proc.flows[fd].rx_rb, ACL_FLOWDOWN);
+        ssm_rbuff_set_acl(proc.flows[fd].tx_rb, ACL_FLOWDOWN);
 
-        ssm_flow_set_notify(ai.flows[fd].set,
-                            ai.flows[fd].info.id,
+        ssm_flow_set_notify(proc.flows[fd].set,
+                            proc.flows[fd].info.id,
                             FLOW_DEALLOC);
 
-        rx_rb = ai.flows[fd].rx_rb;
+        rx_rb = proc.flows[fd].rx_rb;
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         if (rx_rb != NULL)
                 ssm_rbuff_fini(rx_rb);
@@ -2134,13 +2191,13 @@ int ipcp_flow_get_qoscube(int fd,
         assert(fd >= 0 && fd < SYS_MAX_FLOWS);
         assert(cube);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        assert(ai.flows[fd].info.id >= 0);
+        assert(proc.flows[fd].info.id >= 0);
 
-        *cube = qos_spec_to_cube(ai.flows[fd].info.qs);
+        *cube = qos_spec_to_cube(proc.flows[fd].info.qs);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return 0;
 }
@@ -2149,56 +2206,76 @@ size_t ipcp_flow_queued(int fd)
 {
         size_t q;
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
-        assert(ai.flows[fd].info.id >= 0);
+        assert(proc.flows[fd].info.id >= 0);
 
-        q = ssm_rbuff_queued(ai.flows[fd].tx_rb);
+        q = ssm_rbuff_queued(proc.flows[fd].tx_rb);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return q;
 }
 
-ssize_t local_flow_read(int fd)
+int local_flow_transfer(int               src_fd,
+                        int               dst_fd,
+                        struct ssm_pool * src_pool,
+                        struct ssm_pool * dst_pool)
 {
-        ssize_t ret;
-
-        assert(fd >= 0);
src_flow; + struct flow * dst_flow; + struct ssm_pk_buff * dst_spb; + struct ssm_pool * sp; + struct ssm_pool * dp; + ssize_t idx; + int ret; - pthread_rwlock_rdlock(&ai.lock); + assert(src_fd >= 0); + assert(dst_fd >= 0); - ret = ssm_rbuff_read(ai.flows[fd].rx_rb); + src_flow = &proc.flows[src_fd]; + dst_flow = &proc.flows[dst_fd]; - pthread_rwlock_unlock(&ai.lock); + sp = src_pool == NULL ? proc.pool : src_pool; + dp = dst_pool == NULL ? proc.pool : dst_pool; - return ret; -} - -int local_flow_write(int fd, - size_t idx) -{ - struct flow * flow; - int ret; + pthread_rwlock_rdlock(&proc.lock); - assert(fd >= 0); - - flow = &ai.flows[fd]; - - pthread_rwlock_rdlock(&ai.lock); + idx = ssm_rbuff_read(src_flow->rx_rb); + if (idx < 0) { + pthread_rwlock_unlock(&proc.lock); + return idx; + } - if (flow->info.id < 0) { - pthread_rwlock_unlock(&ai.lock); + if (dst_flow->info.id < 0) { + pthread_rwlock_unlock(&proc.lock); + ssm_pool_remove(sp, idx); return -ENOTALLOC; } - ret = ssm_rbuff_write_b(flow->tx_rb, idx, NULL); - if (ret == 0) - ssm_flow_set_notify(flow->set, flow->info.id, FLOW_PKT); - else - ssm_pool_remove(ai.gspp, idx); + pthread_rwlock_unlock(&proc.lock); + + if (sp == dp) { + /* Same pool: zero-copy */ + ret = ssm_rbuff_write_b(dst_flow->tx_rb, idx, NULL); + if (ret < 0) + ssm_pool_remove(sp, idx); + else + ssm_flow_set_notify(dst_flow->set, + dst_flow->info.id, FLOW_PKT); + } else { + /* Different pools: single copy */ + if (pool_copy_spb(sp, idx, dp, &dst_spb) < 0) + return -ENOMEM; - pthread_rwlock_unlock(&ai.lock); + idx = ssm_pk_buff_get_idx(dst_spb); + ret = ssm_rbuff_write_b(dst_flow->tx_rb, idx, NULL); + if (ret < 0) + ssm_pool_remove(dp, idx); + else + ssm_flow_set_notify(dst_flow->set, + dst_flow->info.id, FLOW_PKT); + } return ret; } diff --git a/src/lib/frct.c b/src/lib/frct.c index 76736931..39a82966 100644 --- a/src/lib/frct.c +++ b/src/lib/frct.c @@ -118,11 +118,11 @@ static int frct_rib_read(const char * path, fd = atoi(path); - flow = &ai.flows[fd]; + flow = &proc.flows[fd]; clock_gettime(PTHREAD_COND_CLOCK, &now); - pthread_rwlock_rdlock(&ai.lock); + pthread_rwlock_rdlock(&proc.lock); frcti = flow->frcti; @@ -176,7 +176,7 @@ static int frct_rib_read(const char * path, pthread_rwlock_unlock(&flow->frcti->lock); - pthread_rwlock_unlock(&ai.lock); + pthread_rwlock_unlock(&proc.lock); return strlen(buf); } @@ -244,9 +244,9 @@ static void __send_frct_pkt(int fd, /* Raw calls needed to bypass frcti. 
diff --git a/src/lib/frct.c b/src/lib/frct.c
index 76736931..39a82966 100644
--- a/src/lib/frct.c
+++ b/src/lib/frct.c
@@ -118,11 +118,11 @@ static int frct_rib_read(const char * path,
 
         fd = atoi(path);
 
-        flow = &ai.flows[fd];
+        flow = &proc.flows[fd];
 
         clock_gettime(PTHREAD_COND_CLOCK, &now);
 
-        pthread_rwlock_rdlock(&ai.lock);
+        pthread_rwlock_rdlock(&proc.lock);
 
         frcti = flow->frcti;
@@ -176,7 +176,7 @@ static int frct_rib_read(const char * path,
 
         pthread_rwlock_unlock(&flow->frcti->lock);
 
-        pthread_rwlock_unlock(&ai.lock);
+        pthread_rwlock_unlock(&proc.lock);
 
         return strlen(buf);
 }
@@ -244,9 +244,9 @@ static void __send_frct_pkt(int fd,
 
         /* Raw calls needed to bypass frcti. */
 #ifdef RXM_BLOCKING
-        idx = ssm_pool_alloc_b(ai.gspp, sizeof(*pci), NULL, &spb, NULL);
+        idx = ssm_pool_alloc_b(proc.pool, sizeof(*pci), NULL, &spb, NULL);
 #else
-        idx = ssm_pool_alloc(ai.gspp, sizeof(*pci), NULL, &spb);
+        idx = ssm_pool_alloc(proc.pool, sizeof(*pci), NULL, &spb);
 #endif
         if (idx < 0)
                 return;
@@ -259,7 +259,7 @@ static void __send_frct_pkt(int fd,
         pci->flags = flags;
         pci->ackno = hton32(ackno);
 
-        f = &ai.flows[fd];
+        f = &proc.flows[fd];
 
         if (spb_encrypt(f, spb) < 0)
                 goto fail;
@@ -398,7 +398,7 @@ static struct frcti * frcti_create(int fd,
         frcti->n_out = 0;
         frcti->n_rqo = 0;
 #endif
-        if (ai.flows[fd].info.qs.loss == 0) {
+        if (proc.flows[fd].info.qs.loss == 0) {
                 frcti->snd_cr.cflags |= FRCTFRTX | FRCTFLINGER;
                 frcti->rcv_cr.cflags |= FRCTFRTX;
         }
@@ -841,7 +841,7 @@ static void __frcti_rcv(struct frcti * frcti,
 
                 __send_frct_pkt(fd, FRCT_FC, 0, rwe);
 
-                ssm_pool_remove(ai.gspp, idx);
+                ssm_pool_remove(proc.pool, idx);
                 return;
         }
@@ -928,7 +928,7 @@ static void __frcti_rcv(struct frcti * frcti,
 
 drop_packet:
         pthread_rwlock_unlock(&frcti->lock);
-        ssm_pool_remove(ai.gspp, idx);
+        ssm_pool_remove(proc.pool, idx);
         send_frct_pkt(frcti);
         return;
 }
diff --git a/src/lib/pb/ipcp.proto b/src/lib/pb/ipcp.proto
index c2c7f48b..deddf6af 100644
--- a/src/lib/pb/ipcp.proto
+++ b/src/lib/pb/ipcp.proto
@@ -56,4 +56,5 @@ message ipcp_msg {
         optional uint32 timeo_sec   = 12;
         optional sint32 mpl         = 13;
         optional int32  result      = 14;
+        optional uint32 uid         = 15; /* 0 = GSPP, >0 = PUP uid */
 }
diff --git a/src/lib/pb/irm.proto b/src/lib/pb/irm.proto
index 5d0ee611..1845c694 100644
--- a/src/lib/pb/irm.proto
+++ b/src/lib/pb/irm.proto
@@ -94,6 +94,6 @@ message irm_msg {
         optional uint32 timeo_sec       = 23;
         optional uint32 timeo_nsec      = 24;
         optional sint32 result          = 25;
-        optional bytes  sym_key         = 26; /* symmetric encryption key */
-        optional sint32 cipher_nid      = 27; /* cipher NID */
+        optional bytes  sym_key    = 26;      /* symmetric encryption key */
+        optional sint32 cipher_nid = 27;      /* cipher NID */
 }
diff --git a/src/lib/pb/model.proto b/src/lib/pb/model.proto
index 7b06e434..82700a9a 100644
--- a/src/lib/pb/model.proto
+++ b/src/lib/pb/model.proto
@@ -34,12 +34,13 @@ message qosspec_msg {
 }
 
 message flow_info_msg {
-        required uint32      id      = 1;
-        required uint32      n_pid   = 2;
-        required uint32      n_1_pid = 3;
-        required uint32      mpl     = 4;
-        required uint32      state   = 5;
-        required qosspec_msg qos     = 6;
+        required uint32      id      = 1;
+        required uint32      n_pid   = 2;
+        required uint32      n_1_pid = 3;
+        required uint32      mpl     = 4;
+        required uint32      state   = 5;
+        required qosspec_msg qos     = 6;
+        required uint32      uid     = 7;
 }
 
 message name_info_msg {
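model.proto now carries a uid on every flow_info_msg, so the IRMd and IPCPs can tell which pool backs a flow. A hedged sketch of filling the C-side struct that feeds flow_info_s_to_msg() — the flow_id variable is a placeholder, and the field set is exactly the one the protobuf.c hunk below serializes:

#include <string.h>
#include <unistd.h>

static void tag_flow(struct flow_info * info, int flow_id)
{
        memset(info, 0, sizeof(*info));
        info->id  = flow_id;
        /* 0 marks a GSPP-backed flow; any other value is the PUP owner. */
        info->uid = getuid();
}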
diff --git a/src/lib/protobuf.c b/src/lib/protobuf.c
index bd6c179e..4617323a 100644
--- a/src/lib/protobuf.c
+++ b/src/lib/protobuf.c
@@ -24,6 +24,7 @@
 
 #include <ouroboros/protobuf.h>
 #include <ouroboros/crypt.h>
+#include <ouroboros/proc.h>
 
 #include <stdlib.h>
 #include <string.h>
@@ -74,12 +75,13 @@ flow_info_msg_t * flow_info_s_to_msg(const struct flow_info * s)
 
         flow_info_msg__init(msg);
 
-        msg->id      = s->id;
-        msg->n_pid   = s->n_pid;
-        msg->n_1_pid = s->n_1_pid;
-        msg->mpl     = s->mpl;
-        msg->state   = s->state;
-        msg->qos     = qos_spec_s_to_msg(&s->qs);
+        msg->id      = s->id;
+        msg->n_pid   = s->n_pid;
+        msg->n_1_pid = s->n_1_pid;
+        msg->mpl     = s->mpl;
+        msg->state   = s->state;
+        msg->uid     = s->uid;
+        msg->qos     = qos_spec_s_to_msg(&s->qs);
         if (msg->qos == NULL)
                 goto fail_msg;
@@ -104,6 +106,7 @@ struct flow_info flow_info_msg_to_s(const flow_info_msg_t * msg)
         s.n_1_pid = msg->n_1_pid;
         s.mpl     = msg->mpl;
         s.state   = msg->state;
+        s.uid     = msg->uid;
         s.qs      = qos_spec_msg_to_s(msg->qos);
 
         return s;
diff --git a/src/lib/serdes-irm.c b/src/lib/serdes-irm.c
index 9e829632..8cbe20b1 100644
--- a/src/lib/serdes-irm.c
+++ b/src/lib/serdes-irm.c
@@ -289,8 +289,8 @@ int ipcp_create_r__irm_req_ser(buffer_t * buf,
         return -ENOMEM;
 }
 
-int proc_announce__irm_req_ser(buffer_t *   buf,
-                               const char * prog)
+int proc_announce__irm_req_ser(buffer_t *               buf,
+                               const struct proc_info * proc)
 {
         irm_msg_t * msg;
         size_t      len;
@@ -303,8 +303,8 @@ int proc_announce__irm_req_ser(buffer_t * buf,
         msg->code = IRM_MSG_CODE__IRM_PROC_ANNOUNCE;
 
         msg->has_pid = true;
-        msg->pid     = getpid();
-        msg->prog    = strdup(prog);
+        msg->pid     = proc->pid;
+        msg->prog    = strdup(proc->prog);
         if (msg->prog == NULL)
                 goto fail_msg;
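The pool.c diff that follows splits one block-size table into two: a GSPP table for privileged processes and a PUP table for everyone else, with select_size_class() now reading the sizes from the mapped header rather than a compile-time array. A hedged sketch of the fit rule it implements — the HDR/HEAD/TAIL figures below are placeholders for sizeof(struct ssm_pk_buff), SSM_PK_BUFF_HEADSPACE and SSM_PK_BUFF_TAILSPACE, which are build-time values:

#include <stddef.h>

#define HDR   64u  /* placeholder for sizeof(struct ssm_pk_buff) */
#define HEAD 128u  /* placeholder for SSM_PK_BUFF_HEADSPACE */
#define TAIL 128u  /* placeholder for SSM_PK_BUFF_TAILSPACE */

static const size_t class_size[] = {
        1u << 8, 1u << 9, 1u << 10, 1u << 11, 1u << 12,
        1u << 14, 1u << 16, 1u << 18, 1u << 20
};

/* First size class whose object size fits header + headspace +
 * payload + tailspace, mirroring select_size_class() in the diff. */
static int pick_class(size_t len)
{
        size_t need = HDR + HEAD + len + TAIL;
        size_t i;

        for (i = 0; i < sizeof(class_size) / sizeof(class_size[0]); i++)
                if (need <= class_size[i])
                        return (int) i;

        return -1; /* larger than the biggest class */
}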
diff --git a/src/lib/ssm/pool.c b/src/lib/ssm/pool.c
index b8cfe3a1..e938f644 100644
--- a/src/lib/ssm/pool.c
+++ b/src/lib/ssm/pool.c
@@ -41,17 +41,30 @@
 #include <sys/mman.h>
 #include <sys/stat.h>
 
-/* Size class configuration from CMake */
-static const struct ssm_size_class_cfg ssm_sc_cfg[SSM_POOL_MAX_CLASSES] = {
-        { (1 << 8),  SSM_POOL_256_BLOCKS  },
-        { (1 << 9),  SSM_POOL_512_BLOCKS  },
-        { (1 << 10), SSM_POOL_1K_BLOCKS   },
-        { (1 << 11), SSM_POOL_2K_BLOCKS   },
-        { (1 << 12), SSM_POOL_4K_BLOCKS   },
-        { (1 << 14), SSM_POOL_16K_BLOCKS  },
-        { (1 << 16), SSM_POOL_64K_BLOCKS  },
-        { (1 << 18), SSM_POOL_256K_BLOCKS },
-        { (1 << 20), SSM_POOL_1M_BLOCKS   },
+/* Global Shared Packet Pool (GSPP) configuration */
+static const struct ssm_size_class_cfg ssm_gspp_cfg[SSM_POOL_MAX_CLASSES] = {
+        { (1 << 8),  SSM_GSPP_256_BLOCKS  },
+        { (1 << 9),  SSM_GSPP_512_BLOCKS  },
+        { (1 << 10), SSM_GSPP_1K_BLOCKS   },
+        { (1 << 11), SSM_GSPP_2K_BLOCKS   },
+        { (1 << 12), SSM_GSPP_4K_BLOCKS   },
+        { (1 << 14), SSM_GSPP_16K_BLOCKS  },
+        { (1 << 16), SSM_GSPP_64K_BLOCKS  },
+        { (1 << 18), SSM_GSPP_256K_BLOCKS },
+        { (1 << 20), SSM_GSPP_1M_BLOCKS   },
+};
+
+/* Per-User Pool (PUP) configuration */
+static const struct ssm_size_class_cfg ssm_pup_cfg[SSM_POOL_MAX_CLASSES] = {
+        { (1 << 8),  SSM_PUP_256_BLOCKS  },
+        { (1 << 9),  SSM_PUP_512_BLOCKS  },
+        { (1 << 10), SSM_PUP_1K_BLOCKS   },
+        { (1 << 11), SSM_PUP_2K_BLOCKS   },
+        { (1 << 12), SSM_PUP_4K_BLOCKS   },
+        { (1 << 14), SSM_PUP_16K_BLOCKS  },
+        { (1 << 16), SSM_PUP_64K_BLOCKS  },
+        { (1 << 18), SSM_PUP_256K_BLOCKS },
+        { (1 << 20), SSM_PUP_1M_BLOCKS   },
 };
 
 #define PTR_TO_OFFSET(pool_base, ptr) \
@@ -83,16 +96,23 @@ static const struct ssm_size_class_cfg ssm_sc_cfg[SSM_POOL_MAX_CLASSES] = {
 #define FETCH_SUB(ptr, val) \
         (__atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST))
 
-#define CAS(ptr, expected, desired) \
-        (__atomic_compare_exchange_n(ptr, expected, desired, false, \
-                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
-
 #define SSM_FILE_SIZE (SSM_POOL_TOTAL_SIZE + sizeof(struct _ssm_pool_hdr))
+#define SSM_GSPP_FILE_SIZE (SSM_GSPP_TOTAL_SIZE + sizeof(struct _ssm_pool_hdr))
+#define SSM_PUP_FILE_SIZE  (SSM_PUP_TOTAL_SIZE + sizeof(struct _ssm_pool_hdr))
+
+#define IS_GSPP(uid) ((uid) == SSM_GSPP_UID)
+#define GET_POOL_TOTAL_SIZE(uid) (IS_GSPP(uid) ? SSM_GSPP_TOTAL_SIZE \
+                                               : SSM_PUP_TOTAL_SIZE)
+#define GET_POOL_FILE_SIZE(uid)  (IS_GSPP(uid) ? SSM_GSPP_FILE_SIZE \
+                                               : SSM_PUP_FILE_SIZE)
+#define GET_POOL_CFG(uid)        (IS_GSPP(uid) ? ssm_gspp_cfg : ssm_pup_cfg)
 
 struct ssm_pool {
-        uint8_t *              shm_base;   /* start of blocks      */
-        struct _ssm_pool_hdr * hdr;        /* shared memory header */
-        void *                 pool_base;  /* base of the memory pool */
+        uint8_t *              shm_base;   /* start of blocks         */
+        struct _ssm_pool_hdr * hdr;        /* shared memory header    */
+        void *                 pool_base;  /* base of the memory pool */
+        uid_t                  uid;        /* user owner (0 = GSPP)   */
+        size_t                 total_size; /* total data size         */
 };
 
 static __inline__
@@ -143,17 +163,23 @@ static __inline__ void list_add_head(struct _ssm_list_head * head,
         STORE(&head->count, LOAD(&head->count) + 1);
 }
 
-static __inline__ int select_size_class(size_t len)
+static __inline__ int select_size_class(struct ssm_pool * pool,
+                                        size_t            len)
 {
         size_t sz;
         int    i;
 
+        assert(pool != NULL);
+
         /* Total space needed: header + headspace + data + tailspace */
         sz = sizeof(struct ssm_pk_buff) + SSM_PK_BUFF_HEADSPACE +
                 len + SSM_PK_BUFF_TAILSPACE;
 
         for (i = 0; i < SSM_POOL_MAX_CLASSES; i++) {
-                if (ssm_sc_cfg[i].blocks > 0 && sz <= ssm_sc_cfg[i].size)
+                struct _ssm_size_class * sc;
+
+                sc = &pool->hdr->size_classes[i];
+                if (sc->object_size > 0 && sz <= sc->object_size)
                         return i;
         }
@@ -183,15 +209,16 @@ static __inline__ int find_size_class_for_offset(struct ssm_pool * pool,
 
 static void init_size_classes(struct ssm_pool * pool)
 {
-        struct _ssm_size_class * sc;
-        struct _ssm_shard *      shard;
-        pthread_mutexattr_t      mattr;
-        pthread_condattr_t       cattr;
-        uint8_t *                region;
-        size_t                   offset;
-        int                      c;
-        int                      s;
-        size_t                   i;
+        const struct ssm_size_class_cfg * cfg;
+        struct _ssm_size_class *          sc;
+        struct _ssm_shard *               shard;
+        pthread_mutexattr_t               mattr;
+        pthread_condattr_t                cattr;
+        uint8_t *                         region;
+        size_t                            offset;
+        int                               c; /* class iterator */
+        int                               s; /* shard iterator */
+        size_t                            i;
 
         assert(pool != NULL);
@@ -199,6 +226,8 @@ static void init_size_classes(struct ssm_pool * pool)
         if (LOAD(&pool->hdr->initialized) != 0)
                 return;
 
+        cfg = GET_POOL_CFG(pool->uid);
+
         pthread_mutexattr_init(&mattr);
         pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
 #ifdef HAVE_ROBUST_MUTEX
@@ -214,15 +243,15 @@ static void init_size_classes(struct ssm_pool * pool)
         offset = 0;
 
         for (c = 0; c < SSM_POOL_MAX_CLASSES; c++) {
-                if (ssm_sc_cfg[c].blocks == 0)
+                if (cfg[c].blocks == 0)
                         continue;
 
                 sc = &pool->hdr->size_classes[c];
 
-                sc->object_size  = ssm_sc_cfg[c].size;
+                sc->object_size  = cfg[c].size;
                 sc->pool_start   = offset;
-                sc->pool_size    = ssm_sc_cfg[c].size * ssm_sc_cfg[c].blocks;
-                sc->object_count = ssm_sc_cfg[c].blocks;
+                sc->pool_size    = cfg[c].size * cfg[c].blocks;
+                sc->object_count = cfg[c].blocks;
 
                 /* Initialize all shards */
                 for (s = 0; s < SSM_POOL_SHARDS; s++) {
@@ -444,23 +473,26 @@ static ssize_t alloc_from_sc_b(struct ssm_pool * pool,
         return init_block(pool, sc, shard, blk, len, ptr, spb);
 }
 
-/* Global Shared Packet Pool */
-static char * gspp_filename(void)
+/* Generate pool filename: uid=0 for GSPP, uid>0 for PUP */
+static char * pool_filename(uid_t uid)
 {
         char * str;
         char * test_suffix;
+        char   base[64];
+
+        if (IS_GSPP(uid))
+                snprintf(base, sizeof(base), "%s", SSM_GSPP_NAME);
+        else
+                snprintf(base, sizeof(base), SSM_PUP_NAME_FMT, (int) uid);
 
         test_suffix = getenv("OUROBOROS_TEST_POOL_SUFFIX");
         if (test_suffix != NULL) {
-                str = malloc(strlen(SSM_POOL_NAME) + strlen(test_suffix) + 1);
+                str = malloc(strlen(base) + strlen(test_suffix) + 1);
                 if (str == NULL)
                         return NULL;
-                sprintf(str, "%s%s", SSM_POOL_NAME, test_suffix);
+                sprintf(str, "%s%s", base, test_suffix);
         } else {
-                str = malloc(strlen(SSM_POOL_NAME) + 1);
-                if (str == NULL)
-                        return NULL;
-                sprintf(str, "%s", SSM_POOL_NAME);
+                str = strdup(base);
         }
 
         return str;
@@ -468,9 +500,13 @@ static char * gspp_filename(void)
 
 void ssm_pool_close(struct ssm_pool * pool)
 {
+        size_t file_size;
+
         assert(pool != NULL);
 
-        munmap(pool->shm_base, SSM_FILE_SIZE);
+        file_size = GET_POOL_FILE_SIZE(pool->uid);
+
+        munmap(pool->shm_base, file_size);
 
         free(pool);
 }
@@ -481,15 +517,19 @@ void ssm_pool_destroy(struct ssm_pool * pool)
         assert(pool != NULL);
 
         if (getpid() != pool->hdr->pid && kill(pool->hdr->pid, 0) == 0) {
+                ssm_pool_close(pool);
                 free(pool);
                 return;
         }
 
-        ssm_pool_close(pool);
-
-        fn = gspp_filename();
-        if (fn == NULL)
+        fn = pool_filename(pool->uid);
+        if (fn == NULL) {
+                ssm_pool_close(pool);
+                free(pool);
                 return;
+        }
+
+        ssm_pool_close(pool);
 
         shm_unlink(fn);
         free(fn);
 }
 
 #define MM_FLAGS (PROT_READ | PROT_WRITE)
 
-static struct ssm_pool * pool_create(int flags)
+static struct ssm_pool * __pool_create(const char * name,
+                                       int          flags,
+                                       uid_t        uid,
+                                       mode_t       mode)
 {
         struct ssm_pool * pool;
         int               fd;
         uint8_t *         shm_base;
-        char *            fn;
+        size_t            file_size;
+        size_t            total_size;
 
-        fn = gspp_filename();
-        if (fn == NULL)
-                goto fail_fn;
+        file_size  = GET_POOL_FILE_SIZE(uid);
+        total_size = GET_POOL_TOTAL_SIZE(uid);
 
-        pool = malloc(sizeof *pool);
+        pool = malloc(sizeof(*pool));
         if (pool == NULL)
-                goto fail_rdrb;
+                goto fail_pool;
 
-        fd = shm_open(fn, flags, 0666);
+        fd = shm_open(name, flags, mode);
         if (fd == -1)
                 goto fail_open;
 
-        if ((flags & O_CREAT) && ftruncate(fd, SSM_FILE_SIZE) < 0)
+        if ((flags & O_CREAT) && ftruncate(fd, (off_t) file_size) < 0)
                 goto fail_truncate;
 
-        shm_base = mmap(NULL, SSM_FILE_SIZE, MM_FLAGS, MAP_SHARED, fd, 0);
+        shm_base = mmap(NULL, file_size, MM_FLAGS, MAP_SHARED, fd, 0);
         if (shm_base == MAP_FAILED)
                 goto fail_truncate;
 
-        pool->shm_base  = shm_base;
-        pool->pool_base = shm_base;
-        pool->hdr = (struct _ssm_pool_hdr *) (shm_base + SSM_POOL_TOTAL_SIZE);
+        pool->shm_base   = shm_base;
+        pool->pool_base  = shm_base;
+        pool->hdr        = (struct _ssm_pool_hdr *) (shm_base + total_size);
+        pool->uid        = uid;
+        pool->total_size = total_size;
 
         if (flags & O_CREAT)
                 pool->hdr->mapped_addr = shm_base;
 
         close(fd);
 
-        free(fn);
-
         return pool;
 
 fail_truncate:
         close(fd);
         if (flags & O_CREAT)
-                shm_unlink(fn);
+                shm_unlink(name);
 fail_open:
         free(pool);
- fail_rdrb:
-        free(fn);
- fail_fn:
+ fail_pool:
         return NULL;
 }
 
-struct ssm_pool * ssm_pool_create(void)
+struct ssm_pool * ssm_pool_create(uid_t uid,
+                                  gid_t gid)
 {
         struct ssm_pool *   pool;
+        char *              fn;
         mode_t              mask;
+        mode_t              mode;
         pthread_mutexattr_t mattr;
         pthread_condattr_t  cattr;
+        int                 fd;
 
+        fn = pool_filename(uid);
+        if (fn == NULL)
+                goto fail_fn;
+
+        mode = IS_GSPP(uid) ? 0660 : 0600;
         mask = umask(0);
 
-        pool = pool_create(O_CREAT | O_EXCL | O_RDWR);
+        pool = __pool_create(fn, O_CREAT | O_EXCL | O_RDWR, uid, mode);
 
         umask(mask);
 
         if (pool == NULL)
-                goto fail_rdrb;
+                goto fail_pool;
+
+        fd = shm_open(fn, O_RDWR, 0);
+        if (fd >= 0) {
+                fchown(fd, uid, gid);
+                fchmod(fd, mode);
+                close(fd);
+        }
 
         if (pthread_mutexattr_init(&mattr))
                 goto fail_mattr;
@@ -585,13 +642,13 @@ struct ssm_pool * ssm_pool_create(void)
                 goto fail_healthy;
 
         pool->hdr->pid = getpid();
-        /* Will be set by init_size_classes */
         STORE(&pool->hdr->initialized, 0);
 
         init_size_classes(pool);
 
         pthread_mutexattr_destroy(&mattr);
         pthread_condattr_destroy(&cattr);
+        free(fn);
 
         return pool;
@@ -602,27 +659,37 @@ struct ssm_pool * ssm_pool_create(void)
 fail_mutex:
         pthread_mutexattr_destroy(&mattr);
 fail_mattr:
-        ssm_pool_destroy(pool);
- fail_rdrb:
+        ssm_pool_close(pool);
+        shm_unlink(fn);
+ fail_pool:
+        free(fn);
+ fail_fn:
         return NULL;
 }
 
-struct ssm_pool * ssm_pool_open(void)
+struct ssm_pool * ssm_pool_open(uid_t uid)
 {
         struct ssm_pool * pool;
+        char *            fn;
+
+        fn = pool_filename(uid);
+        if (fn == NULL)
+                return NULL;
 
-        pool = pool_create(O_RDWR);
+        pool = __pool_create(fn, O_RDWR, uid, 0);
         if (pool != NULL)
                 init_size_classes(pool);
 
+        free(fn);
+
         return pool;
 }
 
-void ssm_pool_purge(void)
+void ssm_pool_gspp_purge(void)
 {
         char * fn;
 
-        fn = gspp_filename();
+        fn = pool_filename(SSM_GSPP_UID);
         if (fn == NULL)
                 return;
@@ -632,9 +699,13 @@ void ssm_pool_purge(void)
 
 int ssm_pool_mlock(struct ssm_pool * pool)
 {
+        size_t file_size;
+
         assert(pool != NULL);
 
-        return mlock(pool->shm_base, SSM_FILE_SIZE);
+        file_size = GET_POOL_FILE_SIZE(pool->uid);
+
+        return mlock(pool->shm_base, file_size);
 }
 
 ssize_t ssm_pool_alloc(struct ssm_pool * pool,
@@ -647,7 +718,7 @@ ssize_t ssm_pool_alloc(struct ssm_pool * pool,
         assert(pool != NULL);
         assert(spb != NULL);
 
-        idx = select_size_class(count);
+        idx = select_size_class(pool, count);
         if (idx >= 0)
                 return alloc_from_sc(pool, idx, count, ptr, spb);
@@ -665,7 +736,7 @@ ssize_t ssm_pool_alloc_b(struct ssm_pool * pool,
         assert(pool != NULL);
         assert(spb != NULL);
 
-        idx = select_size_class(count);
+        idx = select_size_class(pool, count);
         if (idx >= 0)
                 return alloc_from_sc_b(pool, idx, count, ptr, spb, abstime);
@@ -697,7 +768,7 @@ struct ssm_pk_buff * ssm_pool_get(struct ssm_pool * pool,
 
         assert(pool != NULL);
 
-        if (off == 0 || off >= (size_t) SSM_POOL_TOTAL_SIZE)
+        if (off == 0 || off >= pool->total_size)
                 return NULL;
 
         blk = OFFSET_TO_PTR(pool->pool_base, off);
@@ -722,7 +793,7 @@ int ssm_pool_remove(struct ssm_pool * pool,
 
         assert(pool != NULL);
 
-        if (off == 0 || off >= SSM_POOL_TOTAL_SIZE)
+        if (off == 0 || off >= pool->total_size)
                 return -EINVAL;
 
         blk = OFFSET_TO_PTR(pool->pool_base, off);
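With the uid-aware API above, a pool's lifecycle looks roughly like the following hedged sketch: a privileged daemon creates a PUP whose backing shm file is chown()ed to the user (mode 0600, versus 0660 for the group-shared GSPP), and the user's application opens it by uid. pup_lifecycle() is a hypothetical driver; the four ssm_pool_* signatures are the ones this diff introduces:

#include <sys/types.h>

void pup_lifecycle(uid_t user, gid_t group)
{
        /* Daemon side: creates /dev/shm file owned by (user, group). */
        struct ssm_pool * pup = ssm_pool_create(user, group);

        /* Application side, running as `user`: */
        struct ssm_pool * mine = ssm_pool_open(user);

        ssm_pool_close(mine);   /* unmaps, keeps the shm file        */
        ssm_pool_destroy(pup);  /* unmaps and unlinks the shm file   */
}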
diff --git a/src/lib/ssm/ssm.h.in b/src/lib/ssm/ssm.h.in
index d14cd49c..b9246c8b 100644
--- a/src/lib/ssm/ssm.h.in
+++ b/src/lib/ssm/ssm.h.in
@@ -30,8 +30,9 @@
 
 /* Pool naming configuration */
 #define SSM_PREFIX            "@SSM_PREFIX@"
-#define SSM_GSMP_SUFFIX       "@SSM_GSMP_SUFFIX@"
-#define SSM_PPP_SUFFIX        "@SSM_PPP_SUFFIX@"
+#define SSM_GSPP_NAME         "@SSM_GSPP_NAME@"
+#define SSM_PUP_NAME_FMT      "@SSM_PUP_NAME_FMT@"
+#define SSM_GSPP_UID          0
 
 /* Legacy SSM constants */
 #define SSM_RBUFF_PREFIX      "@SSM_RBUFF_PREFIX@"
@@ -44,7 +45,31 @@
 #define SSM_PK_BUFF_HEADSPACE @SSM_PK_BUFF_HEADSPACE@
 #define SSM_PK_BUFF_TAILSPACE @SSM_PK_BUFF_TAILSPACE@
 
-/* Pool blocks per size class */
+/* Global Shared Packet Pool (GSPP) - for privileged processes */
+#define SSM_GSPP_256_BLOCKS   @SSM_GSPP_256_BLOCKS@
+#define SSM_GSPP_512_BLOCKS   @SSM_GSPP_512_BLOCKS@
+#define SSM_GSPP_1K_BLOCKS    @SSM_GSPP_1K_BLOCKS@
+#define SSM_GSPP_2K_BLOCKS    @SSM_GSPP_2K_BLOCKS@
+#define SSM_GSPP_4K_BLOCKS    @SSM_GSPP_4K_BLOCKS@
+#define SSM_GSPP_16K_BLOCKS   @SSM_GSPP_16K_BLOCKS@
+#define SSM_GSPP_64K_BLOCKS   @SSM_GSPP_64K_BLOCKS@
+#define SSM_GSPP_256K_BLOCKS  @SSM_GSPP_256K_BLOCKS@
+#define SSM_GSPP_1M_BLOCKS    @SSM_GSPP_1M_BLOCKS@
+#define SSM_GSPP_TOTAL_SIZE   @SSM_GSPP_TOTAL_SIZE@
+
+/* Per-User Pool (PUP) - for unprivileged applications */
+#define SSM_PUP_256_BLOCKS    @SSM_PUP_256_BLOCKS@
+#define SSM_PUP_512_BLOCKS    @SSM_PUP_512_BLOCKS@
+#define SSM_PUP_1K_BLOCKS     @SSM_PUP_1K_BLOCKS@
+#define SSM_PUP_2K_BLOCKS     @SSM_PUP_2K_BLOCKS@
+#define SSM_PUP_4K_BLOCKS     @SSM_PUP_4K_BLOCKS@
+#define SSM_PUP_16K_BLOCKS    @SSM_PUP_16K_BLOCKS@
+#define SSM_PUP_64K_BLOCKS    @SSM_PUP_64K_BLOCKS@
+#define SSM_PUP_256K_BLOCKS   @SSM_PUP_256K_BLOCKS@
+#define SSM_PUP_1M_BLOCKS     @SSM_PUP_1M_BLOCKS@
+#define SSM_PUP_TOTAL_SIZE    @SSM_PUP_TOTAL_SIZE@
+
+/* Legacy pool blocks (same as GSPP for compatibility) */
 #define SSM_POOL_256_BLOCKS   @SSM_POOL_256_BLOCKS@
 #define SSM_POOL_512_BLOCKS   @SSM_POOL_512_BLOCKS@
 #define SSM_POOL_1K_BLOCKS    @SSM_POOL_1K_BLOCKS@
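
pool.c above calls IS_GSPP(), GET_POOL_FILE_SIZE() and GET_POOL_TOTAL_SIZE(), whose definitions fall outside the hunks shown. A plausible sketch of their shape, inferred from the constants above and from the pool header being placed at shm_base + total_size; the committed definitions may differ:

/* Assumed shape of the selection macros used by pool.c (not shown
 * in this diff); the header-size arithmetic is a guess. */
#define IS_GSPP(uid) ((uid) == SSM_GSPP_UID)

#define GET_POOL_TOTAL_SIZE(uid)                                 \
        (IS_GSPP(uid) ? (size_t) SSM_GSPP_TOTAL_SIZE             \
                      : (size_t) SSM_PUP_TOTAL_SIZE)

#define GET_POOL_FILE_SIZE(uid)                                  \
        (GET_POOL_TOTAL_SIZE(uid) + sizeof(struct _ssm_pool_hdr))
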
diff --git a/src/lib/ssm/tests/CMakeLists.txt b/src/lib/ssm/tests/CMakeLists.txt
index 827f8bf8..5cac70d1 100644
--- a/src/lib/ssm/tests/CMakeLists.txt
+++ b/src/lib/ssm/tests/CMakeLists.txt
@@ -18,16 +18,5 @@ target_link_libraries(${PARENT_DIR}_test ouroboros-common)
 
 add_dependencies(build_tests ${PARENT_DIR}_test)
 
-set(tests_to_run ${${PARENT_DIR}_tests})
-if(CMAKE_VERSION VERSION_LESS "3.29.0")
-    remove(tests_to_run test_suite.c)
-else ()
-    list(POP_FRONT tests_to_run)
-endif()
-
-foreach (test ${tests_to_run})
-    get_filename_component(test_name ${test} NAME_WE)
-    add_test(${TEST_PREFIX}/${test_name} ${C_TEST_PATH}/${PARENT_DIR}_test ${test_name})
-    set_property(TEST ${TEST_PREFIX}/${test_name}
-        PROPERTY ENVIRONMENT "OUROBOROS_TEST_POOL_SUFFIX=.test")
-endforeach (test)
+ouroboros_register_tests(TARGET ${PARENT_DIR}_test TESTS ${${PARENT_DIR}_tests}
+    ENVIRONMENT "OUROBOROS_TEST_POOL_SUFFIX=.test")
diff --git a/src/lib/ssm/tests/pool_sharding_test.c b/src/lib/ssm/tests/pool_sharding_test.c
index 72ae1cb7..46eecd8d 100644
--- a/src/lib/ssm/tests/pool_sharding_test.c
+++ b/src/lib/ssm/tests/pool_sharding_test.c
@@ -54,6 +54,7 @@ static struct _ssm_pool_hdr * get_pool_hdr(struct ssm_pool * pool)
          */
         struct _ssm_pool_hdr ** hdr_ptr =
                 (struct _ssm_pool_hdr **)((uint8_t *)pool + sizeof(void *));
+
         return *hdr_ptr;
 }
 
@@ -67,9 +68,7 @@ static int test_lazy_distribution(void)
 
         TEST_START();
 
-        ssm_pool_purge();
-
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL) {
                 printf("Failed to create pool.\n");
                 goto fail;
@@ -142,9 +141,7 @@ static int test_shard_migration(void)
 
         TEST_START();
 
-        ssm_pool_purge();
-
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL) {
                 printf("Failed to create pool.\n");
                 goto fail;
@@ -216,9 +213,7 @@ static int test_fallback_stealing(void)
 
         TEST_START();
 
-        ssm_pool_purge();
-
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL) {
                 printf("Failed to create pool.\n");
                 goto fail;
@@ -331,9 +326,7 @@ static int test_multiprocess_sharding(void)
 
         TEST_START();
 
-        ssm_pool_purge();
-
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL) {
                 printf("Failed to create pool.\n");
                 goto fail;
@@ -355,7 +348,7 @@ static int test_multiprocess_sharding(void)
                 ssize_t off;
                 int     my_shard;
 
-                child_pool = ssm_pool_open();
+                child_pool = ssm_pool_open(0);
                 if (child_pool == NULL)
                         exit(EXIT_FAILURE);
 
@@ -449,9 +442,7 @@ static int test_exhaustion_with_fallback(void)
 
         TEST_START();
 
-        ssm_pool_purge();
-
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL) {
                 printf("Failed to create pool.\n");
                 goto fail;
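
test_multiprocess_sharding() above relies on a create-in-parent, open-in-child pattern. The same pattern in isolation, as a simplified sketch (payload size, includes and error handling are illustrative; the pool API header is assumed as before):

#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

static int parent_child_roundtrip(void)
{
        struct ssm_pool *    pool;
        struct ssm_pk_buff * spb;
        ssize_t              off;
        pid_t                pid;
        int                  status;

        pool = ssm_pool_create(0, getgid());
        if (pool == NULL)
                return -1;

        off = ssm_pool_alloc(pool, 64, NULL, &spb);
        if (off < 0) {
                ssm_pool_destroy(pool);
                return -1;
        }

        pid = fork();
        if (pid == 0) { /* child: map the same pool, look up the buffer */
                struct ssm_pool * child = ssm_pool_open(0);
                if (child == NULL)
                        exit(EXIT_FAILURE);
                if (ssm_pool_get(child, (size_t) off) == NULL)
                        exit(EXIT_FAILURE);
                ssm_pool_close(child);
                exit(EXIT_SUCCESS);
        }

        waitpid(pid, &status, 0);

        ssm_pool_remove(pool, (size_t) off);
        ssm_pool_destroy(pool);

        return WIFEXITED(status) && WEXITSTATUS(status) == 0 ? 0 : -1;
}
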
diff --git a/src/lib/ssm/tests/pool_test.c b/src/lib/ssm/tests/pool_test.c
index e298d9ab..53f7f541 100644
--- a/src/lib/ssm/tests/pool_test.c
+++ b/src/lib/ssm/tests/pool_test.c
@@ -61,7 +61,7 @@ static int test_ssm_pool_basic_allocation(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -119,7 +119,7 @@ static int test_ssm_pool_multiple_allocations(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -212,7 +212,7 @@ static int test_ssm_pool_no_fallback_for_large(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -248,7 +248,7 @@ static int test_ssm_pool_blocking_vs_nonblocking(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -295,7 +295,7 @@ static int test_ssm_pool_stress_test(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -392,7 +392,7 @@ static int test_ssm_pool_open_initializes_ssm(void)
 
         TEST_START();
 
-        creator = ssm_pool_create();
+        creator = ssm_pool_create(0, getgid());
         if (creator == NULL)
                 goto fail_create;
@@ -403,7 +403,7 @@ static int test_ssm_pool_open_initializes_ssm(void)
         }
         ssm_pool_remove(creator, ret);
 
-        opener = ssm_pool_open();
+        opener = ssm_pool_open(0);
         if (opener == NULL) {
                 printf("Open failed.\n");
                 goto fail_creator;
@@ -439,7 +439,7 @@ static int test_ssm_pool_bounds_checking(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -502,7 +502,7 @@ static int test_ssm_pool_inter_process_communication(void)
 
         len = strlen(msg) + 1;
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -606,7 +606,7 @@ static int test_ssm_pool_read_operation(void)
 
         len = strlen(data) + 1;
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -656,7 +656,7 @@ static int test_ssm_pool_mlock_operation(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -690,7 +690,7 @@ static int test_ssm_pk_buff_operations(void)
 
         dlen = strlen(data);
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -798,7 +798,7 @@ static int test_ssm_pool_size_class_boundaries(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -859,7 +859,7 @@ static int test_ssm_pool_exhaustion(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -937,7 +937,7 @@ static int test_ssm_pool_reclaim_orphans(void)
 
         TEST_START();
 
-        pool = ssm_pool_create();
+        pool = ssm_pool_create(0, getgid());
         if (pool == NULL)
                 goto fail_create;
@@ -1017,8 +1017,6 @@ int pool_test(int argc,
         (void) argc;
         (void) argv;
 
-        ssm_pool_purge();
-
         ret |= test_ssm_pool_basic_allocation();
         ret |= test_ssm_pool_multiple_allocations();
         ret |= test_ssm_pool_no_fallback_for_large();
diff --git a/src/lib/tests/CMakeLists.txt b/src/lib/tests/CMakeLists.txt
index d2535bc2..23d01f9b 100644
--- a/src/lib/tests/CMakeLists.txt
+++ b/src/lib/tests/CMakeLists.txt
@@ -28,20 +28,4 @@ target_link_libraries(${PARENT_DIR}_test ouroboros-common)
 
 add_dependencies(build_tests ${PARENT_DIR}_test)
 
-set(tests_to_run ${${PARENT_DIR}_tests})
-if(CMAKE_VERSION VERSION_LESS "3.29.0")
-    remove(tests_to_run test_suite.c)
-else ()
-    list(POP_FRONT tests_to_run)
-endif()
-
-foreach (test ${tests_to_run})
-    get_filename_component(test_name ${test} NAME_WE)
-    add_test(${TEST_PREFIX}/${test_name} ${CMAKE_CURRENT_BINARY_DIR}/${PARENT_DIR}_test ${test_name})
-endforeach (test)
-
-set_property(TEST ${TEST_PREFIX}/auth_test PROPERTY SKIP_RETURN_CODE 1)
-set_property(TEST ${TEST_PREFIX}/auth_test_pqc PROPERTY SKIP_RETURN_CODE 1)
-set_property(TEST ${TEST_PREFIX}/crypt_test PROPERTY SKIP_RETURN_CODE 1)
-set_property(TEST ${TEST_PREFIX}/kex_test PROPERTY SKIP_RETURN_CODE 1)
-set_property(TEST ${TEST_PREFIX}/kex_test_pqc PROPERTY SKIP_RETURN_CODE 1)
+ouroboros_register_tests(TARGET ${PARENT_DIR}_test TESTS ${${PARENT_DIR}_tests})
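
Both test_ssm_pool_blocking_vs_nonblocking() above and the timerwheel hunks below pick between ssm_pool_alloc() and ssm_pool_alloc_b(). A sketch of a deadline-bounded fallback between the two, assuming the blocking variant takes an absolute CLOCK_REALTIME timespec; the clock choice and the 100 ms budget are assumptions, not taken from this commit:

#include <time.h>

static ssize_t alloc_within(struct ssm_pool *     pool,
                            size_t                len,
                            struct ssm_pk_buff ** spb)
{
        struct timespec abstime;
        ssize_t         off;

        /* Fast path: fail immediately if the size class is empty. */
        off = ssm_pool_alloc(pool, len, NULL, spb);
        if (off >= 0)
                return off;

        /* Slow path: block up to ~100 ms for a freed block. */
        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_nsec += 100 * 1000 * 1000;
        if (abstime.tv_nsec >= 1000000000L) {
                abstime.tv_sec  += 1;
                abstime.tv_nsec -= 1000000000L;
        }

        return ssm_pool_alloc_b(pool, len, NULL, spb, &abstime);
}
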
diff --git a/src/lib/timerwheel.c b/src/lib/timerwheel.c
index eaa684e5..ed235047 100644
--- a/src/lib/timerwheel.c
+++ b/src/lib/timerwheel.c
@@ -173,7 +173,7 @@ static void timerwheel_move(void)
                 snd_cr = &r->frcti->snd_cr;
                 rcv_cr = &r->frcti->rcv_cr;
 
-                f = &ai.flows[r->fd];
+                f = &proc.flows[r->fd];
 #ifndef RXM_BUFFER_ON_HEAP
                 ssm_pk_buff_ack(r->spb);
 #endif
@@ -226,7 +226,7 @@ static void timerwheel_move(void)
 #ifdef RXM_BLOCKING
                 if (ipcp_spb_reserve(&spb, r->len) < 0)
 #else
-                if (ssm_pool_alloc(ai.gspp, r->len, NULL,
+                if (ssm_pool_alloc(proc.pool, r->len, NULL,
                                    &spb) < 0)
 #endif
                         goto reschedule; /* rdrbuff full */
@@ -288,7 +288,7 @@ static void timerwheel_move(void)
 
                 list_del(&a->next);
 
-                f = &ai.flows[a->fd];
+                f = &proc.flows[a->fd];
 
                 rw.map[j & (ACKQ_SLOTS - 1)][a->fd] = false;
 
@@ -341,7 +341,7 @@ static int timerwheel_rxm(struct frcti * frcti,
         slot = r->t0 >> RXMQ_RES;
 
         r->fd      = frcti->fd;
-        r->flow_id = ai.flows[r->fd].info.id;
+        r->flow_id = proc.flows[r->fd].info.id;
 
         pthread_rwlock_unlock(&r->frcti->lock);
 
@@ -394,7 +394,7 @@ static int timerwheel_delayed_ack(int fd,
 
         a->fd      = fd;
         a->frcti   = frcti;
-        a->flow_id = ai.flows[fd].info.id;
+        a->flow_id = proc.flows[fd].info.id;
 
         pthread_mutex_lock(&rw.lock);
diff --git a/src/lib/utils.c b/src/lib/utils.c
index 74f8ce4f..cfddec62 100644
--- a/src/lib/utils.c
+++ b/src/lib/utils.c
@@ -20,11 +20,15 @@
  *   Foundation, Inc., http://www.fsf.org/about/contact/.
  */
 
-#define _POSIX_C_SOURCE 200809L
+#define _DEFAULT_SOURCE
+
+#include "config.h"
 
 #include <ouroboros/utils.h>
 
 #include <ctype.h>
+#include <grp.h>
+#include <pwd.h>
 #include <stdlib.h>
 #include <string.h>
@@ -138,5 +142,63 @@ char ** argvdup(char ** argv)
         }
 
         argv_dup[argc] = NULL;
+
         return argv_dup;
 }
+
+bool is_ouroboros_member_uid(uid_t uid)
+{
+        struct group *  grp;
+        struct passwd * pw;
+        gid_t           gid;
+        gid_t *         groups = NULL;
+        int             ngroups;
+        int             i;
+
+        /* Root is always privileged */
+        if (uid == 0)
+                return true;
+
+        grp = getgrnam("ouroboros");
+        if (grp == NULL)
+                return false;
+
+        gid = grp->gr_gid;
+
+        pw = getpwuid(uid);
+        if (pw == NULL)
+                return false;
+
+        if (pw->pw_gid == gid)
+                return true;
+
+        ngroups = 0;
+        getgrouplist(pw->pw_name, pw->pw_gid, NULL, &ngroups);
+        if (ngroups <= 0)
+                return false;
+
+        groups = malloc(ngroups * sizeof(*groups));
+        if (groups == NULL)
+                return false;
+
+        if (getgrouplist(pw->pw_name, pw->pw_gid, groups, &ngroups) < 0) {
+                free(groups);
+                return false;
+        }
+
+        for (i = 0; i < ngroups; i++) {
+                if (groups[i] == gid) {
+                        free(groups);
+                        return true;
+                }
+        }
+
+        free(groups);
+
+        return false;
+}
+
+bool is_ouroboros_member(void)
+{
+        return is_ouroboros_member_uid(getuid());
+}
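
is_ouroboros_member_uid() is the privilege test behind the GSPP/PUP split: members of the ouroboros group (and root) qualify for the global pool, everyone else falls back to a per-user pool. A hypothetical caller; pool_uid_for() is an invented name, not part of this commit:

static uid_t pool_uid_for(uid_t uid)
{
        /* ouroboros group members share the GSPP; others get a PUP. */
        return is_ouroboros_member_uid(uid) ? SSM_GSPP_UID : uid;
}
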
