| author | Dimitri Staessens <dimitri@ouroboros.rocks> | 2022-03-27 11:09:43 +0200 |
|---|---|---|
| committer | Sander Vrijders <sander@ouroboros.rocks> | 2022-03-30 15:05:05 +0200 |
| commit | 02b3893b1ec392f1b3ca030a03267c31eb1dc290 (patch) | |
| tree | f7cebdb5ef2c4994bc1e675e838bc8922cbae950 /src/lib/dev.c | |
| parent | 56654f2cd1813d87d32695f126939bbfaad52385 (diff) | |
lib: Add np1_flow_read and np1_flow_write calls
Reading from and writing to (N + 1) flows from the IPCP used a raw QoS flow
to bypass some functions in the ipcp_flow_read call, but that call was
broken for keepalive packets: fixing ipcp_flow_read for (N - 1) flows
causes the IPCPs to drop 0-byte keepalive packets coming from (N + 1)
client flows.
From now on, there are dedicated calls for (N + 1) reads/writes from the
IPCPs, which are more efficient and cleaner. The internal QoS of an (N + 1)
flow now also defaults to the qos_np1 qosspec, instead of tampering with
the qosspec requested by the (N + 1) client.
Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks>
Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
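
As an aside on the qos_np1 default mentioned in the message: here is a
minimal sketch of what such a qosspec could look like, assuming nothing
beyond the two qosspec_t fields the removed code used to clear (cypher_s
and in_order, both visible in the diff below). The actual qos_np1
definition is not part of this file and may set other fields as well.

```c
/* Hypothetical sketch, NOT the definition from the tree: a qosspec_t
 * default for internal (N + 1) flows that zeroes exactly the two
 * fields the old code cleared on the client's requested qosspec. */
static const qosspec_t qos_np1 = {
        .cypher_s = 0, /* no encryption context for np1 */
        .in_order = 0, /* no FRCT for np1 */
};
```

With a dedicated default like this, the qosspec the (N + 1) client asked
for is left untouched.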
Diffstat (limited to 'src/lib/dev.c')
| -rw-r--r-- | src/lib/dev.c | 91 |

1 file changed, 80 insertions(+), 11 deletions(-)
diff --git a/src/lib/dev.c b/src/lib/dev.c
index ac885711..b3e9c69e 100644
--- a/src/lib/dev.c
+++ b/src/lib/dev.c
@@ -38,6 +38,7 @@
 #include <ouroboros/sockets.h>
 #include <ouroboros/fccntl.h>
 #include <ouroboros/bitmap.h>
+#include <ouroboros/np1_flow.h>
 #include <ouroboros/pthread.h>
 #include <ouroboros/random.h>
 #include <ouroboros/shm_flow_set.h>
@@ -1330,7 +1331,7 @@ ssize_t flow_read(int fd,
 
         idx = flow_rx_sdb(flow, &sdb, block, &tictime);
         if (idx < 0) {
-                if (idx != -ETIMEDOUT)
+                if (idx != -ETIMEDOUT && idx != -EAGAIN)
                         return idx;
 
                 if (abstime != NULL
@@ -1740,12 +1741,9 @@ ssize_t fevent(struct flow_set * set,
 /* ipcp-dev functions. */
 
 int np1_flow_alloc(pid_t n_pid,
-                   int   flow_id,
-                   qosspec_t qs)
+                   int   flow_id)
 {
-        qs.cypher_s = 0; /* No encryption ctx for np1 */
-        qs.in_order = 0; /* No frct for np1 */
-        return flow_init(flow_id, n_pid, qs, NULL, 0);
+        return flow_init(flow_id, n_pid, qos_np1, NULL, 0);
 }
 
 int np1_flow_dealloc(int flow_id,
@@ -1855,9 +1853,7 @@ int ipcp_flow_req_arr(const uint8_t * dst,
                 return -1;
         }
 
-        qs.cypher_s = 0; /* No encryption ctx for np1 */
-        qs.in_order = 0; /* No frct for np1 */
-        fd = flow_init(recv_msg->flow_id, recv_msg->pid, qs, NULL, 0);
+        fd = flow_init(recv_msg->flow_id, recv_msg->pid, qos_np1, NULL, 0);
 
         irm_msg__free_unpacked(recv_msg, NULL);
 
@@ -1928,8 +1924,14 @@ int ipcp_flow_read(int fd,
                 pthread_rwlock_unlock(&ai.lock);
 
                 idx = flow_rx_sdb(flow, sdb, false, NULL);
-                if (idx < 0)
+                if (idx < 0) {
+                        if (idx == -EAGAIN) {
+                                pthread_rwlock_rdlock(&ai.lock);
+                                continue;
+                        }
+
                         return idx;
+                }
 
                 pthread_rwlock_rdlock(&ai.lock);
 
@@ -1964,7 +1966,74 @@ int ipcp_flow_write(int fd,
                 return -EPERM;
         }
 
-        ret = flow_tx_sdb(flow, sdb, false, NULL);
+        pthread_rwlock_unlock(&ai.lock);
+
+        ret = flow_tx_sdb(flow, sdb, true, NULL);
+
+        return ret;
+}
+
+int np1_flow_read(int                   fd,
+                  struct shm_du_buff ** sdb)
+{
+        struct flow * flow;
+        ssize_t       idx = -1;
+
+        assert(fd >= 0 && fd < SYS_MAX_FLOWS);
+        assert(sdb);
+
+        flow = &ai.flows[fd];
+
+        assert(flow->flow_id >= 0);
+
+        pthread_rwlock_rdlock(&ai.lock);
+
+        idx = shm_rbuff_read(flow->rx_rb);
+        if (idx < 0) {
+                pthread_rwlock_unlock(&ai.lock);
+                return idx;
+        }
+
+        pthread_rwlock_unlock(&ai.lock);
+
+        *sdb = shm_rdrbuff_get(ai.rdrb, idx);
+
+        return 0;
+}
+
+int np1_flow_write(int                  fd,
+                   struct shm_du_buff * sdb)
+{
+        struct flow * flow;
+        int           ret;
+        ssize_t       idx;
+
+        assert(fd >= 0 && fd < SYS_MAX_FLOWS);
+        assert(sdb);
+
+        flow = &ai.flows[fd];
+
+        pthread_rwlock_rdlock(&ai.lock);
+
+        if (flow->flow_id < 0) {
+                pthread_rwlock_unlock(&ai.lock);
+                return -ENOTALLOC;
+        }
+
+        if ((flow->oflags & FLOWFACCMODE) == FLOWFRDONLY) {
+                pthread_rwlock_unlock(&ai.lock);
+                return -EPERM;
+        }
+
+        pthread_rwlock_unlock(&ai.lock);
+
+        idx = shm_du_buff_get_idx(sdb);
+
+        ret = shm_rbuff_write_b(flow->tx_rb, idx, NULL);
+        if (ret < 0)
+                shm_rdrbuff_remove(ai.rdrb, idx);
+        else
+                shm_flow_set_notify(flow->set, flow->flow_id, FLOW_PKT);
 
         return ret;
 }
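
To illustrate how the two new calls fit together, here is a hedged sketch
of an IPCP-side loop that echoes packets on a single (N + 1) flow. Only
np1_flow_read(), np1_flow_write() and struct shm_du_buff come from the
patch above; the serve_np1_flow() wrapper is hypothetical, and the bare
retry on a failed read stands in for the fevent()-driven wakeup a real
IPCP would use.

```c
#include <ouroboros/np1_flow.h>

/* Illustrative only: echo every packet arriving on one (N + 1) flow.
 * np1_flow_read() is non-blocking (it returns the negative index from
 * shm_rbuff_read() when the rx ring buffer is empty), so a real IPCP
 * would sleep on its flow set instead of spinning like this. */
static int serve_np1_flow(int fd)
{
        struct shm_du_buff * sdb;

        for (;;) {
                if (np1_flow_read(fd, &sdb) < 0)
                        continue; /* nothing to read yet */

                /* 0-byte keepalives from the (N + 1) client arrive
                 * here too, as empty du buffs, instead of being
                 * dropped inside ipcp_flow_read. */

                /* Blocking write; on failure np1_flow_write() has
                 * already released the buffer via shm_rdrbuff_remove. */
                if (np1_flow_write(fd, sdb) < 0)
                        return -1;
        }
}
```

Note the split in blocking behaviour, which mirrors the diff: the read
side uses shm_rbuff_read() and returns immediately, while the write side
uses shm_rbuff_write_b() and blocks until the (N + 1) tx ring buffer has
room.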