author    | Dimitri Staessens <dimitri@ouroboros.rocks> | 2023-08-28 12:29:00 +0200
committer | Sander Vrijders <sander@ouroboros.rocks>    | 2023-08-30 10:33:20 +0200
commit    | 08332eefba9aa4b08d00e190720de4771081e855 (patch)
tree      | 9e6b4dbf920b3813c696f79c6f2b926f976402b8 /src/ipcpd/ipcp.c
parent    | 870e3fdfaee4991592cc29e90767abd0e9fba43b (diff)
ipcpd: Move alloc race mitigation to common source
All flow allocator code duplicated the mitigation for a race in which
the IRMd response for the flow allocation, carrying a new flow fd,
could arrive before the response to the flow_req_arr. This mitigation
is now moved to the common ipcp source.
Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks>
Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
Diffstat (limited to 'src/ipcpd/ipcp.c')
-rw-r--r-- | src/ipcpd/ipcp.c | 77
1 file changed, 77 insertions, 0 deletions
diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c
index f40c70e6..eb83f3cc 100644
--- a/src/ipcpd/ipcp.c
+++ b/src/ipcpd/ipcp.c
@@ -35,6 +35,7 @@
 
 #define OUROBOROS_PREFIX "ipcpd/ipcp"
 #define IPCP_INFO        "info"
+#define ALLOC_TIMEOUT    10 * MILLION /* 10 ms */
 
 #include <ouroboros/hash.h>
 #include <ouroboros/logs.h>
@@ -256,6 +257,82 @@ static void * acceptloop(void * o)
         return (void *) 0;
 }
 
+int ipcp_wait_flow_req_arr(const uint8_t * dst,
+                           qosspec_t       qs,
+                           time_t          mpl,
+                           const void *    data,
+                           size_t          len)
+{
+        struct timespec ts = {0, ALLOC_TIMEOUT};
+        struct timespec abstime;
+        int             fd;
+
+        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+
+        pthread_mutex_lock(&ipcpi.alloc_lock);
+
+        while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
+                ts_add(&abstime, &ts, &abstime);
+                pthread_cond_timedwait(&ipcpi.alloc_cond,
+                                       &ipcpi.alloc_lock,
+                                       &abstime);
+        }
+
+        if (ipcp_get_state() != IPCP_OPERATIONAL) {
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
+                log_err("Won't allocate over non-operational IPCP.");
+                return -EIPCPSTATE;
+        }
+
+        assert(ipcpi.alloc_id == -1);
+
+        fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, mpl, data, len);
+        if (fd < 0) {
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
+                log_err("Failed to get fd for flow.");
+                return -ENOTALLOC;
+        }
+
+        ipcpi.alloc_id = fd;
+        pthread_cond_broadcast(&ipcpi.alloc_cond);
+
+        pthread_mutex_unlock(&ipcpi.alloc_lock);
+
+        return fd;
+
+}
+
+int ipcp_wait_flow_resp(const int fd)
+{
+        struct timespec ts = {0, ALLOC_TIMEOUT};
+        struct timespec abstime;
+
+        clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+
+        pthread_mutex_lock(&ipcpi.alloc_lock);
+
+        while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
+                ts_add(&abstime, &ts, &abstime);
+                pthread_cond_timedwait(&ipcpi.alloc_cond,
+                                       &ipcpi.alloc_lock,
+                                       &abstime);
+        }
+
+        if (ipcp_get_state() != IPCP_OPERATIONAL) {
+                pthread_mutex_unlock(&ipcpi.alloc_lock);
+                return -1;
+        }
+
+        assert(ipcpi.alloc_id == fd);
+
+        ipcpi.alloc_id = -1;
+        pthread_cond_broadcast(&ipcpi.alloc_cond);
+
+        pthread_mutex_unlock(&ipcpi.alloc_lock);
+
+        return 0;
+}
+
 static void free_msg(void * o)
 {
         ipcp_msg__free_unpacked((ipcp_msg_t *) o, NULL);
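For illustration only, a minimal caller-side sketch of how a flow allocator might use the two helpers added by this patch instead of open-coding the wait on ipcpi.alloc_id. The fa_handle_* names, the include, and the surrounding logic are assumptions, not code from the Ouroboros tree; only ipcp_wait_flow_req_arr() and ipcp_wait_flow_resp() come from src/ipcpd/ipcp.c above.

```c
#include <stdint.h>
#include <stddef.h>

#include "ipcp.h"  /* assumed: the ipcpd common header declaring the helpers */

/* A flow request arrives from a remote peer: raise it to the IRMd.
 * ipcp_wait_flow_req_arr() waits until any previous allocation
 * handshake has finished, calls ipcp_flow_req_arr() and publishes the
 * new fd in ipcpi.alloc_id before returning it. */
static int fa_handle_flow_req(const uint8_t * dst_hash,
                              qosspec_t       qs,
                              time_t          mpl,
                              const void *    data,
                              size_t          len)
{
        int fd;

        fd = ipcp_wait_flow_req_arr(dst_hash, qs, mpl, data, len);
        if (fd < 0)
                return fd; /* -EIPCPSTATE or -ENOTALLOC */

        /* ... store fd in the flow allocator's own state ... */

        return fd;
}

/* The IRMd responds to the flow allocation for fd: wait until the
 * request path above has published that fd in ipcpi.alloc_id (this is
 * the race the commit message describes), then clear it so the next
 * allocation can proceed. */
static int fa_handle_alloc_resp(int fd)
{
        if (ipcp_wait_flow_resp(fd) < 0)
                return -1; /* IPCP no longer operational */

        /* ... continue with the allocator's response handling ... */

        return 0;
}
```

Both helpers poll the condition in ALLOC_TIMEOUT (10 ms) steps rather than blocking indefinitely, so a caller also returns promptly when the IPCP leaves IPCP_OPERATIONAL, e.g. during shutdown.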