author     dimitri staessens <dimitri.staessens@ugent.be>    2017-03-21 15:21:52 +0100
committer  dimitri staessens <dimitri.staessens@ugent.be>    2017-03-21 15:21:52 +0100
commit     0c8d8e419f7c9052d88bb9765bf43b01187977d0
tree       75f499a09782dd03b9d61e69166b659f9d09f698
parent     55c8721cd2682a360c3eaeb6c51ef3455c320416
lib: Add queued SDUs when adding fd to flow_set
This solves some race conditions where packets arrive on a flow before
the flow is added to a flow_set: notifications for the SDUs already
queued on the flow's ring buffer would otherwise never be raised on the
set.
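The pattern behind the fix can be shown in isolation. The sketch below is not Ouroboros code: `struct queue`, `notify()` and `register_rx()` are hypothetical stand-ins for the shared ring buffer, `shm_flow_set_notify()` and `flow_set_add()`. It illustrates why a late-registered receiver has to replay one notification per SDU that is already queued:

```c
/*
 * Minimal sketch (not Ouroboros code) of the pattern this commit
 * applies: when a receiver is registered late, replay one notification
 * per element already queued so that none are missed.
 */
#include <pthread.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
        size_t          queued;   /* SDUs already in the ring buffer  */
        size_t          notified; /* notifications visible to the set */
};

/* analogue of shm_flow_set_notify(): signal one queued SDU;
 * called with q->lock held */
static void notify(struct queue * q)
{
        ++q->notified;
}

/* analogue of flow_set_add(): register the receiver and replay a
 * notification for every SDU that arrived before registration */
static void register_rx(struct queue * q)
{
        size_t sdus;
        size_t i;

        pthread_mutex_lock(&q->lock);

        sdus = q->queued; /* analogue of shm_rbuff_queued() */
        for (i = 0; i < sdus; i++)
                notify(q);

        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, 3, 0 };

        /* 3 SDUs arrived before the fd joined the flow_set ... */
        register_rx(&q);

        /* ... yet all 3 are now visible to the event loop. */
        printf("queued %zu, notified %zu\n", q.queued, q.notified);

        return 0;
}
```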
-rw-r--r--  include/ouroboros/shm_rbuff.h |  2 ++
-rw-r--r--  src/lib/dev.c                 |  6 ++++++
-rw-r--r--  src/lib/shm_rbuff.c           | 20 ++++++++++++++++++++
3 files changed, 28 insertions(+), 0 deletions(-)
```diff
diff --git a/include/ouroboros/shm_rbuff.h b/include/ouroboros/shm_rbuff.h
index f31dab63..8471f47f 100644
--- a/include/ouroboros/shm_rbuff.h
+++ b/include/ouroboros/shm_rbuff.h
@@ -53,4 +53,6 @@ ssize_t shm_rbuff_read(struct shm_rbuff * rb);
 ssize_t shm_rbuff_read_b(struct shm_rbuff *      rb,
                          const struct timespec * timeout);
 
+size_t shm_rbuff_queued(struct shm_rbuff * rb);
+
 #endif /* OUROBOROS_SHM_RBUFF_H */
diff --git a/src/lib/dev.c b/src/lib/dev.c
index 9ddc5b84..bd706dc8 100644
--- a/src/lib/dev.c
+++ b/src/lib/dev.c
@@ -1029,6 +1029,8 @@ int flow_set_add(struct flow_set * set,
                  int              fd)
 {
         int ret;
+        size_t sdus;
+        size_t i;
 
         if (set == NULL)
                 return -EINVAL;
@@ -1038,6 +1040,10 @@ int flow_set_add(struct flow_set * set,
 
         ret = shm_flow_set_add(ai.fqset, set->idx, ai.flows[fd].port_id);
 
+        sdus = shm_rbuff_queued(ai.flows[fd].rx_rb);
+        for (i = 0; i < sdus; i++)
+                shm_flow_set_notify(ai.fqset, ai.flows[fd].port_id);
+
         pthread_rwlock_unlock(&ai.flows_lock);
         pthread_rwlock_unlock(&ai.data_lock);
 
diff --git a/src/lib/shm_rbuff.c b/src/lib/shm_rbuff.c
index a206a019..b8db7c19 100644
--- a/src/lib/shm_rbuff.c
+++ b/src/lib/shm_rbuff.c
@@ -384,3 +384,23 @@ void shm_rbuff_fini(struct shm_rbuff * rb)
 #endif
         pthread_cleanup_pop(true);
 }
+
+size_t shm_rbuff_queued(struct shm_rbuff * rb)
+{
+        size_t ret;
+
+        assert(rb);
+
+#ifdef __APPLE__
+        pthread_mutex_lock(rb->lock);
+#else
+        if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+                pthread_mutex_consistent(rb->lock);
+#endif
+
+        ret = shm_rbuff_used(rb);
+
+        pthread_mutex_unlock(rb->lock);
+
+        return ret;
+}
```
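Note that flow_set_add() raises one shm_flow_set_notify() per queued SDU rather than a single notification. Each read of an SDU presumably consumes one pending event on the set, so the depth of the event queue has to match the number of readable SDUs; a single notification would leave all but the first queued SDU invisible again.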