 src/lib/shm_rbuff_pthr.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++-----------------------------------
 1 file changed, 49 insertions(+), 35 deletions(-)
diff --git a/src/lib/shm_rbuff_pthr.c b/src/lib/shm_rbuff_pthr.c
index 00ffd583..b543fb07 100644
--- a/src/lib/shm_rbuff_pthr.c
+++ b/src/lib/shm_rbuff_pthr.c
@@ -1,10 +1,10 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2020
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* Ring buffer for incoming packets
*
- * Dimitri Staessens <dimitri.staessens@ugent.be>
- * Sander Vrijders <sander.vrijders@ugent.be>
+ * Dimitri Staessens <dimitri@ouroboros.rocks>
+ * Sander Vrijders <sander@ouroboros.rocks>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
@@ -24,12 +24,15 @@ void shm_rbuff_destroy(struct shm_rbuff * rb)
{
char fn[FN_MAX_CHARS];
- assert(rb);
+ assert(rb != NULL);
#ifdef CONFIG_OUROBOROS_DEBUG
pthread_mutex_lock(rb->lock);
- assert(shm_rbuff_empty(rb));
+ *rb->acl = *rb->acl & ACL_FLOWDOWN;
+
+ pthread_cond_broadcast(rb->del);
+ pthread_cond_broadcast(rb->add);
pthread_mutex_unlock(rb->lock);
#endif
@@ -45,7 +48,7 @@ int shm_rbuff_write(struct shm_rbuff * rb,
{
int ret = 0;
- assert(rb);
+ assert(rb != NULL);
assert(idx < SHM_BUFFER_SIZE);
#ifndef HAVE_ROBUST_MUTEX
@@ -88,7 +91,7 @@ int shm_rbuff_write_b(struct shm_rbuff * rb,
{
int ret = 0;
- assert(rb);
+ assert(rb != NULL);
assert(idx < SHM_BUFFER_SIZE);
#ifndef HAVE_ROBUST_MUTEX
@@ -106,16 +109,12 @@ int shm_rbuff_write_b(struct shm_rbuff * rb,
goto err;
}
- pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
- (void *) rb->lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
- while (!shm_rbuff_free(rb) && ret != -ETIMEDOUT) {
- if (abstime != NULL)
- ret = -pthread_cond_timedwait(rb->del,
- rb->lock,
- abstime);
- else
- ret = -pthread_cond_wait(rb->del, rb->lock);
+ while (!shm_rbuff_free(rb)
+ && ret != -ETIMEDOUT
+ && !(*rb->acl & ACL_FLOWDOWN)) {
+ ret = -__timedwait(rb->del, rb->lock, abstime);
#ifdef HAVE_ROBUST_MUTEX
if (ret == -EOWNERDEAD)
pthread_mutex_consistent(rb->lock);
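
Note: the diff only shows the call sites of __cleanup_mutex_unlock and __timedwait; their definitions live elsewhere in the tree. Judging from the pthread code they replace, they are presumably thin wrappers along the following lines (a sketch under that assumption, not the actual Ouroboros definitions):

#include <pthread.h>
#include <time.h>

/* Sketch: cleanup handler with the void * signature pthread_cleanup_push
 * expects, avoiding the function-pointer cast in the old code. */
static void __cleanup_mutex_unlock(void * mutex)
{
        pthread_mutex_unlock((pthread_mutex_t *) mutex);
}

/* Sketch: wait on a condition variable, with or without a deadline,
 * returning 0 or a positive errno value (negated by the callers above). */
static int __timedwait(pthread_cond_t *        cond,
                       pthread_mutex_t *       mtx,
                       const struct timespec * abstime)
{
        if (abstime == NULL)
                return pthread_cond_wait(cond, mtx);

        return pthread_cond_timedwait(cond, mtx, abstime);
}
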
@@ -137,11 +136,24 @@ int shm_rbuff_write_b(struct shm_rbuff * rb,
return ret;
}
+static int check_rb_acl(struct shm_rbuff * rb)
+{
+ assert(rb != NULL);
+
+ if (*rb->acl & ACL_FLOWDOWN)
+ return -EFLOWDOWN;
+
+ if (*rb->acl & ACL_FLOWPEER)
+ return -EFLOWPEER;
+
+ return -EAGAIN;
+}
+
ssize_t shm_rbuff_read(struct shm_rbuff * rb)
{
ssize_t ret = 0;
- assert(rb);
+ assert(rb != NULL);
#ifndef HAVE_ROBUST_MUTEX
pthread_mutex_lock(rb->lock);
@@ -151,7 +163,7 @@ ssize_t shm_rbuff_read(struct shm_rbuff * rb)
#endif
if (shm_rbuff_empty(rb)) {
- ret = *rb->acl & ACL_FLOWDOWN ? -EFLOWDOWN : -EAGAIN;
+ ret = check_rb_acl(rb);
pthread_mutex_unlock(rb->lock);
return ret;
}
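
For orientation, the new check_rb_acl() helper folds the ACL flags into the error code returned when the ring buffer is empty. A hypothetical caller of the non-blocking read (illustrative only, not part of this commit; it assumes the usual Ouroboros errno and shm_rbuff headers are included) would tell the cases apart like this:

/* Hypothetical example, not from the source tree. */
static void read_example(struct shm_rbuff * rb)
{
        ssize_t idx;

        idx = shm_rbuff_read(rb);
        if (idx >= 0) {
                /* idx indexes the packet buffer to process. */
        } else if (idx == -EAGAIN) {
                /* Buffer empty but the flow is still up: retry later. */
        } else {
                /* -EFLOWDOWN or -EFLOWPEER: this end or the peer end
                 * of the flow is down, so stop reading. */
        }
}
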
@@ -170,7 +182,7 @@ ssize_t shm_rbuff_read_b(struct shm_rbuff * rb,
{
ssize_t idx = -1;
- assert(rb);
+ assert(rb != NULL);
#ifndef HAVE_ROBUST_MUTEX
pthread_mutex_lock(rb->lock);
@@ -184,37 +196,37 @@ ssize_t shm_rbuff_read_b(struct shm_rbuff * rb,
return -EFLOWDOWN;
}
- pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
- (void *) rb->lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
- while (shm_rbuff_empty(rb) && (idx != -ETIMEDOUT)) {
- if (abstime != NULL)
- idx = -pthread_cond_timedwait(rb->add,
- rb->lock,
- abstime);
- else
- idx = -pthread_cond_wait(rb->add, rb->lock);
+ while (shm_rbuff_empty(rb) &&
+ idx != -ETIMEDOUT &&
+ check_rb_acl(rb) == -EAGAIN) {
+ idx = -__timedwait(rb->add, rb->lock, abstime);
#ifdef HAVE_ROBUST_MUTEX
if (idx == -EOWNERDEAD)
pthread_mutex_consistent(rb->lock);
#endif
}
- if (idx != -ETIMEDOUT) {
+ if (!shm_rbuff_empty(rb)) {
idx = *tail_el_ptr(rb);
*rb->tail = (*rb->tail + 1) & ((SHM_RBUFF_SIZE) - 1);
pthread_cond_broadcast(rb->del);
+ } else if (idx != -ETIMEDOUT) {
+ idx = check_rb_acl(rb);
}
pthread_cleanup_pop(true);
+ assert(idx != -EAGAIN);
+
return idx;
}
void shm_rbuff_set_acl(struct shm_rbuff * rb,
uint32_t flags)
{
- assert(rb);
+ assert(rb != NULL);
#ifndef HAVE_ROBUST_MUTEX
pthread_mutex_lock(rb->lock);
@@ -224,6 +236,9 @@ void shm_rbuff_set_acl(struct shm_rbuff * rb,
#endif
*rb->acl = (size_t) flags;
+ pthread_cond_broadcast(rb->del);
+ pthread_cond_broadcast(rb->add);
+
pthread_mutex_unlock(rb->lock);
}
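
With the broadcasts added here (and in the destroy path above), changing the ACL now wakes any thread parked in shm_rbuff_read_b() or shm_rbuff_write_b(), which re-checks the flags and returns an error instead of sleeping indefinitely. An illustrative caller-side sequence, again not part of this commit:

/* Thread A blocks on an empty ring buffer (no timeout): */
ssize_t idx = shm_rbuff_read_b(rb, NULL);

/* Thread B marks the flow down; the broadcasts on rb->add and rb->del
 * wake thread A, whose next check_rb_acl() yields -EFLOWDOWN, so the
 * blocking read above returns -EFLOWDOWN: */
shm_rbuff_set_acl(rb, ACL_FLOWDOWN);
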
@@ -231,7 +246,7 @@ uint32_t shm_rbuff_get_acl(struct shm_rbuff * rb)
{
uint32_t flags;
- assert(rb);
+ assert(rb != NULL);
#ifndef HAVE_ROBUST_MUTEX
pthread_mutex_lock(rb->lock);
@@ -248,7 +263,7 @@ uint32_t shm_rbuff_get_acl(struct shm_rbuff * rb)
void shm_rbuff_fini(struct shm_rbuff * rb)
{
- assert(rb);
+ assert(rb != NULL);
#ifndef HAVE_ROBUST_MUTEX
pthread_mutex_lock(rb->lock);
@@ -256,8 +271,7 @@ void shm_rbuff_fini(struct shm_rbuff * rb)
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
pthread_mutex_consistent(rb->lock);
#endif
- pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
- (void *) rb->lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
while (!shm_rbuff_empty(rb))
#ifndef HAVE_ROBUST_MUTEX
@@ -273,7 +287,7 @@ size_t shm_rbuff_queued(struct shm_rbuff * rb)
{
size_t ret;
- assert(rb);
+ assert(rb != NULL);
#ifndef HAVE_ROBUST_MUTEX
pthread_mutex_lock(rb->lock);