summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--cmake/lib/lib.cmake2
-rw-r--r--src/lib/config.h.in1
-rw-r--r--src/lib/shm_rbuff.c281
-rw-r--r--src/lib/shm_rbuff_ll.c249
-rw-r--r--src/lib/shm_rbuff_pthr.c304
5 files changed, 277 insertions, 560 deletions
diff --git a/cmake/lib/lib.cmake b/cmake/lib/lib.cmake
index bf79da44..674d8503 100644
--- a/cmake/lib/lib.cmake
+++ b/cmake/lib/lib.cmake
@@ -43,8 +43,6 @@ set(SHM_RDRB_BLOCK_SIZE "sysconf(_SC_PAGESIZE)" CACHE STRING
"Packet buffer block size, multiple of pagesize for performance")
set(SHM_RDRB_MULTI_BLOCK TRUE CACHE BOOL
"Packet buffer multiblock packet support")
-set(SHM_RBUFF_LOCKLESS FALSE CACHE BOOL
- "Enable shared memory lockless rbuff support")
set(QOS_DISABLE_CRC TRUE CACHE BOOL
"Ignores ber setting on all QoS cubes")
set(DELTA_T_MPL 60 CACHE STRING
diff --git a/src/lib/config.h.in b/src/lib/config.h.in
index 8326a332..4533b00e 100644
--- a/src/lib/config.h.in
+++ b/src/lib/config.h.in
@@ -30,7 +30,6 @@
#define SYS_MAX_FLOWS @SYS_MAX_FLOWS@
-#cmakedefine SHM_RBUFF_LOCKLESS
#cmakedefine SHM_RDRB_MULTI_BLOCK
#cmakedefine QOS_DISABLE_CRC
#cmakedefine HAVE_OPENSSL_RNG
diff --git a/src/lib/shm_rbuff.c b/src/lib/shm_rbuff.c
index 22cff41c..bee52b4e 100644
--- a/src/lib/shm_rbuff.c
+++ b/src/lib/shm_rbuff.c
@@ -194,6 +194,29 @@ struct shm_rbuff * shm_rbuff_create(pid_t pid,
return NULL;
}
+void shm_rbuff_destroy(struct shm_rbuff * rb)
+{
+ char fn[FN_MAX_CHARS];
+
+ assert(rb != NULL);
+
+#ifdef CONFIG_OUROBOROS_DEBUG
+ pthread_mutex_lock(rb->lock);
+
+	*rb->acl = *rb->acl | ACL_FLOWDOWN;
+
+ pthread_cond_broadcast(rb->add);
+ pthread_cond_broadcast(rb->del);
+
+ pthread_mutex_unlock(rb->lock);
+#endif
+ sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->flow_id);
+
+ shm_rbuff_close(rb);
+
+ shm_unlink(fn);
+}
+
struct shm_rbuff * shm_rbuff_open(pid_t pid,
int flow_id)
{
@@ -207,9 +230,259 @@ void shm_rbuff_close(struct shm_rbuff * rb)
rbuff_destroy(rb);
}
-#if (defined(SHM_RBUFF_LOCKLESS) && \
- (defined(__GNUC__) || defined (__clang__)))
-#include "shm_rbuff_ll.c"
+int shm_rbuff_write(struct shm_rbuff * rb,
+ size_t idx)
+{
+ int ret = 0;
+
+ assert(rb != NULL);
+ assert(idx < SHM_BUFFER_SIZE);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+
+ if (*rb->acl != ACL_RDWR) {
+ if (*rb->acl & ACL_FLOWDOWN)
+ ret = -EFLOWDOWN;
+ else if (*rb->acl & ACL_RDONLY)
+ ret = -ENOTALLOC;
+ goto err;
+ }
+
+ if (!shm_rbuff_free(rb)) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ if (shm_rbuff_empty(rb))
+ pthread_cond_broadcast(rb->add);
+
+ *head_el_ptr(rb) = (ssize_t) idx;
+ *rb->head = (*rb->head + 1) & ((SHM_RBUFF_SIZE) - 1);
+
+ pthread_mutex_unlock(rb->lock);
+
+ return 0;
+ err:
+ pthread_mutex_unlock(rb->lock);
+ return ret;
+}
+
+int shm_rbuff_write_b(struct shm_rbuff * rb,
+ size_t idx,
+ const struct timespec * abstime)
+{
+ int ret = 0;
+
+ assert(rb != NULL);
+ assert(idx < SHM_BUFFER_SIZE);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+
+ if (*rb->acl != ACL_RDWR) {
+ if (*rb->acl & ACL_FLOWDOWN)
+ ret = -EFLOWDOWN;
+ else if (*rb->acl & ACL_RDONLY)
+ ret = -ENOTALLOC;
+ goto err;
+ }
+
+ pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
+
+ while (!shm_rbuff_free(rb)
+ && ret != -ETIMEDOUT
+ && !(*rb->acl & ACL_FLOWDOWN)) {
+ ret = -__timedwait(rb->del, rb->lock, abstime);
+#ifdef HAVE_ROBUST_MUTEX
+ if (ret == -EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+ }
+
+ if (ret != -ETIMEDOUT) {
+ if (shm_rbuff_empty(rb))
+ pthread_cond_broadcast(rb->add);
+ *head_el_ptr(rb) = (ssize_t) idx;
+ *rb->head = (*rb->head + 1) & ((SHM_RBUFF_SIZE) - 1);
+ }
+
+ pthread_cleanup_pop(true);
+
+ return ret;
+ err:
+ pthread_mutex_unlock(rb->lock);
+ return ret;
+}
+
+static int check_rb_acl(struct shm_rbuff * rb)
+{
+ assert(rb != NULL);
+
+ if (*rb->acl & ACL_FLOWDOWN)
+ return -EFLOWDOWN;
+
+ if (*rb->acl & ACL_FLOWPEER)
+ return -EFLOWPEER;
+
+ return -EAGAIN;
+}
+
+ssize_t shm_rbuff_read(struct shm_rbuff * rb)
+{
+ ssize_t ret = 0;
+
+ assert(rb != NULL);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+
+ if (shm_rbuff_empty(rb)) {
+ ret = check_rb_acl(rb);
+ pthread_mutex_unlock(rb->lock);
+ return ret;
+ }
+
+ ret = *tail_el_ptr(rb);
+ *rb->tail = (*rb->tail + 1) & ((SHM_RBUFF_SIZE) - 1);
+ pthread_cond_broadcast(rb->del);
+
+ pthread_mutex_unlock(rb->lock);
+
+ return ret;
+}
+
+ssize_t shm_rbuff_read_b(struct shm_rbuff * rb,
+ const struct timespec * abstime)
+{
+ ssize_t idx = -1;
+
+ assert(rb != NULL);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+
+ if (shm_rbuff_empty(rb) && (*rb->acl & ACL_FLOWDOWN)) {
+ pthread_mutex_unlock(rb->lock);
+ return -EFLOWDOWN;
+ }
+
+ pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
+
+ while (shm_rbuff_empty(rb) &&
+ idx != -ETIMEDOUT &&
+ check_rb_acl(rb) == -EAGAIN) {
+ idx = -__timedwait(rb->add, rb->lock, abstime);
+#ifdef HAVE_ROBUST_MUTEX
+ if (idx == -EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+ }
+
+ if (!shm_rbuff_empty(rb)) {
+ idx = *tail_el_ptr(rb);
+ *rb->tail = (*rb->tail + 1) & ((SHM_RBUFF_SIZE) - 1);
+ pthread_cond_broadcast(rb->del);
+ } else if (idx != -ETIMEDOUT) {
+ idx = check_rb_acl(rb);
+ }
+
+ pthread_cleanup_pop(true);
+
+ assert(idx != -EAGAIN);
+
+ return idx;
+}
+
+void shm_rbuff_set_acl(struct shm_rbuff * rb,
+ uint32_t flags)
+{
+ assert(rb != NULL);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+	*rb->acl = (size_t) flags;
+
+	pthread_cond_broadcast(rb->del);
+	pthread_cond_broadcast(rb->add);
+
+	pthread_mutex_unlock(rb->lock);
+}
+
+uint32_t shm_rbuff_get_acl(struct shm_rbuff * rb)
+{
+ uint32_t flags;
+
+ assert(rb != NULL);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+ flags = (uint32_t) *rb->acl;
+
+ pthread_mutex_unlock(rb->lock);
+
+ return flags;
+}
+
+void shm_rbuff_fini(struct shm_rbuff * rb)
+{
+ assert(rb != NULL);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
+#else
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
+#endif
+ pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
+
+	while (!shm_rbuff_empty(rb))
+#ifndef HAVE_ROBUST_MUTEX
+		pthread_cond_wait(rb->del, rb->lock);
+#else
+		if (pthread_cond_wait(rb->del, rb->lock) == EOWNERDEAD)
+			pthread_mutex_consistent(rb->lock);
+#endif
+ pthread_cleanup_pop(true);
+}
+
+size_t shm_rbuff_queued(struct shm_rbuff * rb)
+{
+ size_t ret;
+
+ assert(rb != NULL);
+
+#ifndef HAVE_ROBUST_MUTEX
+ pthread_mutex_lock(rb->lock);
#else
-#include "shm_rbuff_pthr.c"
+ if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
+ pthread_mutex_consistent(rb->lock);
#endif
+
+ ret = shm_rbuff_used(rb);
+
+ pthread_mutex_unlock(rb->lock);
+
+ return ret;
+}
diff --git a/src/lib/shm_rbuff_ll.c b/src/lib/shm_rbuff_ll.c
deleted file mode 100644
index 46a5314e..00000000
--- a/src/lib/shm_rbuff_ll.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2024
- *
- * Lockless ring buffer for incoming packets
- *
- * Dimitri Staessens <dimitri@ouroboros.rocks>
- * Sander Vrijders <sander@ouroboros.rocks>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-#define RB_HEAD __sync_fetch_and_add(rb->head, 0)
-#define RB_TAIL __sync_fetch_and_add(rb->tail, 0)
-
-void shm_rbuff_destroy(struct shm_rbuff * rb)
-{
- char fn[FN_MAX_CHARS];
-
- assert(rb);
-
- sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->flow_id);
-
- __sync_bool_compare_and_swap(rb->acl, *rb->acl, ACL_FLOWDOWN);
-
- pthread_cond_broadcast(rb->del);
- pthread_cond_broadcast(rb->add);
-
- shm_rbuff_close(rb);
-
- shm_unlink(fn);
-}
-
-int shm_rbuff_write(struct shm_rbuff * rb,
- size_t idx)
-{
- size_t ohead;
- size_t nhead;
- bool was_empty = false;
-
- assert(rb);
- assert(idx < SHM_BUFFER_SIZE);
-
- if (__sync_fetch_and_add(rb->acl, 0) != ACL_RDWR) {
- if (__sync_fetch_and_add(rb->acl, 0) & ACL_FLOWDOWN)
- return -EFLOWDOWN;
- else if (__sync_fetch_and_add(rb->acl, 0) & ACL_RDONLY)
- return -ENOTALLOC;
- }
-
- if (!shm_rbuff_free(rb))
- return -EAGAIN;
-
- if (shm_rbuff_empty(rb))
- was_empty = true;
-
- nhead = RB_HEAD;
-
- *(rb->shm_base + nhead) = (ssize_t) idx;
-
- do {
- ohead = nhead;
- nhead = (ohead + 1) & ((SHM_RBUFF_SIZE) - 1);
- nhead = __sync_val_compare_and_swap(rb->head, ohead, nhead);
- } while (nhead != ohead);
-
- if (was_empty)
- pthread_cond_broadcast(rb->add);
-
- return 0;
-}
-
-/* FIXME: this is a copy of the pthr implementation */
-int shm_rbuff_write_b(struct shm_rbuff * rb,
- size_t idx,
- const struct timespec * abstime)
-{
- int ret = 0;
-
- assert(rb);
- assert(idx < SHM_BUFFER_SIZE);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- if (*rb->acl != ACL_RDWR) {
- if (*rb->acl & ACL_FLOWDOWN)
- ret = -EFLOWDOWN;
- else if (*rb->acl & ACL_RDONLY)
- ret = -ENOTALLOC;
- goto err;
- }
-
- pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
-
- while (!shm_rbuff_free(rb) && ret != -ETIMEDOUT) {
- ret = -__timedwait(rb->add, rb->lock, abstime);
-#ifdef HAVE_ROBUST_MUTEX
- if (ret == -EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- }
-
- if (shm_rbuff_empty(rb))
- pthread_cond_broadcast(rb->add);
-
- if (ret != -ETIMEDOUT) {
- *head_el_ptr(rb) = (ssize_t) idx;
- *rb->head = (*rb->head + 1) & ((SHM_RBUFF_SIZE) -1);
- }
-
- pthread_cleanup_pop(true);
-
- return ret;
- err:
- pthread_mutex_unlock(rb->lock);
- return ret;
-}
-
-ssize_t shm_rbuff_read(struct shm_rbuff * rb)
-{
- size_t otail;
- size_t ntail;
-
- assert(rb);
-
- if (shm_rbuff_empty(rb)) {
- if (_sync_fetch_and_add(rb->acl, 0) & ACL_FLOWDOWN)
- return -EFLOWDOWN;
-
- if (_sync_fetch_and_add(rb->acl, 0) & ACL_FLOWPEER)
- return -EFLOWPEER;
-
- return -EAGAIN;
- }
-
- ntail = RB_TAIL;
-
- do {
- otail = ntail;
- ntail = (otail + 1) & ((SHM_RBUFF_SIZE) - 1);
- ntail = __sync_val_compare_and_swap(rb->tail, otail, ntail);
- } while (ntail != otail);
-
- pthread_cond_broadcast(rb->del);
-
- return *(rb->shm_base + ntail);
-}
-
-ssize_t shm_rbuff_read_b(struct shm_rbuff * rb,
- const struct timespec * abstime)
-{
- ssize_t idx = -1;
-
- assert(rb);
-
- /* try a non-blocking read first */
- idx = shm_rbuff_read(rb);
- if (idx != -EAGAIN)
- return idx;
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
-
- while (shm_rbuff_empty(rb) && (idx != -ETIMEDOUT)) {
- idx = -__timedwait(rb->add, rb->lock, abstime);
-#ifdef HAVE_ROBUST_MUTEX
- if (idx == -EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- }
-
- if (idx != -ETIMEDOUT) {
- /* do a nonblocking read */
- idx = shm_rbuff_read(rb);
- assert(idx >= 0);
- }
-
- pthread_cleanup_pop(true);
-
- return idx;
-}
-
-void shm_rbuff_set_acl(struct shm_rbuff * rb,
- uint32_t flags)
-{
- assert(rb);
-
- __sync_bool_compare_and_swap(rb->acl, *rb->acl, flags);
-}
-
-uint32_t shm_rbuff_get_acl(struct shm_rbuff * rb)
-{
- assert(rb);
-
- return __sync_fetch_and_add(rb->acl, 0);
-}
-
-void shm_rbuff_fini(struct shm_rbuff * rb)
-{
- assert(rb);
-
- if (shm_rbuff_empty(rb))
- return;
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
-
- while (!shm_rbuff_empty(rb))
-#ifndef HAVE_ROBUST_MUTEX
- pthread_cond_wait(rb->del, rb->lock);
-#else
- if (pthread_cond_wait(rb->del, rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- pthread_cleanup_pop(true);
-}
-
-size_t shm_rbuff_queued(struct shm_rbuff * rb)
-{
- assert(rb);
-
- return shm_rbuff_used(rb);
-}
diff --git a/src/lib/shm_rbuff_pthr.c b/src/lib/shm_rbuff_pthr.c
deleted file mode 100644
index b543fb07..00000000
--- a/src/lib/shm_rbuff_pthr.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Ouroboros - Copyright (C) 2016 - 2024
- *
- * Ring buffer for incoming packets
- *
- * Dimitri Staessens <dimitri@ouroboros.rocks>
- * Sander Vrijders <sander@ouroboros.rocks>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public License
- * version 2.1 as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., http://www.fsf.org/about/contact/.
- */
-
-void shm_rbuff_destroy(struct shm_rbuff * rb)
-{
- char fn[FN_MAX_CHARS];
-
- assert(rb != NULL);
-
-#ifdef CONFIG_OUROBOROS_DEBUG
- pthread_mutex_lock(rb->lock);
-
- *rb->acl = *rb->acl & ACL_FLOWDOWN;
-
- pthread_cond_broadcast(rb->del);
- pthread_cond_broadcast(rb->add);
-
- pthread_mutex_unlock(rb->lock);
-#endif
- sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->flow_id);
-
- shm_rbuff_close(rb);
-
- shm_unlink(fn);
-}
-
-int shm_rbuff_write(struct shm_rbuff * rb,
- size_t idx)
-{
- int ret = 0;
-
- assert(rb != NULL);
- assert(idx < SHM_BUFFER_SIZE);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- if (*rb->acl != ACL_RDWR) {
- if (*rb->acl & ACL_FLOWDOWN)
- ret = -EFLOWDOWN;
- else if (*rb->acl & ACL_RDONLY)
- ret = -ENOTALLOC;
- goto err;
- }
-
- if (!shm_rbuff_free(rb)) {
- ret = -EAGAIN;
- goto err;
- }
-
- if (shm_rbuff_empty(rb))
- pthread_cond_broadcast(rb->add);
-
- *head_el_ptr(rb) = (ssize_t) idx;
- *rb->head = (*rb->head + 1) & ((SHM_RBUFF_SIZE) - 1);
-
- pthread_mutex_unlock(rb->lock);
-
- return 0;
- err:
- pthread_mutex_unlock(rb->lock);
- return ret;
-}
-
-int shm_rbuff_write_b(struct shm_rbuff * rb,
- size_t idx,
- const struct timespec * abstime)
-{
- int ret = 0;
-
- assert(rb != NULL);
- assert(idx < SHM_BUFFER_SIZE);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- if (*rb->acl != ACL_RDWR) {
- if (*rb->acl & ACL_FLOWDOWN)
- ret = -EFLOWDOWN;
- else if (*rb->acl & ACL_RDONLY)
- ret = -ENOTALLOC;
- goto err;
- }
-
- pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
-
- while (!shm_rbuff_free(rb)
- && ret != -ETIMEDOUT
- && !(*rb->acl & ACL_FLOWDOWN)) {
- ret = -__timedwait(rb->del, rb->lock, abstime);
-#ifdef HAVE_ROBUST_MUTEX
- if (ret == -EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- }
-
- if (ret != -ETIMEDOUT) {
- if (shm_rbuff_empty(rb))
- pthread_cond_broadcast(rb->add);
- *head_el_ptr(rb) = (ssize_t) idx;
- *rb->head = (*rb->head + 1) & ((SHM_RBUFF_SIZE) - 1);
- }
-
- pthread_cleanup_pop(true);
-
- return ret;
- err:
- pthread_mutex_unlock(rb->lock);
- return ret;
-}
-
-static int check_rb_acl(struct shm_rbuff * rb)
-{
- assert(rb != NULL);
-
- if (*rb->acl & ACL_FLOWDOWN)
- return -EFLOWDOWN;
-
- if (*rb->acl & ACL_FLOWPEER)
- return -EFLOWPEER;
-
- return -EAGAIN;
-}
-
-ssize_t shm_rbuff_read(struct shm_rbuff * rb)
-{
- ssize_t ret = 0;
-
- assert(rb != NULL);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- if (shm_rbuff_empty(rb)) {
- ret = check_rb_acl(rb);
- pthread_mutex_unlock(rb->lock);
- return ret;
- }
-
- ret = *tail_el_ptr(rb);
- *rb->tail = (*rb->tail + 1) & ((SHM_RBUFF_SIZE) - 1);
- pthread_cond_broadcast(rb->del);
-
- pthread_mutex_unlock(rb->lock);
-
- return ret;
-}
-
-ssize_t shm_rbuff_read_b(struct shm_rbuff * rb,
- const struct timespec * abstime)
-{
- ssize_t idx = -1;
-
- assert(rb != NULL);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- if (shm_rbuff_empty(rb) && (*rb->acl & ACL_FLOWDOWN)) {
- pthread_mutex_unlock(rb->lock);
- return -EFLOWDOWN;
- }
-
- pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
-
- while (shm_rbuff_empty(rb) &&
- idx != -ETIMEDOUT &&
- check_rb_acl(rb) == -EAGAIN) {
- idx = -__timedwait(rb->add, rb->lock, abstime);
-#ifdef HAVE_ROBUST_MUTEX
- if (idx == -EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- }
-
- if (!shm_rbuff_empty(rb)) {
- idx = *tail_el_ptr(rb);
- *rb->tail = (*rb->tail + 1) & ((SHM_RBUFF_SIZE) - 1);
- pthread_cond_broadcast(rb->del);
- } else if (idx != -ETIMEDOUT) {
- idx = check_rb_acl(rb);
- }
-
- pthread_cleanup_pop(true);
-
- assert(idx != -EAGAIN);
-
- return idx;
-}
-
-void shm_rbuff_set_acl(struct shm_rbuff * rb,
- uint32_t flags)
-{
- assert(rb != NULL);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- *rb->acl = (size_t) flags;
-
- pthread_cond_broadcast(rb->del);
- pthread_cond_broadcast(rb->add);
-
- pthread_mutex_unlock(rb->lock);
-}
-
-uint32_t shm_rbuff_get_acl(struct shm_rbuff * rb)
-{
- uint32_t flags;
-
- assert(rb != NULL);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- flags = (uint32_t) *rb->acl;
-
- pthread_mutex_unlock(rb->lock);
-
- return flags;
-}
-
-void shm_rbuff_fini(struct shm_rbuff * rb)
-{
- assert(rb != NULL);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- pthread_cleanup_push(__cleanup_mutex_unlock, rb->lock);
-
- while (!shm_rbuff_empty(rb))
-#ifndef HAVE_ROBUST_MUTEX
- pthread_cond_wait(rb->del, rb->lock);
-#else
- if (pthread_cond_wait(rb->del, rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
- pthread_cleanup_pop(true);
-}
-
-size_t shm_rbuff_queued(struct shm_rbuff * rb)
-{
- size_t ret;
-
- assert(rb != NULL);
-
-#ifndef HAVE_ROBUST_MUTEX
- pthread_mutex_lock(rb->lock);
-#else
- if (pthread_mutex_lock(rb->lock) == EOWNERDEAD)
- pthread_mutex_consistent(rb->lock);
-#endif
-
- ret = shm_rbuff_used(rb);
-
- pthread_mutex_unlock(rb->lock);
-
- return ret;
-}