path: root/src/lib/shm_ap_rbuff.c
author     dimitri staessens <dimitri.staessens@intec.ugent.be>    2016-08-30 14:29:08 +0200
committer  dimitri staessens <dimitri.staessens@intec.ugent.be>    2016-08-30 21:44:34 +0200
commit     04eb03136466a18d81511e7ccadf51c08faa8edb (patch)
tree       721c6c6a7b394dc758ffa12bb3a1261d9817207f /src/lib/shm_ap_rbuff.c
parent     2cc89f6da424ab503af563e0cc92dda43b8f8432 (diff)
lib, ipcp: Compile on Apple junk
Disables robust mutexes and condition-variable clock attributes for compatibility with OS X (SUSv2). Implements clock_gettime and adds some defines for OS X compatibility in time_utils.
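
The locking idiom that this patch wraps in #ifdef __APPLE__ throughout the diff below can be summarised as follows. This is an illustrative sketch, not code from the patch, and the helper name rbuff_lock is hypothetical. On platforms with robust mutexes, pthread_mutex_lock() returns EOWNERDEAD when the previous owner died while holding the lock, and pthread_mutex_consistent() makes the mutex usable again; OS X does not provide PTHREAD_MUTEX_ROBUST, so the patch falls back to a plain lock there.

#include <pthread.h>
#include <errno.h>

/* Hypothetical helper illustrating the guarded locking idiom. */
static void rbuff_lock(pthread_mutex_t * lock)
{
#ifdef __APPLE__
        /* OS X (SUSv2) has no robust mutexes: just take the lock. */
        pthread_mutex_lock(lock);
#else
        /* EOWNERDEAD: the previous owner died holding the lock;
         * mark the mutex consistent so it can be used again. */
        if (pthread_mutex_lock(lock) == EOWNERDEAD)
                pthread_mutex_consistent(lock);
#endif
}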
Diffstat (limited to 'src/lib/shm_ap_rbuff.c')
-rw-r--r--    src/lib/shm_ap_rbuff.c    59
1 file changed, 41 insertions(+), 18 deletions(-)
diff --git a/src/lib/shm_ap_rbuff.c b/src/lib/shm_ap_rbuff.c
index f21b1e86..77e288a8 100644
--- a/src/lib/shm_ap_rbuff.c
+++ b/src/lib/shm_ap_rbuff.c
@@ -131,13 +131,17 @@ struct shm_ap_rbuff * shm_ap_rbuff_create()
rb->del = rb->add + 1;
pthread_mutexattr_init(&mattr);
+#ifndef __APPLE__
pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST);
+#endif
pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
pthread_mutex_init(rb->lock, &mattr);
pthread_condattr_init(&cattr);
pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED);
+#ifndef __APPLE__
pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
+#endif
pthread_cond_init(rb->add, &cattr);
pthread_cond_init(rb->del, &cattr);
@@ -264,11 +268,14 @@ int shm_ap_rbuff_write(struct shm_ap_rbuff * rb, struct rb_entry * e)
if (rb == NULL || e == NULL)
return -1;
+#ifdef __APPLE__
+ pthread_mutex_lock(rb->lock);
+#else
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (!shm_rbuff_free(rb)) {
pthread_mutex_unlock(rb->lock);
return -1;
@@ -291,12 +298,14 @@ int shm_ap_rbuff_peek_idx(struct shm_ap_rbuff * rb)
if (rb == NULL)
return -EINVAL;
-
+#ifdef __APPLE__
+ pthread_mutex_lock(rb->lock);
+#else
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (shm_rbuff_empty(rb)) {
pthread_mutex_unlock(rb->lock);
return -1;
@@ -325,12 +334,14 @@ int shm_ap_rbuff_peek_b(struct shm_ap_rbuff * rb,
pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
(void *) rb->lock);
-
+#ifdef __APPLE__
+ pthread_mutex_lock(rb->lock);
+#else
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
while (shm_rbuff_empty(rb)) {
if (timeout != NULL)
ret = pthread_cond_timedwait(rb->add,
@@ -338,12 +349,12 @@ int shm_ap_rbuff_peek_b(struct shm_ap_rbuff * rb,
&abstime);
else
ret = pthread_cond_wait(rb->add, rb->lock);
-
+#ifndef __APPLE__
if (ret == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (ret == ETIMEDOUT)
break;
}
@@ -368,17 +379,23 @@ struct rb_entry * shm_ap_rbuff_read(struct shm_ap_rbuff * rb)
pthread_cleanup_push((void(*)(void *))pthread_mutex_unlock,
(void *) rb->lock);
+#ifdef __APPLE__
+ pthread_mutex_lock(rb->lock);
+#else
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
while (shm_rbuff_empty(rb))
+#ifdef __APPLE__
+ pthread_cond_wait(rb->add, rb->lock);
+#else
if (pthread_cond_wait(rb->add, rb->lock) == EOWNERDEAD) {
- LOG_DBG("Recovering dead mutex.");
- pthread_mutex_consistent(rb->lock);
- }
-
+ LOG_DBG("Recovering dead mutex.");
+ pthread_mutex_consistent(rb->lock);
+ }
+#endif
e = malloc(sizeof(*e));
if (e != NULL) {
*e = *(rb->shm_base + *rb->ptr_tail);
@@ -394,11 +411,14 @@ ssize_t shm_ap_rbuff_read_port(struct shm_ap_rbuff * rb, int port_id)
{
ssize_t idx = -1;
+#ifdef __APPLE__
+ pthread_mutex_lock(rb->lock);
+#else
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (shm_rbuff_empty(rb) || tail_el_ptr(rb)->port_id != port_id) {
pthread_mutex_unlock(rb->lock);
return -1;
@@ -422,11 +442,14 @@ ssize_t shm_ap_rbuff_read_port_b(struct shm_ap_rbuff * rb,
int ret = 0;
ssize_t idx = -1;
+#ifdef __APPLE__
+ pthread_mutex_lock(rb->lock);
+#else
if (pthread_mutex_lock(rb->lock) == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (timeout != NULL) {
clock_gettime(PTHREAD_COND_CLOCK, &abstime);
ts_add(&abstime, timeout, &abstime);
@@ -444,12 +467,12 @@ ssize_t shm_ap_rbuff_read_port_b(struct shm_ap_rbuff * rb,
&abstime);
else
ret = pthread_cond_wait(rb->add, rb->lock);
-
+#ifndef __APPLE__
if (ret == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (ret == ETIMEDOUT)
break;
}
@@ -461,12 +484,12 @@ ssize_t shm_ap_rbuff_read_port_b(struct shm_ap_rbuff * rb,
&abstime);
else
ret = pthread_cond_wait(rb->del, rb->lock);
-
+#ifndef __APPLE__
if (ret == EOWNERDEAD) {
LOG_DBG("Recovering dead mutex.");
pthread_mutex_consistent(rb->lock);
}
-
+#endif
if (ret == ETIMEDOUT)
break;
}
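
The commit message also mentions implementing clock_gettime for OS X in time_utils; that file is not part of this diff, so the sketch below is only a hedged approximation of what such a shim could look like. The name compat_clock_gettime and the gettimeofday()-based fallback are assumptions, not the actual implementation.

#ifdef __APPLE__
#include <sys/time.h>
#include <time.h>

/* Hypothetical realtime-only fallback; older OS X releases lack
 * clock_gettime(), but gettimeofday() gives microsecond resolution. */
static int compat_clock_gettime(int clk, struct timespec * ts)
{
        struct timeval tv;

        (void) clk; /* only a CLOCK_REALTIME-style clock is emulated here */

        if (gettimeofday(&tv, NULL) < 0)
                return -1;

        ts->tv_sec  = tv.tv_sec;
        ts->tv_nsec = tv.tv_usec * 1000;

        return 0;
}
#endif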