Diffstat (limited to 'src/ipcpd/ipcp.c')
-rw-r--r--  src/ipcpd/ipcp.c  1187
1 file changed, 717 insertions(+), 470 deletions(-)
diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c
index cd09a7cf..774bfda4 100644
--- a/src/ipcpd/ipcp.c
+++ b/src/ipcpd/ipcp.c
@@ -1,5 +1,5 @@
/*
- * Ouroboros - Copyright (C) 2016 - 2021
+ * Ouroboros - Copyright (C) 2016 - 2024
*
* IPC process main loop
*
@@ -35,18 +35,21 @@
#define OUROBOROS_PREFIX "ipcpd/ipcp"
#define IPCP_INFO "info"
+#define ALLOC_TIMEOUT 50 /* ms */
+#include <ouroboros/bitmap.h>
+#include <ouroboros/dev.h>
+#include <ouroboros/errno.h>
#include <ouroboros/hash.h>
+#include <ouroboros/ipcp-dev.h>
#include <ouroboros/logs.h>
-#include <ouroboros/time_utils.h>
-#include <ouroboros/utils.h>
-#include <ouroboros/sockets.h>
-#include <ouroboros/errno.h>
-#include <ouroboros/dev.h>
-#include <ouroboros/bitmap.h>
#include <ouroboros/np1_flow.h>
-#include <ouroboros/rib.h>
+#include <ouroboros/protobuf.h>
#include <ouroboros/pthread.h>
+#include <ouroboros/rib.h>
+#include <ouroboros/sockets.h>
+#include <ouroboros/time.h>
+#include <ouroboros/utils.h>
#include "ipcp.h"
@@ -61,13 +64,73 @@
#endif
#endif
-char * info[LAYER_NAME_SIZE + 1] = {
- "_state",
- "_type",
- "_layer",
- NULL
+#ifndef CLOCK_REALTIME_COARSE
+#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
+#endif
+
+static char * ipcp_type_str[] = {
+ "local",
+ "unicast",
+ "broadcast",
+ "eth-llc",
+ "eth-dix",
+ "udp4",
+ "udp6"
+};
+
+static char * dir_hash_str[] = {
+ "SHA3-224",
+ "SHA3-256",
+ "SHA3-384",
+ "SHA3-512",
+ "CRC32",
+ "MD5"
+};
+
+static char * ipcp_state_str[] = {
+ "null",
+ "init",
+ "boot",
+ "bootstrapped",
+ "enrolled",
+ "operational",
+ "shutdown"
};
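+/* Global state of this IPCP daemon instance. */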
+struct {
+ pid_t irmd_pid;
+ char * name;
+
+ enum ipcp_type type;
+ char layer_name[LAYER_NAME_SIZE + 1];
+
+ uint64_t dt_addr;
+
+ enum hash_algo dir_hash_algo;
+
+ struct ipcp_ops * ops;
+ int irmd_fd;
+
+ enum ipcp_state state;
+ pthread_cond_t state_cond;
+ pthread_mutex_t state_mtx;
+
+ int sockfd;
+ char * sock_path;
+
+ struct list_head cmds;
+ pthread_cond_t cmd_cond;
+ pthread_mutex_t cmd_lock;
+
+ int alloc_id;
+ pthread_cond_t alloc_cond;
+ pthread_mutex_t alloc_lock;
+
+ struct tpm * tpm;
+
+ pthread_t acceptor;
+} ipcpd;
+
struct cmd {
struct list_head next;
@@ -76,9 +139,29 @@ struct cmd {
int fd;
};
+enum ipcp_type ipcp_get_type(void)
+{
+ return ipcpd.type;
+}
+
+const char * ipcp_get_name(void)
+{
+ return ipcpd.name;
+}
+
+void ipcp_set_dir_hash_algo(enum hash_algo algo)
+{
+ ipcpd.dir_hash_algo = algo;
+}
+
+size_t ipcp_dir_hash_len(void)
+{
+ return hash_len(ipcpd.dir_hash_algo);
+}
+
uint8_t * ipcp_hash_dup(const uint8_t * hash)
{
- uint8_t * dup = malloc(hash_len(ipcpi.dir_hash_algo));
+ uint8_t * dup = malloc(hash_len(ipcpd.dir_hash_algo));
if (dup == NULL)
return NULL;
@@ -102,6 +185,13 @@ void ipcp_hash_str(char * buf,
buf[2 * i] = '\0';
}
+static const char * info[] = {
+ "_state",
+ "_type",
+ "_layer",
+ NULL
+};
+
static int ipcp_rib_read(const char * path,
char * buf,
size_t len)
@@ -129,18 +219,20 @@ static int ipcp_rib_read(const char * path,
}
if (strcmp(entry, info[1]) == 0) { /* _type */
- if (ipcpi.type == IPCP_LOCAL)
+ if (ipcpd.type == IPCP_LOCAL)
strcpy(buf, "local\n");
- else if (ipcpi.type == IPCP_UNICAST)
+ else if (ipcpd.type == IPCP_UNICAST)
strcpy(buf, "unicast\n");
- else if (ipcpi.type == IPCP_BROADCAST)
+ else if (ipcpd.type == IPCP_BROADCAST)
strcpy(buf, "broadcast\n");
- else if (ipcpi.type == IPCP_ETH_LLC)
+ else if (ipcpd.type == IPCP_ETH_LLC)
strcpy(buf, "eth-llc\n");
- else if (ipcpi.type == IPCP_ETH_DIX)
+ else if (ipcpd.type == IPCP_ETH_DIX)
strcpy(buf, "eth-dix\n");
- else if (ipcpi.type == IPCP_UDP)
- strcpy(buf, "udp\n");
+ else if (ipcpd.type == IPCP_UDP4)
+ strcpy(buf, "udp4\n");
+ else if (ipcpd.type == IPCP_UDP6)
+ strcpy(buf, "udp6\n");
else
strcpy(buf, "bug\n");
}
@@ -150,7 +242,7 @@ static int ipcp_rib_read(const char * path,
if (ipcp_get_state() < IPCP_OPERATIONAL)
strcpy(buf, "(null)");
else
- strcpy(buf, ipcpi.layer_name);
+ strcpy(buf, ipcpd.layer_name);
buf[strlen(buf)] = '\n';
}
@@ -162,38 +254,40 @@ static int ipcp_rib_readdir(char *** buf)
{
int i = 0;
- while (info[i] != NULL)
- i++;
+ while (info[i++] != NULL);
*buf = malloc(sizeof(**buf) * i);
if (*buf == NULL)
- goto fail;
+ goto fail_entries;
i = 0;
while (info[i] != NULL) {
(*buf)[i] = strdup(info[i]);
- if (*buf == NULL)
+ if ((*buf)[i] == NULL)
goto fail_dup;
i++;
}
return i;
fail_dup:
- while (--i > 0)
+ while (i-- > 0)
free((*buf)[i]);
- fail:
free(*buf);
-
- return -1;
+ fail_entries:
+ return -ENOMEM;
}
static int ipcp_rib_getattr(const char * path,
struct rib_attr * attr)
{
- (void) path;
+ char buf[LAYER_NAME_SIZE + 2];
+ struct timespec now;
+
+ clock_gettime(CLOCK_REALTIME_COARSE, &now);
- attr->size = LAYER_NAME_SIZE;
+ attr->size = ipcp_rib_read(path, buf, LAYER_NAME_SIZE + 2);
+ attr->mtime = now.tv_sec;
return 0;
}
@@ -206,24 +300,18 @@ static struct rib_ops r_ops = {
static void * acceptloop(void * o)
{
- int csockfd;
- struct timeval tv = {(SOCKET_TIMEOUT / 1000),
- (SOCKET_TIMEOUT % 1000) * 1000};
+ int csockfd;
(void) o;
while (ipcp_get_state() != IPCP_SHUTDOWN &&
- ipcp_get_state() != IPCP_NULL) {
+ ipcp_get_state() != IPCP_INIT) {
struct cmd * cmd;
- csockfd = accept(ipcpi.sockfd, 0, 0);
+ csockfd = accept(ipcpd.sockfd, 0, 0);
if (csockfd < 0)
continue;
- if (setsockopt(csockfd, SOL_SOCKET, SO_RCVTIMEO,
- (void *) &tv, sizeof(tv)))
- log_warn("Failed to set timeout on socket.");
-
cmd = malloc(sizeof(*cmd));
if (cmd == NULL) {
log_err("Out of memory");
@@ -248,51 +336,463 @@ static void * acceptloop(void * o)
cmd->fd = csockfd;
- pthread_mutex_lock(&ipcpi.cmd_lock);
+ pthread_mutex_lock(&ipcpd.cmd_lock);
- list_add(&cmd->next, &ipcpi.cmds);
+ list_add(&cmd->next, &ipcpd.cmds);
- pthread_cond_signal(&ipcpi.cmd_cond);
+ pthread_cond_signal(&ipcpd.cmd_cond);
- pthread_mutex_unlock(&ipcpi.cmd_lock);
+ pthread_mutex_unlock(&ipcpd.cmd_lock);
}
return (void *) 0;
}
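+
+/*
+ * Incoming flow allocation requests are serialized through
+ * ipcpd.alloc_id: ipcp_wait_flow_req_arr() waits until the previous
+ * request has been answered, passes the new request up and publishes
+ * its fd; ipcp_wait_flow_resp() waits for that fd and clears it.
+ */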
+int ipcp_wait_flow_req_arr(const uint8_t * dst,
+ qosspec_t qs,
+ time_t mpl,
+ const buffer_t * data)
+{
+ struct timespec ts = TIMESPEC_INIT_MS(ALLOC_TIMEOUT);
+ struct timespec abstime;
+ int fd;
+ buffer_t hash;
+
+ hash.data = (uint8_t *) dst;
+ hash.len = ipcp_dir_hash_len();
+
+ clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+
+ pthread_mutex_lock(&ipcpd.alloc_lock);
+
+ while (ipcpd.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) {
+ ts_add(&abstime, &ts, &abstime);
+ pthread_cond_timedwait(&ipcpd.alloc_cond,
+ &ipcpd.alloc_lock,
+ &abstime);
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+ log_err("Won't allocate over non-operational IPCP.");
+ return -EIPCPSTATE;
+ }
+
+ assert(ipcpd.alloc_id == -1);
+
+ fd = ipcp_flow_req_arr(&hash, qs, mpl, data);
+ if (fd < 0) {
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+ log_err("Failed to get fd for flow.");
+ return fd;
+ }
+
+ ipcpd.alloc_id = fd;
+ pthread_cond_broadcast(&ipcpd.alloc_cond);
+
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+
+ return fd;
+}
+
+int ipcp_wait_flow_resp(const int fd)
+{
+ struct timespec ts = TIMESPEC_INIT_MS(ALLOC_TIMEOUT);
+ struct timespec abstime;
+
+ clock_gettime(PTHREAD_COND_CLOCK, &abstime);
+
+ pthread_mutex_lock(&ipcpd.alloc_lock);
+
+ while (ipcpd.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) {
+ ts_add(&abstime, &ts, &abstime);
+ pthread_cond_timedwait(&ipcpd.alloc_cond,
+ &ipcpd.alloc_lock,
+ &abstime);
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+ return -1;
+ }
+
+ assert(ipcpd.alloc_id == fd);
+
+ ipcpd.alloc_id = -1;
+ pthread_cond_broadcast(&ipcpd.alloc_cond);
+
+ pthread_mutex_unlock(&ipcpd.alloc_lock);
+
+ return 0;
+}
+
static void free_msg(void * o)
{
ipcp_msg__free_unpacked((ipcp_msg_t *) o, NULL);
}
-static void * mainloop(void * o)
+
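+/*
+ * Handlers for the individual IPCP commands, called from mainloop()
+ * below. Each handler stores its result in ret_msg.
+ */
+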
+static void do_bootstrap(ipcp_config_msg_t * conf_msg,
+ ipcp_msg_t * ret_msg)
{
- int sfd;
- buffer_t buffer;
struct ipcp_config conf;
- struct layer_info info;
- ipcp_config_msg_t * conf_msg;
- ipcp_msg_t * msg;
+ struct layer_info * info;
+
+ log_info("Bootstrapping...");
+
+ if (ipcpd.ops->ipcp_bootstrap == NULL) {
+ log_err("Failed to Bootstrap: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_BOOT) {
+ log_err("Failed to bootstrap: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_BOOT]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ conf = ipcp_config_msg_to_s(conf_msg);
+ switch(conf.type) { /* FIXED algorithms */
+ case IPCP_UDP4:
+ /* FALLTHRU */
+ case IPCP_UDP6:
+ conf.layer_info.dir_hash_algo = (enum pol_dir_hash) HASH_MD5;
+ break;
+ case IPCP_BROADCAST:
+ conf.layer_info.dir_hash_algo = DIR_HASH_SHA3_256;
+ break;
+ default:
+ break;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_bootstrap(&conf);
+ if (ret_msg->result < 0) {
+ log_err("Failed to bootstrap IPCP.");
+ return;
+ }
+
+ info = &conf.layer_info;
+
+ strcpy(ipcpd.layer_name, info->name);
+ ipcpd.dir_hash_algo = (enum hash_algo) info->dir_hash_algo;
+ ret_msg->layer_info = layer_info_s_to_msg(info);
+ ipcp_set_state(IPCP_OPERATIONAL);
+
+ log_info("Finished bootstrapping in %s.", info->name);
+ log_info(" type: %s", ipcp_type_str[ipcpd.type]);
+ log_info(" hash: %s [%zd bytes]",
+ dir_hash_str[ipcpd.dir_hash_algo],
+ ipcp_dir_hash_len());
+}
+
+static void do_enroll(const char * dst,
+ ipcp_msg_t * ret_msg)
+{
+ struct layer_info info;
+
+ log_info("Enrolling with %s...", dst);
+
+ if (ipcpd.ops->ipcp_enroll == NULL) {
+ log_err("Failed to enroll: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_BOOT) {
+ log_err("Failed to enroll: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_BOOT]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_enroll(dst, &info);
+ if (ret_msg->result < 0) {
+ log_err("Failed to bootstrap IPCP.");
+ return;
+ }
+
+ strcpy(ipcpd.layer_name, info.name);
+ ipcpd.dir_hash_algo = (enum hash_algo) info.dir_hash_algo;
+ ret_msg->layer_info = layer_info_s_to_msg(&info);
+ ipcp_set_state(IPCP_OPERATIONAL);
+
+ log_info("Finished enrolling with %s in layer %s.", dst, info.name);
+ log_info(" type: %s", ipcp_type_str[ipcpd.type]);
+ log_info(" hash: %s [%zd bytes]",
+ dir_hash_str[ipcpd.dir_hash_algo],
+ ipcp_dir_hash_len());
+}
+
+static void do_connect(const char * dst,
+ const char * comp,
+ qosspec_t qs,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Connecting %s to %s...", comp, dst);
+
+ if (ipcpd.ops->ipcp_connect == NULL) {
+ log_err("Failed to connect: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_connect(dst, comp, qs);
+
+ log_info("Finished connecting.");
+}
+
+static void do_disconnect(const char * dst,
+ const char * comp,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Disconnecting %s from %s...", comp, dst);
+
+ if (ipcpd.ops->ipcp_disconnect == NULL) {
+ log_err("Failed to disconnect: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_disconnect(dst, comp);
+
+ log_info("Finished disconnecting %s from %s.", comp, dst);
+}
+
+static void do_reg(const uint8_t * hash,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Registering " HASH_FMT32 "...", HASH_VAL32(hash));
+
+ if (ipcpd.ops->ipcp_reg == NULL) {
+ log_err("Failed to register: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_reg(hash);
+
+ log_info("Finished registering " HASH_FMT32 ".", HASH_VAL32(hash));
+}
+
+static void do_unreg(const uint8_t * hash,
+ ipcp_msg_t * ret_msg)
+{
+ log_info("Unregistering " HASH_FMT32 "...", HASH_VAL32(hash));
+
+ if (ipcpd.ops->ipcp_unreg == NULL) {
+ log_err("Failed to unregister: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_unreg(hash);
+
+ log_info("Finished unregistering " HASH_FMT32 ".", HASH_VAL32(hash));
+}
+
+static void do_query(const uint8_t * hash,
+ ipcp_msg_t * ret_msg)
+{
+ /* TODO: Log this operation when IRMd has internal caches. */
+
+ if (ipcpd.ops->ipcp_query == NULL) {
+ log_err("Failed to query: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_dbg("Failed to query: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_query(hash);
+}
+
+static void do_flow_alloc(pid_t pid,
+ int flow_id,
+ uint8_t * dst,
+ qosspec_t qs,
+ const buffer_t * data,
+ ipcp_msg_t * ret_msg)
+{
+ int fd;
+
+ log_info("Allocating flow %d for %d to " HASH_FMT32 ".",
+ flow_id, pid, HASH_VAL32(dst));
+
+ if (ipcpd.ops->ipcp_flow_alloc == NULL) {
+ log_err("Flow allocation failed: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to enroll: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ fd = np1_flow_alloc(pid, flow_id);
+ if (fd < 0) {
+ log_err("Failed allocating n + 1 fd on flow_id %d: %d",
+ flow_id, fd);
+ ret_msg->result = -EFLOWDOWN;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_alloc(fd, dst, qs, data);
+
+ log_info("Finished allocating flow %d to " HASH_FMT32 ".",
+ flow_id, HASH_VAL32(dst));
+}
+
+static void do_flow_join(pid_t pid,
+ int flow_id,
+ const uint8_t * dst,
+ qosspec_t qs,
+ ipcp_msg_t * ret_msg)
+{
+ int fd;
+
+ log_info("Joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
+
+ if (ipcpd.ops->ipcp_flow_join == NULL) {
+ log_err("Failed to join: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to join: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ fd = np1_flow_alloc(pid, flow_id);
+ if (fd < 0) {
+ log_err("Failed allocating n + 1 fd on flow_id %d.", flow_id);
+ ret_msg->result = -1;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_join(fd, dst, qs);
+
+ log_info("Finished joining layer " HASH_FMT32 ".", HASH_VAL32(dst));
+}
+
+static void do_flow_alloc_resp(int resp,
+ int flow_id,
+ const buffer_t * data,
+ ipcp_msg_t * ret_msg)
+{
+ int fd = -1;
+
+ log_info("Responding %d to alloc on flow_id %d.", resp, flow_id);
+
+ if (ipcpd.ops->ipcp_flow_alloc_resp == NULL) {
+ log_err("Failed to respond on flow %d: operation unsupported.",
+ flow_id);
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to respond to flow %d:"
+ "IPCP in state <%s>, need <%s>.",
+ flow_id,
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ fd = np1_flow_resp(flow_id, resp);
+ if (fd < 0) {
+ log_warn("Flow_id %d is not known.", flow_id);
+ ret_msg->result = -1;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_alloc_resp(fd, resp, data);
+
+ log_info("Finished responding %d to allocation request.",
+ ret_msg->result);
+}
+
+static void do_flow_dealloc(int flow_id,
+ int timeo_sec,
+ ipcp_msg_t * ret_msg)
+{
+ int fd;
+
+ log_info("Deallocating flow %d.", flow_id);
+
+ if (ipcpd.ops->ipcp_flow_dealloc == NULL) {
+ log_err("Failed to dealloc: operation unsupported.");
+ ret_msg->result = -ENOTSUP;
+ return;
+ }
+
+ if (ipcp_get_state() != IPCP_OPERATIONAL) {
+ log_err("Failed to enroll: IPCP in state <%s>, need <%s>.",
+ ipcp_state_str[ipcp_get_state()],
+ ipcp_state_str[IPCP_OPERATIONAL]);
+ ret_msg->result = -EIPCPSTATE;
+ return;
+ }
+
+ fd = np1_flow_dealloc(flow_id, timeo_sec);
+ if (fd < 0) {
+ log_warn("Could not deallocate flow_id %d.", flow_id);
+ ret_msg->result = -1;
+ return;
+ }
+
+ ret_msg->result = ipcpd.ops->ipcp_flow_dealloc(fd);
+
+ log_info("Finished deallocating flow %d.", flow_id);
+}
+
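+/*
+ * Worker loop: take the next command off the acceptor's queue, unpack
+ * the request, dispatch it to the do_* handler for its message code and
+ * write the reply back on the command socket.
+ */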
+static void * mainloop(void * o)
+{
+ int sfd;
+ buffer_t buffer;
+ ipcp_msg_t * msg;
(void) o;
while (true) {
- ipcp_msg_t ret_msg = IPCP_MSG__INIT;
- layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT;
- int fd = -1;
- struct cmd * cmd;
- qosspec_t qs;
+ ipcp_msg_t ret_msg = IPCP_MSG__INIT;
+ qosspec_t qs;
+ struct cmd * cmd;
+ buffer_t data;
ret_msg.code = IPCP_MSG_CODE__IPCP_REPLY;
- pthread_mutex_lock(&ipcpi.cmd_lock);
+ pthread_mutex_lock(&ipcpd.cmd_lock);
- pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpi.cmd_lock);
+ pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpd.cmd_lock);
- while (list_is_empty(&ipcpi.cmds))
- pthread_cond_wait(&ipcpi.cmd_cond, &ipcpi.cmd_lock);
+ while (list_is_empty(&ipcpd.cmds))
+ pthread_cond_wait(&ipcpd.cmd_cond, &ipcpd.cmd_lock);
- cmd = list_last_entry(&ipcpi.cmds, struct cmd, next);
+ cmd = list_last_entry(&ipcpd.cmds, struct cmd, next);
list_del(&cmd->next);
pthread_cleanup_pop(true);
@@ -307,334 +807,73 @@ static void * mainloop(void * o)
continue;
}
- tpm_dec(ipcpi.tpm);
+ tpm_begin_work(ipcpd.tpm);
pthread_cleanup_push(__cleanup_close_ptr, &sfd);
pthread_cleanup_push(free_msg, msg);
+ ret_msg.has_result = true;
+
switch (msg->code) {
case IPCP_MSG_CODE__IPCP_BOOTSTRAP:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_bootstrap == NULL) {
- log_err("Bootstrap unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_INIT) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- conf_msg = msg->conf;
- conf.type = conf_msg->ipcp_type;
- strcpy(conf.layer_info.layer_name,
- conf_msg->layer_info->layer_name);
-
- switch(conf_msg->ipcp_type) {
- case IPCP_LOCAL:
- break;
- case IPCP_UNICAST:
- conf.addr_size = conf_msg->addr_size;
- conf.eid_size = conf_msg->eid_size;
- conf.max_ttl = conf_msg->max_ttl;
- conf.addr_auth_type = conf_msg->addr_auth_type;
- conf.routing_type = conf_msg->routing_type;
- conf.cong_avoid = conf_msg->cong_avoid;
- break;
- case IPCP_ETH_DIX:
- conf.ethertype = conf_msg->ethertype;
- /* FALLTHRU */
- case IPCP_ETH_LLC:
- conf.dev = conf_msg->dev;
- break;
- case IPCP_UDP:
- conf.ip_addr = conf_msg->ip_addr;
- conf.dns_addr = conf_msg->dns_addr;
- conf.port = conf_msg->port;
- conf.layer_info.dir_hash_algo = HASH_MD5;
- layer_info.dir_hash_algo = HASH_MD5;
- break;
- case IPCP_BROADCAST:
- conf.layer_info.dir_hash_algo = HASH_SHA3_256;
- layer_info.dir_hash_algo = HASH_SHA3_256;
- break;
- default:
- log_err("Unknown IPCP type: %d.",
- conf_msg->ipcp_type);
- ret_msg.result = -EIPCP;
- goto exit; /* break from outer switch/case */
- }
-
- /* UDP and broadcast use fixed hash algorithm. */
- if (conf_msg->ipcp_type != IPCP_UDP &&
- conf_msg->ipcp_type != IPCP_BROADCAST) {
- switch(conf_msg->layer_info->dir_hash_algo) {
- case DIR_HASH_SHA3_224:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_224;
- break;
- case DIR_HASH_SHA3_256:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_256;
- break;
- case DIR_HASH_SHA3_384:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_384;
- break;
- case DIR_HASH_SHA3_512:
- conf.layer_info.dir_hash_algo =
- HASH_SHA3_512;
- break;
- default:
- assert(false);
- }
-
- layer_info.dir_hash_algo =
- conf.layer_info.dir_hash_algo;
- }
-
- ret_msg.result = ipcpi.ops->ipcp_bootstrap(&conf);
- if (ret_msg.result == 0) {
- ret_msg.layer_info = &layer_info;
- layer_info.layer_name =
- conf.layer_info.layer_name;
- }
+ do_bootstrap(msg->conf, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_ENROLL:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_enroll == NULL) {
- log_err("Enroll unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_INIT) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- ret_msg.result = ipcpi.ops->ipcp_enroll(msg->dst,
- &info);
- if (ret_msg.result == 0) {
- ret_msg.layer_info = &layer_info;
- layer_info.dir_hash_algo = info.dir_hash_algo;
- layer_info.layer_name = info.layer_name;
- }
+ do_enroll(msg->dst, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_CONNECT:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_connect == NULL) {
- log_err("Connect unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- qs = msg_to_spec(msg->qosspec);
- ret_msg.result = ipcpi.ops->ipcp_connect(msg->dst,
- msg->comp,
- qs);
+ qs = qos_spec_msg_to_s(msg->qosspec);
+ do_connect(msg->dst, msg->comp, qs, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_DISCONNECT:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_disconnect == NULL) {
- log_err("Disconnect unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- ret_msg.result = ipcpi.ops->ipcp_disconnect(msg->dst,
- msg->comp);
+ do_disconnect(msg->dst, msg->comp, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_REG:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_reg == NULL) {
- log_err("Registration unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- ret_msg.result =
- ipcpi.ops->ipcp_reg(msg->hash.data);
+ do_reg(msg->hash.data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_UNREG:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_unreg == NULL) {
- log_err("Unregistration unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- ret_msg.result =
- ipcpi.ops->ipcp_unreg(msg->hash.data);
+ do_unreg(msg->hash.data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_QUERY:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_query == NULL) {
- log_err("Directory query unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_query(msg->hash.data);
+ do_query(msg->hash.data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_ALLOC:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_flow_alloc == NULL) {
- log_err("Flow allocation unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
assert(msg->pk.len > 0 ? msg->pk.data != NULL
: msg->pk.data == NULL);
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- qs = msg_to_spec(msg->qosspec);
- fd = np1_flow_alloc(msg->pid,
- msg->flow_id,
- qs);
- if (fd < 0) {
- log_err("Failed allocating fd on flow_id %d.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_alloc(fd,
- msg->hash.data,
- qs,
- msg->pk.data,
- msg->pk.len);
+ data.len = msg->pk.len;
+ data.data = msg->pk.data;
+ qs = qos_spec_msg_to_s(msg->qosspec);
+ do_flow_alloc(msg->pid, msg->flow_id,
+ msg->hash.data, qs,
+ &data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_JOIN:
- ret_msg.has_result = true;
-
- if (ipcpi.ops->ipcp_flow_join == NULL) {
- log_err("Broadcast unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
assert(msg->hash.len == ipcp_dir_hash_len());
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- qs = msg_to_spec(msg->qosspec);
- fd = np1_flow_alloc(msg->pid,
- msg->flow_id,
- qs);
- if (fd < 0) {
- log_err("Failed allocating fd on flow_id %d.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_join(fd,
- msg->hash.data,
- qs);
+ qs = qos_spec_msg_to_s(msg->qosspec);
+ do_flow_join(msg->pid, msg->flow_id,
+ msg->hash.data, qs, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP:
- ret_msg.has_result = true;
- if (ipcpi.ops->ipcp_flow_alloc_resp == NULL) {
- log_err("Flow_alloc_resp unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- if (!msg->response) {
- fd = np1_flow_resp(msg->flow_id);
- if (fd < 0) {
- log_warn("Port_id %d is not known.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
- }
-
assert(msg->pk.len > 0 ? msg->pk.data != NULL
- : msg->pk.data == NULL);
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_alloc_resp(fd,
- msg->response,
- msg->pk.data,
- msg->pk.len);
+ : msg->pk.data == NULL);
+ data.len = msg->pk.len;
+ data.data = msg->pk.data;
+ do_flow_alloc_resp(msg->response, msg->flow_id,
+ &data, &ret_msg);
break;
case IPCP_MSG_CODE__IPCP_FLOW_DEALLOC:
- ret_msg.has_result = true;
- if (ipcpi.ops->ipcp_flow_dealloc == NULL) {
- log_err("Flow deallocation unsupported.");
- ret_msg.result = -ENOTSUP;
- break;
- }
-
- if (ipcp_get_state() != IPCP_OPERATIONAL) {
- log_err("IPCP in wrong state.");
- ret_msg.result = -EIPCPSTATE;
- break;
- }
-
- fd = np1_flow_dealloc(msg->flow_id, msg->timeo_sec);
- if (fd < 0) {
- log_warn("Could not deallocate flow_id %d.",
- msg->flow_id);
- ret_msg.result = -1;
- break;
- }
-
- ret_msg.result =
- ipcpi.ops->ipcp_flow_dealloc(fd);
+ do_flow_dealloc(msg->flow_id, msg->timeo_sec, &ret_msg);
break;
default:
- ret_msg.has_result = true;
- ret_msg.result = -1;
- log_err("Don't know that message code");
+ ret_msg.result = -1;
+ log_err("Unknown message code: %d.", msg->code);
break;
}
- exit:
+
pthread_cleanup_pop(true);
pthread_cleanup_pop(false);
@@ -642,7 +881,7 @@ static void * mainloop(void * o)
if (buffer.len == 0) {
log_err("Failed to pack reply message");
close(sfd);
- tpm_inc(ipcpi.tpm);
+ tpm_end_work(ipcpd.tpm);
continue;
}
@@ -650,21 +889,25 @@ static void * mainloop(void * o)
if (buffer.data == NULL) {
log_err("Failed to create reply buffer.");
close(sfd);
- tpm_inc(ipcpi.tpm);
+ tpm_end_work(ipcpd.tpm);
continue;
}
ipcp_msg__pack(&ret_msg, buffer.data);
+ if (ret_msg.layer_info != NULL)
+ layer_info_msg__free_unpacked(ret_msg.layer_info, NULL);
+
+ pthread_cleanup_push(free, buffer.data);
pthread_cleanup_push(__cleanup_close_ptr, &sfd);
if (write(sfd, buffer.data, buffer.len) == -1)
log_warn("Failed to send reply message");
- free(buffer.data);
- pthread_cleanup_pop(true);
+ pthread_cleanup_pop(true); /* close sfd */
+ pthread_cleanup_pop(true); /* free buffer.data */
- tpm_inc(ipcpi.tpm);
+ tpm_end_work(ipcpd.tpm);
}
return (void *) 0;
@@ -683,10 +926,10 @@ static int parse_args(int argc,
if (atoi(argv[1]) == 0)
return -1;
- ipcpi.irmd_pid = atoi(argv[1]);
+ ipcpd.irmd_pid = atoi(argv[1]);
/* argument 2: IPCP name */
- ipcpi.name = argv[2];
+ ipcpd.name = argv[2];
/* argument 3: syslog */
if (argv[3] != NULL)
@@ -702,149 +945,174 @@ int ipcp_init(int argc,
{
bool log;
pthread_condattr_t cattr;
- int ret = -1;
if (parse_args(argc, argv, &log))
return -1;
log_init(log);
- ipcpi.irmd_fd = -1;
- ipcpi.state = IPCP_NULL;
- ipcpi.type = type;
+ ipcpd.type = type;
#if defined (__linux__)
prctl(PR_SET_TIMERSLACK, IPCP_LINUX_SLACK_NS, 0, 0, 0);
#endif
- ipcpi.sock_path = ipcp_sock_path(getpid());
- if (ipcpi.sock_path == NULL)
+ ipcpd.sock_path = sock_path(getpid(), IPCP_SOCK_PATH_PREFIX);
+ if (ipcpd.sock_path == NULL)
goto fail_sock_path;
- ipcpi.sockfd = server_socket_open(ipcpi.sock_path);
- if (ipcpi.sockfd < 0) {
- log_err("Could not open server socket.");
+ ipcpd.sockfd = server_socket_open(ipcpd.sock_path);
+ if (ipcpd.sockfd < 0) {
+ log_err("Failed to open server socket at %s.",
+ ipcpd.sock_path);
goto fail_serv_sock;
}
- ipcpi.ops = ops;
+ ipcpd.ops = ops;
- if (pthread_mutex_init(&ipcpi.state_mtx, NULL)) {
- log_err("Could not create mutex.");
+ if (pthread_mutex_init(&ipcpd.state_mtx, NULL)) {
+ log_err("Failed to create mutex.");
goto fail_state_mtx;
}
if (pthread_condattr_init(&cattr)) {
- log_err("Could not create condattr.");
+ log_err("Failed to create condattr.");
goto fail_cond_attr;
}
#ifndef __APPLE__
pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK);
#endif
- if (pthread_cond_init(&ipcpi.state_cond, &cattr)) {
- log_err("Could not init condvar.");
+ if (pthread_cond_init(&ipcpd.state_cond, &cattr)) {
+ log_err("Failed to init condvar.");
goto fail_state_cond;
}
- if (pthread_mutex_init(&ipcpi.alloc_lock, NULL)) {
+ if (pthread_mutex_init(&ipcpd.alloc_lock, NULL)) {
log_err("Failed to init mutex.");
goto fail_alloc_lock;
}
- if (pthread_cond_init(&ipcpi.alloc_cond, &cattr)) {
+ if (pthread_cond_init(&ipcpd.alloc_cond, &cattr)) {
log_err("Failed to init convar.");
goto fail_alloc_cond;
}
- if (pthread_mutex_init(&ipcpi.cmd_lock, NULL)) {
+ if (pthread_mutex_init(&ipcpd.cmd_lock, NULL)) {
log_err("Failed to init mutex.");
goto fail_cmd_lock;
}
- if (pthread_cond_init(&ipcpi.cmd_cond, &cattr)) {
+ if (pthread_cond_init(&ipcpd.cmd_cond, &cattr)) {
log_err("Failed to init convar.");
goto fail_cmd_cond;
}
- if (rib_init(ipcpi.name)) {
+ if (rib_init(ipcpd.name)) {
log_err("Failed to initialize RIB.");
goto fail_rib_init;
}
- list_head_init(&ipcpi.cmds);
+ if (rib_reg(IPCP_INFO, &r_ops)) {
+ log_err("Failed to register rib.");
+ goto fail_rib_reg;
+ }
- ipcpi.alloc_id = -1;
+ list_head_init(&ipcpd.cmds);
+
+ ipcpd.tpm = tpm_create(IPCP_MIN_THREADS, IPCP_ADD_THREADS,
+ mainloop, NULL);
+ if (ipcpd.tpm == NULL) {
+ log_err("Failed to create threadpool manager.");
+ goto fail_tpm_create;
+ }
+
+ ipcpd.alloc_id = -1;
pthread_condattr_destroy(&cattr);
+ ipcp_set_state(IPCP_INIT);
+
+ log_info("IPCP %s %d initialized.", ipcp_type_str[ipcpd.type],
+ getpid());
+
return 0;
+ fail_tpm_create:
+ rib_unreg(IPCP_INFO);
+ fail_rib_reg:
+ rib_fini();
fail_rib_init:
- pthread_cond_destroy(&ipcpi.cmd_cond);
+ pthread_cond_destroy(&ipcpd.cmd_cond);
fail_cmd_cond:
- pthread_mutex_destroy(&ipcpi.cmd_lock);
+ pthread_mutex_destroy(&ipcpd.cmd_lock);
fail_cmd_lock:
- pthread_cond_destroy(&ipcpi.alloc_cond);
+ pthread_cond_destroy(&ipcpd.alloc_cond);
fail_alloc_cond:
- pthread_mutex_destroy(&ipcpi.alloc_lock);
+ pthread_mutex_destroy(&ipcpd.alloc_lock);
fail_alloc_lock:
- pthread_cond_destroy(&ipcpi.state_cond);
+ pthread_cond_destroy(&ipcpd.state_cond);
fail_state_cond:
pthread_condattr_destroy(&cattr);
fail_cond_attr:
- pthread_mutex_destroy(&ipcpi.state_mtx);
+ pthread_mutex_destroy(&ipcpd.state_mtx);
fail_state_mtx:
- close(ipcpi.sockfd);
+ close(ipcpd.sockfd);
fail_serv_sock:
- free(ipcpi.sock_path);
+ free(ipcpd.sock_path);
fail_sock_path:
- return ret;
+ return -1;
}
-int ipcp_boot()
+int ipcp_start(void)
{
- sigset_t sigset;
+ sigset_t sigset;
+ struct ipcp_info info;
+
sigemptyset(&sigset);
sigaddset(&sigset, SIGINT);
sigaddset(&sigset, SIGQUIT);
sigaddset(&sigset, SIGHUP);
sigaddset(&sigset, SIGPIPE);
- ipcpi.tpm = tpm_create(IPCP_MIN_THREADS, IPCP_ADD_THREADS,
- mainloop, NULL);
- if (ipcpi.tpm == NULL)
- goto fail_tpm_create;
-
pthread_sigmask(SIG_BLOCK, &sigset, NULL);
- if (tpm_start(ipcpi.tpm))
- goto fail_tpm_start;
+ info.pid = getpid();
+ info.type = ipcpd.type;
+ strcpy(info.name, ipcpd.name);
+ info.state = IPCP_BOOT;
- ipcp_set_state(IPCP_INIT);
+ ipcp_set_state(IPCP_BOOT);
- if (rib_reg(IPCP_INFO, &r_ops))
- goto fail_rib_reg;
+ if (tpm_start(ipcpd.tpm)) {
+ log_err("Failed to start threadpool manager.");
+ goto fail_tpm_start;
+ }
- if (pthread_create(&ipcpi.acceptor, NULL, acceptloop, NULL)) {
+ if (pthread_create(&ipcpd.acceptor, NULL, acceptloop, NULL)) {
log_err("Failed to create acceptor thread.");
- ipcp_set_state(IPCP_NULL);
goto fail_acceptor;
}
- return 0;
+ if (ipcp_create_r(&info)) {
+ log_err("Failed to notify IRMd we are initialized.");
+ goto fail_create_r;
+ }
+ return 0;
+ fail_create_r:
+ pthread_cancel(ipcpd.acceptor);
+ pthread_join(ipcpd.acceptor, NULL);
fail_acceptor:
- rib_unreg(IPCP_INFO);
- fail_rib_reg:
- tpm_stop(ipcpi.tpm);
+ tpm_stop(ipcpd.tpm);
fail_tpm_start:
- tpm_destroy(ipcpi.tpm);
- fail_tpm_create:
+ tpm_destroy(ipcpd.tpm);
+ ipcp_set_state(IPCP_INIT);
+ ipcp_create_r(&info);
return -1;
}
-void ipcp_shutdown()
+void ipcp_sigwait(void)
{
siginfo_t info;
@@ -859,7 +1127,7 @@ void ipcp_shutdown()
sigaddset(&sigset, SIGTERM);
sigaddset(&sigset, SIGPIPE);
- while(ipcp_get_state() != IPCP_NULL &&
+ while(ipcp_get_state() != IPCP_INIT &&
ipcp_get_state() != IPCP_SHUTDOWN) {
#ifdef __APPLE__
if (sigwait(&sigset, &sig) < 0) {
@@ -873,7 +1141,7 @@ void ipcp_shutdown()
#ifdef __APPLE__
memset(&info, 0, sizeof(info));
info.si_signo = sig;
- info.si_pid = ipcpi.irmd_pid;
+ info.si_pid = ipcpd.irmd_pid;
#endif
switch(info.si_signo) {
case SIGINT:
@@ -883,9 +1151,9 @@ void ipcp_shutdown()
case SIGHUP:
/* FALLTHRU */
case SIGQUIT:
- if (info.si_pid == ipcpi.irmd_pid) {
- if (ipcp_get_state() == IPCP_INIT)
- ipcp_set_state(IPCP_NULL);
+ if (info.si_pid == ipcpd.irmd_pid) {
+ if (ipcp_get_state() == IPCP_BOOT)
+ ipcp_set_state(IPCP_INIT);
if (ipcp_get_state() == IPCP_OPERATIONAL)
ipcp_set_state(IPCP_SHUTDOWN);
@@ -898,93 +1166,72 @@ void ipcp_shutdown()
continue;
}
}
+}
- pthread_cancel(ipcpi.acceptor);
+void ipcp_stop(void)
+{
+ log_info("IPCP %d shutting down.", getpid());
- pthread_join(ipcpi.acceptor, NULL);
- tpm_stop(ipcpi.tpm);
- tpm_destroy(ipcpi.tpm);
+ pthread_cancel(ipcpd.acceptor);
+ pthread_join(ipcpd.acceptor, NULL);
- log_info("IPCP %d shutting down.", getpid());
+ tpm_stop(ipcpd.tpm);
+
+ ipcp_set_state(IPCP_INIT);
}
-void ipcp_fini()
+void ipcp_fini(void)
{
+ tpm_destroy(ipcpd.tpm);
+
+ rib_unreg(IPCP_INFO);
+
rib_fini();
- close(ipcpi.sockfd);
- if (unlink(ipcpi.sock_path))
- log_warn("Could not unlink %s.", ipcpi.sock_path);
+ close(ipcpd.sockfd);
+ if (unlink(ipcpd.sock_path))
+ log_warn("Could not unlink %s.", ipcpd.sock_path);
- free(ipcpi.sock_path);
+ free(ipcpd.sock_path);
- pthread_cond_destroy(&ipcpi.state_cond);
- pthread_mutex_destroy(&ipcpi.state_mtx);
- pthread_cond_destroy(&ipcpi.alloc_cond);
- pthread_mutex_destroy(&ipcpi.alloc_lock);
- pthread_cond_destroy(&ipcpi.cmd_cond);
- pthread_mutex_destroy(&ipcpi.cmd_lock);
+ pthread_cond_destroy(&ipcpd.state_cond);
+ pthread_mutex_destroy(&ipcpd.state_mtx);
+ pthread_cond_destroy(&ipcpd.alloc_cond);
+ pthread_mutex_destroy(&ipcpd.alloc_lock);
+ pthread_cond_destroy(&ipcpd.cmd_cond);
+ pthread_mutex_destroy(&ipcpd.cmd_lock);
log_info("IPCP %d out.", getpid());
log_fini();
+
+ ipcpd.state = IPCP_NULL;
}
void ipcp_set_state(enum ipcp_state state)
{
- pthread_mutex_lock(&ipcpi.state_mtx);
+ pthread_mutex_lock(&ipcpd.state_mtx);
- ipcpi.state = state;
+ ipcpd.state = state;
- pthread_cond_broadcast(&ipcpi.state_cond);
- pthread_mutex_unlock(&ipcpi.state_mtx);
+ pthread_cond_broadcast(&ipcpd.state_cond);
+ pthread_mutex_unlock(&ipcpd.state_mtx);
}
-enum ipcp_state ipcp_get_state()
+enum ipcp_state ipcp_get_state(void)
{
enum ipcp_state state;
- pthread_mutex_lock(&ipcpi.state_mtx);
+ pthread_mutex_lock(&ipcpd.state_mtx);
- state = ipcpi.state;
+ state = ipcpd.state;
- pthread_mutex_unlock(&ipcpi.state_mtx);
+ pthread_mutex_unlock(&ipcpd.state_mtx);
return state;
}
-int ipcp_wait_state(enum ipcp_state state,
- const struct timespec * timeout)
-{
- struct timespec abstime;
- int ret = 0;
-
- clock_gettime(PTHREAD_COND_CLOCK, &abstime);
- ts_add(&abstime, timeout, &abstime);
-
- pthread_mutex_lock(&ipcpi.state_mtx);
-
- pthread_cleanup_push(__cleanup_mutex_unlock, &ipcpi.state_mtx);
-
- while (ipcpi.state != state
- && ipcpi.state != IPCP_SHUTDOWN
- && ipcpi.state != IPCP_NULL
- && ret != -ETIMEDOUT) {
- if (timeout == NULL)
- ret = -pthread_cond_wait(&ipcpi.state_cond,
- &ipcpi.state_mtx);
- else
- ret = -pthread_cond_timedwait(&ipcpi.state_cond,
- &ipcpi.state_mtx,
- &abstime);
- }
-
- pthread_cleanup_pop(true);
-
- return ret;
-}
-
void ipcp_lock_to_core(void)
{
#if defined(__linux__) && !defined(DISABLE_CORE_LOCK)