| author | Dimitri Staessens <dimitri@ouroboros.rocks> | 2020-02-24 22:30:22 +0100 |
|---|---|---|
| committer | Sander Vrijders <sander@ouroboros.rocks> | 2020-02-25 08:21:09 +0100 |
| commit | fe6b60909d455abdac7885ceaba1097749e7aeb1 (patch) | |
| tree | ffd3f3c6a2f15a1af5b393d8a5c068304b29a636 /src | |
| parent | 11fbe2f998a39ca156e2c806fd78f2af781836a4 (diff) | |
| download | ouroboros-fe6b60909d455abdac7885ceaba1097749e7aeb1.tar.gz, ouroboros-fe6b60909d455abdac7885ceaba1097749e7aeb1.zip | |
lib, ipcpd: piggyback ECDHE on flow allocation
The initial implementation of the ECDHE key exchange performed the
exchange only after a flow was established. The public keys are now
sent along with the flow allocation messages, so that an encrypted
tunnel can be created within 1 RTT. The flow allocation steps had to
be extended to pass the opaque data ('piggybacking').
Signed-off-by: Dimitri Staessens <dimitri@ouroboros.rocks>
Signed-off-by: Sander Vrijders <sander@ouroboros.rocks>
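For context, the piggybacked payload on the requesting side is simply a DER-encoded ECDHE public key. The sketch below shows how such a key pair can be generated and its public half serialized with OpenSSL, in the spirit of `__openssl_ecdh_gen_key()` / `openssl_ecdh_pkp_create()` in `src/lib/crypt.c`; the helper name `ecdhe_pkp_create` and the P-256 curve choice are illustrative assumptions, not the patch's exact code. The matching derivation step is sketched after the diffstat.

```c
#include <sys/types.h>          /* ssize_t */
#include <stdint.h>
#include <openssl/evp.h>
#include <openssl/obj_mac.h>    /* NID_X9_62_prime256v1 */
#include <openssl/x509.h>       /* i2d_PUBKEY */

/* Illustrative helper (not the patch's API): generate an ECDHE key pair
 * and DER-encode the public half into pk. Returns the encoded length,
 * or -1 on error. The caller keeps *kp to derive the shared secret once
 * the peer's piggybacked key arrives. */
static ssize_t ecdhe_pkp_create(EVP_PKEY ** kp,
                                uint8_t *   pk,
                                size_t      pk_sz)
{
        EVP_PKEY_CTX * kctx;
        uint8_t *      pos = pk; /* i2d_PUBKEY advances the pointer */
        ssize_t        len = -1;

        *kp = NULL;

        /* Assumption: P-256; the patch selects its curve elsewhere. */
        kctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL);
        if (kctx == NULL)
                return -1;

        if (EVP_PKEY_keygen_init(kctx) != 1 ||
            EVP_PKEY_CTX_set_ec_paramgen_curve_nid(kctx,
                    NID_X9_62_prime256v1) != 1 ||
            EVP_PKEY_keygen(kctx, kp) != 1)
                goto out;

        len = i2d_PUBKEY(*kp, NULL);      /* size query */
        if (len < 0 || (size_t) len > pk_sz) {
                EVP_PKEY_free(*kp);
                *kp  = NULL;
                len = -1;
                goto out;
        }

        i2d_PUBKEY(*kp, &pos);            /* encode into pk */
 out:
        EVP_PKEY_CTX_free(kctx);
        return len;
}
```

The encoded key/length pair is what ends up in the new `pk` field of the flow allocation message (`msg.pk.data` / `msg.pk.len` in `src/lib/dev.c`).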
Diffstat (limited to 'src')
| -rw-r--r-- | src/ipcpd/broadcast/main.c | 2 |
| -rw-r--r-- | src/ipcpd/eth/eth.c | 93 |
| -rw-r--r-- | src/ipcpd/ipcp.c | 15 |
| -rw-r--r-- | src/ipcpd/ipcp.h | 10 |
| -rw-r--r-- | src/ipcpd/local/main.c | 15 |
| -rw-r--r-- | src/ipcpd/udp/main.c | 92 |
| -rw-r--r-- | src/ipcpd/unicast/fa.c | 60 |
| -rw-r--r-- | src/ipcpd/unicast/fa.h | 12 |
| -rw-r--r-- | src/irmd/ipcp.c | 36 |
| -rw-r--r-- | src/irmd/ipcp.h | 14 |
| -rw-r--r-- | src/irmd/irm_flow.c | 4 |
| -rw-r--r-- | src/irmd/irm_flow.h | 2 |
| -rw-r--r-- | src/irmd/main.c | 99 |
| -rw-r--r-- | src/lib/crypt.c | 197 |
| -rw-r--r-- | src/lib/dev.c | 252 |
| -rw-r--r-- | src/lib/frct.c | 2 |
| -rw-r--r-- | src/lib/ipcpd_messages.proto | 13 |
| -rw-r--r-- | src/lib/irmd_messages.proto | 3 |
| -rw-r--r-- | src/tools/oping/oping.c | 2 |
| -rw-r--r-- | src/tools/oping/oping_server.c | 2 |
20 files changed, 555 insertions, 370 deletions
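On receipt of the peer's piggybacked key (in the request at the responder, or in the reply at the initiator), the key is decoded and the symmetric secret derived; `openssl_ecdh_derive()` in `src/lib/crypt.c` follows this pattern. A minimal sketch, assuming a 32-byte (SYMMKEYSZ) secret buffer and the illustrative `ecdhe_derive` name:

```c
#include <stddef.h>
#include <stdint.h>
#include <openssl/evp.h>
#include <openssl/x509.h>       /* d2i_PUBKEY */

/* Illustrative helper: decode the peer's DER-encoded public key (as
 * carried in the pk field of the allocation message) and derive the
 * shared secret into s (s_sz bytes, 32 for P-256). */
static int ecdhe_derive(EVP_PKEY *      kp,
                        const uint8_t * pk,
                        size_t          len,
                        uint8_t *       s,
                        size_t          s_sz)
{
        const uint8_t * pos = pk; /* d2i_PUBKEY advances the pointer */
        EVP_PKEY *      pub;
        EVP_PKEY_CTX *  ctx;
        int             ret = -1;

        pub = d2i_PUBKEY(NULL, &pos, (long) len);
        if (pub == NULL)
                return -1;

        ctx = EVP_PKEY_CTX_new(kp, NULL);
        if (ctx == NULL) {
                EVP_PKEY_free(pub);
                return -1;
        }

        if (EVP_PKEY_derive_init(ctx) == 1 &&
            EVP_PKEY_derive_set_peer(ctx, pub) == 1 &&
            EVP_PKEY_derive(ctx, s, &s_sz) == 1)
                ret = 0;

        EVP_PKEY_CTX_free(ctx);
        EVP_PKEY_free(pub);

        return ret;
}
```

Because both directions of key material travel inside the allocation request and reply, no extra round trip is needed before the flow can be used encrypted.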
diff --git a/src/ipcpd/broadcast/main.c b/src/ipcpd/broadcast/main.c index 05338ee4..120b2bf1 100644 --- a/src/ipcpd/broadcast/main.c +++ b/src/ipcpd/broadcast/main.c @@ -235,7 +235,7 @@ static int broadcast_ipcp_join(int fd, notifier_event(NOTIFY_DT_CONN_ADD, &conn); - ipcp_flow_alloc_reply(fd, 0); + ipcp_flow_alloc_reply(fd, 0, NULL, 0); return 0; } diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c index 0c9daff4..6b17912b 100644 --- a/src/ipcpd/eth/eth.c +++ b/src/ipcpd/eth/eth.c @@ -138,7 +138,7 @@ #define ALLOC_TIMEO 10 /* ms */ #define NAME_QUERY_TIMEO 2000 /* ms */ #define MGMT_TIMEO 100 /* ms */ -#define MGMT_FRAME_SIZE 512 +#define MGMT_FRAME_SIZE 2048 #define FLOW_REQ 0 #define FLOW_REPLY 1 @@ -201,6 +201,7 @@ struct mgmt_frame { struct list_head next; uint8_t r_addr[MAC_SIZE]; uint8_t buf[MGMT_FRAME_SIZE]; + size_t len; }; struct { @@ -458,7 +459,9 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, uint8_t ssap, #endif const uint8_t * hash, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t dlen) { uint8_t * buf; struct mgmt_msg * msg; @@ -467,7 +470,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, len = sizeof(*msg) + ipcp_dir_hash_len(); - buf = malloc(len + ETH_HEADER_TOT_SIZE); + buf = malloc(len + ETH_HEADER_TOT_SIZE + dlen); if (buf == NULL) return -1; @@ -489,6 +492,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, msg->cypher_s = hton16(qs.cypher_s); memcpy(msg + 1, hash, ipcp_dir_hash_len()); + memcpy(buf + len + ETH_HEADER_TOT_SIZE, data, dlen); ret = eth_ipcp_send_frame(dst_addr, #if defined(BUILD_ETH_DIX) @@ -497,26 +501,28 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, reverse_bits(MGMT_SAP), reverse_bits(MGMT_SAP), #endif - buf, len); + buf, len + dlen); free(buf); return ret; } -static int eth_ipcp_alloc_resp(uint8_t * dst_addr, +static int eth_ipcp_alloc_resp(uint8_t * dst_addr, #if defined(BUILD_ETH_DIX) - uint16_t seid, - uint16_t deid, + uint16_t seid, + uint16_t deid, #elif defined(BUILD_ETH_LLC) - uint8_t ssap, - uint8_t dsap, + uint8_t ssap, + uint8_t dsap, #endif - int response) + int response, + const void * data, + size_t len) { struct mgmt_msg * msg; uint8_t * buf; - buf = malloc(sizeof(*msg) + ETH_HEADER_TOT_SIZE); + buf = malloc(sizeof(*msg) + ETH_HEADER_TOT_SIZE + len); if (buf == NULL) return -1; @@ -532,6 +538,8 @@ static int eth_ipcp_alloc_resp(uint8_t * dst_addr, #endif msg->response = response; + memcpy(msg + 1, data, len); + if (eth_ipcp_send_frame(dst_addr, #if defined(BUILD_ETH_DIX) MGMT_EID, @@ -539,7 +547,7 @@ static int eth_ipcp_alloc_resp(uint8_t * dst_addr, reverse_bits(MGMT_SAP), reverse_bits(MGMT_SAP), #endif - buf, sizeof(*msg))) { + buf, sizeof(*msg) + len)) { free(buf); return -1; } @@ -556,7 +564,9 @@ static int eth_ipcp_req(uint8_t * r_addr, uint8_t r_sap, #endif const uint8_t * dst, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t len) { struct timespec ts = {0, ALLOC_TIMEO * MILLION}; struct timespec abstime; @@ -580,7 +590,7 @@ static int eth_ipcp_req(uint8_t * r_addr, } /* reply to IRM, called under lock to prevent race */ - fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs); + fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -607,19 +617,20 @@ static int eth_ipcp_req(uint8_t * r_addr, #elif defined(BUILD_ETH_LLC) log_dbg("New flow request, fd %d, remote SAP %d.", fd, r_sap); #endif - return 0; } -static int eth_ipcp_alloc_reply(uint8_t * r_addr, 
+static int eth_ipcp_alloc_reply(uint8_t * r_addr, #if defined(BUILD_ETH_DIX) - uint16_t seid, - uint16_t deid, + uint16_t seid, + uint16_t deid, #elif defined(BUILD_ETH_LLC) - uint8_t ssap, - int dsap, + uint8_t ssap, + int dsap, #endif - int response) + int response, + const void * data, + size_t len) { int ret = 0; int fd = -1; @@ -657,7 +668,7 @@ static int eth_ipcp_alloc_reply(uint8_t * r_addr, #elif defined(BUILD_ETH_LLC) log_dbg("Flow reply, fd %d, SSAP %d, DSAP %d.", fd, ssap, dsap); #endif - if ((ret = ipcp_flow_alloc_reply(fd, response)) < 0) + if ((ret = ipcp_flow_alloc_reply(fd, response, data, len)) < 0) return -1; return ret; @@ -717,15 +728,21 @@ static int eth_ipcp_name_query_reply(const uint8_t * hash, } static int eth_ipcp_mgmt_frame(const uint8_t * buf, + size_t len, uint8_t * r_addr) { struct mgmt_msg * msg; + size_t msg_len; qosspec_t qs; msg = (struct mgmt_msg *) buf; switch (msg->code) { case FLOW_REQ: + msg_len = sizeof(*msg) + ipcp_dir_hash_len(); + + assert(len >= msg_len); + qs.delay = ntoh32(msg->delay); qs.bandwidth = ntoh64(msg->bandwidth); qs.availability = msg->availability; @@ -744,10 +761,14 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf, msg->ssap, #endif buf + sizeof(*msg), - qs); + qs, + buf + msg_len, + len - msg_len); } break; case FLOW_REPLY: + assert(len >= sizeof(*msg)); + eth_ipcp_alloc_reply(r_addr, #if defined(BUILD_ETH_DIX) ntohs(msg->seid), @@ -756,7 +777,9 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf, msg->ssap, msg->dsap, #endif - msg->response); + msg->response, + buf + sizeof(*msg), + len - sizeof(*msg)); break; case NAME_QUERY_REQ: eth_ipcp_name_query_req(buf + sizeof(*msg), r_addr); @@ -814,7 +837,7 @@ static void * eth_ipcp_mgmt_handler(void * o) list_del(&frame->next); pthread_mutex_unlock(ð_data.mgmt_lock); - eth_ipcp_mgmt_frame(frame->buf, frame->r_addr); + eth_ipcp_mgmt_frame(frame->buf, frame->len, frame->r_addr); free(frame); } @@ -951,6 +974,7 @@ static void * eth_ipcp_packet_reader(void * o) memcpy(frame->buf, &e_frame->payload, length); memcpy(frame->r_addr, e_frame->src_hwaddr, MAC_SIZE); + frame->len = length; pthread_mutex_lock(ð_data.mgmt_lock); list_add(&frame->next, ð_data.mgmt_frames); @@ -1613,7 +1637,9 @@ static int eth_ipcp_query(const uint8_t * hash) static int eth_ipcp_flow_alloc(int fd, const uint8_t * hash, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t len) { #ifdef BUILD_ETH_LLC uint8_t ssap = 0; @@ -1652,7 +1678,10 @@ static int eth_ipcp_flow_alloc(int fd, #elif defined(BUILD_ETH_LLC) ssap, #endif - hash, qs) < 0) { + hash, + qs, + data, + len) < 0) { #ifdef BUILD_ETH_LLC pthread_rwlock_wrlock(ð_data.flows_lock); bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); @@ -1672,8 +1701,10 @@ static int eth_ipcp_flow_alloc(int fd, return 0; } -static int eth_ipcp_flow_alloc_resp(int fd, - int response) +static int eth_ipcp_flow_alloc_resp(int fd, + int response, + const void * data, + size_t len) { struct timespec ts = {0, ALLOC_TIMEO * MILLION}; struct timespec abstime; @@ -1730,7 +1761,9 @@ static int eth_ipcp_flow_alloc_resp(int fd, #elif defined(BUILD_ETH_LLC) ssap, r_sap, #endif - response) < 0) { + response, + data, + len) < 0) { #ifdef BUILD_ETH_LLC pthread_rwlock_wrlock(ð_data.flows_lock); bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c index 8f9fcd7d..95d2f783 100644 --- a/src/ipcpd/ipcp.c +++ b/src/ipcpd/ipcp.c @@ -408,6 +408,8 @@ static void * mainloop(void * o) } assert(msg->hash.len == ipcp_dir_hash_len()); + 
assert(msg->pk.len > 0 ? msg->pk.data != NULL + : msg->pk.data == NULL); if (ipcp_get_state() != IPCP_OPERATIONAL) { log_err("IPCP in wrong state."); @@ -429,7 +431,9 @@ static void * mainloop(void * o) ret_msg.result = ipcpi.ops->ipcp_flow_alloc(fd, msg->hash.data, - qs); + qs, + msg->pk.data, + msg->pk.len); break; case IPCP_MSG_CODE__IPCP_FLOW_JOIN: ret_msg.has_result = true; @@ -488,9 +492,14 @@ static void * mainloop(void * o) } } + assert(msg->pk.len > 0 ? msg->pk.data != NULL + : msg->pk.data == NULL); + ret_msg.result = ipcpi.ops->ipcp_flow_alloc_resp(fd, - msg->response); + msg->response, + msg->pk.data, + msg->pk.len); break; case IPCP_MSG_CODE__IPCP_FLOW_DEALLOC: ret_msg.has_result = true; @@ -568,7 +577,7 @@ static int parse_args(int argc, if (!(argc == 4 || argc == 3)) return -1; - /* argument 1: pid of irmd */ + /* argument 1: pid of irm */ if (atoi(argv[1]) == 0) return -1; diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h index 6afac3ea..02c74f50 100644 --- a/src/ipcpd/ipcp.h +++ b/src/ipcpd/ipcp.h @@ -60,14 +60,18 @@ struct ipcp_ops { int (* ipcp_flow_alloc)(int fd, const uint8_t * dst, - qosspec_t qs); + qosspec_t qs, + const void * data, + size_t len); int (* ipcp_flow_join)(int fd, const uint8_t * dst, qosspec_t qs); - int (* ipcp_flow_alloc_resp)(int fd, - int response); + int (* ipcp_flow_alloc_resp)(int fd, + int response, + const void * data, + size_t len); int (* ipcp_flow_dealloc)(int fd); }; diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c index 009a3fde..a2e20017 100644 --- a/src/ipcpd/local/main.c +++ b/src/ipcpd/local/main.c @@ -186,14 +186,15 @@ static int ipcp_local_query(const uint8_t * hash) static int ipcp_local_flow_alloc(int fd, const uint8_t * dst, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t len) { struct timespec ts = {0, ALLOC_TIMEOUT * MILLION}; struct timespec abstime; int out_fd = -1; log_dbg("Allocating flow to " HASH_FMT " on fd %d.", HASH_VAL(dst), fd); - assert(dst); clock_gettime(PTHREAD_COND_CLOCK, &abstime); @@ -215,7 +216,7 @@ static int ipcp_local_flow_alloc(int fd, assert(ipcpi.alloc_id == -1); - out_fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs); + out_fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len); if (out_fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_dbg("Flow allocation failed: %d", out_fd); @@ -241,8 +242,10 @@ static int ipcp_local_flow_alloc(int fd, return 0; } -static int ipcp_local_flow_alloc_resp(int fd, - int response) +static int ipcp_local_flow_alloc_resp(int fd, + int response, + const void * data, + size_t len) { struct timespec ts = {0, ALLOC_TIMEOUT * MILLION}; struct timespec abstime; @@ -290,7 +293,7 @@ static int ipcp_local_flow_alloc_resp(int fd, fset_add(local_data.flows, fd); - if ((ret = ipcp_flow_alloc_reply(out_fd, response)) < 0) + if ((ret = ipcp_flow_alloc_reply(out_fd, response, data, len)) < 0) return -1; log_info("Flow allocation completed, fds (%d, %d).", out_fd, fd); diff --git a/src/ipcpd/udp/main.c b/src/ipcpd/udp/main.c index 1f0bebf0..04c21a8b 100644 --- a/src/ipcpd/udp/main.c +++ b/src/ipcpd/udp/main.c @@ -62,8 +62,8 @@ #define IPCP_UDP_MAX_PACKET_SIZE 8980 #define OUR_HEADER_LEN sizeof(uint32_t) /* adds eid */ -#define IPCP_UDP_BUF_SIZE 256 -#define IPCP_UDP_MSG_SIZE 256 +#define IPCP_UDP_BUF_SIZE 8980 +#define IPCP_UDP_MSG_SIZE 8980 #define DNS_TTL 86400 #define FD_UPDATE_TIMEOUT 100 /* microseconds */ @@ -74,7 +74,8 @@ #define LOCAL_IP (udp_data.s_saddr.sin_addr.s_addr) #define MGMT_EID 0 -#define MGMT_FRAME_SIZE 512 +#define 
MGMT_FRAME_SIZE (sizeof(struct mgmt_msg)) +#define MGMT_FRAME_BUF_SIZE 2048 /* Keep order for alignment. */ struct mgmt_msg { @@ -97,7 +98,8 @@ struct mgmt_msg { struct mgmt_frame { struct list_head next; struct sockaddr_in r_saddr; - uint8_t buf[MGMT_FRAME_SIZE]; + uint8_t buf[MGMT_FRAME_BUF_SIZE]; + size_t len; }; /* UDP flow */ @@ -186,15 +188,19 @@ static void udp_data_fini(void) static int ipcp_udp_port_alloc(int skfd, uint32_t s_eid, const uint8_t * dst, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t dlen) { uint8_t * buf; struct mgmt_msg * msg; size_t len; + assert(dlen > 0 ? data != NULL : data == NULL); + len = sizeof(*msg) + ipcp_dir_hash_len(); - buf = malloc(len); + buf = malloc(len + dlen); if (buf == NULL) return -1; @@ -212,8 +218,9 @@ static int ipcp_udp_port_alloc(int skfd, msg->cypher_s = hton16(qs.cypher_s); memcpy(msg + 1, dst, ipcp_dir_hash_len()); + memcpy(buf + len, data, dlen); - if (write(skfd, msg, len) < 0) { + if (write(skfd, msg, len + dlen) < 0) { free(buf); return -1; } @@ -223,14 +230,16 @@ static int ipcp_udp_port_alloc(int skfd, return 0; } -static int ipcp_udp_port_alloc_resp(int skfd, - uint32_t s_eid, - uint32_t d_eid, - int8_t response) +static int ipcp_udp_port_alloc_resp(int skfd, + uint32_t s_eid, + uint32_t d_eid, + int8_t response, + const void * data, + size_t len) { struct mgmt_msg * msg; - msg = malloc(sizeof(*msg)); + msg = malloc(sizeof(*msg) + len); if (msg == NULL) return -1; @@ -240,7 +249,9 @@ static int ipcp_udp_port_alloc_resp(int skfd, msg->d_eid = hton32(d_eid); msg->response = response; - if (write(skfd, msg, sizeof(*msg)) < 0) { + memcpy(msg + 1, data, len); + + if (write(skfd, msg, sizeof(*msg) + len) < 0) { free(msg); return -1; } @@ -253,7 +264,9 @@ static int ipcp_udp_port_alloc_resp(int skfd, static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, int d_eid, const uint8_t * dst, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t len) { struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000}; struct timespec abstime; @@ -294,7 +307,7 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, } /* reply to IRM */ - fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs); + fd = ipcp_flow_req_arr(dst, ipcp_dir_hash_len(), qs, data, len); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -320,9 +333,11 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, return 0; } -static int ipcp_udp_port_alloc_reply(uint32_t s_eid, - uint32_t d_eid, - int8_t response) +static int ipcp_udp_port_alloc_reply(uint32_t s_eid, + uint32_t d_eid, + int8_t response, + const void * data, + size_t len) { struct sockaddr_in t_saddr; socklen_t t_saddr_len; @@ -356,7 +371,7 @@ static int ipcp_udp_port_alloc_reply(uint32_t s_eid, return -1; } - if (ipcp_flow_alloc_reply(s_eid, response) < 0) { + if (ipcp_flow_alloc_reply(s_eid, response, data, len) < 0) { log_dbg("Failed to reply to flow allocation."); return -1; } @@ -368,15 +383,21 @@ static int ipcp_udp_port_alloc_reply(uint32_t s_eid, } static int ipcp_udp_mgmt_frame(const uint8_t * buf, + size_t len, struct sockaddr_in c_saddr) { struct mgmt_msg * msg; + size_t msg_len; qosspec_t qs; msg = (struct mgmt_msg *) buf; switch (msg->code) { case FLOW_REQ: + msg_len = sizeof(*msg) + ipcp_dir_hash_len(); + + assert(len >= msg_len); + qs.delay = ntoh32(msg->delay); qs.bandwidth = ntoh64(msg->bandwidth); qs.availability = msg->availability; @@ -387,11 +408,17 @@ static int ipcp_udp_mgmt_frame(const uint8_t * buf, qs.cypher_s = 
ntoh16(msg->cypher_s); return ipcp_udp_port_req(&c_saddr, ntoh32(msg->s_eid), - (uint8_t *) (msg + 1), qs); + (uint8_t *) (msg + 1), qs, + buf + msg_len, + len - msg_len); case FLOW_REPLY: + assert(len >= sizeof(*msg)); + return ipcp_udp_port_alloc_reply(ntoh32(msg->s_eid), ntoh32(msg->d_eid), - msg->response); + msg->response, + buf + sizeof(*msg), + len - sizeof(*msg)); default: log_err("Unknown message received %d.", msg->code); return -1; @@ -421,7 +448,7 @@ static void * ipcp_udp_mgmt_handler(void * o) pthread_mutex_unlock(&udp_data.mgmt_lock); - ipcp_udp_mgmt_frame(frame->buf, frame->r_saddr); + ipcp_udp_mgmt_frame(frame->buf, frame->len, frame->r_saddr); free(frame); } @@ -468,8 +495,8 @@ static void * ipcp_udp_packet_reader(void * o) /* pass onto mgmt queue */ if (eid == MGMT_EID) { - if (n > IPCP_UDP_MSG_SIZE) { - log_warn("Dropped oversize management frame."); + if ((size_t) n < MGMT_FRAME_SIZE) { + log_warn("Dropped runt mgmt frame."); continue; } @@ -479,6 +506,7 @@ static void * ipcp_udp_packet_reader(void * o) memcpy(frame->buf, buf, n); memcpy(&frame->r_saddr, &r_saddr, sizeof(r_saddr)); + frame->len = n; pthread_mutex_lock(&udp_data.mgmt_lock); list_add(&frame->next, &udp_data.mgmt_frames); @@ -962,7 +990,9 @@ static int ipcp_udp_query(const uint8_t * hash) static int ipcp_udp_flow_alloc(int fd, const uint8_t * dst, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t len) { struct sockaddr_in r_saddr; /* Server address */ struct sockaddr_in c_saddr; /* Client address */ @@ -1026,7 +1056,7 @@ static int ipcp_udp_flow_alloc(int fd, return -1; } - if (ipcp_udp_port_alloc(skfd, fd, dst, qs) < 0) { + if (ipcp_udp_port_alloc(skfd, fd, dst, qs, data, len) < 0) { log_err("Could not allocate port."); close(skfd); return -1; @@ -1047,15 +1077,17 @@ static int ipcp_udp_flow_alloc(int fd, return 0; } -static int ipcp_udp_flow_alloc_resp(int fd, - int response) +static int ipcp_udp_flow_alloc_resp(int fd, + int resp, + const void * data, + size_t len) { struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000}; struct timespec abstime; int skfd; int d_eid; - if (response) + if (resp) return 0; clock_gettime(PTHREAD_COND_CLOCK, &abstime); @@ -1088,7 +1120,7 @@ static int ipcp_udp_flow_alloc_resp(int fd, pthread_rwlock_unlock(&udp_data.flows_lock); - if (ipcp_udp_port_alloc_resp(skfd, d_eid, fd, response) < 0) { + if (ipcp_udp_port_alloc_resp(skfd, d_eid, fd, resp, data, len) < 0) { pthread_rwlock_rdlock(&udp_data.flows_lock); fset_del(udp_data.np1_flows, fd); pthread_rwlock_unlock(&udp_data.flows_lock); diff --git a/src/ipcpd/unicast/fa.c b/src/ipcpd/unicast/fa.c index 9ed9f710..e0727e85 100644 --- a/src/ipcpd/unicast/fa.c +++ b/src/ipcpd/unicast/fa.c @@ -51,6 +51,7 @@ #define FLOW_REQ 0 #define FLOW_REPLY 1 +#define MSGBUFSZ 2048 struct fa_msg { uint64_t s_addr; @@ -106,7 +107,7 @@ static void packet_handler(int fd, static void destroy_conn(int fd) { - fa.r_eid[fd] = -1; + fa.r_eid[fd] = -1; fa.r_addr[fd] = INVALID_ADDR; } @@ -146,10 +147,12 @@ static void * fa_handle_packet(void * o) while (true) { struct timespec abstime; int fd; - uint8_t * buf; + uint8_t buf[MSGBUFSZ]; struct fa_msg * msg; qosspec_t qs; struct cmd * cmd; + size_t len; + size_t msg_len; pthread_mutex_lock(&fa.mtx); @@ -164,10 +167,10 @@ static void * fa_handle_packet(void * o) pthread_cleanup_pop(true); - buf = malloc(sizeof(*msg) + ipcp_dir_hash_len()); - if (buf == NULL) { - log_err("Failed to allocate memory."); - ipcp_sdb_release(cmd->sdb); + len = shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb); + + 
if (len > MSGBUFSZ) { + log_err("Message over buffer size."); free(cmd); continue; } @@ -176,12 +179,7 @@ static void * fa_handle_packet(void * o) /* Depending on the message call the function in ipcp-dev.h */ - assert(sizeof(*msg) + ipcp_dir_hash_len() >= - (unsigned long int) (shm_du_buff_tail(cmd->sdb) - - shm_du_buff_head(cmd->sdb))); - - memcpy(msg, shm_du_buff_head(cmd->sdb), - shm_du_buff_tail(cmd->sdb) - shm_du_buff_head(cmd->sdb)); + memcpy(msg, shm_du_buff_head(cmd->sdb), len); ipcp_sdb_release(cmd->sdb); @@ -189,6 +187,10 @@ static void * fa_handle_packet(void * o) switch (msg->code) { case FLOW_REQ: + msg_len = sizeof(*msg) + ipcp_dir_hash_len(); + + assert(len >= msg_len); + clock_gettime(PTHREAD_COND_CLOCK, &abstime); pthread_mutex_lock(&ipcpi.alloc_lock); @@ -205,7 +207,6 @@ static void * fa_handle_packet(void * o) pthread_mutex_unlock(&ipcpi.alloc_lock); log_dbg("Won't allocate over non-operational" "IPCP."); - free(msg); continue; } @@ -222,11 +223,12 @@ static void * fa_handle_packet(void * o) fd = ipcp_flow_req_arr((uint8_t *) (msg + 1), ipcp_dir_hash_len(), - qs); + qs, + buf + msg_len, + len - msg_len); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Failed to get fd for flow."); - free(msg); continue; } @@ -244,12 +246,16 @@ static void * fa_handle_packet(void * o) break; case FLOW_REPLY: + assert(len >= sizeof(*msg)); + pthread_rwlock_wrlock(&fa.flows_lock); fa.r_eid[ntoh32(msg->r_eid)] = ntoh32(msg->s_eid); ipcp_flow_alloc_reply(ntoh32(msg->r_eid), - msg->response); + msg->response, + buf + sizeof(*msg), + len - sizeof(*msg)); if (msg->response < 0) destroy_conn(ntoh32(msg->r_eid)); @@ -263,8 +269,6 @@ static void * fa_handle_packet(void * o) log_err("Got an unknown flow allocation message."); break; } - - free(msg); } } @@ -363,18 +367,23 @@ void fa_stop(void) int fa_alloc(int fd, const uint8_t * dst, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t dlen) { struct fa_msg * msg; uint64_t addr; struct shm_du_buff * sdb; qoscube_t qc; + size_t len; addr = dir_query(dst); if (addr == 0) return -1; - if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len())) + len = sizeof(*msg) + ipcp_dir_hash_len(); + + if (ipcp_sdb_reserve(&sdb, len + dlen)) return -1; msg = (struct fa_msg *) shm_du_buff_head(sdb); @@ -391,6 +400,7 @@ int fa_alloc(int fd, msg->cypher_s = hton16(qs.cypher_s); memcpy(msg + 1, dst, ipcp_dir_hash_len()); + memcpy(shm_du_buff_head(sdb) + len, data, dlen); qc = qos_spec_to_cube(qs); @@ -409,8 +419,10 @@ int fa_alloc(int fd, return 0; } -int fa_alloc_resp(int fd, - int response) +int fa_alloc_resp(int fd, + int response, + const void * data, + size_t len) { struct timespec ts = {0, TIMEOUT * 1000}; struct timespec abstime; @@ -439,7 +451,7 @@ int fa_alloc_resp(int fd, pthread_mutex_unlock(&ipcpi.alloc_lock); - if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len())) { + if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + len)) { destroy_conn(fd); return -1; } @@ -452,6 +464,8 @@ int fa_alloc_resp(int fd, msg->s_eid = hton32(fd); msg->response = response; + memcpy(msg + 1, data, len); + if (response < 0) { destroy_conn(fd); ipcp_sdb_release(sdb); diff --git a/src/ipcpd/unicast/fa.h b/src/ipcpd/unicast/fa.h index 8ced5e84..12a10a0c 100644 --- a/src/ipcpd/unicast/fa.h +++ b/src/ipcpd/unicast/fa.h @@ -36,10 +36,14 @@ void fa_stop(void); int fa_alloc(int fd, const uint8_t * dst, - qosspec_t qs); - -int fa_alloc_resp(int fd, - int response); + qosspec_t qs, + const void * data, + size_t len); + +int fa_alloc_resp(int fd, + int 
response, + const void * data, + size_t len); int fa_dealloc(int fd); diff --git a/src/irmd/ipcp.c b/src/irmd/ipcp.c index 85698ec1..78408185 100644 --- a/src/irmd/ipcp.c +++ b/src/irmd/ipcp.c @@ -441,7 +441,9 @@ static int __ipcp_flow_alloc(pid_t pid, const uint8_t * dst, size_t len, qosspec_t qs, - bool join) + bool join, + const void * data, + size_t dlen) { ipcp_msg_t msg = IPCP_MSG__INIT; qosspec_msg_t qs_msg; @@ -450,10 +452,8 @@ static int __ipcp_flow_alloc(pid_t pid, assert(dst); - if (join) - msg.code = IPCP_MSG_CODE__IPCP_FLOW_JOIN; - else - msg.code = IPCP_MSG_CODE__IPCP_FLOW_ALLOC; + msg.code = join ? IPCP_MSG_CODE__IPCP_FLOW_JOIN + : IPCP_MSG_CODE__IPCP_FLOW_ALLOC; msg.has_flow_id = true; msg.flow_id = flow_id; msg.has_pid = true; @@ -463,6 +463,9 @@ static int __ipcp_flow_alloc(pid_t pid, msg.hash.data = (uint8_t *) dst; qs_msg = spec_to_msg(&qs); msg.qosspec = &qs_msg; + msg.has_pk = true; + msg.pk.data = (uint8_t *) data; + msg.pk.len = (uint32_t) dlen; recv_msg = send_recv_ipcp_msg(pid, &msg); if (recv_msg == NULL) @@ -484,9 +487,12 @@ int ipcp_flow_alloc(pid_t pid, pid_t n_pid, const uint8_t * dst, size_t len, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t dlen) { - return __ipcp_flow_alloc(pid, flow_id, n_pid, dst, len, qs, false); + return __ipcp_flow_alloc(pid, flow_id, n_pid, dst, len, qs, false, + data, dlen); } int ipcp_flow_join(pid_t pid, @@ -496,13 +502,16 @@ int ipcp_flow_join(pid_t pid, size_t len, qosspec_t qs) { - return __ipcp_flow_alloc(pid, flow_id, n_pid, dst, len, qs, true); + return __ipcp_flow_alloc(pid, flow_id, n_pid, dst, len, qs, true, + NULL, 0); } -int ipcp_flow_alloc_resp(pid_t pid, - int flow_id, - pid_t n_pid, - int response) +int ipcp_flow_alloc_resp(pid_t pid, + int flow_id, + pid_t n_pid, + int response, + const void * data, + size_t len) { ipcp_msg_t msg = IPCP_MSG__INIT; ipcp_msg_t * recv_msg = NULL; @@ -515,6 +524,9 @@ int ipcp_flow_alloc_resp(pid_t pid, msg.pid = n_pid; msg.has_response = true; msg.response = response; + msg.has_pk = true; + msg.pk.data = (uint8_t *) data; + msg.pk.len = (uint32_t) len; recv_msg = send_recv_ipcp_msg(pid, &msg); if (recv_msg == NULL) diff --git a/src/irmd/ipcp.h b/src/irmd/ipcp.h index 398255e8..ae00792b 100644 --- a/src/irmd/ipcp.h +++ b/src/irmd/ipcp.h @@ -67,7 +67,9 @@ int ipcp_flow_alloc(pid_t pid, pid_t n_pid, const uint8_t * dst, size_t len, - qosspec_t qs); + qosspec_t qs, + const void * data, + size_t dlen); int ipcp_flow_join(pid_t pid, int flow_id, @@ -76,10 +78,12 @@ int ipcp_flow_join(pid_t pid, size_t len, qosspec_t qs); -int ipcp_flow_alloc_resp(pid_t pid, - int flow_id, - pid_t n_pid, - int response); +int ipcp_flow_alloc_resp(pid_t pid, + int flow_id, + pid_t n_pid, + int response, + const void * data, + size_t len); int ipcp_flow_dealloc(pid_t pid, int flow_id); diff --git a/src/irmd/irm_flow.c b/src/irmd/irm_flow.c index 70d2a789..10395a35 100644 --- a/src/irmd/irm_flow.c +++ b/src/irmd/irm_flow.c @@ -62,6 +62,8 @@ struct irm_flow * irm_flow_create(pid_t n_pid, f->n_1_pid = n_1_pid; f->flow_id = flow_id; f->qs = qs; + f->data = NULL; + f->len = 0; f->n_rb = shm_rbuff_create(n_pid, flow_id); if (f->n_rb == NULL) { @@ -119,6 +121,8 @@ void irm_flow_destroy(struct irm_flow * f) pthread_mutex_lock(&f->state_lock); + assert(f->len == 0); + if (f->state == FLOW_DESTROY) { pthread_mutex_unlock(&f->state_lock); return; diff --git a/src/irmd/irm_flow.h b/src/irmd/irm_flow.h index 28369e03..051a60a6 100644 --- a/src/irmd/irm_flow.h +++ b/src/irmd/irm_flow.h @@ -48,6 +48,8 @@ struct 
irm_flow { pid_t n_1_pid; qosspec_t qs; + void * data; + size_t len; struct shm_rbuff * n_rb; struct shm_rbuff * n_1_rb; diff --git a/src/irmd/main.c b/src/irmd/main.c index 6b672756..65354382 100644 --- a/src/irmd/main.c +++ b/src/irmd/main.c @@ -166,6 +166,11 @@ static void clear_irm_flow(struct irm_flow * f) { assert(f); + if (f->len != 0) { + free(f->data); + f->len = 0; + } + while ((idx = shm_rbuff_read(f->n_rb)) >= 0) shm_rdrbuff_remove(irmd.rdrb, idx); @@ -1161,7 +1166,9 @@ static int proc_announce(pid_t pid, static int flow_accept(pid_t pid, struct timespec * timeo, - struct irm_flow ** fl) + struct irm_flow ** fl, + const void * data, + size_t len) { struct irm_flow * f = NULL; struct proc_entry * e = NULL; @@ -1228,7 +1235,7 @@ static int flow_accept(pid_t pid, list_del(&f->next); bmp_release(irmd.flow_ids, f->flow_id); pthread_rwlock_unlock(&irmd.flows_lock); - ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, -1); + ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, -1, NULL, 0); clear_irm_flow(f); irm_flow_set_state(f, FLOW_NULL); irm_flow_destroy(f); @@ -1248,7 +1255,7 @@ static int flow_accept(pid_t pid, list_del(&f->next); bmp_release(irmd.flow_ids, f->flow_id); pthread_rwlock_unlock(&irmd.flows_lock); - ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, -1); + ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, -1, NULL, 0); clear_irm_flow(f); irm_flow_set_state(f, FLOW_NULL); irm_flow_destroy(f); @@ -1260,7 +1267,7 @@ static int flow_accept(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); - if (ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, 0)) { + if (ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, 0, data, len)) { pthread_rwlock_wrlock(&irmd.flows_lock); list_del(&f->next); pthread_rwlock_unlock(&irmd.flows_lock); @@ -1285,7 +1292,9 @@ static int flow_alloc(pid_t pid, qosspec_t qs, struct timespec * timeo, struct irm_flow ** e, - bool join) + bool join, + const void * data, + size_t len) { struct irm_flow * f; struct ipcp_entry * ipcp; @@ -1293,10 +1302,8 @@ static int flow_alloc(pid_t pid, int state; uint8_t * hash; - if (join) - ipcp = get_ipcp_entry_by_layer(dst); - else - ipcp = get_ipcp_by_dst_name(dst, pid); + ipcp = join ? 
get_ipcp_entry_by_layer(dst) + : get_ipcp_by_dst_name(dst, pid); if (ipcp == NULL) { log_info("Destination %s unreachable.", dst); return -1; @@ -1341,7 +1348,7 @@ static int flow_alloc(pid_t pid, } } else { if (ipcp_flow_alloc(ipcp->pid, flow_id, pid, hash, - IPCP_HASH_LEN(ipcp), qs)) { + IPCP_HASH_LEN(ipcp), qs, data, len)) { /* sanitizer cleans this */ log_info("Flow_allocation failed."); free(hash); @@ -1450,7 +1457,9 @@ static pid_t auto_execute(char ** argv) static struct irm_flow * flow_req_arr(pid_t pid, const uint8_t * hash, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t len) { struct reg_entry * re = NULL; struct prog_entry * a = NULL; @@ -1547,6 +1556,7 @@ static struct irm_flow * flow_req_arr(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); pthread_rwlock_wrlock(&irmd.flows_lock); + flow_id = bmp_allocate(irmd.flow_ids); if (!bmp_is_id_valid(irmd.flow_ids, flow_id)) { pthread_rwlock_unlock(&irmd.flows_lock); @@ -1561,6 +1571,21 @@ static struct irm_flow * flow_req_arr(pid_t pid, return NULL; } + if (len != 0) { + assert(data); + f->data = malloc(len); + if (f->data == NULL) { + bmp_release(irmd.flow_ids, flow_id); + pthread_rwlock_unlock(&irmd.flows_lock); + log_err("Could not piggyback data."); + return NULL; + } + + f->len = len; + + memcpy(f->data, data, len); + } + list_add(&f->next, &irmd.irm_flows); pthread_rwlock_unlock(&irmd.flows_lock); @@ -1577,6 +1602,8 @@ static struct irm_flow * flow_req_arr(pid_t pid, list_del(&f->next); pthread_rwlock_unlock(&irmd.flows_lock); log_err("Could not get process table entry for %d.", h_pid); + free(f->data); + f->len = 0; irm_flow_destroy(f); return NULL; } @@ -1590,8 +1617,10 @@ static struct irm_flow * flow_req_arr(pid_t pid, return f; } -static int flow_alloc_reply(int flow_id, - int response) +static int flow_alloc_reply(int flow_id, + int response, + const void * data, + size_t len) { struct irm_flow * f; @@ -1608,6 +1637,14 @@ static int flow_alloc_reply(int flow_id, else irm_flow_set_state(f, FLOW_NULL); + f->data = malloc(len); + if (f->data == NULL) { + pthread_rwlock_unlock(&irmd.flows_lock); + return -1; + } + memcpy(f->data, data, len); + f->len = len; + pthread_rwlock_unlock(&irmd.flows_lock); return 0; @@ -1921,7 +1958,7 @@ static void * mainloop(void * o) if (msg == NULL) { close(sfd); - irm_msg__free_unpacked(ret_msg, NULL); + irm_msg__free_unpacked(msg, NULL); continue; } @@ -1991,7 +2028,10 @@ static void * mainloop(void * o) result = name_unreg(msg->pid, msg->name); break; case IRM_MSG_CODE__IRM_FLOW_ACCEPT: - result = flow_accept(msg->pid, timeo, &e); + assert(msg->pk.len > 0 ? msg->pk.data != NULL + : msg->pk.data == NULL); + result = flow_accept(msg->pid, timeo, &e, + msg->pk.data, msg->pk.len); if (result == 0) { qosspec_msg_t qs_msg; ret_msg->has_flow_id = true; @@ -2000,23 +2040,35 @@ static void * mainloop(void * o) ret_msg->pid = e->n_1_pid; qs_msg = spec_to_msg(&e->qs); ret_msg->qosspec = &qs_msg; + ret_msg->has_pk = true; + ret_msg->pk.data = e->data; + ret_msg->pk.len = e->len; + e->len = 0; /* Data is free'd with ret_msg */ } break; case IRM_MSG_CODE__IRM_FLOW_ALLOC: + assert(msg->pk.len > 0 ? 
msg->pk.data != NULL + : msg->pk.data == NULL); result = flow_alloc(msg->pid, msg->dst, msg_to_spec(msg->qosspec), - timeo, &e, false); + timeo, &e, false, msg->pk.data, + msg->pk.len); if (result == 0) { ret_msg->has_flow_id = true; ret_msg->flow_id = e->flow_id; ret_msg->has_pid = true; ret_msg->pid = e->n_1_pid; + ret_msg->has_pk = true; + ret_msg->pk.data = e->data; + ret_msg->pk.len = e->len; + e->len = 0; /* Data is free'd with ret_msg */ } break; case IRM_MSG_CODE__IRM_FLOW_JOIN: + assert(msg->pk.len == 0 && msg->pk.data == NULL); result = flow_alloc(msg->pid, msg->dst, msg_to_spec(msg->qosspec), - timeo, &e, true); + timeo, &e, true, NULL, 0); if (result == 0) { ret_msg->has_flow_id = true; ret_msg->flow_id = e->flow_id; @@ -2028,9 +2080,13 @@ static void * mainloop(void * o) result = flow_dealloc(msg->pid, msg->flow_id); break; case IRM_MSG_CODE__IPCP_FLOW_REQ_ARR: + assert(msg->pk.len > 0 ? msg->pk.data != NULL + : msg->pk.data == NULL); e = flow_req_arr(msg->pid, msg->hash.data, - msg_to_spec(msg->qosspec)); + msg_to_spec(msg->qosspec), + msg->pk.data, + msg->pk.len); result = (e == NULL ? -1 : 0); if (result == 0) { ret_msg->has_flow_id = true; @@ -2040,7 +2096,12 @@ static void * mainloop(void * o) } break; case IRM_MSG_CODE__IPCP_FLOW_ALLOC_REPLY: - result = flow_alloc_reply(msg->flow_id, msg->response); + assert(msg->pk.len > 0 ? msg->pk.data != NULL + : msg->pk.data == NULL); + result = flow_alloc_reply(msg->flow_id, + msg->response, + msg->pk.data, + msg->pk.len); break; default: log_err("Don't know that message code."); diff --git a/src/lib/crypt.c b/src/lib/crypt.c index 929d73f9..1fa12bb8 100644 --- a/src/lib/crypt.c +++ b/src/lib/crypt.c @@ -27,9 +27,9 @@ #include <openssl/ec.h> #include <openssl/pem.h> -#define MSGBUFSZ 2048 +#include <openssl/bio.h> + #define IVSZ 16 -#define DH_TIMEO 2 /* seconds */ /* SYMMKEYSZ defined in dev.c */ /* @@ -90,7 +90,7 @@ static int __openssl_ecdh_derive_secret(EVP_PKEY * kp, return -ECRYPT; } -static int __openssl_ecdh_gen_key(EVP_PKEY ** kp) +static int __openssl_ecdh_gen_key(void ** kp) { EVP_PKEY_CTX * ctx = NULL; EVP_PKEY_CTX * kctx = NULL; @@ -121,7 +121,7 @@ static int __openssl_ecdh_gen_key(EVP_PKEY ** kp) if (ret != 1) goto fail_keygen; - ret = EVP_PKEY_keygen(kctx, kp); + ret = EVP_PKEY_keygen(kctx, (EVP_PKEY **) kp); if (ret != 1) goto fail_keygen; @@ -141,114 +141,57 @@ static int __openssl_ecdh_gen_key(EVP_PKEY ** kp) return -ECRYPT; } -/* ECDH from the server side. */ -static int openssl_ecdh_srv(int fd, - uint8_t * s) +static ssize_t openssl_ecdh_pkp_create(void ** pkp, + uint8_t * pk) { - EVP_PKEY * kp = NULL; - EVP_PKEY * pub = NULL; - uint8_t buf[MSGBUFSZ]; - ssize_t len; - int buf_sz; uint8_t * pos; - struct timespec timeo = {DH_TIMEO,0}; - - assert(s != NULL); + ssize_t len; - (void) fd; - (void) s; + assert(pkp != NULL); + assert(*pkp == NULL); + assert(pk != NULL); - if (__openssl_ecdh_gen_key(&kp) < 0) - goto fail_gen_key; + if (__openssl_ecdh_gen_key(pkp) < 0) + return -ECRYPT; - fccntl(fd, FLOWSRCVTIMEO, &timeo); + assert(*pkp != NULL); - len = flow_read(fd, buf, MSGBUFSZ); + pos = pk; /* i2d_PUBKEY increments the pointer, don't use buf! */ + len = i2d_PUBKEY(*pkp, &pos); if (len < 0) { - fccntl(fd, FLOWSRCVTIMEO, NULL); - goto fail_get_key; + EVP_PKEY_free(*pkp); + return -ECRYPT; } - fccntl(fd, FLOWSRCVTIMEO, NULL); - - pos = buf; /* i2d_PUBKEY increments the pointer, don't use buf! 
*/ - pub = d2i_PUBKEY(NULL, (const uint8_t **) &pos, (long) len); - - pos = buf; /* i2d_PUBKEY increments the pointer, don't use buf! */ - buf_sz = i2d_PUBKEY(kp, &pos); - if (buf_sz < 0) - goto fail_get_key; - - if (flow_write(fd, buf, (size_t) buf_sz) < 0) - goto fail_get_key; - - if (__openssl_ecdh_derive_secret(kp, pub, s) < 0) - goto fail_get_key; - - EVP_PKEY_free(kp); - EVP_PKEY_free(pub); - - return 0; - - fail_get_key: - EVP_PKEY_free(kp); - fail_gen_key: - return -ECRYPT; + return len; } -/* ECDH from the client side. */ -static int openssl_ecdh_clt(int fd, - uint8_t * s) +static void openssl_ecdh_pkp_destroy(void * pkp) { - EVP_PKEY * kp = NULL; - EVP_PKEY * pub = NULL; - uint8_t buf[MSGBUFSZ]; - int buf_sz; - uint8_t * pos; - ssize_t len; - struct timespec timeo = {DH_TIMEO,0}; - - assert(s != NULL); - - (void) fd; - (void) s; - - if (__openssl_ecdh_gen_key(&kp) < 0) - goto fail_gen_key; - - pos = buf; /* i2d_PUBKEY increments the pointer, don't use buf! */ - buf_sz = i2d_PUBKEY(kp, &pos); - if (buf_sz < 0) - goto fail_get_key; - - if (flow_write(fd, buf, (size_t) buf_sz) < 0) - goto fail_get_key; - - fccntl(fd, FLOWSRCVTIMEO, &timeo); - - len = flow_read(fd, buf, MSGBUFSZ); - if (len < 0) { - fccntl(fd, FLOWSRCVTIMEO, NULL); - goto fail_get_key; - } + EVP_PKEY_free((EVP_PKEY *) pkp); +} - fccntl(fd, FLOWSRCVTIMEO, NULL); +static int openssl_ecdh_derive(void * pkp, + uint8_t * pk, + size_t len, + uint8_t * s) +{ + uint8_t * pos; + EVP_PKEY * pub; - pos = buf; /* i2d_PUBKEY increments the pointer, don't use buf! */ + pos = pk; /* d2i_PUBKEY increments the pointer, don't use key ptr! */ pub = d2i_PUBKEY(NULL, (const uint8_t **) &pos, (long) len); + if (pub == NULL) + return -ECRYPT; - if (__openssl_ecdh_derive_secret(kp, pub, s) < 0) - goto fail_get_key; + if (__openssl_ecdh_derive_secret(pkp, pub, s) < 0) { + EVP_PKEY_free(pub); + return -ECRYPT; + } - EVP_PKEY_free(kp); EVP_PKEY_free(pub); return 0; - - fail_get_key: - EVP_PKEY_free(kp); - fail_gen_key: - return -ECRYPT; } /* @@ -394,51 +337,64 @@ static int openssl_decrypt(struct flow * f, } -static int openssl_crypt_init(struct flow * f) +static int openssl_crypt_init(void ** ctx) { - f->ctx = EVP_CIPHER_CTX_new(); - if (f->ctx == NULL) - goto fail_new; + *ctx = EVP_CIPHER_CTX_new(); + if (*ctx == NULL) + return -ECRYPT; return 0; - - fail_new: - return -ECRYPT; } -static void openssl_crypt_fini(struct flow * f) +static void openssl_crypt_fini(void * ctx) { - EVP_CIPHER_CTX_free(f->ctx); - f->ctx = NULL; + EVP_CIPHER_CTX_free(ctx); } #endif /* HAVE_OPENSSL */ -static int crypt_dh_srv(int fd, - uint8_t * s) +static int crypt_dh_pkp_create(void ** pkp, + uint8_t * pk) { #ifdef HAVE_OPENSSL - return openssl_ecdh_srv(fd, s); + assert(pkp != NULL); + *pkp = NULL; + return openssl_ecdh_pkp_create(pkp, pk); #else - (void) fd; + (void) pkp; + (void) pk; - memset(s, 0, SYMMKEYSZ); + memset(pk, 0, MSGBUFSZ); return -ECRYPT; #endif } -static int crypt_dh_clt(int fd, - uint8_t * s) +static void crypt_dh_pkp_destroy(void * pkp) { #ifdef HAVE_OPENSSL - return openssl_ecdh_clt(fd, s); + openssl_ecdh_pkp_destroy(pkp); #else - (void) fd; + (void) pkp; + return 0; +#endif +} + +static int crypt_dh_derive(void * pkp, + uint8_t * pk, + size_t len, + uint8_t * s) +{ +#ifdef HAVE_OPENSSL + return openssl_ecdh_derive(pkp, pk, len, s); +#else + (void) pkp; + (void) pk; + (void) len; memset(s, 0, SYMMKEYSZ); - return 0; + return -ECRYPT; #endif } @@ -464,27 +420,28 @@ static int crypt_decrypt(struct flow * f, (void) f; (void) sdb; - return 0; + 
return -ECRYPT; #endif } - -static int crypt_init(struct flow * f) +static int crypt_init(void ** ctx) { #ifdef HAVE_OPENSSL - return openssl_crypt_init(f); + return openssl_crypt_init(ctx); #else - (void) f; + assert(ctx != NULL); + *ctx = NULL; return 0; #endif } -static void crypt_fini(struct flow * f) +static void crypt_fini(void * ctx) { #ifdef HAVE_OPENSSL - openssl_crypt_fini(f); + openssl_crypt_fini(ctx); #else - (void) f; + assert(ctx == NULL); + (void) ctx; #endif } diff --git a/src/lib/dev.c b/src/lib/dev.c index 43c7e98c..c427e20c 100644 --- a/src/lib/dev.c +++ b/src/lib/dev.c @@ -56,15 +56,16 @@ #endif /* Partial read information. */ -#define NO_PART -1 -#define DONE_PART -2 +#define NO_PART -1 +#define DONE_PART -2 #define CRCLEN (sizeof(uint32_t)) #define SECMEMSZ 16384 -#define SYMMKEYSZ 32 +#define SYMMKEYSZ 32 +#define MSGBUFSZ 2048 struct flow_set { - size_t idx; + size_t idx; }; struct fqueue { @@ -98,7 +99,7 @@ struct flow { struct shm_flow_set * set; int flow_id; int oflags; - qosspec_t spec; + qosspec_t qs; ssize_t part_idx; void * ctx; @@ -274,15 +275,16 @@ static void flow_fini(int fd) if (ai.flows[fd].frcti != NULL) frcti_destroy(ai.flows[fd].frcti); - if (ai.flows[fd].spec.cypher_s > 0) - crypt_fini(&ai.flows[fd]); + if (ai.flows[fd].ctx != NULL) + crypt_fini(ai.flows[fd].ctx); flow_clear(fd); } static int flow_init(int flow_id, pid_t pid, - qosspec_t qs) + qosspec_t qs, + uint8_t * s) { int fd; int err = -ENOMEM; @@ -311,10 +313,15 @@ static int flow_init(int flow_id, ai.flows[fd].oflags = FLOWFDEFAULT; ai.flows[fd].pid = pid; ai.flows[fd].part_idx = NO_PART; - ai.flows[fd].spec = qs; + ai.flows[fd].qs = qs; - if (qs.cypher_s > 0 && crypt_init(&ai.flows[fd]) < 0) - goto fail_crypt; + if (qs.cypher_s > 0) { + assert(s != NULL); + if (crypt_init(&ai.flows[fd].ctx) < 0) + goto fail_ctx; + + memcpy(ai.flows[fd].key, s, SYMMKEYSZ); + } ai.ports[flow_id].fd = fd; @@ -324,7 +331,7 @@ static int flow_init(int flow_id, return fd; - fail_crypt: + fail_ctx: shm_flow_set_close(ai.flows[fd].set); fail_set: shm_rbuff_close(ai.flows[fd].tx_rb); @@ -521,6 +528,13 @@ int flow_accept(qosspec_t * qs, irm_msg_t msg = IRM_MSG__INIT; irm_msg_t * recv_msg; int fd; + void * pkp; /* public key pair */ + uint8_t s[SYMMKEYSZ]; /* secret key for flow */ + uint8_t buf[MSGBUFSZ]; + int err = -EIRMD; + ssize_t key_len; + + memset(s, 0, SYMMKEYSZ); msg.code = IRM_MSG_CODE__IRM_FLOW_ACCEPT; msg.has_pid = true; @@ -533,29 +547,48 @@ int flow_accept(qosspec_t * qs, msg.timeo_nsec = timeo->tv_nsec; } + key_len = crypt_dh_pkp_create(&pkp, buf); + if (key_len < 0) { + err = -ECRYPT; + goto fail_crypt_pkp; + } + + msg.has_pk = true; + msg.pk.data = buf; + msg.pk.len = (uint32_t) key_len; + + pthread_cleanup_push(crypt_dh_pkp_destroy, pkp); + recv_msg = send_recv_irm_msg(&msg); + + pthread_cleanup_pop(false); + if (recv_msg == NULL) - return -EIRMD; + goto fail_recv; - if (!recv_msg->has_result) { - irm_msg__free_unpacked(recv_msg, NULL); - return -EIRMD; - } + if (!recv_msg->has_result) + goto fail_result; if (recv_msg->result != 0) { - int res = recv_msg->result; - irm_msg__free_unpacked(recv_msg, NULL); - return res; + err = recv_msg->result; + goto fail_result; } if (!recv_msg->has_pid || !recv_msg->has_flow_id || - recv_msg->qosspec == NULL) { - irm_msg__free_unpacked(recv_msg, NULL); - return -EIRMD; + recv_msg->qosspec == NULL) + goto fail_result; + + if (recv_msg->pk.len != 0 && + crypt_dh_derive(pkp, recv_msg->pk.data, + recv_msg->pk.len, s) < 0) { + err = -ECRYPT; + goto fail_result; } + 
crypt_dh_pkp_destroy(pkp); + fd = flow_init(recv_msg->flow_id, recv_msg->pid, - msg_to_spec(recv_msg->qosspec)); + msg_to_spec(recv_msg->qosspec), s); irm_msg__free_unpacked(recv_msg, NULL); @@ -566,7 +599,7 @@ int flow_accept(qosspec_t * qs, assert(ai.flows[fd].frcti == NULL); - if (ai.flows[fd].spec.in_order != 0) { + if (ai.flows[fd].qs.in_order != 0) { ai.flows[fd].frcti = frcti_create(fd); if (ai.flows[fd].frcti == NULL) { pthread_rwlock_unlock(&ai.lock); @@ -576,36 +609,18 @@ int flow_accept(qosspec_t * qs, } if (qs != NULL) - *qs = ai.flows[fd].spec; + *qs = ai.flows[fd].qs; pthread_rwlock_unlock(&ai.lock); - /* TODO: piggyback public keys at flow allocation. */ - if (ai.flows[fd].spec.cypher_s > 0) { - uint8_t key[SYMMKEYSZ]; - uint16_t tmp; - - pthread_rwlock_wrlock(&ai.lock); - - tmp = ai.flows[fd].spec.cypher_s; - ai.flows[fd].spec.cypher_s = 0; - - pthread_rwlock_unlock(&ai.lock); - - if (crypt_dh_srv(fd, key) < 0) { - flow_dealloc(fd); - return -ECRYPT; - } - - pthread_rwlock_wrlock(&ai.lock); - - ai.flows[fd].spec.cypher_s = tmp; - memcpy(ai.flows[fd].key, key, SYMMKEYSZ); - - pthread_rwlock_unlock(&ai.lock); - } - return fd; + + fail_result: + irm_msg__free_unpacked(recv_msg, NULL); + fail_recv: + crypt_dh_pkp_destroy(pkp); + fail_crypt_pkp: + return err; } static int __flow_alloc(const char * dst, @@ -617,15 +632,19 @@ static int __flow_alloc(const char * dst, qosspec_msg_t qs_msg = QOSSPEC_MSG__INIT; irm_msg_t * recv_msg; int fd; + void * pkp = NULL; /* public key pair */ + uint8_t s[SYMMKEYSZ]; /* secret key for flow */ + uint8_t buf[MSGBUFSZ]; + int err = -EIRMD; + + memset(s, 0, SYMMKEYSZ); #ifdef QOS_DISABLE_CRC if (qs != NULL) qs->ber = 1; #endif - if (join) - msg.code = IRM_MSG_CODE__IRM_FLOW_JOIN; - else - msg.code = IRM_MSG_CODE__IRM_FLOW_ALLOC; + msg.code = join ? IRM_MSG_CODE__IRM_FLOW_JOIN + : IRM_MSG_CODE__IRM_FLOW_ALLOC; msg.dst = (char *) dst; msg.has_pid = true; msg.pid = ai.pid; @@ -639,28 +658,52 @@ static int __flow_alloc(const char * dst, msg.timeo_nsec = timeo->tv_nsec; } + if (!join && qs != NULL && qs->cypher_s != 0) { + ssize_t key_len; + + key_len = crypt_dh_pkp_create(&pkp, buf); + if (key_len < 0) { + err = -ECRYPT; + goto fail_crypt_pkp; + } + + msg.has_pk = true; + msg.pk.data = buf; + msg.pk.len = (uint32_t) key_len; + } + recv_msg = send_recv_irm_msg(&msg); if (recv_msg == NULL) - return -EIRMD; + goto fail_send; - if (!recv_msg->has_result) { - irm_msg__free_unpacked(recv_msg, NULL); - return -EIRMD; - } + if (!recv_msg->has_result) + goto fail_result; if (recv_msg->result != 0) { - int res = recv_msg->result; - irm_msg__free_unpacked(recv_msg, NULL); - return res; + err = recv_msg->result; + goto fail_result; } - if (!recv_msg->has_pid || !recv_msg->has_flow_id) { - irm_msg__free_unpacked(recv_msg, NULL); - return -EIRMD; + if (!recv_msg->has_pid || !recv_msg->has_flow_id) + goto fail_result; + + if (!join && qs != NULL && qs->cypher_s != 0) { + if (!recv_msg->has_pk || recv_msg->pk.len == 0) { + err = -ECRYPT; + goto fail_result; + } + + if (crypt_dh_derive(pkp, recv_msg->pk.data, + recv_msg->pk.len, s) < 0) { + err = -ECRYPT; + goto fail_result; + } + + crypt_dh_pkp_destroy(pkp); } fd = flow_init(recv_msg->flow_id, recv_msg->pid, - qs == NULL ? qos_raw : *qs); + qs == NULL ? 
qos_raw : *qs, s); irm_msg__free_unpacked(recv_msg, NULL); @@ -671,7 +714,7 @@ static int __flow_alloc(const char * dst, assert(ai.flows[fd].frcti == NULL); - if (ai.flows[fd].spec.in_order != 0) { + if (ai.flows[fd].qs.in_order != 0) { ai.flows[fd].frcti = frcti_create(fd); if (ai.flows[fd].frcti == NULL) { pthread_rwlock_unlock(&ai.lock); @@ -682,32 +725,14 @@ static int __flow_alloc(const char * dst, pthread_rwlock_unlock(&ai.lock); - /* TODO: piggyback public keys at flow allocation. */ - if (!join && ai.flows[fd].spec.cypher_s > 0) { - uint8_t key[SYMMKEYSZ]; - uint16_t tmp; - - pthread_rwlock_wrlock(&ai.lock); - - tmp = ai.flows[fd].spec.cypher_s; - ai.flows[fd].spec.cypher_s = 0; - - pthread_rwlock_unlock(&ai.lock); - - if (crypt_dh_clt(fd, key) < 0) { - flow_dealloc(fd); - return -ECRYPT; - } - - pthread_rwlock_wrlock(&ai.lock); - - ai.flows[fd].spec.cypher_s = tmp; - memcpy(ai.flows[fd].key, key, SYMMKEYSZ); - - pthread_rwlock_unlock(&ai.lock); - } - return fd; + + fail_result: + irm_msg__free_unpacked(recv_msg, NULL); + fail_send: + crypt_dh_pkp_destroy(pkp); + fail_crypt_pkp: + return err; } int flow_alloc(const char * dst, @@ -721,6 +746,9 @@ int flow_join(const char * dst, qosspec_t * qs, const struct timespec * timeo) { + if (qs != NULL && qs->cypher_s != 0) + return -ECRYPT; + return __flow_alloc(dst, qs, timeo, true); } @@ -836,7 +864,7 @@ int fccntl(int fd, qs = va_arg(l, qosspec_t *); if (qs == NULL) goto einval; - *qs = flow->spec; + *qs = flow->qs; break; case FLOWGRXQLEN: qlen = va_arg(l, size_t *); @@ -1001,7 +1029,7 @@ ssize_t flow_write(int fd, } pthread_rwlock_wrlock(&ai.lock); - if (flow->spec.cypher_s > 0) + if (flow->qs.cypher_s > 0) if (crypt_encrypt(flow, sdb) < 0) { pthread_rwlock_unlock(&ai.lock); shm_rdrbuff_remove(ai.rdrb, idx); @@ -1009,7 +1037,7 @@ ssize_t flow_write(int fd, } pthread_rwlock_unlock(&ai.lock); - if (flow->spec.ber == 0 && add_crc(sdb) != 0) { + if (flow->qs.ber == 0 && add_crc(sdb) != 0) { shm_rdrbuff_remove(ai.rdrb, idx); return -ENOMEM; } @@ -1089,13 +1117,13 @@ ssize_t flow_read(int fd, return idx; sdb = shm_rdrbuff_get(ai.rdrb, idx); - if (flow->spec.ber == 0 && chk_crc(sdb) != 0) { + if (flow->qs.ber == 0 && chk_crc(sdb) != 0) { shm_rdrbuff_remove(ai.rdrb, idx); continue; } pthread_rwlock_wrlock(&ai.lock); - if (flow->spec.cypher_s > 0) + if (flow->qs.cypher_s > 0) if (crypt_decrypt(flow, sdb) < 0) { pthread_rwlock_unlock(&ai.lock); shm_rdrbuff_remove(ai.rdrb, @@ -1333,7 +1361,8 @@ int np1_flow_alloc(pid_t n_pid, int flow_id, qosspec_t qs) { - return flow_init(flow_id, n_pid, qs); + qs.cypher_s = 0; /* No encryption ctx for np1 */ + return flow_init(flow_id, n_pid, qs, NULL); } int np1_flow_dealloc(int flow_id) @@ -1394,7 +1423,9 @@ int ipcp_create_r(int result) int ipcp_flow_req_arr(const uint8_t * dst, size_t len, - qosspec_t qs) + qosspec_t qs, + const void * data, + size_t dlen) { irm_msg_t msg = IRM_MSG__INIT; irm_msg_t * recv_msg; @@ -1411,6 +1442,9 @@ int ipcp_flow_req_arr(const uint8_t * dst, msg.hash.data = (uint8_t *) dst; qs_msg = spec_to_msg(&qs); msg.qosspec = &qs_msg; + msg.has_pk = true; + msg.pk.data = (uint8_t *) data; + msg.pk.len = dlen; recv_msg = send_recv_irm_msg(&msg); if (recv_msg == NULL) @@ -1426,15 +1460,18 @@ int ipcp_flow_req_arr(const uint8_t * dst, return -1; } - fd = flow_init(recv_msg->flow_id, recv_msg->pid, qs); + qs.cypher_s = 0; /* No encryption ctx for np1 */ + fd = flow_init(recv_msg->flow_id, recv_msg->pid, qs, NULL); irm_msg__free_unpacked(recv_msg, NULL); return fd; } -int 
ipcp_flow_alloc_reply(int fd, - int response) +int ipcp_flow_alloc_reply(int fd, + int response, + const void * data, + size_t len) { irm_msg_t msg = IRM_MSG__INIT; irm_msg_t * recv_msg; @@ -1444,6 +1481,9 @@ int ipcp_flow_alloc_reply(int fd, msg.code = IRM_MSG_CODE__IPCP_FLOW_ALLOC_REPLY; msg.has_flow_id = true; + msg.has_pk = true; + msg.pk.data = (uint8_t *) data; + msg.pk.len = (uint32_t) len; pthread_rwlock_rdlock(&ai.lock); @@ -1503,7 +1543,7 @@ int ipcp_flow_read(int fd, if (idx < 0) return idx; *sdb = shm_rdrbuff_get(ai.rdrb, idx); - if (flow->spec.ber == 0 && chk_crc(*sdb) != 0) + if (flow->qs.ber == 0 && chk_crc(*sdb) != 0) continue; } while (frcti_rcv(flow->frcti, *sdb) != 0); @@ -1543,7 +1583,7 @@ int ipcp_flow_write(int fd, return -ENOMEM; } - if (flow->spec.ber == 0 && add_crc(sdb) != 0) { + if (flow->qs.ber == 0 && add_crc(sdb) != 0) { pthread_rwlock_unlock(&ai.lock); shm_rdrbuff_remove(ai.rdrb, idx); return -ENOMEM; @@ -1613,7 +1653,7 @@ int ipcp_flow_get_qoscube(int fd, assert(ai.flows[fd].flow_id >= 0); - *cube = qos_spec_to_cube(ai.flows[fd].spec); + *cube = qos_spec_to_cube(ai.flows[fd].qs); pthread_rwlock_unlock(&ai.lock); diff --git a/src/lib/frct.c b/src/lib/frct.c index 541cce42..e4b858d0 100644 --- a/src/lib/frct.c +++ b/src/lib/frct.c @@ -120,7 +120,7 @@ static struct frcti * frcti_create(int fd) frcti->rttseq = 0; frcti->probe = false; - if (ai.flows[fd].spec.loss == 0) { + if (ai.flows[fd].qs.loss == 0) { frcti->snd_cr.cflags |= FRCTFRTX; frcti->rcv_cr.cflags |= FRCTFRTX; } diff --git a/src/lib/ipcpd_messages.proto b/src/lib/ipcpd_messages.proto index 6d31f3b9..b0efe9ab 100644 --- a/src/lib/ipcpd_messages.proto +++ b/src/lib/ipcpd_messages.proto @@ -46,10 +46,11 @@ message ipcp_msg { optional int32 flow_id = 3; optional string dst = 4; optional qosspec_msg qosspec = 5; - optional ipcp_config_msg conf = 6; - optional int32 pid = 7; - optional layer_info_msg layer_info = 8; - optional int32 response = 9; - optional string comp = 10; - optional int32 result = 11; + optional bytes pk = 6; /* piggyback */ + optional ipcp_config_msg conf = 7; + optional int32 pid = 8; + optional layer_info_msg layer_info = 9; + optional int32 response = 10; + optional string comp = 11; + optional int32 result = 12; }; diff --git a/src/lib/irmd_messages.proto b/src/lib/irmd_messages.proto index 0ac58af9..c8749699 100644 --- a/src/lib/irmd_messages.proto +++ b/src/lib/irmd_messages.proto @@ -76,5 +76,6 @@ message irm_msg { optional uint32 timeo_sec = 16; optional uint32 timeo_nsec = 17; optional string comp = 18; - optional sint32 result = 19; + optional bytes pk = 19; /* piggyback */ + optional sint32 result = 20; }; diff --git a/src/tools/oping/oping.c b/src/tools/oping/oping.c index b6d64051..ca685292 100644 --- a/src/tools/oping/oping.c +++ b/src/tools/oping/oping.c @@ -233,6 +233,8 @@ int main(int argc, client.qs = qos_voice; else if (strcmp(qos, "data") == 0) client.qs = qos_data; + else if (strcmp(qos, "raw_crypt") == 0) + client.qs = qos_raw_crypt; else printf("Unknown QoS cube, defaulting to raw.\n"); } diff --git a/src/tools/oping/oping_server.c b/src/tools/oping/oping_server.c index 1b5d2f78..5cc347cb 100644 --- a/src/tools/oping/oping_server.c +++ b/src/tools/oping/oping_server.c @@ -100,6 +100,8 @@ void * server_thread(void *o) continue; } + printf("Received %d bytes on fd %d.\n", msg_len, fd); + clock_gettime(CLOCK_REALTIME, &now); pthread_mutex_lock(&server.lock); |