author    | Dimitri Staessens <dimitri.staessens@ugent.be> | 2018-10-06 18:06:47 +0200
committer | Dimitri Staessens <dimitri.staessens@ugent.be> | 2018-10-06 18:06:47 +0200
commit    | 0b2e5c5410580c755cef02114e51f15b19cfaffa (patch)
tree      | 63d684e6057c9caa43739b599d54a72f9959d4f8 /src
parent    | bfc29ca20406ccd69363b0f9796987534318e7ae (diff)
parent    | d9ad3852613cda026d4520b5c608ada7433dd7d9 (diff)
download  | ouroboros-0b2e5c5410580c755cef02114e51f15b19cfaffa.tar.gz, ouroboros-0b2e5c5410580c755cef02114e51f15b19cfaffa.zip
Merge branch 'testing' into be
Diffstat (limited to 'src')
73 files changed, 1433 insertions, 1261 deletions
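The central change in this merge is that flow-allocation signalling no longer carries a single qoscube_t but the full qosspec_t (delay, bandwidth, availability, loss, ber, in_order, max_gap), with multi-byte fields converted to network byte order before they go on the wire. A minimal sketch of that conversion, assuming the qosspec_t, struct mgmt_msg and hton32/hton64/ntoh32/ntoh64 definitions used elsewhere in this patch (pack_qos() and unpack_qos() are illustrative helpers, not functions from the tree):

```c
/* Illustrative only: mirrors the field handling in eth_ipcp_alloc()
 * and fa_alloc()/fa_post_packet() from this patch. */
static void pack_qos(struct mgmt_msg * msg, qosspec_t qs)
{
        msg->delay        = hton32(qs.delay);
        msg->bandwidth    = hton64(qs.bandwidth);
        msg->availability = qs.availability;   /* single byte, no swap */
        msg->loss         = hton32(qs.loss);
        msg->ber          = hton32(qs.ber);
        msg->in_order     = qs.in_order;       /* single byte, no swap */
        msg->max_gap      = hton32(qs.max_gap);
}

static qosspec_t unpack_qos(const struct mgmt_msg * msg)
{
        qosspec_t qs;

        qs.delay        = ntoh32(msg->delay);
        qs.bandwidth    = ntoh64(msg->bandwidth);
        qs.availability = msg->availability;
        qs.loss         = ntoh32(msg->loss);
        qs.ber          = ntoh32(msg->ber);
        qs.in_order     = msg->in_order;
        qs.max_gap      = ntoh32(msg->max_gap);

        return qs;
}
```

On the receiving side, eth_ipcp_mgmt_frame() and fa_post_packet() in the diff below perform the inverse ntoh conversions before handing the spec to ipcp_flow_req_arr().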
diff --git a/src/ipcpd/config.h.in b/src/ipcpd/config.h.in index b9961b01..afce5e86 100644 --- a/src/ipcpd/config.h.in +++ b/src/ipcpd/config.h.in @@ -63,3 +63,5 @@ #cmakedefine HAVE_NETMAP #cmakedefine HAVE_BPF #cmakedefine HAVE_RAW_SOCKETS +#define IPCP_ETH_RD_THR @IPCP_ETH_RD_THR@ +#define IPCP_ETH_WR_THR @IPCP_ETH_WR_THR@ diff --git a/src/ipcpd/eth/CMakeLists.txt b/src/ipcpd/eth/CMakeLists.txt index 6b8d1a77..6672f93c 100644 --- a/src/ipcpd/eth/CMakeLists.txt +++ b/src/ipcpd/eth/CMakeLists.txt @@ -14,30 +14,31 @@ include_directories(${CMAKE_BINARY_DIR}/include) find_path(NETMAP_C_INCLUDE_DIR net/netmap_user.h - HINTS /usr/include /usr/local/include -) + HINTS /usr/include /usr/local/include) mark_as_advanced(NETMAP_C_INCLUDE_DIR) +# Check for raw sockets if (CMAKE_SYSTEM_NAME STREQUAL "Linux") set(DISABLE_RAW_SOCKETS FALSE CACHE BOOL "Disable raw socket support for Ethernet IPCPs") if (NOT DISABLE_RAW_SOCKETS) message(STATUS "Raw socket support for Ethernet IPCPs enabled") set(HAVE_RAW_SOCKETS TRUE PARENT_SCOPE) + set(HAVE_RAW_SOCKETS TRUE) set(HAVE_ETH TRUE) else () message(STATUS "Raw socket support for Ethernet IPCPs disabled by user") + unset(HAVE_RAW_SOCKETS PARENT_SCOPE) unset(HAVE_RAW_SOCKETS) - unset(HAVE_ETH) endif () endif () +# Check for BPF if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") find_path(BPF_C_INCLUDE_DIR - net/bpf.h - HINTS /usr/include /usr/local/include - ) + net/bpf.h + HINTS /usr/include /usr/local/include) mark_as_advanced(BPF_C_INCLUDE_DIR) @@ -46,46 +47,51 @@ if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") "Disable Berkeley Packet Filter support for Ethernet IPCPs") if (NOT DISABLE_BPF) message(STATUS "Berkeley Packet Filter support " - "for Ethernet IPCPs enabled") + "for Ethernet IPCPs enabled") set(HAVE_BPF TRUE PARENT_SCOPE) + set(HAVE_BPF TRUE) set(HAVE_ETH TRUE) else () message(STATUS "Berkeley Packet Filter support " - "for Ethernet IPCPs disabled by user") + "for Ethernet IPCPs disabled by user") + unset(HAVE_BPF PARENT_SCOPE) unset(HAVE_BPF) - unset(HAVE_ETH) endif () endif () endif () -if (NETMAP_C_INCLUDE_DIR) +# Check for netmap exclusively +if (NOT HAVE_RAW_SOCKETS AND NOT HAVE_BPF AND NETMAP_C_INCLUDE_DIR) set(DISABLE_NETMAP FALSE CACHE BOOL - "Disable netmap support for ETH IPCPs") + "Disable netmap support for ETH IPCPs") if (NOT DISABLE_NETMAP) message(STATUS "Netmap support for Ethernet IPCPs enabled") - set(HAVE_NETMAP TRUE PARENT_SCOPE) test_and_set_c_compiler_flag_global(-std=c99) + set(HAVE_NETMAP TRUE PARENT_SCOPE) set(HAVE_ETH TRUE) else () message(STATUS "Netmap support for Ethernet IPCPs disabled by user") - unset(HAVE_NETMAP) - unset(HAVE_ETH) - unset(IPCP_ETH_TARGET CACHE) + unset(HAVE_NETMAP PARENT_SCOPE) endif () endif () if (HAVE_ETH) message(STATUS "Supported raw packet API found, building eth-llc and eth-dix") + set(IPCP_ETH_RD_THR 3 CACHE STRING + "Number of reader threads in Ethernet IPCP") + set(IPCP_ETH_WR_THR 3 CACHE STRING + "Number of writer threads in Ethernet IPCP") + set(ETH_LLC_SOURCES # Add source files here ${CMAKE_CURRENT_SOURCE_DIR}/llc.c - ) + ) set(ETH_DIX_SOURCES # Add source files here ${CMAKE_CURRENT_SOURCE_DIR}/dix.c - ) + ) set(IPCP_ETH_LLC_TARGET ipcpd-eth-llc CACHE INTERNAL "") set(IPCP_ETH_DIX_TARGET ipcpd-eth-dix CACHE INTERNAL "") diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c index 443f3fdb..1bbfac5b 100644 --- a/src/ipcpd/eth/eth.c +++ b/src/ipcpd/eth/eth.c @@ -29,6 +29,8 @@ #define _DARWIN_C_SOURCE #elif defined(__FreeBSD__) #define __BSD_VISIBLE 1 +#elif defined (__linux__) || defined 
(__CYGWIN__) +#define _DEFAULT_SOURCE #else #define _POSIX_C_SOURCE 200112L #endif @@ -121,7 +123,7 @@ #define DIX_HEADER_SIZE (DIX_EID_SIZE + DIX_LENGTH_SIZE) #define ETH_HEADER_TOT_SIZE (ETH_HEADER_SIZE + DIX_HEADER_SIZE) #define MAX_EIDS (1 << (8 * DIX_EID_SIZE)) -#define ETH_MAX_SDU_SIZE (ETH_MTU - DIX_HEADER_SIZE) +#define ETH_MAX_PACKET_SIZE (ETH_MTU - DIX_HEADER_SIZE) #define ETH_FRAME_SIZE (ETH_HEADER_SIZE + ETH_MTU_MAX) #elif defined(BUILD_ETH_LLC) #define THIS_TYPE IPCP_ETH_LLC @@ -129,7 +131,7 @@ #define LLC_HEADER_SIZE 3 #define ETH_HEADER_TOT_SIZE (ETH_HEADER_SIZE + LLC_HEADER_SIZE) #define MAX_SAPS 64 -#define ETH_MAX_SDU_SIZE (ETH_MTU - LLC_HEADER_SIZE) +#define ETH_MAX_PACKET_SIZE (ETH_MTU - LLC_HEADER_SIZE) #define ETH_FRAME_SIZE (ETH_HEADER_SIZE + ETH_MTU_MAX) #endif @@ -144,15 +146,27 @@ #define NAME_QUERY_REPLY 3 struct mgmt_msg { - uint8_t code; #if defined(BUILD_ETH_DIX) uint16_t seid; uint16_t deid; #elif defined(BUILD_ETH_LLC) uint8_t ssap; uint8_t dsap; + /* QoS here for alignment */ + uint8_t code; + uint8_t availability; +#endif + /* QoS parameters from spec, aligned */ + uint32_t loss; + uint64_t bandwidth; + uint32_t ber; + uint32_t max_gap; + uint32_t delay; + uint8_t in_order; +#if defined (BUILD_ETH_DIX) + uint8_t code; + uint8_t availability; #endif - uint8_t qoscube; int8_t response; } __attribute__((packed)); @@ -192,6 +206,7 @@ struct { struct shim_data * shim_data; #ifdef __linux__ int mtu; + int if_idx; #endif #if defined(HAVE_NETMAP) struct nm_desc * nmd; @@ -213,11 +228,10 @@ struct { #endif struct ef * fd_to_ef; fset_t * np1_flows; - fqueue_t * fq; pthread_rwlock_t flows_lock; - pthread_t sdu_writer; - pthread_t sdu_reader; + pthread_t packet_writer[IPCP_ETH_WR_THR]; + pthread_t packet_reader[IPCP_ETH_RD_THR]; #ifdef __linux__ pthread_t if_monitor; @@ -258,10 +272,6 @@ static int eth_data_init(void) if (eth_data.np1_flows == NULL) goto fail_np1_flows; - eth_data.fq = fqueue_create(); - if (eth_data.fq == NULL) - goto fail_fq; - for (i = 0; i < SYS_MAX_FLOWS; ++i) { #if defined(BUILD_ETH_DIX) eth_data.fd_to_ef[i].r_eid = -1; @@ -309,8 +319,6 @@ static int eth_data_init(void) fail_flows_lock: shim_data_destroy(eth_data.shim_data); fail_shim_data: - fqueue_destroy(eth_data.fq); - fail_fq: fset_destroy(eth_data.np1_flows); fail_np1_flows: #ifdef BUILD_ETH_LLC @@ -337,7 +345,6 @@ static void eth_data_fini(void) pthread_mutex_destroy(ð_data.mgmt_lock); pthread_rwlock_destroy(ð_data.flows_lock); shim_data_destroy(eth_data.shim_data); - fqueue_destroy(eth_data.fq); fset_destroy(eth_data.np1_flows); #ifdef BUILD_ETH_LLC bmp_destroy(eth_data.saps); @@ -376,7 +383,7 @@ static int eth_ipcp_send_frame(const uint8_t * dst_addr, assert(frame); - if (len > (size_t) ETH_MAX_SDU_SIZE) + if (len > (size_t) ETH_MAX_PACKET_SIZE) return -1; e_frame = (struct eth_frame *) frame; @@ -438,7 +445,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, uint8_t ssap, #endif const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { uint8_t * buf; struct mgmt_msg * msg; @@ -458,7 +465,14 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, #elif defined(BUILD_ETH_LLC) msg->ssap = ssap; #endif - msg->qoscube = cube; + + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); memcpy(msg + 1, hash, ipcp_dir_hash_len()); @@ -528,7 +542,7 @@ static int eth_ipcp_req(uint8_t * r_addr, uint8_t r_sap, 
#endif const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, ALLOC_TIMEO * MILLION}; struct timespec abstime; @@ -552,7 +566,7 @@ static int eth_ipcp_req(uint8_t * r_addr, } /* reply to IRM, called under lock to prevent race */ - fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -692,11 +706,20 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf, uint8_t * r_addr) { struct mgmt_msg * msg; + qosspec_t qs; msg = (struct mgmt_msg *) buf; switch (msg->code) { case FLOW_REQ: + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); + if (shim_data_reg_has(eth_data.shim_data, buf + sizeof(*msg))) { eth_ipcp_req(r_addr, @@ -706,7 +729,7 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf, msg->ssap, #endif buf + sizeof(*msg), - msg->qoscube); + qs); } break; case FLOW_REPLY: @@ -785,7 +808,7 @@ static void * eth_ipcp_mgmt_handler(void * o) return (void *) 0; } -static void * eth_ipcp_sdu_reader(void * o) +static void * eth_ipcp_packet_reader(void * o) { uint8_t br_addr[MAC_SIZE]; #if defined(BUILD_ETH_DIX) @@ -890,7 +913,9 @@ static void * eth_ipcp_sdu_reader(void * o) if (deid == MGMT_EID) { #elif defined (BUILD_ETH_LLC) if (length > 0x05FF) {/* DIX */ +#ifndef HAVE_NETMAP ipcp_sdb_release(sdb); +#endif continue; } @@ -911,8 +936,8 @@ static void * eth_ipcp_sdu_reader(void * o) memcpy(frame->buf, &e_frame->payload, length); memcpy(frame->r_addr, e_frame->src_hwaddr, MAC_SIZE); - pthread_mutex_unlock(ð_data.mgmt_lock); + pthread_mutex_lock(ð_data.mgmt_lock); list_add(&frame->next, ð_data.mgmt_frames); pthread_cond_signal(ð_data.mgmt_cond); pthread_mutex_unlock(ð_data.mgmt_lock); @@ -962,7 +987,12 @@ static void * eth_ipcp_sdu_reader(void * o) return (void *) 0; } -static void * eth_ipcp_sdu_writer(void * o) +static void cleanup_writer(void * o) +{ + fqueue_destroy((fqueue_t *) o); +} + +static void * eth_ipcp_packet_writer(void * o) { int fd; struct shm_du_buff * sdb; @@ -975,15 +1005,21 @@ static void * eth_ipcp_sdu_writer(void * o) #endif uint8_t r_addr[MAC_SIZE]; + fqueue_t * fq; + + fq = fqueue_create(); + if (fq == NULL) + return (void *) -1; + (void) o; + pthread_cleanup_push(cleanup_writer, fq); + ipcp_lock_to_core(); while (true) { - fevent(eth_data.np1_flows, eth_data.fq, NULL); - - pthread_rwlock_rdlock(ð_data.flows_lock); - while ((fd = fqueue_next(eth_data.fq)) >= 0) { + fevent(eth_data.np1_flows, fq, NULL); + while ((fd = fqueue_next(fq)) >= 0) { if (ipcp_flow_read(fd, &sdb)) { log_dbg("Bad read from fd %d.", fd); continue; @@ -996,6 +1032,8 @@ static void * eth_ipcp_sdu_writer(void * o) log_dbg("Failed to allocate header."); ipcp_sdb_release(sdb); } + + pthread_rwlock_rdlock(ð_data.flows_lock); #if defined(BUILD_ETH_DIX) deid = eth_data.fd_to_ef[fd].r_eid; #elif defined(BUILD_ETH_LLC) @@ -1006,6 +1044,8 @@ static void * eth_ipcp_sdu_writer(void * o) eth_data.fd_to_ef[fd].r_addr, MAC_SIZE); + pthread_rwlock_unlock(ð_data.flows_lock); + eth_ipcp_send_frame(r_addr, #if defined(BUILD_ETH_DIX) deid, @@ -1016,9 +1056,10 @@ static void * eth_ipcp_sdu_writer(void * o) len); ipcp_sdb_release(sdb); } - pthread_rwlock_unlock(ð_data.flows_lock); } + pthread_cleanup_pop(true); + return (void *) 1; } @@ 
-1129,7 +1170,7 @@ static void * eth_ipcp_if_monitor(void * o) ifi = NLMSG_DATA(h); /* Not our interface */ - if (ifi->ifi_index != eth_data.device.sll_ifindex) + if (ifi->ifi_index != eth_data.if_idx) continue; if (ifi->ifi_flags & IFF_UP) { @@ -1189,6 +1230,10 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) #ifndef SHM_RDRB_MULTI_BLOCK size_t maxsz; #endif +#if defined(HAVE_RAW_SOCKETS) + int qdisc_bypass = 1; + int flags; +#endif assert(conf); assert(conf->type == THIS_TYPE); @@ -1198,7 +1243,7 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) } memset(&ifr, 0, sizeof(ifr)); - memcpy(ifr.ifr_name, conf->dev, strlen(conf->dev)); + memcpy(ifr.ifr_name, conf->dev, IFNAMSIZ); #ifdef BUILD_ETH_DIX if (conf->ethertype < 0x0600 || conf->ethertype == 0xFFFF) { @@ -1274,9 +1319,9 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) idx = if_nametoindex(conf->dev); if (idx == 0) { log_err("Failed to retrieve interface index."); - close(skfd); return -1; } + eth_data.if_idx = idx; #endif /* __FreeBSD__ */ #if defined(HAVE_NETMAP) @@ -1354,16 +1399,32 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) return -1; } + flags = fcntl(eth_data.s_fd, F_GETFL, 0); + if (flags < 0) { + log_err("Failed to get flags."); + goto fail_device; + } + + if (fcntl(eth_data.s_fd, F_SETFL, flags | O_NONBLOCK)) { + log_err("Failed to set socket non-blocking."); + goto fail_device; + } + + if (setsockopt(eth_data.s_fd, SOL_PACKET, PACKET_QDISC_BYPASS, + &qdisc_bypass, sizeof(qdisc_bypass))) { + log_info("Qdisc bypass not supported."); + } + if (bind(eth_data.s_fd, (struct sockaddr *) ð_data.device, sizeof(eth_data.device))) { - log_err("Failed to bind socket to interface"); + log_err("Failed to bind socket to interface."); goto fail_device; } #endif /* HAVE_NETMAP */ ipcp_set_state(IPCP_OPERATIONAL); -#ifdef __linux__ +#if defined(__linux__) if (pthread_create(ð_data.if_monitor, NULL, eth_ipcp_if_monitor, @@ -1381,20 +1442,24 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) goto fail_mgmt_handler; } - if (pthread_create(ð_data.sdu_reader, - NULL, - eth_ipcp_sdu_reader, - NULL)) { - ipcp_set_state(IPCP_INIT); - goto fail_sdu_reader; + for (idx = 0; idx < IPCP_ETH_RD_THR; ++idx) { + if (pthread_create(ð_data.packet_reader[idx], + NULL, + eth_ipcp_packet_reader, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_packet_reader; + } } - if (pthread_create(ð_data.sdu_writer, - NULL, - eth_ipcp_sdu_writer, - NULL)) { - ipcp_set_state(IPCP_INIT); - goto fail_sdu_writer; + for (idx = 0; idx < IPCP_ETH_WR_THR; ++idx) { + if (pthread_create(ð_data.packet_writer[idx], + NULL, + eth_ipcp_packet_writer, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_packet_writer; + } } #if defined(BUILD_ETH_DIX) @@ -1407,10 +1472,17 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) return 0; - fail_sdu_writer: - pthread_cancel(eth_data.sdu_reader); - pthread_join(eth_data.sdu_reader, NULL); - fail_sdu_reader: + fail_packet_writer: + while (idx > 0) { + pthread_cancel(eth_data.packet_writer[--idx]); + pthread_join(eth_data.packet_writer[idx], NULL); + } + idx = IPCP_ETH_RD_THR; + fail_packet_reader: + while (idx > 0) { + pthread_cancel(eth_data.packet_reader[--idx]); + pthread_join(eth_data.packet_reader[idx], NULL); + } pthread_cancel(eth_data.mgmt_handler); pthread_join(eth_data.mgmt_handler, NULL); fail_mgmt_handler: @@ -1418,7 +1490,7 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) pthread_cancel(eth_data.if_monitor); 
pthread_join(eth_data.if_monitor, NULL); #endif -#if !defined(HAVE_NETMAP) +#if defined(__linux__) || !defined(HAVE_NETMAP) fail_device: #endif #if defined(HAVE_NETMAP) @@ -1509,7 +1581,7 @@ static int eth_ipcp_query(const uint8_t * hash) static int eth_ipcp_flow_alloc(int fd, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { #ifdef BUILD_ETH_LLC uint8_t ssap = 0; @@ -1521,11 +1593,6 @@ static int eth_ipcp_flow_alloc(int fd, assert(hash); - if (cube > QOS_CUBE_DATA) { - log_dbg("Unsupported QoS requested."); - return -1; - } - if (!shim_data_dir_has(eth_data.shim_data, hash)) { log_err("Destination unreachable."); return -1; @@ -1553,7 +1620,7 @@ static int eth_ipcp_flow_alloc(int fd, #elif defined(BUILD_ETH_LLC) ssap, #endif - hash, cube) < 0) { + hash, qs) < 0) { #ifdef BUILD_ETH_LLC pthread_rwlock_wrlock(ð_data.flows_lock); bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); @@ -1696,6 +1763,8 @@ static struct ipcp_ops eth_ops = { int main(int argc, char * argv[]) { + int i; + if (ipcp_init(argc, argv, ð_ops) < 0) goto fail_init; @@ -1722,14 +1791,20 @@ int main(int argc, ipcp_shutdown(); if (ipcp_get_state() == IPCP_SHUTDOWN) { - pthread_cancel(eth_data.sdu_writer); - pthread_cancel(eth_data.sdu_reader); + for (i = 0; i < IPCP_ETH_WR_THR; ++i) + pthread_cancel(eth_data.packet_writer[i]); + for (i = 0; i < IPCP_ETH_RD_THR; ++i) + pthread_cancel(eth_data.packet_reader[i]); + pthread_cancel(eth_data.mgmt_handler); #ifdef __linux__ pthread_cancel(eth_data.if_monitor); #endif - pthread_join(eth_data.sdu_writer, NULL); - pthread_join(eth_data.sdu_reader, NULL); + for (i = 0; i < IPCP_ETH_WR_THR; ++i) + pthread_join(eth_data.packet_writer[i], NULL); + for (i = 0; i < IPCP_ETH_RD_THR; ++i) + pthread_join(eth_data.packet_reader[i], NULL); + pthread_join(eth_data.mgmt_handler, NULL); #ifdef __linux__ pthread_join(eth_data.if_monitor, NULL); diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c index 0b5ddf11..f8df5640 100644 --- a/src/ipcpd/ipcp.c +++ b/src/ipcpd/ipcp.c @@ -20,13 +20,24 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#define __XSI_VISIBLE 500 +#endif + #if defined(__linux__) && !defined(DISABLE_CORE_LOCK) #define _GNU_SOURCE #define NPROC (sysconf(_SC_NPROCESSORS_ONLN)) #endif +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L #define __XSI_VISIBLE 500 +#endif #include "config.h" @@ -194,6 +205,7 @@ static void * mainloop(void * o) layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT; int fd = -1; struct cmd * cmd; + qosspec_t qs; ret_msg.code = IPCP_MSG_CODE__IPCP_REPLY; @@ -418,12 +430,13 @@ static void * mainloop(void * o) break; } + qs = msg_to_spec(msg->qosspec); fd = np1_flow_alloc(msg->pid, - msg->port_id, - msg->qoscube); + msg->flow_id, + qs); if (fd < 0) { - log_err("Failed allocating fd on port_id %d.", - msg->port_id); + log_err("Failed allocating fd on flow_id %d.", + msg->flow_id); ret_msg.result = -1; break; } @@ -431,7 +444,7 @@ static void * mainloop(void * o) ret_msg.result = ipcpi.ops->ipcp_flow_alloc(fd, msg->hash.data, - msg->qoscube); + qs); break; case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP: ret_msg.has_result = true; @@ -448,10 +461,10 @@ static void * mainloop(void * o) } if (!msg->response) { - fd = np1_flow_resp(msg->port_id); + fd = np1_flow_resp(msg->flow_id); if (fd < 0) { log_warn("Port_id %d is not known.", - msg->port_id); + msg->flow_id); ret_msg.result = -1; break; } @@ -475,10 +488,10 @@ static void * mainloop(void * o) break; } - fd = np1_flow_dealloc(msg->port_id); + fd = np1_flow_dealloc(msg->flow_id); if (fd < 0) { - log_warn("Could not deallocate port_id %d.", - msg->port_id); + log_warn("Could not deallocate flow_id %d.", + msg->flow_id); ret_msg.result = -1; break; } diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h index 5417fc74..1d25fb3f 100644 --- a/src/ipcpd/ipcp.h +++ b/src/ipcpd/ipcp.h @@ -26,7 +26,6 @@ #include <ouroboros/hash.h> #include <ouroboros/ipcp.h> #include <ouroboros/list.h> -#include <ouroboros/qoscube.h> #include <ouroboros/sockets.h> #include <ouroboros/tpm.h> @@ -60,7 +59,7 @@ struct ipcp_ops { int (* ipcp_flow_alloc)(int fd, const uint8_t * dst, - qoscube_t qos); + qosspec_t qs); int (* ipcp_flow_alloc_resp)(int fd, int response); diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c index 358f6388..ab43f1f8 100644 --- a/src/ipcpd/local/main.c +++ b/src/ipcpd/local/main.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -55,7 +59,7 @@ struct { fqueue_t * fq; pthread_rwlock_t lock; - pthread_t sduloop; + pthread_t packet_loop; } local_data; static int local_data_init(void) @@ -93,7 +97,7 @@ static void local_data_fini(void){ pthread_rwlock_destroy(&local_data.lock); } -static void * ipcp_local_sdu_loop(void * o) +static void * ipcp_local_packet_loop(void * o) { (void) o; @@ -135,8 +139,8 @@ static int ipcp_local_bootstrap(const struct ipcp_config * conf) ipcp_set_state(IPCP_OPERATIONAL); - if (pthread_create(&local_data.sduloop, NULL, - ipcp_local_sdu_loop, NULL)) { + if (pthread_create(&local_data.packet_loop, NULL, + ipcp_local_packet_loop, NULL)) { ipcp_set_state(IPCP_INIT); return -1; } @@ -179,7 +183,7 @@ static int ipcp_local_query(const uint8_t * hash) static int ipcp_local_flow_alloc(int fd, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, ALLOC_TIMEOUT * MILLION}; struct timespec abstime; @@ -208,7 +212,7 @@ static int ipcp_local_flow_alloc(int fd, assert(ipcpi.alloc_id == -1); - out_fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + out_fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (out_fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_dbg("Flow allocation failed: %d", out_fd); @@ -360,8 +364,8 @@ int main(int argc, ipcp_shutdown(); if (ipcp_get_state() == IPCP_SHUTDOWN) { - pthread_cancel(local_data.sduloop); - pthread_join(local_data.sduloop, NULL); + pthread_cancel(local_data.packet_loop); + pthread_join(local_data.packet_loop, NULL); } local_data_fini(); diff --git a/src/ipcpd/normal/CMakeLists.txt b/src/ipcpd/normal/CMakeLists.txt index 6dd68385..d1585395 100644 --- a/src/ipcpd/normal/CMakeLists.txt +++ b/src/ipcpd/normal/CMakeLists.txt @@ -37,13 +37,12 @@ set(SOURCE_FILES dht.c dir.c dt.c - dt_pci.c enroll.c fa.c main.c pff.c routing.c - sdu_sched.c + psched.c # Add policies last pol/alternate_pff.c pol/flat.c diff --git a/src/ipcpd/normal/connmgr.c b/src/ipcpd/normal/connmgr.c index bf07ebc4..7b71761f 100644 --- a/src/ipcpd/normal/connmgr.c +++ b/src/ipcpd/normal/connmgr.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "connection-manager" diff --git a/src/ipcpd/normal/dht.c b/src/ipcpd/normal/dht.c index a6f1928b..4064bf5c 100644 --- a/src/ipcpd/normal/dht.c +++ b/src/ipcpd/normal/dht.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -58,21 +62,21 @@ typedef KadContactMsg kad_contact_msg_t; #define CLOCK_REALTIME_COARSE CLOCK_REALTIME #endif -#define DHT_MAX_REQS 2048 /* KAD recommends rnd(), bmp can be changed. */ -#define KAD_ALPHA 3 /* Parallel factor, proven optimal value. */ -#define KAD_K 8 /* Replication factor, MDHT value. */ -#define KAD_T_REPL 900 /* Replication time, tied to k. MDHT value. */ -#define KAD_T_REFR 900 /* Refresh time stale bucket, MDHT value. */ -#define KAD_T_JOIN 8 /* Response time to wait for a join. */ -#define KAD_T_RESP 5 /* Response time to wait for a response. */ -#define KAD_R_PING 2 /* Ping retries before declaring peer dead. */ -#define KAD_QUEER 15 /* Time to declare peer questionable. 
*/ -#define KAD_BETA 8 /* Bucket split factor, must be 1, 2, 4 or 8. */ -#define KAD_RESP_RETR 6 /* Number of retries on sending a response. */ -#define KAD_JOIN_RETR 8 /* Number of retries sending a join. */ -#define KAD_JOIN_INTV 1 /* Time (seconds) between join retries. */ -#define HANDLE_TIMEO 1000 /* Timeout for dht_handle_sdu tpm check (ms) */ -#define DHT_RETR_ADDR 1 /* Number of addresses to return on retrieve */ +#define DHT_MAX_REQS 2048 /* KAD recommends rnd(), bmp can be changed. */ +#define KAD_ALPHA 3 /* Parallel factor, proven optimal value. */ +#define KAD_K 8 /* Replication factor, MDHT value. */ +#define KAD_T_REPL 900 /* Replication time, tied to k. MDHT value. */ +#define KAD_T_REFR 900 /* Refresh time stale bucket, MDHT value. */ +#define KAD_T_JOIN 8 /* Response time to wait for a join. */ +#define KAD_T_RESP 5 /* Response time to wait for a response. */ +#define KAD_R_PING 2 /* Ping retries before declaring peer dead. */ +#define KAD_QUEER 15 /* Time to declare peer questionable. */ +#define KAD_BETA 8 /* Bucket split factor, must be 1, 2, 4 or 8. */ +#define KAD_RESP_RETR 6 /* Number of retries on sending a response. */ +#define KAD_JOIN_RETR 8 /* Number of retries sending a join. */ +#define KAD_JOIN_INTV 1 /* Time (seconds) between join retries. */ +#define HANDLE_TIMEO 1000 /* Timeout for dht_handle_packet tpm check (ms) */ +#define DHT_RETR_ADDR 1 /* Number of addresses to return on retrieve */ enum dht_state { DHT_INIT = 0, @@ -247,7 +251,7 @@ struct join_info { uint64_t addr; }; -struct sdu_info { +struct packet_info { struct dht * dht; struct shm_du_buff * sdb; }; @@ -1485,7 +1489,7 @@ static int send_msg(struct dht * dht, kad_msg__pack(msg, shm_du_buff_head(sdb)); - if (dt_write_sdu(addr, QOS_CUBE_BE, dht->fd, sdb) == 0) + if (dt_write_packet(addr, QOS_CUBE_BE, dht->fd, sdb) == 0) break; ipcp_sdb_release(sdb); @@ -2396,7 +2400,7 @@ uint64_t dht_query(struct dht * dht, return 0; } -static void * dht_handle_sdu(void * o) +static void * dht_handle_packet(void * o) { struct dht * dht = (struct dht *) o; @@ -2580,8 +2584,8 @@ static void * dht_handle_sdu(void * o) return (void *) 0; } -static void dht_post_sdu(void * comp, - struct shm_du_buff * sdb) +static void dht_post_packet(void * comp, + struct shm_du_buff * sdb) { struct cmd * cmd; struct dht * dht = (struct dht *) comp; @@ -2796,19 +2800,19 @@ struct dht * dht_create(uint64_t addr) dht->addr = addr; dht->id = NULL; #ifndef __DHT_TEST__ - dht->tpm = tpm_create(2, 1, dht_handle_sdu, dht); + dht->tpm = tpm_create(2, 1, dht_handle_packet, dht); if (dht->tpm == NULL) goto fail_tpm_create; if (tpm_start(dht->tpm)) goto fail_tpm_start; - dht->fd = dt_reg_comp(dht, &dht_post_sdu, DHT); + dht->fd = dt_reg_comp(dht, &dht_post_packet, DHT); notifier_reg(handle_event, dht); #else (void) handle_event; - (void) dht_handle_sdu; - (void) dht_post_sdu; + (void) dht_handle_packet; + (void) dht_post_packet; #endif dht->state = DHT_INIT; diff --git a/src/ipcpd/normal/dir.c b/src/ipcpd/normal/dir.c index 345d220d..a195f016 100644 --- a/src/ipcpd/normal/dir.c +++ b/src/ipcpd/normal/dir.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "directory" diff --git a/src/ipcpd/normal/dt.c b/src/ipcpd/normal/dt.c index b9d8934e..dc7343f1 100644 --- a/src/ipcpd/normal/dt.c +++ b/src/ipcpd/normal/dt.c @@ -20,15 +20,17 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" #define DT "dt" #define OUROBOROS_PREFIX DT -/* FIXME: fix #defines and remove endian.h include. */ -#include <ouroboros/endian.h> #include <ouroboros/bitmap.h> #include <ouroboros/errno.h> #include <ouroboros/logs.h> @@ -42,10 +44,9 @@ #include "connmgr.h" #include "ipcp.h" #include "dt.h" -#include "dt_pci.h" #include "pff.h" #include "routing.h" -#include "sdu_sched.h" +#include "psched.h" #include "comp.h" #include "fa.h" @@ -64,13 +65,96 @@ #endif struct comp_info { - void (* post_sdu)(void * comp, struct shm_du_buff * sdb); + void (* post_packet)(void * comp, struct shm_du_buff * sdb); void * comp; char * name; }; +/* Abstract syntax */ +enum dtp_fields { + DTP_DST = 0, /* DST ADDRESS */ + DTP_QOS, /* QOS ID */ + DTP_DEID, /* DST Endpoint ID */ + DTP_TTL, /* TTL FIELD */ + DTP_NUM_FIELDS /* Number of fields */ +}; + +/* Fixed field lengths */ +#define TTL_LEN 1 +#define QOS_LEN 1 + +struct dt_pci { + uint64_t dst_addr; + qoscube_t qc; + uint8_t ttl; + uint32_t eid; +}; + +struct { + uint8_t addr_size; + uint8_t eid_size; + size_t head_size; + + /* Offsets */ + size_t qc_o; + size_t ttl_o; + size_t eid_o; + + /* Initial TTL value */ + uint8_t max_ttl; +} dt_pci_info; + +static int dt_pci_ser(struct shm_du_buff * sdb, + struct dt_pci * dt_pci) +{ + uint8_t * head; + uint8_t ttl = dt_pci_info.max_ttl; + + assert(sdb); + assert(dt_pci); + + head = shm_du_buff_head_alloc(sdb, dt_pci_info.head_size); + if (head == NULL) + return -EPERM; + + /* FIXME: Add check and operations for Big Endian machines. */ + memcpy(head, &dt_pci->dst_addr, dt_pci_info.addr_size); + memcpy(head + dt_pci_info.qc_o, &dt_pci->qc, QOS_LEN); + memcpy(head + dt_pci_info.ttl_o, &ttl, TTL_LEN); + memcpy(head + dt_pci_info.eid_o, &dt_pci->eid, dt_pci_info.eid_size); + + return 0; +} + +static void dt_pci_des(struct shm_du_buff * sdb, + struct dt_pci * dt_pci) +{ + uint8_t * head; + + assert(sdb); + assert(dt_pci); + + head = shm_du_buff_head(sdb); + + /* Decrease TTL */ + --*(head + dt_pci_info.ttl_o); + + /* FIXME: Add check and operations for Big Endian machines. 
*/ + memcpy(&dt_pci->dst_addr, head, dt_pci_info.addr_size); + memcpy(&dt_pci->qc, head + dt_pci_info.qc_o, QOS_LEN); + memcpy(&dt_pci->ttl, head + dt_pci_info.ttl_o, TTL_LEN); + memcpy(&dt_pci->eid, head + dt_pci_info.eid_o, dt_pci_info.eid_size); +} + +static void dt_pci_shrink(struct shm_du_buff * sdb) +{ + assert(sdb); + + shm_du_buff_head_release(sdb, dt_pci_info.head_size); +} + struct { - struct sdu_sched * sdu_sched; + struct psched * psched; struct pff * pff[QOS_CUBE_MAX]; struct routing_i * routing[QOS_CUBE_MAX]; @@ -337,24 +421,25 @@ static void handle_event(void * self, #ifdef IPCP_FLOW_STATS stat_used(c->flow_info.fd, c->conn_info.addr); #endif - sdu_sched_add(dt.sdu_sched, c->flow_info.fd); - log_dbg("Added fd %d to SDU scheduler.", c->flow_info.fd); + psched_add(dt.psched, c->flow_info.fd); + log_dbg("Added fd %d to packet scheduler.", c->flow_info.fd); break; case NOTIFY_DT_CONN_DEL: #ifdef IPCP_FLOW_STATS stat_used(c->flow_info.fd, INVALID_ADDR); #endif - sdu_sched_del(dt.sdu_sched, c->flow_info.fd); - log_dbg("Removed fd %d from SDU scheduler.", c->flow_info.fd); + psched_del(dt.psched, c->flow_info.fd); + log_dbg("Removed fd %d from " + "packet scheduler.", c->flow_info.fd); break; default: break; } } -static void sdu_handler(int fd, - qoscube_t qc, - struct shm_du_buff * sdb) +static void packet_handler(int fd, + qoscube_t qc, + struct shm_du_buff * sdb) { struct dt_pci dt_pci; int ret; @@ -407,7 +492,7 @@ static void sdu_handler(int fd, ret = ipcp_flow_write(ofd, sdb); if (ret < 0) { - log_dbg("Failed to write SDU to fd %d.", ofd); + log_dbg("Failed to write packet to fd %d.", ofd); if (ret == -EFLOWDOWN) notifier_event(NOTIFY_DT_FLOW_DOWN, &ofd); ipcp_sdb_release(sdb); @@ -476,7 +561,7 @@ static void sdu_handler(int fd, return; } - if (dt.comps[dt_pci.eid].post_sdu == NULL) { + if (dt.comps[dt_pci.eid].post_packet == NULL) { log_err("No registered component on eid %d.", dt_pci.eid); ipcp_sdb_release(sdb); @@ -512,7 +597,8 @@ static void sdu_handler(int fd, pthread_mutex_unlock(&dt.stat[dt_pci.eid].lock); #endif - dt.comps[dt_pci.eid].post_sdu(dt.comps[dt_pci.eid].comp, sdb); + dt.comps[dt_pci.eid].post_packet(dt.comps[dt_pci.eid].comp, + sdb); } } @@ -555,10 +641,14 @@ int dt_init(enum pol_routing pr, info.pref_syntax = PROTO_FIXED; info.addr = ipcpi.dt_addr; - if (dt_pci_init(addr_size, eid_size, max_ttl)) { - log_err("Failed to init shm dt_pci."); - goto fail_pci_init; - } + dt_pci_info.addr_size = addr_size; + dt_pci_info.eid_size = eid_size; + dt_pci_info.max_ttl = max_ttl; + + dt_pci_info.qc_o = dt_pci_info.addr_size; + dt_pci_info.ttl_o = dt_pci_info.qc_o + QOS_LEN; + dt_pci_info.eid_o = dt_pci_info.ttl_o + TTL_LEN; + dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size; if (notifier_reg(handle_event, NULL)) { log_err("Failed to register with notifier."); @@ -642,8 +732,6 @@ int dt_init(enum pol_routing pr, fail_connmgr_comp_init: notifier_unreg(&handle_event); fail_notifier_reg: - dt_pci_fini(); - fail_pci_init: return -1; } @@ -671,21 +759,19 @@ void dt_fini(void) connmgr_comp_fini(COMPID_DT); notifier_unreg(&handle_event); - - dt_pci_fini(); } int dt_start(void) { - dt.sdu_sched = sdu_sched_create(sdu_handler); - if (dt.sdu_sched == NULL) { - log_err("Failed to create N-1 SDU scheduler."); + dt.psched = psched_create(packet_handler); + if (dt.psched == NULL) { + log_err("Failed to create N-1 packet scheduler."); return -1; } if (pthread_create(&dt.listener, NULL, dt_conn_handle, NULL)) { log_err("Failed to create listener thread."); - 
sdu_sched_destroy(dt.sdu_sched); + psched_destroy(dt.psched); return -1; } @@ -696,7 +782,7 @@ void dt_stop(void) { pthread_cancel(dt.listener); pthread_join(dt.listener, NULL); - sdu_sched_destroy(dt.sdu_sched); + psched_destroy(dt.psched); } int dt_reg_comp(void * comp, @@ -716,11 +802,11 @@ int dt_reg_comp(void * comp, return -EBADF; } - assert(dt.comps[res_fd].post_sdu == NULL); + assert(dt.comps[res_fd].post_packet == NULL); assert(dt.comps[res_fd].comp == NULL); assert(dt.comps[res_fd].name == NULL); - dt.comps[res_fd].post_sdu = func; + dt.comps[res_fd].post_packet = func; dt.comps[res_fd].comp = comp; dt.comps[res_fd].name = name; @@ -731,10 +817,10 @@ int dt_reg_comp(void * comp, return res_fd; } -int dt_write_sdu(uint64_t dst_addr, - qoscube_t qc, - int np1_fd, - struct shm_du_buff * sdb) +int dt_write_packet(uint64_t dst_addr, + qoscube_t qc, + int np1_fd, + struct shm_du_buff * sdb) { int fd; struct dt_pci dt_pci; @@ -779,7 +865,7 @@ int dt_write_sdu(uint64_t dst_addr, #endif ret = ipcp_flow_write(fd, sdb); if (ret < 0) { - log_dbg("Failed to write SDU to fd %d.", fd); + log_dbg("Failed to write packet to fd %d.", fd); if (ret == -EFLOWDOWN) notifier_event(NOTIFY_DT_FLOW_DOWN, &fd); goto fail_write; diff --git a/src/ipcpd/normal/dt.h b/src/ipcpd/normal/dt.h index e0bbe3f3..b74e84b0 100644 --- a/src/ipcpd/normal/dt.h +++ b/src/ipcpd/normal/dt.h @@ -24,10 +24,9 @@ #define OUROBOROS_IPCPD_NORMAL_DT_H #include <ouroboros/ipcp.h> +#include <ouroboros/qoscube.h> #include <ouroboros/shm_rdrbuff.h> -#include "dt_pci.h" - #define DT_COMP "Data Transfer" #define DT_PROTO "dtp" #define INVALID_ADDR 0 @@ -49,9 +48,9 @@ int dt_reg_comp(void * comp, void (* func)(void * comp, struct shm_du_buff * sdb), char * name); -int dt_write_sdu(uint64_t dst_addr, - qoscube_t qc, - int res_fd, - struct shm_du_buff * sdb); +int dt_write_packet(uint64_t dst_addr, + qoscube_t qc, + int res_fd, + struct shm_du_buff * sdb); #endif /* OUROBOROS_IPCPD_NORMAL_DT_H */ diff --git a/src/ipcpd/normal/dt_const.h b/src/ipcpd/normal/dt_const.h deleted file mode 100644 index fb005f06..00000000 --- a/src/ipcpd/normal/dt_const.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2018 - * - * Data Transfer Constants for the IPCP - * - * Dimitri Staessens <dimitri.staessens@ugent.be> - * Sander Vrijders <sander.vrijders@ugent.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. 
- */ - -#ifndef OUROBOROS_IPCPD_NORMAL_DT_CONST_H -#define OUROBOROS_IPCPD_NORMAL_DT_CONST_H - -#include <stdint.h> -#include <stdbool.h> - -struct dt_const { - uint8_t addr_size; - uint8_t cep_id_size; - uint8_t seqno_size; - bool has_ttl; - bool has_chk; - uint32_t min_pdu_size; - uint32_t max_pdu_size; -}; - -#endif /* OUROBOROS_IPCPD_NORMAL_DT_CONST_H */ diff --git a/src/ipcpd/normal/dt_pci.c b/src/ipcpd/normal/dt_pci.c deleted file mode 100644 index 76304668..00000000 --- a/src/ipcpd/normal/dt_pci.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2018 - * - * Protocol Control Information of Data Transfer Component - * - * Dimitri Staessens <dimitri.staessens@ugent.be> - * Sander Vrijders <sander.vrijders@ugent.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. - */ - -#include <ouroboros/errno.h> - -#include "dt_pci.h" - -#include <stdlib.h> -#include <string.h> -#include <assert.h> - -struct { - uint8_t addr_size; - uint8_t eid_size; - size_t head_size; - - /* Offsets */ - size_t qc_o; - size_t ttl_o; - size_t eid_o; - - /* Initial TTL value */ - uint8_t max_ttl; -} dt_pci_info; - -int dt_pci_init(uint8_t addr_size, - uint8_t eid_size, - uint8_t max_ttl) -{ - dt_pci_info.addr_size = addr_size; - dt_pci_info.eid_size = eid_size; - dt_pci_info.max_ttl = max_ttl; - - dt_pci_info.qc_o = dt_pci_info.addr_size; - dt_pci_info.ttl_o = dt_pci_info.qc_o + QOS_LEN; - dt_pci_info.eid_o = dt_pci_info.ttl_o + TTL_LEN; - dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size; - - return 0; -} - -void dt_pci_fini(void) { - return; -} - -int dt_pci_ser(struct shm_du_buff * sdb, - struct dt_pci * dt_pci) -{ - uint8_t * head; - uint8_t ttl = dt_pci_info.max_ttl; - - assert(sdb); - assert(dt_pci); - - head = shm_du_buff_head_alloc(sdb, dt_pci_info.head_size); - if (head == NULL) - return -EPERM; - - /* FIXME: Add check and operations for Big Endian machines. */ - memcpy(head, &dt_pci->dst_addr, dt_pci_info.addr_size); - memcpy(head + dt_pci_info.qc_o, &dt_pci->qc, QOS_LEN); - memcpy(head + dt_pci_info.ttl_o, &ttl, TTL_LEN); - memcpy(head + dt_pci_info.eid_o, &dt_pci->eid, dt_pci_info.eid_size); - - return 0; -} - -void dt_pci_des(struct shm_du_buff * sdb, - struct dt_pci * dt_pci) -{ - uint8_t * head; - - assert(sdb); - assert(dt_pci); - - head = shm_du_buff_head(sdb); - - /* Decrease TTL */ - --*(head + dt_pci_info.ttl_o); - - /* FIXME: Add check and operations for Big Endian machines. 
*/ - memcpy(&dt_pci->dst_addr, head, dt_pci_info.addr_size); - memcpy(&dt_pci->qc, head + dt_pci_info.qc_o, QOS_LEN); - memcpy(&dt_pci->ttl, head + dt_pci_info.ttl_o, TTL_LEN); - memcpy(&dt_pci->eid, head + dt_pci_info.eid_o, dt_pci_info.eid_size); -} - -void dt_pci_shrink(struct shm_du_buff * sdb) -{ - assert(sdb); - - shm_du_buff_head_release(sdb, dt_pci_info.head_size); -} diff --git a/src/ipcpd/normal/dt_pci.h b/src/ipcpd/normal/dt_pci.h deleted file mode 100644 index 8022f84a..00000000 --- a/src/ipcpd/normal/dt_pci.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2018 - * - * Protocol Control Information of Data Transfer Component - * - * Dimitri Staessens <dimitri.staessens@ugent.be> - * Sander Vrijders <sander.vrijders@ugent.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. - */ - -#ifndef OUROBOROS_IPCPD_NORMAL_DT_PCI_H -#define OUROBOROS_IPCPD_NORMAL_DT_PCI_H - -#include <ouroboros/shm_du_buff.h> -#include <ouroboros/proto.h> -#include <ouroboros/qoscube.h> - -#include <stdint.h> -#include <stdbool.h> - -/* Abstract syntax */ -enum dtp_fields { - DTP_DST = 0, /* DST ADDRESS */ - DTP_QOS, /* QOS ID */ - DTP_DEID, /* DST Endpoint ID */ - DTP_TTL, /* TTL FIELD */ - DTP_NUM_FIELDS /* Number of fields */ -}; - -/* Fixed field lengths */ -#define TTL_LEN 1 -#define QOS_LEN 1 - -struct dt_pci { - uint64_t dst_addr; - qoscube_t qc; - uint8_t ttl; - uint32_t eid; -}; - -int dt_pci_init(uint8_t addr_size, - uint8_t eid_size, - uint8_t max_ttl); - -void dt_pci_fini(void); - -int dt_pci_ser(struct shm_du_buff * sdb, - struct dt_pci * dt_pci); - -void dt_pci_des(struct shm_du_buff * sdb, - struct dt_pci * dt_pci); - -void dt_pci_shrink(struct shm_du_buff * sdb); - -#endif /* OUROBOROS_IPCPD_NORMAL_DT_PCI_H */ diff --git a/src/ipcpd/normal/enroll.c b/src/ipcpd/normal/enroll.c index a321db2b..78305ff0 100644 --- a/src/ipcpd/normal/enroll.c +++ b/src/ipcpd/normal/enroll.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 199309L +#endif #define OUROBOROS_PREFIX "enrollment" diff --git a/src/ipcpd/normal/fa.c b/src/ipcpd/normal/fa.c index 067a6e73..027223b7 100644 --- a/src/ipcpd/normal/fa.c +++ b/src/ipcpd/normal/fa.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -34,9 +38,8 @@ #include <ouroboros/ipcp-dev.h> #include "dir.h" -#include "dt_pci.h" #include "fa.h" -#include "sdu_sched.h" +#include "psched.h" #include "ipcp.h" #include "dt.h" @@ -54,8 +57,15 @@ struct fa_msg { uint32_t r_eid; uint32_t s_eid; uint8_t code; - uint8_t qc; int8_t response; + /* QoS parameters from spec, aligned */ + uint8_t availability; + uint8_t in_order; + uint32_t delay; + uint64_t bandwidth; + uint32_t loss; + uint32_t ber; + uint32_t max_gap; } __attribute__((packed)); struct { @@ -64,19 +74,19 @@ struct { uint64_t r_addr[PROG_MAX_FLOWS]; int fd; - struct sdu_sched * sdu_sched; + struct psched * psched; } fa; -static void sdu_handler(int fd, - qoscube_t qc, - struct shm_du_buff * sdb) +static void packet_handler(int fd, + qoscube_t qc, + struct shm_du_buff * sdb) { pthread_rwlock_rdlock(&fa.flows_lock); - if (dt_write_sdu(fa.r_addr[fd], qc, fa.r_eid[fd], sdb)) { + if (dt_write_packet(fa.r_addr[fd], qc, fa.r_eid[fd], sdb)) { pthread_rwlock_unlock(&fa.flows_lock); ipcp_sdb_release(sdb); - log_warn("Failed to forward SDU."); + log_warn("Failed to forward packet."); return; } @@ -89,7 +99,7 @@ static void destroy_conn(int fd) fa.r_addr[fd] = INVALID_ADDR; } -static void fa_post_sdu(void * comp, +static void fa_post_packet(void * comp, struct shm_du_buff * sdb) { struct timespec ts = {0, TIMEOUT * 1000}; @@ -97,6 +107,7 @@ static void fa_post_sdu(void * comp, int fd; uint8_t * buf; struct fa_msg * msg; + qosspec_t qs; (void) comp; @@ -139,10 +150,18 @@ static void fa_post_sdu(void * comp, assert(ipcpi.alloc_id == -1); + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); + fd = ipcp_flow_req_arr(getpid(), (uint8_t *) (msg + 1), ipcp_dir_hash_len(), - msg->qc); + qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Failed to get fd for flow."); @@ -152,8 +171,8 @@ static void fa_post_sdu(void * comp, pthread_rwlock_wrlock(&fa.flows_lock); - fa.r_eid[fd] = msg->s_eid; - fa.r_addr[fd] = msg->s_addr; + fa.r_eid[fd] = ntoh32(msg->s_eid); + fa.r_addr[fd] = ntoh64(msg->s_addr); pthread_rwlock_unlock(&fa.flows_lock); @@ -166,14 +185,14 @@ static void fa_post_sdu(void * comp, case FLOW_REPLY: pthread_rwlock_wrlock(&fa.flows_lock); - fa.r_eid[msg->r_eid] = msg->s_eid; + fa.r_eid[ntoh32(msg->r_eid)] = ntoh32(msg->s_eid); - ipcp_flow_alloc_reply(msg->r_eid, msg->response); + ipcp_flow_alloc_reply(ntoh32(msg->r_eid), msg->response); if (msg->response < 0) - destroy_conn(msg->r_eid); + destroy_conn(ntoh32(msg->r_eid)); else - sdu_sched_add(fa.sdu_sched, msg->r_eid); + psched_add(fa.psched, ntoh32(msg->r_eid)); pthread_rwlock_unlock(&fa.flows_lock); @@ -196,7 +215,7 @@ int fa_init(void) if (pthread_rwlock_init(&fa.flows_lock, NULL)) return -1; - fa.fd = dt_reg_comp(&fa, &fa_post_sdu, FA); + fa.fd = dt_reg_comp(&fa, &fa_post_packet, FA); return 0; } @@ -208,9 +227,9 @@ void fa_fini(void) int fa_start(void) { - fa.sdu_sched = sdu_sched_create(sdu_handler); - if (fa.sdu_sched == NULL) { - log_err("Failed to create SDU scheduler."); + fa.psched = psched_create(packet_handler); + if (fa.psched == NULL) { + log_err("Failed to create packet scheduler."); return -1; } @@ -219,16 +238,17 @@ int fa_start(void) void fa_stop(void) { - 
sdu_sched_destroy(fa.sdu_sched); + psched_destroy(fa.psched); } int fa_alloc(int fd, const uint8_t * dst, - qoscube_t qc) + qosspec_t qs) { struct fa_msg * msg; uint64_t addr; struct shm_du_buff * sdb; + qoscube_t qc; addr = dir_query(dst); if (addr == 0) @@ -237,15 +257,23 @@ int fa_alloc(int fd, if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len())) return -1; - msg = (struct fa_msg *) shm_du_buff_head(sdb); - msg->code = FLOW_REQ; - msg->qc = qc; - msg->s_eid = fd; - msg->s_addr = ipcpi.dt_addr; + msg = (struct fa_msg *) shm_du_buff_head(sdb); + msg->code = FLOW_REQ; + msg->s_eid = hton32(fd); + msg->s_addr = hton64(ipcpi.dt_addr); + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); memcpy(msg + 1, dst, ipcp_dir_hash_len()); - if (dt_write_sdu(addr, qc, fa.fd, sdb)) { + qc = qos_spec_to_cube(qs); + + if (dt_write_packet(addr, qc, fa.fd, sdb)) { ipcp_sdb_release(sdb); return -1; } @@ -299,22 +327,22 @@ int fa_alloc_resp(int fd, msg = (struct fa_msg *) shm_du_buff_head(sdb); msg->code = FLOW_REPLY; - msg->r_eid = fa.r_eid[fd]; - msg->s_eid = fd; + msg->r_eid = hton32(fa.r_eid[fd]); + msg->s_eid = hton32(fd); msg->response = response; if (response < 0) { destroy_conn(fd); ipcp_sdb_release(sdb); } else { - sdu_sched_add(fa.sdu_sched, fd); + psched_add(fa.psched, fd); } ipcp_flow_get_qoscube(fd, &qc); assert(qc >= 0 && qc < QOS_CUBE_MAX); - if (dt_write_sdu(fa.r_addr[fd], qc, fa.fd, sdb)) { + if (dt_write_packet(fa.r_addr[fd], qc, fa.fd, sdb)) { destroy_conn(fd); pthread_rwlock_unlock(&fa.flows_lock); ipcp_sdb_release(sdb); @@ -332,7 +360,7 @@ int fa_dealloc(int fd) pthread_rwlock_wrlock(&fa.flows_lock); - sdu_sched_del(fa.sdu_sched, fd); + psched_del(fa.psched, fd); destroy_conn(fd); diff --git a/src/ipcpd/normal/fa.h b/src/ipcpd/normal/fa.h index 87819d6f..6a836e17 100644 --- a/src/ipcpd/normal/fa.h +++ b/src/ipcpd/normal/fa.h @@ -23,7 +23,7 @@ #ifndef OUROBOROS_IPCPD_NORMAL_FA_H #define OUROBOROS_IPCPD_NORMAL_FA_H -#include <ouroboros/qoscube.h> +#include <ouroboros/qos.h> #include <ouroboros/utils.h> int fa_init(void); @@ -36,7 +36,7 @@ void fa_stop(void); int fa_alloc(int fd, const uint8_t * dst, - qoscube_t qos); + qosspec_t qs); int fa_alloc_resp(int fd, int response); diff --git a/src/ipcpd/normal/main.c b/src/ipcpd/normal/main.c index b131bbb6..3f05f421 100644 --- a/src/ipcpd/normal/main.c +++ b/src/ipcpd/normal/main.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200809L +#endif #include "config.h" diff --git a/src/ipcpd/normal/pol/flat.c b/src/ipcpd/normal/pol/flat.c index cab74159..89b7fff6 100644 --- a/src/ipcpd/normal/pol/flat.c +++ b/src/ipcpd/normal/pol/flat.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "flat-addr-auth" diff --git a/src/ipcpd/normal/pol/graph.c b/src/ipcpd/normal/pol/graph.c index f3c053ab..ec0917c5 100644 --- a/src/ipcpd/normal/pol/graph.c +++ b/src/ipcpd/normal/pol/graph.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "graph" @@ -547,27 +551,6 @@ static int graph_routing_table_simple(struct graph * graph, return -1; } -int graph_routing_table(struct graph * graph, - uint64_t s_addr, - struct list_head * table) -{ - int ret = 0; - int * dist; - - assert(graph); - assert(table); - - pthread_mutex_lock(&graph->lock); - - ret = graph_routing_table_simple(graph, s_addr, table, &dist); - - free(dist); - - pthread_mutex_unlock(&graph->lock); - - return ret; -} - static int add_lfa_to_table(struct list_head * table, uint64_t addr, uint64_t lfa) @@ -595,9 +578,10 @@ static int add_lfa_to_table(struct list_head * table, return -1; } -int graph_routing_table_lfa(struct graph * graph, - uint64_t s_addr, - struct list_head * table) +int graph_routing_table(struct graph * graph, + enum routing_algo algo, + uint64_t s_addr, + struct list_head * table) { int * s_dist; int * n_dist[PROG_MAX_FLOWS]; @@ -617,66 +601,82 @@ int graph_routing_table_lfa(struct graph * graph, pthread_mutex_lock(&graph->lock); - for (j = 0; j < PROG_MAX_FLOWS; j++) { - n_dist[j] = NULL; - n_index[j] = -1; - addrs[j] = -1; - } - /* Get the normal next hops routing table. */ if (graph_routing_table_simple(graph, s_addr, table, &s_dist)) goto fail_table_simple; - list_for_each(p, &graph->vertices) { - v = list_entry(p, struct vertex, next); + /* Possibly augment the routing table. */ + switch (algo) { + case ROUTING_SIMPLE: + break; + case ROUTING_LFA: + for (j = 0; j < PROG_MAX_FLOWS; j++) { + n_dist[j] = NULL; + n_index[j] = -1; + addrs[j] = -1; + } - if (v->addr != s_addr) - continue; + list_for_each(p, &graph->vertices) { + v = list_entry(p, struct vertex, next); - /* Get the distances for every neighbor of the source. */ - list_for_each(q, &v->edges) { - e = list_entry(q, struct edge, next); + if (v->addr != s_addr) + continue; - addrs[i] = e->nb->addr; - n_index[i] = e->nb->index; - if (dijkstra(graph, e->nb->addr, - &nhops, &(n_dist[i++]))) - goto fail_dijkstra; + /* + * Get the distances for every neighbor + * of the source. + */ + list_for_each(q, &v->edges) { + e = list_entry(q, struct edge, next); - free(nhops); - } + addrs[i] = e->nb->addr; + n_index[i] = e->nb->index; + if (dijkstra(graph, e->nb->addr, + &nhops, &(n_dist[i++]))) + goto fail_dijkstra; - break; - } + free(nhops); + } - /* Loop though all nodes to see if we have a LFA for them. */ - list_for_each(p, &graph->vertices) { - v = list_entry(p, struct vertex, next); + break; + } - if (v->addr == s_addr) - continue; + /* Loop though all nodes to see if we have a LFA for them. */ + list_for_each(p, &graph->vertices) { + v = list_entry(p, struct vertex, next); - /* - * Check for every neighbor if dist(neighbor, destination) < - * dist(neighbor, source) + dist(source, destination). - */ - for (j = 0; j < i; j++) { - /* Exclude ourselves. */ - if (addrs[j] == v->addr) + if (v->addr == s_addr) continue; - if (n_dist[j][v->index] < - s_dist[n_index[j]] + s_dist[v->index]) - if (add_lfa_to_table(table, v->addr, addrs[j])) - goto fail_add_lfa; + /* + * Check for every neighbor if + * dist(neighbor, destination) < + * dist(neighbor, source) + dist(source, destination). + */ + for (j = 0; j < i; j++) { + /* Exclude ourselves. 
*/ + if (addrs[j] == v->addr) + continue; + + if (n_dist[j][v->index] < + s_dist[n_index[j]] + s_dist[v->index]) + if (add_lfa_to_table(table, v->addr, + addrs[j])) + goto fail_add_lfa; + } } + + for (j = 0; j < i; j++) + free(n_dist[j]); + + break; + default: + log_err("Unsupported algorithm."); + goto fail_algo; } pthread_mutex_unlock(&graph->lock); - for (j = 0; j < i; j++) - free(n_dist[j]); - free(s_dist); return 0; @@ -686,6 +686,7 @@ int graph_routing_table_lfa(struct graph * graph, free(n_dist[k]); fail_dijkstra: free_routing_table(table); + fail_algo: free(s_dist); fail_table_simple: pthread_mutex_unlock(&graph->lock); diff --git a/src/ipcpd/normal/pol/graph.h b/src/ipcpd/normal/pol/graph.h index 13657fd0..7cd14ad6 100644 --- a/src/ipcpd/normal/pol/graph.h +++ b/src/ipcpd/normal/pol/graph.h @@ -28,6 +28,11 @@ #include <inttypes.h> +enum routing_algo { + ROUTING_SIMPLE = 0, + ROUTING_LFA +}; + struct nhop { struct list_head next; uint64_t nhop; @@ -53,13 +58,10 @@ int graph_del_edge(struct graph * graph, uint64_t d_addr); int graph_routing_table(struct graph * graph, + enum routing_algo algo, uint64_t s_addr, struct list_head * table); -int graph_routing_table_lfa(struct graph * graph, - uint64_t s_addr, - struct list_head * table); - void graph_free_routing_table(struct graph * graph, struct list_head * table); diff --git a/src/ipcpd/normal/pol/link_state.c b/src/ipcpd/normal/pol/link_state.c index 1c418ffc..e2e9eab5 100644 --- a/src/ipcpd/normal/pol/link_state.c +++ b/src/ipcpd/normal/pol/link_state.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -101,30 +105,26 @@ struct nb { enum nb_type type; }; -typedef int (* rtable_fn_t)(struct graph * graph, - uint64_t s_addr, - struct list_head * table); - struct { - struct list_head nbs; - size_t nbs_len; - fset_t * mgmt_set; + struct list_head nbs; + size_t nbs_len; + fset_t * mgmt_set; - struct list_head db; - size_t db_len; + struct list_head db; + size_t db_len; - pthread_rwlock_t db_lock; + pthread_rwlock_t db_lock; - struct graph * graph; + struct graph * graph; - pthread_t lsupdate; - pthread_t lsreader; - pthread_t listener; + pthread_t lsupdate; + pthread_t lsreader; + pthread_t listener; - struct list_head routing_instances; - pthread_mutex_t routing_i_lock; + struct list_head routing_instances; + pthread_mutex_t routing_i_lock; - rtable_fn_t rtable; + enum routing_algo routing_algo; } ls; struct pol_routing_ops link_state_ops = { @@ -500,7 +500,8 @@ static void calculate_pff(struct routing_i * instance) struct list_head * q; int fds[PROG_MAX_FLOWS]; - if (ls.rtable(ls.graph, ipcpi.dt_addr, &table)) + if (graph_routing_table(ls.graph, ls.routing_algo, + ipcpi.dt_addr, &table)) return; pff_lock(instance->pff); @@ -902,11 +903,11 @@ int link_state_init(enum pol_routing pr) switch (pr) { case ROUTING_LINK_STATE: log_dbg("Using link state routing policy."); - ls.rtable = graph_routing_table; + ls.routing_algo = ROUTING_SIMPLE; break; case ROUTING_LINK_STATE_LFA: log_dbg("Using Loop-Free Alternates policy."); - ls.rtable = graph_routing_table_lfa; + ls.routing_algo = ROUTING_LFA; break; default: goto fail_graph; diff --git a/src/ipcpd/normal/pol/tests/graph_test.c b/src/ipcpd/normal/pol/tests/graph_test.c index d226398c..8050f73a 100644 --- a/src/ipcpd/normal/pol/tests/graph_test.c +++ b/src/ipcpd/normal/pol/tests/graph_test.c @@ -39,7 +39,7 @@ int 
graph_test_entries(int entries) struct list_head * p; int i = 0; - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } @@ -63,7 +63,7 @@ int graph_test_double_link(void) struct list_head * p; int i = 0; - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } @@ -101,7 +101,7 @@ int graph_test_single_link(void) struct list_head * p; int i = 0; - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } @@ -243,7 +243,7 @@ int graph_test(int argc, return -1; } - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } diff --git a/src/ipcpd/normal/sdu_sched.c b/src/ipcpd/normal/psched.c index 6ce18ed5..27e5f1de 100644 --- a/src/ipcpd/normal/sdu_sched.c +++ b/src/ipcpd/normal/psched.c @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * SDU scheduler component + * Packet scheduler component * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -28,7 +32,7 @@ #include <ouroboros/notifier.h> #include "ipcp.h" -#include "sdu_sched.h" +#include "psched.h" #include "connmgr.h" #include <assert.h> @@ -45,15 +49,15 @@ static int qos_prio [] = { QOS_PRIO_DATA }; -struct sdu_sched { - fset_t * set[QOS_CUBE_MAX]; - next_sdu_fn_t callback; - pthread_t readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; +struct psched { + fset_t * set[QOS_CUBE_MAX]; + next_packet_fn_t callback; + pthread_t readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; }; struct sched_info { - struct sdu_sched * sch; - qoscube_t qc; + struct psched * sch; + qoscube_t qc; }; static void cleanup_reader(void * o) @@ -61,13 +65,13 @@ static void cleanup_reader(void * o) fqueue_destroy((fqueue_t *) o); } -static void * sdu_reader(void * o) +static void * packet_reader(void * o) { - struct sdu_sched * sched; - struct shm_du_buff * sdb; - int fd; - fqueue_t * fq; - qoscube_t qc; + struct psched * sched; + struct shm_du_buff * sdb; + int fd; + fqueue_t * fq; + qoscube_t qc; sched = ((struct sched_info *) o)->sch; qc = ((struct sched_info *) o)->qc; @@ -115,26 +119,26 @@ static void * sdu_reader(void * o) return (void *) 0; } -struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback) +struct psched * psched_create(next_packet_fn_t callback) { - struct sdu_sched * sdu_sched; - struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; - int i; - int j; + struct psched * psched; + struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; + int i; + int j; assert(callback); - sdu_sched = malloc(sizeof(*sdu_sched)); - if (sdu_sched == NULL) + psched = malloc(sizeof(*psched)); + if (psched == NULL) goto fail_malloc; - sdu_sched->callback = callback; + psched->callback = callback; for (i = 0; i < QOS_CUBE_MAX; ++i) { - sdu_sched->set[i] = fset_create(); - if (sdu_sched->set[i] == NULL) { + psched->set[i] = fset_create(); + if (psched->set[i] == NULL) { for (j = 0; j < i; ++j) - fset_destroy(sdu_sched->set[j]); + fset_destroy(psched->set[j]); goto fail_flow_set; } 
} @@ -146,17 +150,17 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback) free(infos[j]); goto fail_infos; } - infos[i]->sch = sdu_sched; + infos[i]->sch = psched; infos[i]->qc = i % QOS_CUBE_MAX; } for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) { - if (pthread_create(&sdu_sched->readers[i], NULL, - sdu_reader, infos[i])) { + if (pthread_create(&psched->readers[i], NULL, + packet_reader, infos[i])) { for (j = 0; j < i; ++j) - pthread_cancel(sdu_sched->readers[j]); + pthread_cancel(psched->readers[j]); for (j = 0; j < i; ++j) - pthread_join(sdu_sched->readers[j], NULL); + pthread_join(psched->readers[j], NULL); for (j = i; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j) free(infos[i]); goto fail_infos; @@ -177,61 +181,61 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback) par.sched_priority = min + (qos_prio[i % QOS_CUBE_MAX] * (max - min) / 99); - if (pthread_setschedparam(sdu_sched->readers[i], pol, &par)) + if (pthread_setschedparam(psched->readers[i], pol, &par)) goto fail_sched; } - return sdu_sched; + return psched; fail_sched: for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j) - pthread_cancel(sdu_sched->readers[j]); + pthread_cancel(psched->readers[j]); for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j) - pthread_join(sdu_sched->readers[j], NULL); + pthread_join(psched->readers[j], NULL); fail_infos: for (j = 0; j < QOS_CUBE_MAX; ++j) - fset_destroy(sdu_sched->set[j]); + fset_destroy(psched->set[j]); fail_flow_set: - free(sdu_sched); + free(psched); fail_malloc: return NULL; } -void sdu_sched_destroy(struct sdu_sched * sdu_sched) +void psched_destroy(struct psched * psched) { int i; - assert(sdu_sched); + assert(psched); for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) { - pthread_cancel(sdu_sched->readers[i]); - pthread_join(sdu_sched->readers[i], NULL); + pthread_cancel(psched->readers[i]); + pthread_join(psched->readers[i], NULL); } for (i = 0; i < QOS_CUBE_MAX; ++i) - fset_destroy(sdu_sched->set[i]); + fset_destroy(psched->set[i]); - free(sdu_sched); + free(psched); } -void sdu_sched_add(struct sdu_sched * sdu_sched, - int fd) +void psched_add(struct psched * psched, + int fd) { qoscube_t qc; - assert(sdu_sched); + assert(psched); ipcp_flow_get_qoscube(fd, &qc); - fset_add(sdu_sched->set[qc], fd); + fset_add(psched->set[qc], fd); } -void sdu_sched_del(struct sdu_sched * sdu_sched, - int fd) +void psched_del(struct psched * psched, + int fd) { qoscube_t qc; - assert(sdu_sched); + assert(psched); ipcp_flow_get_qoscube(fd, &qc); - fset_del(sdu_sched->set[qc], fd); + fset_del(psched->set[qc], fd); } diff --git a/src/ipcpd/normal/sdu_sched.h b/src/ipcpd/normal/psched.h index cdbda272..137c8fd1 100644 --- a/src/ipcpd/normal/sdu_sched.h +++ b/src/ipcpd/normal/psched.h @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * SDU scheduler component + * Packet scheduler component * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -20,24 +20,24 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
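psched_create() then maps a per-cube priority table onto the platform's real-time priority range with min + (qos_prio * (max - min) / 99), where min and max are presumably taken from sched_get_priority_min()/sched_get_priority_max(). A small self-contained check of that scaling; the qos_prio values here are placeholders, not the daemon's actual table.

#include <sched.h>
#include <stdio.h>

int main(void)
{
    int qos_prio[] = { 0, 33, 66, 99 };  /* placeholder priority table */
    int min = sched_get_priority_min(SCHED_RR);
    int max = sched_get_priority_max(SCHED_RR);
    int i;

    for (i = 0; i < 4; ++i) {
        /* same scaling as in psched_create() */
        int prio = min + (qos_prio[i] * (max - min) / 99);
        printf("cube %d -> SCHED_RR priority %d\n", i, prio);
    }

    return 0;
}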
*/ -#ifndef OUROBOROS_IPCPD_NORMAL_SDU_SCHED_H -#define OUROBOROS_IPCPD_NORMAL_SDU_SCHED_H +#ifndef OUROBOROS_IPCPD_NORMAL_PSCHED_H +#define OUROBOROS_IPCPD_NORMAL_PSCHED_H #include <ouroboros/ipcp-dev.h> #include <ouroboros/fqueue.h> -typedef void (* next_sdu_fn_t)(int fd, - qoscube_t qc, - struct shm_du_buff * sdb); +typedef void (* next_packet_fn_t)(int fd, + qoscube_t qc, + struct shm_du_buff * sdb); -struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback); +struct psched * psched_create(next_packet_fn_t callback); -void sdu_sched_destroy(struct sdu_sched * sdu_sched); +void psched_destroy(struct psched * psched); -void sdu_sched_add(struct sdu_sched * sdu_sched, - int fd); +void psched_add(struct psched * psched, + int fd); -void sdu_sched_del(struct sdu_sched * sdu_sched, - int fd); +void psched_del(struct psched * psched, + int fd); -#endif /* OUROBOROS_IPCPD_NORMAL_SDU_SCHED_H */ +#endif /* OUROBOROS_IPCPD_NORMAL_PSCHED_H */ diff --git a/src/ipcpd/raptor/CMakeLists.txt b/src/ipcpd/raptor/CMakeLists.txt index 06e6ee29..1883d9bb 100644 --- a/src/ipcpd/raptor/CMakeLists.txt +++ b/src/ipcpd/raptor/CMakeLists.txt @@ -16,6 +16,7 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Linux") find_path(RAPTOR_KERNEL_MODULE NAMES raptor.ko.gz + raptor.ko.xz HINTS /lib/modules/${CMAKE_SYSTEM_VERSION}/extra ) diff --git a/src/ipcpd/raptor/main.c b/src/ipcpd/raptor/main.c index 4f0099b3..8f578611 100644 --- a/src/ipcpd/raptor/main.c +++ b/src/ipcpd/raptor/main.c @@ -64,9 +64,9 @@ #include <sys/mman.h> #define THIS_TYPE IPCP_RAPTOR -#define MGMT_SAP 0x01 +#define MGMT_EID 0x01 #define MAC_SIZE 6 -#define MAX_SAPS 64 +#define MAX_EIDS 64 #define EVENT_WAIT_TIMEOUT 100 /* us */ #define NAME_QUERY_TIMEOUT 2000 /* ms */ @@ -90,16 +90,23 @@ #define NAME_QUERY_REPLY 3 struct mgmt_msg { - uint8_t code; - uint8_t ssap; - uint8_t dsap; - uint8_t qoscube; - int8_t response; + uint8_t code; + uint8_t seid; + uint8_t deid; + int8_t response; + /* QoS parameters from spec, aligned */ + uint32_t loss; + uint64_t bandwidth; + uint32_t ber; + uint32_t max_gap; + uint32_t delay; + uint8_t in_order; + uint8_t availability; } __attribute__((packed)); struct ef { - int8_t sap; - int8_t r_sap; + int8_t eid; + int8_t r_eid; }; struct mgmt_frame { @@ -113,7 +120,7 @@ struct { int ioctl_fd; - struct bmp * saps; + struct bmp * eids; fset_t * np1_flows; fqueue_t * fq; int * ef_to_fd; @@ -145,13 +152,13 @@ static int raptor_data_init(void) goto fail_fd_to_ef; raptor_data.ef_to_fd = - malloc(sizeof(*raptor_data.ef_to_fd) * MAX_SAPS); + malloc(sizeof(*raptor_data.ef_to_fd) * MAX_EIDS); if (raptor_data.ef_to_fd == NULL) goto fail_ef_to_fd; - raptor_data.saps = bmp_create(MAX_SAPS, 2); - if (raptor_data.saps == NULL) - goto fail_saps; + raptor_data.eids = bmp_create(MAX_EIDS, 2); + if (raptor_data.eids == NULL) + goto fail_eids; raptor_data.np1_flows = fset_create(); if (raptor_data.np1_flows == NULL) @@ -161,12 +168,12 @@ static int raptor_data_init(void) if (raptor_data.fq == NULL) goto fail_fq; - for (i = 0; i < MAX_SAPS; ++i) + for (i = 0; i < MAX_EIDS; ++i) raptor_data.ef_to_fd[i] = -1; for (i = 0; i < SYS_MAX_FLOWS; ++i) { - raptor_data.fd_to_ef[i].sap = -1; - raptor_data.fd_to_ef[i].r_sap = -1; + raptor_data.fd_to_ef[i].eid = -1; + raptor_data.fd_to_ef[i].r_eid = -1; } raptor_data.shim_data = shim_data_create(); @@ -210,8 +217,8 @@ static int raptor_data_init(void) fail_fq: fset_destroy(raptor_data.np1_flows); fail_np1_flows: - bmp_destroy(raptor_data.saps); - fail_saps: + bmp_destroy(raptor_data.eids); + fail_eids: 
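In the raptor IPCP the management message no longer carries a single qoscube byte; the full QoS spec travels field by field in network byte order. The sketch below mirrors the new struct mgmt_msg layout and the packing done in raptor_eid_alloc(); the qosspec stand-in, the hton64 helper (which assumes a little-endian host) and the constant values are local to this example, the real definitions live in the Ouroboros headers.

#include <arpa/inet.h>  /* htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for qosspec_t; field names follow the diff. */
typedef struct {
    uint32_t delay;
    uint64_t bandwidth;
    uint32_t loss;
    uint32_t ber;
    uint32_t max_gap;
    uint8_t  in_order;
    uint8_t  availability;
} qosspec_sketch_t;

struct mgmt_msg {
    uint8_t  code;
    uint8_t  seid;
    uint8_t  deid;
    int8_t   response;
    /* QoS parameters from spec, aligned */
    uint32_t loss;
    uint64_t bandwidth;
    uint32_t ber;
    uint32_t max_gap;
    uint32_t delay;
    uint8_t  in_order;
    uint8_t  availability;
} __attribute__((packed));

static uint64_t hton64_sketch(uint64_t h)  /* little-endian hosts only */
{
    return ((uint64_t) htonl(h & 0xffffffffu) << 32) | htonl(h >> 32);
}

int main(void)
{
    qosspec_sketch_t qs = { 1000, 1000000, 0, 0, 0, 1, 99 };
    struct mgmt_msg  msg;

    memset(&msg, 0, sizeof(msg));
    msg.code         = 1;  /* FLOW_REQ */
    msg.seid         = 7;
    msg.delay        = htonl(qs.delay);
    msg.bandwidth    = hton64_sketch(qs.bandwidth);
    msg.availability = qs.availability;
    msg.loss         = htonl(qs.loss);
    msg.ber          = htonl(qs.ber);
    msg.in_order     = qs.in_order;
    msg.max_gap      = htonl(qs.max_gap);

    printf("mgmt_msg wire size: %zu bytes\n", sizeof(msg));

    return 0;
}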
free(raptor_data.ef_to_fd); fail_ef_to_fd: free(raptor_data.fd_to_ef); @@ -227,13 +234,13 @@ static void raptor_data_fini(void) pthread_rwlock_destroy(&raptor_data.flows_lock); fqueue_destroy(raptor_data.fq); fset_destroy(raptor_data.np1_flows); - bmp_destroy(raptor_data.saps); + bmp_destroy(raptor_data.eids); free(raptor_data.fd_to_ef); free(raptor_data.ef_to_fd); } static int raptor_send_frame(struct shm_du_buff * sdb, - uint8_t dsap) + uint8_t deid) { uint8_t * frame; size_t frame_len; @@ -263,7 +270,7 @@ static int raptor_send_frame(struct shm_du_buff * sdb, frame[0] = (frame_len & 0x00FF) >> 0; frame[1] = (frame_len & 0xFF00) >> 8; - frame[2] = dsap; + frame[2] = deid; memcpy(&frame[RAPTOR_HEADER], payload, len); @@ -276,9 +283,9 @@ static int raptor_send_frame(struct shm_du_buff * sdb, return 0; } -static int raptor_sap_alloc(uint8_t ssap, +static int raptor_eid_alloc(uint8_t seid, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { struct mgmt_msg * msg; struct shm_du_buff * sdb; @@ -288,14 +295,20 @@ static int raptor_sap_alloc(uint8_t ssap, return -1; } - msg = (struct mgmt_msg *) shm_du_buff_head(sdb); - msg->code = FLOW_REQ; - msg->ssap = ssap; - msg->qoscube = cube; + msg = (struct mgmt_msg *) shm_du_buff_head(sdb); + msg->code = FLOW_REQ; + msg->seid = seid; + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); memcpy(msg + 1, hash, ipcp_dir_hash_len()); - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -306,25 +319,25 @@ static int raptor_sap_alloc(uint8_t ssap, return 0; } -static int raptor_sap_alloc_resp(uint8_t ssap, - uint8_t dsap, - int response) +static int raptor_eid_alloc_resp(uint8_t seid, + uint8_t deid, + int response) { - struct mgmt_msg * msg; + struct mgmt_msg * msg; struct shm_du_buff * sdb; if (ipcp_sdb_reserve(&sdb, sizeof(*msg)) < 0) { - log_err("failed to reserve sdb for management frame."); + log_err("Failed to reserve sdb for management frame."); return -1; } msg = (struct mgmt_msg *) shm_du_buff_head(sdb); msg->code = FLOW_REPLY; - msg->ssap = ssap; - msg->dsap = dsap; + msg->seid = seid; + msg->deid = deid; msg->response = response; - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -335,9 +348,9 @@ static int raptor_sap_alloc_resp(uint8_t ssap, return 0; } -static int raptor_sap_req(uint8_t r_sap, +static int raptor_eid_req(uint8_t r_eid, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000}; struct timespec abstime; @@ -361,7 +374,7 @@ static int raptor_sap_req(uint8_t r_sap, } /* reply to IRM, called under lock to prevent race */ - fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -370,7 +383,7 @@ static int raptor_sap_req(uint8_t r_sap, pthread_rwlock_wrlock(&raptor_data.flows_lock); - raptor_data.fd_to_ef[fd].r_sap = r_sap; + raptor_data.fd_to_ef[fd].r_eid = r_eid; ipcpi.alloc_id = fd; pthread_cond_broadcast(&ipcpi.alloc_cond); @@ -378,13 +391,13 @@ static int raptor_sap_req(uint8_t r_sap, 
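raptor_send_frame() prefixes each payload with a small header: two length bytes, low byte first, followed by the destination endpoint id in the third byte. A stand-alone sketch of writing and reading that header; RAPTOR_HEADER is assumed to be 3 here, matching the three bytes actually written in the diff.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAPTOR_HEADER 3  /* assumed: 2 length bytes + 1 EID byte */

static void write_header(uint8_t * frame, uint16_t frame_len, uint8_t deid)
{
    frame[0] = (frame_len & 0x00FF) >> 0;  /* low byte first, as in the diff */
    frame[1] = (frame_len & 0xFF00) >> 8;
    frame[2] = deid;
}

static uint16_t read_len(const uint8_t * frame)
{
    return (uint16_t) (frame[0] | (frame[1] << 8));
}

int main(void)
{
    uint8_t    frame[64];
    const char payload[] = "hello";
    uint16_t   len = RAPTOR_HEADER + sizeof(payload);

    write_header(frame, len, 0x01 /* MGMT_EID */);
    memcpy(&frame[RAPTOR_HEADER], payload, sizeof(payload));

    printf("frame_len %u, deid %u\n", read_len(frame), frame[2]);

    return 0;
}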
pthread_rwlock_unlock(&raptor_data.flows_lock); pthread_mutex_unlock(&ipcpi.alloc_lock); - log_dbg("New flow request, fd %d, remote SAP %d.", fd, r_sap); + log_dbg("New flow request, fd %d, remote EID %d.", fd, r_eid); return 0; } -static int raptor_sap_alloc_reply(uint8_t ssap, - int dsap, +static int raptor_eid_alloc_reply(uint8_t seid, + int deid, int response) { int ret = 0; @@ -392,21 +405,21 @@ static int raptor_sap_alloc_reply(uint8_t ssap, pthread_rwlock_wrlock(&raptor_data.flows_lock); - fd = raptor_data.ef_to_fd[dsap]; + fd = raptor_data.ef_to_fd[deid]; if (fd < 0) { pthread_rwlock_unlock(& raptor_data.flows_lock); - log_err("No flow found with that SAP."); + log_err("No flow found with that EID."); return -1; /* -EFLOWNOTFOUND */ } if (response) - bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap); + bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid); else - raptor_data.fd_to_ef[fd].r_sap = ssap; + raptor_data.fd_to_ef[fd].r_eid = seid; pthread_rwlock_unlock(&raptor_data.flows_lock); - log_dbg("Flow reply, fd %d, SSAP %d, DSAP %d.", fd, ssap, dsap); + log_dbg("Flow reply, fd %d, SEID %d, DEID %d.", fd, seid, deid); if ((ret = ipcp_flow_alloc_reply(fd, response)) < 0) return -1; @@ -424,16 +437,16 @@ static int raptor_name_query_req(const uint8_t * hash) return 0; if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()) < 0) { - log_err("failed to reserve sdb for management frame."); + log_err("Failed to reserve sdb for management frame."); return -1; } - msg = (struct mgmt_msg *) shm_du_buff_head(sdb); - msg->code = NAME_QUERY_REPLY; + msg = (struct mgmt_msg *) shm_du_buff_head(sdb); + msg->code = NAME_QUERY_REPLY; memcpy(msg + 1, hash, ipcp_dir_hash_len()); - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -456,8 +469,9 @@ static int raptor_name_query_reply(const uint8_t * hash) static int raptor_mgmt_frame(const uint8_t * buf, size_t len) { - struct mgmt_msg * msg = (struct mgmt_msg *) buf; - uint8_t * hash = (uint8_t *) (msg + 1); + struct mgmt_msg * msg = (struct mgmt_msg *) buf; + uint8_t * hash = (uint8_t *) (msg + 1); + qosspec_t qs; switch (msg->code) { case FLOW_REQ: @@ -466,8 +480,16 @@ static int raptor_mgmt_frame(const uint8_t * buf, return -1; } + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); + if (shim_data_reg_has(raptor_data.shim_data, hash)) - raptor_sap_req(msg->ssap, hash, msg->qoscube); + raptor_eid_req(msg->seid, hash, qs); break; case FLOW_REPLY: if (len != sizeof(*msg)) { @@ -475,7 +497,7 @@ static int raptor_mgmt_frame(const uint8_t * buf, return -1; } - raptor_sap_alloc_reply(msg->ssap, msg->dsap, msg->response); + raptor_eid_alloc_reply(msg->seid, msg->deid, msg->response); break; case NAME_QUERY_REQ: if (len != sizeof(*msg) + ipcp_dir_hash_len()) { @@ -552,7 +574,7 @@ static void * raptor_mgmt_handler(void * o) static void raptor_recv_frame(uint8_t * frame) { - uint8_t dsap; + uint8_t deid; uint8_t * payload; size_t frame_len; size_t length; @@ -577,14 +599,14 @@ static void raptor_recv_frame(uint8_t * frame) return; } - dsap = frame[2]; + deid = frame[2]; payload = &frame[RAPTOR_HEADER]; length = frame_len - RAPTOR_HEADER; shm_du_buff_head_release(sdb, RAPTOR_HEADER); shm_du_buff_tail_release(sdb, RAPTOR_PAGE - frame_len); - 
if (dsap == MGMT_SAP) { + if (deid == MGMT_EID) { pthread_mutex_lock(&raptor_data.mgmt_lock); mgmt_frame = malloc(sizeof(*mgmt_frame)); @@ -604,7 +626,7 @@ static void raptor_recv_frame(uint8_t * frame) } else { pthread_rwlock_rdlock(&raptor_data.flows_lock); - fd = raptor_data.ef_to_fd[dsap]; + fd = raptor_data.ef_to_fd[deid]; if (fd < 0) { pthread_rwlock_unlock(&raptor_data.flows_lock); ipcp_sdb_release(sdb); @@ -647,7 +669,7 @@ static void * raptor_send_thread(void * o) struct timespec timeout = {0, EVENT_WAIT_TIMEOUT * 1000}; int fd; struct shm_du_buff * sdb; - uint8_t dsap; + uint8_t deid; (void) o; @@ -662,9 +684,9 @@ static void * raptor_send_thread(void * o) continue; } - dsap = raptor_data.fd_to_ef[fd].r_sap; + deid = raptor_data.fd_to_ef[fd].r_eid; - raptor_send_frame(sdb, dsap); + raptor_send_frame(sdb, deid); } pthread_rwlock_unlock(&raptor_data.flows_lock); } @@ -886,7 +908,7 @@ static int raptor_query(const uint8_t * hash) return -1; } - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -901,19 +923,14 @@ static int raptor_query(const uint8_t * hash) static int raptor_flow_alloc(int fd, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { - uint8_t ssap = 0; + uint8_t seid = 0; log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(hash)); assert(hash); - if (cube != QOS_CUBE_BE) { - log_dbg("Unsupported QoS requested."); - return -1; - } - if (!shim_data_dir_has(raptor_data.shim_data, hash)) { log_err("Destination unreachable."); return -1; @@ -921,29 +938,29 @@ static int raptor_flow_alloc(int fd, pthread_rwlock_wrlock(&raptor_data.flows_lock); - ssap = bmp_allocate(raptor_data.saps); - if (!bmp_is_id_valid(raptor_data.saps, ssap)) { + seid = bmp_allocate(raptor_data.eids); + if (!bmp_is_id_valid(raptor_data.eids, seid)) { pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } - raptor_data.fd_to_ef[fd].sap = ssap; - raptor_data.ef_to_fd[ssap] = fd; + raptor_data.fd_to_ef[fd].eid = seid; + raptor_data.ef_to_fd[seid] = fd; pthread_rwlock_unlock(&raptor_data.flows_lock); - if (raptor_sap_alloc(ssap, hash, cube) < 0) { + if (raptor_eid_alloc(seid, hash, qs) < 0) { pthread_rwlock_wrlock(&raptor_data.flows_lock); - bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap); - raptor_data.fd_to_ef[fd].sap = -1; - raptor_data.ef_to_fd[ssap] = -1; + bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid); + raptor_data.fd_to_ef[fd].eid = -1; + raptor_data.ef_to_fd[seid] = -1; pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } fset_add(raptor_data.np1_flows, fd); - log_dbg("Pending flow with fd %d on SAP %d.", fd, ssap); + log_dbg("Pending flow with fd %d on EID %d.", fd, seid); return 0; } @@ -953,8 +970,8 @@ static int raptor_flow_alloc_resp(int fd, { struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000}; struct timespec abstime; - uint8_t ssap = 0; - uint8_t r_sap = 0; + uint8_t seid = 0; + uint8_t r_eid = 0; clock_gettime(PTHREAD_COND_CLOCK, &abstime); @@ -979,35 +996,35 @@ static int raptor_flow_alloc_resp(int fd, pthread_rwlock_wrlock(&raptor_data.flows_lock); - ssap = bmp_allocate(raptor_data.saps); - if (!bmp_is_id_valid(raptor_data.saps, ssap)) { + seid = bmp_allocate(raptor_data.eids); + if (!bmp_is_id_valid(raptor_data.eids, seid)) { pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } - raptor_data.fd_to_ef[fd].sap = ssap; - r_sap = raptor_data.fd_to_ef[fd].r_sap; - raptor_data.ef_to_fd[ssap] = fd; + raptor_data.fd_to_ef[fd].eid 
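raptor_flow_alloc() draws a fresh endpoint id from a bitmap (ids 0 and 1 are reserved by bmp_create(MAX_EIDS, 2)) and records the mapping in both directions, fd_to_ef and ef_to_fd, undoing both on failure. The sketch below shows that pattern with a plain bit array standing in for the Ouroboros bmp API; SYS_MAX_FLOWS is a placeholder value here.

#include <stdbool.h>
#include <stdio.h>

#define MAX_EIDS      64
#define SYS_MAX_FLOWS 128  /* placeholder value for the sketch */

static bool eid_used[MAX_EIDS];
static int  fd_to_eid[SYS_MAX_FLOWS];
static int  eid_to_fd[MAX_EIDS];

static int eid_alloc(void)
{
    int i;

    /* ids 0 and 1 stay reserved, like bmp_create(MAX_EIDS, 2) */
    for (i = 2; i < MAX_EIDS; ++i) {
        if (!eid_used[i]) {
            eid_used[i] = true;
            return i;
        }
    }

    return -1;
}

static int flow_alloc_sketch(int fd)
{
    int eid = eid_alloc();

    if (eid < 0)
        return -1;

    fd_to_eid[fd]  = eid;
    eid_to_fd[eid] = fd;

    /* On a failed FLOW_REQ the real code releases the eid again
     * and resets both table entries to -1. */
    return eid;
}

int main(void)
{
    printf("fd 5 got eid %d\n", flow_alloc_sketch(5));

    return 0;
}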
= seid; + r_eid = raptor_data.fd_to_ef[fd].r_eid; + raptor_data.ef_to_fd[seid] = fd; pthread_rwlock_unlock(&raptor_data.flows_lock); - if (raptor_sap_alloc_resp(ssap, r_sap, response) < 0) { + if (raptor_eid_alloc_resp(seid, r_eid, response) < 0) { pthread_rwlock_wrlock(&raptor_data.flows_lock); - bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap); + bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid); pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } fset_add(raptor_data.np1_flows, fd); - log_dbg("Accepted flow, fd %d, SAP %d.", fd, (uint8_t)ssap); + log_dbg("Accepted flow, fd %d, EID %d.", fd, (uint8_t)seid); return 0; } static int raptor_flow_dealloc(int fd) { - uint8_t sap; + uint8_t eid; ipcp_flow_fini(fd); @@ -1015,12 +1032,12 @@ static int raptor_flow_dealloc(int fd) fset_del(raptor_data.np1_flows, fd); - sap = raptor_data.fd_to_ef[fd].sap; - bmp_release(raptor_data.saps, sap); - raptor_data.fd_to_ef[fd].sap = -1; - raptor_data.fd_to_ef[fd].r_sap = -1; + eid = raptor_data.fd_to_ef[fd].eid; + bmp_release(raptor_data.eids, eid); + raptor_data.fd_to_ef[fd].eid = -1; + raptor_data.fd_to_ef[fd].r_eid = -1; - raptor_data.ef_to_fd[sap] = -1; + raptor_data.ef_to_fd[eid] = -1; pthread_rwlock_unlock(&raptor_data.flows_lock); diff --git a/src/ipcpd/shim-data.c b/src/ipcpd/shim-data.c index 27b98171..6c28c79f 100644 --- a/src/ipcpd/shim-data.c +++ b/src/ipcpd/shim-data.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" diff --git a/src/ipcpd/shim-data.h b/src/ipcpd/shim-data.h index 336ffa35..7a8c01c4 100644 --- a/src/ipcpd/shim-data.h +++ b/src/ipcpd/shim-data.h @@ -23,7 +23,6 @@ #ifndef OUROBOROS_IPCPD_IPCP_DATA_H #define OUROBOROS_IPCPD_IPCP_DATA_H -#include <ouroboros/qoscube.h> #include <ouroboros/list.h> #include <sys/types.h> diff --git a/src/ipcpd/udp/main.c b/src/ipcpd/udp/main.c index 2e59e1a5..a1af1e85 100644 --- a/src/ipcpd/udp/main.c +++ b/src/ipcpd/udp/main.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -50,27 +54,34 @@ #include <sys/wait.h> #include <fcntl.h> -#define FLOW_REQ 1 -#define FLOW_REPLY 2 +#define FLOW_REQ 1 +#define FLOW_REPLY 2 -#define THIS_TYPE IPCP_UDP -#define LISTEN_PORT htons(0x0D1F) -#define SHIM_UDP_BUF_SIZE 256 -#define SHIM_UDP_MSG_SIZE 256 -#define SHIM_UDP_MAX_SDU_SIZE 8980 -#define DNS_TTL 86400 -#define FD_UPDATE_TIMEOUT 100 /* microseconds */ +#define THIS_TYPE IPCP_UDP +#define LISTEN_PORT htons(0x0D1F) +#define SHIM_UDP_BUF_SIZE 256 +#define SHIM_UDP_MSG_SIZE 256 +#define SHIM_UDP_MAX_PACKET_SIZE 8980 +#define DNS_TTL 86400 +#define FD_UPDATE_TIMEOUT 100 /* microseconds */ -#define local_ip (udp_data.s_saddr.sin_addr.s_addr) +#define local_ip (udp_data.s_saddr.sin_addr.s_addr) -#define UDP_MAX_PORTS 0xFFFF +#define UDP_MAX_PORTS 0xFFFF struct mgmt_msg { uint16_t src_udp_port; uint16_t dst_udp_port; uint8_t code; - uint8_t qoscube; uint8_t response; + /* QoS parameters from spec, aligned */ + uint8_t availability; + uint8_t in_order; + uint32_t delay; + uint64_t bandwidth; + uint32_t loss; + uint32_t ber; + uint32_t max_gap; } __attribute__((packed)); struct uf { @@ -95,9 +106,9 @@ struct { struct uf fd_to_uf[SYS_MAX_FLOWS]; pthread_rwlock_t flows_lock; - pthread_t sduloop; + pthread_t packet_loop; pthread_t handler; - pthread_t sdu_reader; + pthread_t packet_reader; bool fd_set_mod; pthread_cond_t fd_set_cond; @@ -108,6 +119,15 @@ static int udp_data_init(void) { int i; + if (pthread_rwlock_init(&udp_data.flows_lock, NULL)) + return -1; + + if (pthread_cond_init(&udp_data.fd_set_cond, NULL)) + goto fail_set_cond; + + if (pthread_mutex_init(&udp_data.fd_set_lock, NULL)) + goto fail_set_lock; + for (i = 0; i < FD_SETSIZE; ++i) udp_data.uf_to_fd[i] = -1; @@ -120,26 +140,28 @@ static int udp_data_init(void) udp_data.np1_flows = fset_create(); if (udp_data.np1_flows == NULL) - return -ENOMEM; + goto fail_fset; udp_data.fq = fqueue_create(); - if (udp_data.fq == NULL) { - fset_destroy(udp_data.np1_flows); - return -ENOMEM; - } + if (udp_data.fq == NULL) + goto fail_fqueue; udp_data.shim_data = shim_data_create(); - if (udp_data.shim_data == NULL) { - fqueue_destroy(udp_data.fq); - fset_destroy(udp_data.np1_flows); - return -ENOMEM; - } - - pthread_rwlock_init(&udp_data.flows_lock, NULL); - pthread_cond_init(&udp_data.fd_set_cond, NULL); - pthread_mutex_init(&udp_data.fd_set_lock, NULL); + if (udp_data.shim_data == NULL) + goto fail_data; return 0; + fail_data: + fqueue_destroy(udp_data.fq); + fail_fqueue: + fset_destroy(udp_data.np1_flows); + fail_fset: + pthread_mutex_destroy(&udp_data.fd_set_lock); + fail_set_lock: + pthread_cond_destroy(&udp_data.fd_set_cond); + fail_set_cond: + pthread_rwlock_destroy(&udp_data.flows_lock); + return -1; } static void udp_data_fini(void) @@ -204,7 +226,7 @@ static int send_shim_udp_msg(uint8_t * buf, static int ipcp_udp_port_alloc(uint32_t dst_ip_addr, uint16_t src_udp_port, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { uint8_t * buf; struct mgmt_msg * msg; @@ -220,7 +242,13 @@ static int ipcp_udp_port_alloc(uint32_t dst_ip_addr, msg = (struct mgmt_msg *) buf; msg->code = FLOW_REQ; msg->src_udp_port = src_udp_port; - msg->qoscube = cube; + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); 
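udp_data_init() now initialises its locks first and unwinds with a single chain of fail_ labels instead of freeing piecemeal on every error path. Reduced to a stand-alone sketch (only the lock part of the ladder is reproduced; the fset, fqueue and shim_data steps follow the same scheme):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t flows_lock;
static pthread_cond_t   fd_set_cond;
static pthread_mutex_t  fd_set_lock;

static int udp_data_init_sketch(void)
{
    if (pthread_rwlock_init(&flows_lock, NULL))
        return -1;

    if (pthread_cond_init(&fd_set_cond, NULL))
        goto fail_set_cond;

    if (pthread_mutex_init(&fd_set_lock, NULL))
        goto fail_set_lock;

    /* ... create fset, fqueue and shim_data here, each with its own label ... */

    return 0;

 fail_set_lock:
    pthread_cond_destroy(&fd_set_cond);
 fail_set_cond:
    pthread_rwlock_destroy(&flows_lock);
    return -1;
}

int main(void)
{
    printf("init: %d\n", udp_data_init_sketch());

    return 0;
}

Each fail_ label destroys exactly the resources acquired before the failing step, so the labels appear in reverse order of initialisation.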
memcpy(msg + 1, dst, ipcp_dir_hash_len()); @@ -257,7 +285,7 @@ static int ipcp_udp_port_alloc_resp(uint32_t dst_ip_addr, static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000}; struct timespec abstime; @@ -311,11 +339,12 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, if (ipcp_get_state() != IPCP_OPERATIONAL) { log_dbg("Won't allocate over non-operational IPCP."); pthread_mutex_unlock(&ipcpi.alloc_lock); + close(skfd); return -1; } /* reply to IRM */ - fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -371,6 +400,11 @@ static int ipcp_udp_port_alloc_reply(uint16_t src_udp_port, pthread_rwlock_rdlock(&udp_data.flows_lock); fd = udp_port_to_fd(dst_udp_port); + if (fd < 0) { + pthread_rwlock_unlock(&udp_data.flows_lock); + return -1; + } + skfd = udp_data.fd_to_uf[fd].skfd; pthread_rwlock_unlock(&udp_data.flows_lock); @@ -415,11 +449,11 @@ static void * ipcp_udp_listener(void * o) while (true) { struct mgmt_msg * msg = NULL; - + qosspec_t qs; memset(&buf, 0, SHIM_UDP_MSG_SIZE); - n = sizeof(c_saddr); n = recvfrom(sfd, buf, SHIM_UDP_MSG_SIZE, 0, - (struct sockaddr *) &c_saddr, (unsigned *) &n); + (struct sockaddr *) &c_saddr, + (socklen_t *) sizeof(c_saddr)); if (n < 0) continue; @@ -434,9 +468,16 @@ static void * ipcp_udp_listener(void * o) switch (msg->code) { case FLOW_REQ: c_saddr.sin_port = msg->src_udp_port; + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); ipcp_udp_port_req(&c_saddr, (uint8_t *) (msg + 1), - msg->qoscube); + qs); break; case FLOW_REPLY: ipcp_udp_port_alloc_reply(msg->src_udp_port, @@ -454,13 +495,13 @@ static void * ipcp_udp_listener(void * o) return 0; } -static void * ipcp_udp_sdu_reader(void * o) +static void * ipcp_udp_packet_reader(void * o) { ssize_t n; int skfd; int fd; /* FIXME: avoid this copy */ - char buf[SHIM_UDP_MAX_SDU_SIZE]; + char buf[SHIM_UDP_MAX_PACKET_SIZE]; struct sockaddr_in r_saddr; struct timeval tv = {0, FD_UPDATE_TIMEOUT}; fd_set read_fds; @@ -492,7 +533,7 @@ static void * ipcp_udp_sdu_reader(void * o) n = sizeof(r_saddr); if ((n = recvfrom(skfd, &buf, - SHIM_UDP_MAX_SDU_SIZE, + SHIM_UDP_MAX_PACKET_SIZE, 0, (struct sockaddr *) &r_saddr, (unsigned *) &n)) <= 0) @@ -511,7 +552,7 @@ static void * ipcp_udp_sdu_reader(void * o) return (void *) 0; } -static void * ipcp_udp_sdu_loop(void * o) +static void * ipcp_udp_packet_loop(void * o) { int fd; struct shm_du_buff * sdb; @@ -534,13 +575,14 @@ static void * ipcp_udp_sdu_loop(void * o) pthread_rwlock_unlock(&udp_data.flows_lock); - pthread_cleanup_push((void (*)(void *)) ipcp_sdb_release, + pthread_cleanup_push((void (*)(void *)) + ipcp_sdb_release, (void *) sdb); if (send(fd, shm_du_buff_head(sdb), shm_du_buff_tail(sdb) - shm_du_buff_head(sdb), 0) < 0) - log_err("Failed to send SDU."); + log_err("Failed to send PACKET."); pthread_cleanup_pop(true); } @@ -624,20 +666,20 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf) goto fail_bind; } - if (pthread_create(&udp_data.sdu_reader, + if (pthread_create(&udp_data.packet_reader, NULL, - ipcp_udp_sdu_reader, + ipcp_udp_packet_reader, NULL)) { 
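A note on the listener's recvfrom() call: the rewritten line passes (socklen_t *) sizeof(c_saddr) as the address-length argument. POSIX recvfrom() expects a pointer to a socklen_t variable that holds the buffer size on entry and the actual address length on return, so the conventional shape looks like the sketch below (a throwaway unbound socket with MSG_DONTWAIT is used just to keep the example self-contained).

#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
    int                sfd = socket(AF_INET, SOCK_DGRAM, 0);
    char               buf[256];
    struct sockaddr_in c_saddr;
    socklen_t          addrlen = sizeof(c_saddr);  /* in: buffer size */
    ssize_t            n;

    if (sfd < 0)
        return 1;

    /* addrlen is passed by address and updated with the sender's length */
    n = recvfrom(sfd, buf, sizeof(buf), MSG_DONTWAIT,
                 (struct sockaddr *) &c_saddr, &addrlen);

    printf("recvfrom returned %zd\n", n);

    return 0;
}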
ipcp_set_state(IPCP_INIT); - goto fail_sdu_reader; + goto fail_packet_reader; } - if (pthread_create(&udp_data.sduloop, + if (pthread_create(&udp_data.packet_loop, NULL, - ipcp_udp_sdu_loop, + ipcp_udp_packet_loop, NULL)) { ipcp_set_state(IPCP_INIT); - goto fail_sduloop; + goto fail_packet_loop; } log_dbg("Bootstrapped IPCP over UDP with pid %d.", getpid()); @@ -646,10 +688,10 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf) return 0; - fail_sduloop: - pthread_cancel(udp_data.sdu_reader); - pthread_join(udp_data.sdu_reader, NULL); - fail_sdu_reader: + fail_packet_loop: + pthread_cancel(udp_data.packet_reader); + pthread_join(udp_data.packet_reader, NULL); + fail_packet_reader: pthread_cancel(udp_data.handler); pthread_join(udp_data.handler, NULL); fail_bind: @@ -753,7 +795,8 @@ static uint32_t ddns_resolve(char * name, close(pipe_fd[0]); waitpid(pid, &wstatus, 0); - if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) + if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0 && + count != SHIM_UDP_BUF_SIZE) log_dbg("Succesfully communicated with nslookup."); else log_err("Failed to resolve DNS address."); @@ -946,7 +989,7 @@ static int ipcp_udp_query(const uint8_t * hash) static int ipcp_udp_flow_alloc(int fd, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct sockaddr_in r_saddr; /* server address */ struct sockaddr_in f_saddr; /* flow */ @@ -956,14 +999,13 @@ static int ipcp_udp_flow_alloc(int fd, log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(dst)); - assert(dst); + (void) qs; - if (cube > QOS_CUBE_DATA) { - log_dbg("Unsupported QoS requested."); - return -1; - } + assert(dst); skfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + if (skfd < 0) + return -1; /* this socket is for the flow */ memset((char *) &f_saddr, 0, sizeof(f_saddr)); @@ -1010,7 +1052,7 @@ static int ipcp_udp_flow_alloc(int fd, pthread_rwlock_unlock(&udp_data.flows_lock); - if (ipcp_udp_port_alloc(ip_addr, f_saddr.sin_port, dst, cube) < 0) { + if (ipcp_udp_port_alloc(ip_addr, f_saddr.sin_port, dst, qs) < 0) { pthread_rwlock_wrlock(&udp_data.flows_lock); udp_data.fd_to_uf[fd].udp = -1; @@ -1180,13 +1222,13 @@ int main(int argc, ipcp_shutdown(); if (ipcp_get_state() == IPCP_SHUTDOWN) { - pthread_cancel(udp_data.sduloop); + pthread_cancel(udp_data.packet_loop); pthread_cancel(udp_data.handler); - pthread_cancel(udp_data.sdu_reader); + pthread_cancel(udp_data.packet_reader); - pthread_join(udp_data.sduloop, NULL); + pthread_join(udp_data.packet_loop, NULL); pthread_join(udp_data.handler, NULL); - pthread_join(udp_data.sdu_reader, NULL); + pthread_join(udp_data.packet_reader, NULL); } udp_data_fini(); diff --git a/src/irmd/ipcp.c b/src/irmd/ipcp.c index dc8f1c6e..20aee79f 100644 --- a/src/irmd/ipcp.c +++ b/src/irmd/ipcp.c @@ -429,28 +429,29 @@ int ipcp_query(pid_t pid, } int ipcp_flow_alloc(pid_t pid, - int port_id, + int flow_id, pid_t n_pid, const uint8_t * dst, size_t len, - qoscube_t cube) + qosspec_t qs) { - ipcp_msg_t msg = IPCP_MSG__INIT; - ipcp_msg_t * recv_msg = NULL; - int ret = -1; + ipcp_msg_t msg = IPCP_MSG__INIT; + qosspec_msg_t qs_msg; + ipcp_msg_t * recv_msg = NULL; + int ret = -1; assert(dst); msg.code = IPCP_MSG_CODE__IPCP_FLOW_ALLOC; - msg.has_port_id = true; - msg.port_id = port_id; + msg.has_flow_id = true; + msg.flow_id = flow_id; msg.has_pid = true; msg.pid = n_pid; msg.has_hash = true; msg.hash.len = len; msg.hash.data = (uint8_t *) dst; - msg.has_qoscube = true; - msg.qoscube = cube; + qs_msg = spec_to_msg(&qs); + msg.qosspec = &qs_msg; recv_msg = 
send_recv_ipcp_msg(pid, &msg); if (recv_msg == NULL) @@ -468,7 +469,7 @@ int ipcp_flow_alloc(pid_t pid, } int ipcp_flow_alloc_resp(pid_t pid, - int port_id, + int flow_id, pid_t n_pid, int response) { @@ -477,8 +478,8 @@ int ipcp_flow_alloc_resp(pid_t pid, int ret = -1; msg.code = IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP; - msg.has_port_id = true; - msg.port_id = port_id; + msg.has_flow_id = true; + msg.flow_id = flow_id; msg.has_pid = true; msg.pid = n_pid; msg.has_response = true; @@ -500,15 +501,15 @@ int ipcp_flow_alloc_resp(pid_t pid, } int ipcp_flow_dealloc(pid_t pid, - int port_id) + int flow_id) { ipcp_msg_t msg = IPCP_MSG__INIT; ipcp_msg_t * recv_msg = NULL; int ret = -1; msg.code = IPCP_MSG_CODE__IPCP_FLOW_DEALLOC; - msg.has_port_id = true; - msg.port_id = port_id; + msg.has_flow_id = true; + msg.flow_id = flow_id; recv_msg = send_recv_ipcp_msg(pid, &msg); if (recv_msg == NULL) diff --git a/src/irmd/ipcp.h b/src/irmd/ipcp.h index 8ff062b2..8d9686c2 100644 --- a/src/irmd/ipcp.h +++ b/src/irmd/ipcp.h @@ -22,7 +22,6 @@ #include <ouroboros/ipcp.h> #include <ouroboros/sockets.h> -#include <ouroboros/qoscube.h> #include <sys/types.h> @@ -63,18 +62,18 @@ int ipcp_query(pid_t pid, size_t len); int ipcp_flow_alloc(pid_t pid, - int port_id, + int flow_id, pid_t n_pid, const uint8_t * dst, size_t len, - qoscube_t qos); + qosspec_t qs); int ipcp_flow_alloc_resp(pid_t pid, - int port_id, + int flow_id, pid_t n_pid, int response); int ipcp_flow_dealloc(pid_t pid, - int port_id); + int flow_id); #endif /* OUROBOROS_IRMD_IPCP_H */ diff --git a/src/irmd/irm_flow.c b/src/irmd/irm_flow.c index dfbe5e95..a0889f09 100644 --- a/src/irmd/irm_flow.c +++ b/src/irmd/irm_flow.c @@ -38,8 +38,8 @@ struct irm_flow * irm_flow_create(pid_t n_pid, pid_t n_1_pid, - int port_id, - qoscube_t qc) + int flow_id, + qosspec_t qs) { pthread_condattr_t cattr; struct irm_flow * f = malloc(sizeof(*f)); @@ -60,16 +60,16 @@ struct irm_flow * irm_flow_create(pid_t n_pid, f->n_pid = n_pid; f->n_1_pid = n_1_pid; - f->port_id = port_id; - f->qc = qc; + f->flow_id = flow_id; + f->qs = qs; - f->n_rb = shm_rbuff_create(n_pid, port_id); + f->n_rb = shm_rbuff_create(n_pid, flow_id); if (f->n_rb == NULL) { log_err("Could not create ringbuffer for process %d.", n_pid); goto fail_n_rbuff; } - f->n_1_rb = shm_rbuff_create(n_1_pid, port_id); + f->n_1_rb = shm_rbuff_create(n_1_pid, flow_id); if (f->n_1_rb == NULL) { log_err("Could not create ringbuffer for process %d.", n_1_pid); goto fail_n_1_rbuff; diff --git a/src/irmd/irm_flow.h b/src/irmd/irm_flow.h index d53984e8..26263107 100644 --- a/src/irmd/irm_flow.h +++ b/src/irmd/irm_flow.h @@ -24,8 +24,8 @@ #define OUROBOROS_IRMD_IRM_FLOW_H #include <ouroboros/list.h> +#include <ouroboros/qos.h> #include <ouroboros/shm_rbuff.h> -#include <ouroboros/qoscube.h> #include <sys/types.h> #include <pthread.h> @@ -42,12 +42,13 @@ enum flow_state { struct irm_flow { struct list_head next; - int port_id; - qoscube_t qc; + int flow_id; pid_t n_pid; pid_t n_1_pid; + qosspec_t qs; + struct shm_rbuff * n_rb; struct shm_rbuff * n_1_rb; @@ -60,8 +61,8 @@ struct irm_flow { struct irm_flow * irm_flow_create(pid_t n_pid, pid_t n_1_pid, - int port_id, - qoscube_t qc); + int flow_id, + qosspec_t qs); void irm_flow_destroy(struct irm_flow * f); diff --git a/src/irmd/main.c b/src/irmd/main.c index aeb43f0d..78fcf7b5 100644 --- a/src/irmd/main.c +++ b/src/irmd/main.c @@ -20,8 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ -#define _POSIX_C_SOURCE 200812L -#define __XSI_VISIBLE 500 +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200809L +#endif #include "config.h" @@ -115,12 +118,12 @@ struct { struct list_head spawned_pids; /* child processes */ pthread_rwlock_t reg_lock; /* lock for registration info */ - struct bmp * port_ids; /* port_ids for flows */ + struct bmp * flow_ids; /* flow_ids for flows */ struct list_head irm_flows; /* flow information */ pthread_rwlock_t flows_lock; /* lock for flows */ struct lockfile * lf; /* single irmd per system */ - struct shm_rdrbuff * rdrb; /* rdrbuff for SDUs */ + struct shm_rdrbuff * rdrb; /* rdrbuff for packets */ int sockfd; /* UNIX socket */ @@ -171,13 +174,13 @@ static void clear_irm_flow(struct irm_flow * f) { shm_rdrbuff_remove(irmd.rdrb, idx); } -static struct irm_flow * get_irm_flow(int port_id) +static struct irm_flow * get_irm_flow(int flow_id) { struct list_head * pos = NULL; list_for_each(pos, &irmd.irm_flows) { struct irm_flow * e = list_entry(pos, struct irm_flow, next); - if (e->port_id == port_id) + if (e->flow_id == flow_id) return e; } @@ -346,8 +349,10 @@ static struct ipcp_entry * get_ipcp_by_dst_name(const char * name, len = IPCP_HASH_LEN(e); hash = malloc(len); - if (hash == NULL) + if (hash == NULL) { + pthread_rwlock_unlock(&irmd.reg_lock); return NULL; + } str_hash(e->dir_hash_algo, hash, name); @@ -825,13 +830,13 @@ static int unbind_program(char * prog, if (name == NULL) prog_table_del(&irmd.prog_table, prog); else { - struct prog_entry * e = prog_table_get(&irmd.prog_table, prog); - prog_entry_del_name(e, name); - } + struct prog_entry * en = prog_table_get(&irmd.prog_table, prog); + prog_entry_del_name(en, name); - e = registry_get_entry(&irmd.registry, name); - if (e != NULL) - reg_entry_del_prog(e, prog); + e = registry_get_entry(&irmd.registry, name); + if (e != NULL) + reg_entry_del_prog(e, prog); + } pthread_rwlock_unlock(&irmd.reg_lock); @@ -853,13 +858,14 @@ static int unbind_process(pid_t pid, if (name == NULL) proc_table_del(&irmd.proc_table, pid); else { - struct proc_entry * e = proc_table_get(&irmd.proc_table, pid); - proc_entry_del_name(e, name); - } + struct proc_entry * en = proc_table_get(&irmd.proc_table, pid); + if (en != NULL) + proc_entry_del_name(en, name); - e = registry_get_entry(&irmd.registry, name); - if (e != NULL) - reg_entry_del_pid(e, pid); + e = registry_get_entry(&irmd.registry, name); + if (e != NULL) + reg_entry_del_pid(e, pid); + } pthread_rwlock_unlock(&irmd.reg_lock); @@ -919,6 +925,7 @@ static ssize_t list_ipcps(ipcp_info_msg_t *** ipcps, return 0; fail: + pthread_rwlock_unlock(&irmd.reg_lock); while (i >= 0) { free((*ipcps)[i]->layer); free((*ipcps)[i]->name); @@ -1145,7 +1152,7 @@ static int flow_accept(pid_t pid, pid_t pid_n1; pid_t pid_n; - int port_id; + int flow_id; int ret; pthread_rwlock_wrlock(&irmd.reg_lock); @@ -1177,10 +1184,8 @@ static int flow_accept(pid_t pid, if (ret == -1) return -EPIPE; - if (irmd_get_state() != IRMD_RUNNING) { - reg_entry_set_state(re, REG_NAME_NULL); + if (irmd_get_state() != IRMD_RUNNING) return -EIRMD; - } pthread_rwlock_rdlock(&irmd.flows_lock); @@ -1193,7 +1198,7 @@ static int flow_accept(pid_t pid, pid_n = f->n_pid; pid_n1 = f->n_1_pid; - port_id = f->port_id; + flow_id = f->flow_id; pthread_rwlock_unlock(&irmd.flows_lock); pthread_rwlock_rdlock(&irmd.reg_lock); @@ -1203,9 +1208,9 @@ static int flow_accept(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); pthread_rwlock_wrlock(&irmd.flows_lock); 
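The change to get_ipcp_by_dst_name() is a lock-correctness fix: the early return on a failed malloc() previously left irmd.reg_lock held. The pattern, isolated into a compilable sketch:

#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t reg_lock = PTHREAD_RWLOCK_INITIALIZER;

static void * lookup(size_t len)
{
    void * hash;

    pthread_rwlock_rdlock(&reg_lock);

    hash = malloc(len);
    if (hash == NULL) {
        /* the fix: drop the lock on this early return, too */
        pthread_rwlock_unlock(&reg_lock);
        return NULL;
    }

    /* ... hash the name and search the registry ... */

    pthread_rwlock_unlock(&reg_lock);

    return hash;
}

int main(void)
{
    free(lookup(32));

    return 0;
}

The same rule motivates the added unlock in the list_ipcps() failure path further down in this file.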
list_del(&f->next); - bmp_release(irmd.port_ids, f->port_id); + bmp_release(irmd.flow_ids, f->flow_id); pthread_rwlock_unlock(&irmd.flows_lock); - ipcp_flow_alloc_resp(pid_n1, port_id, pid_n, -1); + ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, -1); clear_irm_flow(f); irm_flow_set_state(f, FLOW_NULL); irm_flow_destroy(f); @@ -1223,9 +1228,9 @@ static int flow_accept(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); pthread_rwlock_wrlock(&irmd.flows_lock); list_del(&f->next); - bmp_release(irmd.port_ids, f->port_id); + bmp_release(irmd.flow_ids, f->flow_id); pthread_rwlock_unlock(&irmd.flows_lock); - ipcp_flow_alloc_resp(pid_n1, port_id, pid_n, -1); + ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, -1); clear_irm_flow(f); irm_flow_set_state(f, FLOW_NULL); irm_flow_destroy(f); @@ -1237,7 +1242,7 @@ static int flow_accept(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); - if (ipcp_flow_alloc_resp(pid_n1, port_id, pid_n, 0)) { + if (ipcp_flow_alloc_resp(pid_n1, flow_id, pid_n, 0)) { pthread_rwlock_wrlock(&irmd.flows_lock); list_del(&f->next); pthread_rwlock_unlock(&irmd.flows_lock); @@ -1250,7 +1255,7 @@ static int flow_accept(pid_t pid, irm_flow_set_state(f, FLOW_ALLOCATED); - log_info("Flow on port_id %d allocated.", f->port_id); + log_info("Flow on flow_id %d allocated.", f->flow_id); *fl = f; @@ -1259,13 +1264,13 @@ static int flow_accept(pid_t pid, static int flow_alloc(pid_t pid, const char * dst, - qoscube_t cube, + qosspec_t qs, struct timespec * timeo, struct irm_flow ** e) { struct irm_flow * f; struct ipcp_entry * ipcp; - int port_id; + int flow_id; int state; uint8_t * hash; @@ -1276,18 +1281,18 @@ static int flow_alloc(pid_t pid, } pthread_rwlock_wrlock(&irmd.flows_lock); - port_id = bmp_allocate(irmd.port_ids); - if (!bmp_is_id_valid(irmd.port_ids, port_id)) { + flow_id = bmp_allocate(irmd.flow_ids); + if (!bmp_is_id_valid(irmd.flow_ids, flow_id)) { pthread_rwlock_unlock(&irmd.flows_lock); - log_err("Could not allocate port_id."); + log_err("Could not allocate flow_id."); return -EBADF; } - f = irm_flow_create(pid, ipcp->pid, port_id, cube); + f = irm_flow_create(pid, ipcp->pid, flow_id, qs); if (f == NULL) { - bmp_release(irmd.port_ids, port_id); + bmp_release(irmd.flow_ids, flow_id); pthread_rwlock_unlock(&irmd.flows_lock); - log_err("Could not allocate port_id."); + log_err("Could not allocate flow_id."); return -ENOMEM; } @@ -1304,8 +1309,8 @@ static int flow_alloc(pid_t pid, str_hash(ipcp->dir_hash_algo, hash, dst); - if (ipcp_flow_alloc(ipcp->pid, port_id, pid, hash, - IPCP_HASH_LEN(ipcp), cube)) { + if (ipcp_flow_alloc(ipcp->pid, flow_id, pid, hash, + IPCP_HASH_LEN(ipcp), qs)) { /* sanitizer cleans this */ log_info("Flow_allocation failed."); free(hash); @@ -1329,13 +1334,13 @@ static int flow_alloc(pid_t pid, *e = f; - log_info("Flow on port_id %d allocated.", port_id); + log_info("Flow on flow_id %d allocated.", flow_id); return 0; } static int flow_dealloc(pid_t pid, - int port_id) + int flow_id) { pid_t n_1_pid = -1; int ret = 0; @@ -1344,10 +1349,10 @@ static int flow_dealloc(pid_t pid, pthread_rwlock_wrlock(&irmd.flows_lock); - f = get_irm_flow(port_id); + f = get_irm_flow(flow_id); if (f == NULL) { pthread_rwlock_unlock(&irmd.flows_lock); - log_dbg("Deallocate unknown port %d by %d.", port_id, pid); + log_dbg("Deallocate unknown port %d by %d.", flow_id, pid); return 0; } @@ -1369,19 +1374,19 @@ static int flow_dealloc(pid_t pid, irm_flow_set_state(f, FLOW_NULL); clear_irm_flow(f); irm_flow_destroy(f); - bmp_release(irmd.port_ids, port_id); - log_info("Completed 
deallocation of port_id %d by process %d.", - port_id, pid); + bmp_release(irmd.flow_ids, flow_id); + log_info("Completed deallocation of flow_id %d by process %d.", + flow_id, pid); } else { irm_flow_set_state(f, FLOW_DEALLOC_PENDING); - log_dbg("Partial deallocation of port_id %d by process %d.", - port_id, pid); + log_dbg("Partial deallocation of flow_id %d by process %d.", + flow_id, pid); } pthread_rwlock_unlock(&irmd.flows_lock); if (n_1_pid != -1) - ret = ipcp_flow_dealloc(n_1_pid, port_id); + ret = ipcp_flow_dealloc(n_1_pid, flow_id); return ret; } @@ -1413,7 +1418,7 @@ static pid_t auto_execute(char ** argv) static struct irm_flow * flow_req_arr(pid_t pid, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { struct reg_entry * re = NULL; struct prog_entry * a = NULL; @@ -1423,7 +1428,7 @@ static struct irm_flow * flow_req_arr(pid_t pid, struct pid_el * c_pid; struct ipcp_entry * ipcp; pid_t h_pid = -1; - int port_id = -1; + int flow_id = -1; struct timespec wt = {IRMD_REQ_ARR_TIMEOUT / 1000, (IRMD_REQ_ARR_TIMEOUT % 1000) * MILLION}; @@ -1510,17 +1515,17 @@ static struct irm_flow * flow_req_arr(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); pthread_rwlock_wrlock(&irmd.flows_lock); - port_id = bmp_allocate(irmd.port_ids); - if (!bmp_is_id_valid(irmd.port_ids, port_id)) { + flow_id = bmp_allocate(irmd.flow_ids); + if (!bmp_is_id_valid(irmd.flow_ids, flow_id)) { pthread_rwlock_unlock(&irmd.flows_lock); return NULL; } - f = irm_flow_create(h_pid, pid, port_id, cube); + f = irm_flow_create(h_pid, pid, flow_id, qs); if (f == NULL) { - bmp_release(irmd.port_ids, port_id); + bmp_release(irmd.flow_ids, flow_id); pthread_rwlock_unlock(&irmd.flows_lock); - log_err("Could not allocate port_id."); + log_err("Could not allocate flow_id."); return NULL; } @@ -1536,7 +1541,7 @@ static struct irm_flow * flow_req_arr(pid_t pid, pthread_rwlock_unlock(&irmd.reg_lock); pthread_rwlock_wrlock(&irmd.flows_lock); clear_irm_flow(f); - bmp_release(irmd.port_ids, f->port_id); + bmp_release(irmd.flow_ids, f->flow_id); list_del(&f->next); pthread_rwlock_unlock(&irmd.flows_lock); log_err("Could not get process table entry for %d.", h_pid); @@ -1553,14 +1558,14 @@ static struct irm_flow * flow_req_arr(pid_t pid, return f; } -static int flow_alloc_reply(int port_id, +static int flow_alloc_reply(int flow_id, int response) { struct irm_flow * f; pthread_rwlock_rdlock(&irmd.flows_lock); - f = get_irm_flow(port_id); + f = get_irm_flow(flow_id); if (f == NULL) { pthread_rwlock_unlock(&irmd.flows_lock); return -1; @@ -1626,8 +1631,8 @@ static void irm_fini(void) pthread_rwlock_wrlock(&irmd.flows_lock); - if (irmd.port_ids != NULL) - bmp_destroy(irmd.port_ids); + if (irmd.flow_ids != NULL) + bmp_destroy(irmd.flow_ids); list_for_each_safe(p, h, &irmd.irm_flows) { struct irm_flow * f = list_entry(p, struct irm_flow, next); @@ -1754,14 +1759,14 @@ void * irm_sanitize(void * o) list_for_each_safe(p, h, &irmd.irm_flows) { int ipcpi; - int port_id; + int flow_id; struct irm_flow * f = list_entry(p, struct irm_flow, next); if (irm_flow_get_state(f) == FLOW_ALLOC_PENDING && ts_diff_ms(&f->t0, &now) > IRMD_FLOW_TIMEOUT) { - log_dbg("Pending port_id %d timed out.", - f->port_id); + log_dbg("Pending flow_id %d timed out.", + f->flow_id); f->n_pid = -1; irm_flow_set_state(f, FLOW_DEALLOC_PENDING); continue; @@ -1771,16 +1776,16 @@ void * irm_sanitize(void * o) struct shm_flow_set * set; log_dbg("Process %d gone, deallocating " "flow %d.", - f->n_pid, f->port_id); + f->n_pid, f->flow_id); set = shm_flow_set_open(f->n_pid); if 
(set != NULL) shm_flow_set_destroy(set); f->n_pid = -1; irm_flow_set_state(f, FLOW_DEALLOC_PENDING); ipcpi = f->n_1_pid; - port_id = f->port_id; + flow_id = f->flow_id; pthread_rwlock_unlock(&irmd.flows_lock); - ipcp_flow_dealloc(ipcpi, port_id); + ipcp_flow_dealloc(ipcpi, flow_id); pthread_rwlock_wrlock(&irmd.flows_lock); continue; } @@ -1788,7 +1793,7 @@ void * irm_sanitize(void * o) if (kill(f->n_1_pid, 0) < 0) { struct shm_flow_set * set; log_err("IPCP %d gone, flow %d removed.", - f->n_1_pid, f->port_id); + f->n_1_pid, f->flow_id); set = shm_flow_set_open(f->n_pid); if (set != NULL) shm_flow_set_destroy(set); @@ -1988,41 +1993,43 @@ static void * mainloop(void * o) case IRM_MSG_CODE__IRM_FLOW_ACCEPT: result = flow_accept(msg->pid, timeo, &e); if (result == 0) { - ret_msg->has_port_id = true; - ret_msg->port_id = e->port_id; + qosspec_msg_t qs_msg; + ret_msg->has_flow_id = true; + ret_msg->flow_id = e->flow_id; ret_msg->has_pid = true; ret_msg->pid = e->n_1_pid; - ret_msg->has_qoscube = true; - ret_msg->qoscube = e->qc; + qs_msg = spec_to_msg(&e->qs); + ret_msg->qosspec = &qs_msg; } break; case IRM_MSG_CODE__IRM_FLOW_ALLOC: result = flow_alloc(msg->pid, msg->dst, - msg->qoscube, timeo, &e); + msg_to_spec(msg->qosspec), + timeo, &e); if (result == 0) { - ret_msg->has_port_id = true; - ret_msg->port_id = e->port_id; + ret_msg->has_flow_id = true; + ret_msg->flow_id = e->flow_id; ret_msg->has_pid = true; ret_msg->pid = e->n_1_pid; } break; case IRM_MSG_CODE__IRM_FLOW_DEALLOC: - result = flow_dealloc(msg->pid, msg->port_id); + result = flow_dealloc(msg->pid, msg->flow_id); break; case IRM_MSG_CODE__IPCP_FLOW_REQ_ARR: e = flow_req_arr(msg->pid, msg->hash.data, - msg->qoscube); + msg_to_spec(msg->qosspec)); result = (e == NULL ? -1 : 0); if (result == 0) { - ret_msg->has_port_id = true; - ret_msg->port_id = e->port_id; + ret_msg->has_flow_id = true; + ret_msg->flow_id = e->flow_id; ret_msg->has_pid = true; ret_msg->pid = e->n_pid; } break; case IRM_MSG_CODE__IPCP_FLOW_ALLOC_REPLY: - result = flow_alloc_reply(msg->port_id, msg->response); + result = flow_alloc_reply(msg->flow_id, msg->response); break; default: log_err("Don't know that message code."); @@ -2056,6 +2063,8 @@ static void * mainloop(void * o) irm_msg__pack(ret_msg, buffer.data); + /* Can't free the qosspec. 
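In mainloop(), the QoS spec attached to a reply is a stack variable: qs_msg = spec_to_msg(&e->qs) and ret_msg->qosspec = &qs_msg. That is why the pointer is reset to NULL right after packing, before irm_msg__free_unpacked(); otherwise protobuf-c would try to free memory that lives on the mainloop stack. A reduced model of that ownership rule, with stand-in types in place of the generated protobuf-c messages:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the generated protobuf-c message types. */
typedef struct { int delay; } qosspec_msg_sketch_t;
typedef struct { qosspec_msg_sketch_t * qosspec; } irm_msg_sketch_t;

/* Models the free_unpacked behaviour: owned submessages get freed. */
static void irm_msg_free_sketch(irm_msg_sketch_t * msg)
{
    free(msg->qosspec);
}

int main(void)
{
    irm_msg_sketch_t     ret_msg = { NULL };
    qosspec_msg_sketch_t qs_msg  = { 1000 };  /* stack allocated, as in mainloop() */

    ret_msg.qosspec = &qs_msg;  /* attached only for packing the reply */

    /* ... pack and send the reply here ... */

    ret_msg.qosspec = NULL;     /* can't let the free walk onto the stack */
    irm_msg_free_sketch(&ret_msg);

    printf("ok\n");

    return 0;
}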
*/ + ret_msg->qosspec = NULL; irm_msg__free_unpacked(ret_msg, NULL); pthread_cleanup_push(close_ptr, &sfd); @@ -2134,10 +2143,10 @@ static int irm_init(void) list_head_init(&irmd.irm_flows); list_head_init(&irmd.cmds); - irmd.port_ids = bmp_create(SYS_MAX_FLOWS, 0); - if (irmd.port_ids == NULL) { - log_err("Failed to create port_ids bitmap."); - goto fail_port_ids; + irmd.flow_ids = bmp_create(SYS_MAX_FLOWS, 0); + if (irmd.flow_ids == NULL) { + log_err("Failed to create flow_ids bitmap."); + goto fail_flow_ids; } if ((irmd.lf = lockfile_create()) == NULL) { @@ -2160,6 +2169,11 @@ static int irm_init(void) } } + if (irmd.lf == NULL) { + log_err("Failed to create lockfile."); + goto fail_lockfile; + } + if (stat(SOCK_PATH, &st) == -1) { if (mkdir(SOCK_PATH, 0777)) { log_err("Failed to create sockets directory."); @@ -2184,11 +2198,6 @@ static int irm_init(void) goto fail_sock_opt; } - if (irmd.lf == NULL) { - log_err("Failed to create lockfile."); - goto fail_sock_opt; - } - if ((irmd.rdrb = shm_rdrbuff_create()) == NULL) { log_err("Failed to create rdrbuff."); goto fail_rdrbuff; @@ -2207,7 +2216,7 @@ static int irm_init(void) gcry_control(GCRYCTL_INITIALIZATION_FINISHED); #endif - irmd.state = IRMD_RUNNING; + irmd_set_state(IRMD_RUNNING); log_info("Ouroboros IPC Resource Manager daemon started..."); @@ -2226,8 +2235,8 @@ static int irm_init(void) fail_stat: lockfile_destroy(irmd.lf); fail_lockfile: - bmp_destroy(irmd.port_ids); - fail_port_ids: + bmp_destroy(irmd.flow_ids); + fail_flow_ids: pthread_cond_destroy(&irmd.cmd_cond); fail_cmd_cond: pthread_mutex_destroy(&irmd.cmd_lock); diff --git a/src/irmd/proc_table.c b/src/irmd/proc_table.c index e8d08447..6f9d8e20 100644 --- a/src/irmd/proc_table.c +++ b/src/irmd/proc_table.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -168,8 +172,7 @@ void proc_entry_del_name(struct proc_entry * e, struct str_el * s = list_entry(p, struct str_el, next); if (!strcmp(name, s->str)) { list_del(&s->next); - if (s->str != NULL) - free(s->str); + free(s->str); free(s); } } diff --git a/src/irmd/prog_table.c b/src/irmd/prog_table.c index bd69e156..9aa9be9d 100644 --- a/src/irmd/prog_table.c +++ b/src/irmd/prog_table.c @@ -81,8 +81,7 @@ void prog_entry_destroy(struct prog_entry * e) list_for_each_safe(p, h, &e->names) { struct str_el * s = list_entry(p, struct str_el, next); list_del(&s->next); - if (s->str != NULL) - free(s->str); + free(s->str); free(s); } diff --git a/src/irmd/registry.c b/src/irmd/registry.c index 145a7452..6c86da24 100644 --- a/src/irmd/registry.c +++ b/src/irmd/registry.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200809L +#endif #include "config.h" diff --git a/src/irmd/registry.h b/src/irmd/registry.h index 62d90c39..c9ea8cce 100644 --- a/src/irmd/registry.h +++ b/src/irmd/registry.h @@ -26,7 +26,6 @@ #include <ouroboros/hash.h> #include <ouroboros/ipcp.h> #include <ouroboros/list.h> -#include <ouroboros/qoscube.h> #include "proc_table.h" #include "prog_table.h" diff --git a/src/lib/CMakeLists.txt b/src/lib/CMakeLists.txt index 47e93d61..1c2007c3 100644 --- a/src/lib/CMakeLists.txt +++ b/src/lib/CMakeLists.txt @@ -6,6 +6,8 @@ include_directories(${CMAKE_BINARY_DIR}/include) protobuf_generate_c(IRM_PROTO_SRCS IRM_PROTO_HDRS irmd_messages.proto) protobuf_generate_c(IPCP_PROTO_SRCS IPCP_PROTO_HDRS ipcpd_messages.proto) +protobuf_generate_c(QOSSPEC_PROTO_SRCS QOSSPEC_PROTO_HDRS + qosspec.proto) protobuf_generate_c(LAYER_CONFIG_PROTO_SRCS LAYER_CONFIG_PROTO_HDRS ipcp_config.proto) protobuf_generate_c(CACEP_PROTO_SRCS CACEP_PROTO_HDRS cacep.proto) @@ -136,7 +138,7 @@ mark_as_advanced(LIBRT_LIBRARIES LIBPTHREAD_LIBRARIES LIBGCRYPT_INCLUDE_DIR SYS_RND_HDR) set(SHM_BUFFER_SIZE 4096 CACHE STRING - "Number of blocks in SDU buffer, must be a power of 2") + "Number of blocks in packet buffer, must be a power of 2") set(SYS_MAX_FLOWS 10240 CACHE STRING "Maximum number of total flows for this system") set(PROG_MAX_FLOWS 4096 CACHE STRING @@ -169,11 +171,13 @@ set(SHM_FLOW_SET_PREFIX "/${SHM_PREFIX}.set." CACHE INTERNAL set(SHM_RDRB_NAME "/${SHM_PREFIX}.rdrb" CACHE INTERNAL "Name for the main POSIX shared memory buffer") set(SHM_RDRB_BLOCK_SIZE "sysconf(_SC_PAGESIZE)" CACHE STRING - "SDU buffer block size, multiple of pagesize for performance") + "Packet buffer block size, multiple of pagesize for performance") set(SHM_RDRB_MULTI_BLOCK true CACHE BOOL - "SDU buffer multiblock SDU support") + "Packet buffer multiblock packet support") set(SHM_RBUFF_LOCKLESS 0 CACHE BOOL "Enable shared memory lockless rbuff support") +set(QOS_DISABLE_CRC 0 CACHE BOOL + "Ignores ber setting on all QoS cubes") set(SOURCE_FILES_DEV # Add source files here @@ -213,7 +217,7 @@ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/config.h" @ONLY) add_library(ouroboros-common SHARED ${SOURCE_FILES_COMMON} ${IRM_PROTO_SRCS} - ${IPCP_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS}) + ${IPCP_PROTO_SRCS} ${LAYER_CONFIG_PROTO_SRCS} ${QOSSPEC_PROTO_SRCS}) add_library(ouroboros-dev SHARED ${SOURCE_FILES_DEV} ${CACEP_PROTO_SRCS}) diff --git a/src/lib/cacep.c b/src/lib/cacep.c index 6efb7295..12751078 100644 --- a/src/lib/cacep.c +++ b/src/lib/cacep.c @@ -32,7 +32,7 @@ #include "cacep.pb-c.h" typedef CacepMsg cacep_msg_t; -#define BUF_SIZE 64 +#define BUF_SIZE 128 static int read_msg(int fd, struct conn_info * info) @@ -49,6 +49,11 @@ static int read_msg(int fd, if (msg == NULL) return -1; + if (strlen(msg->comp_name) > CACEP_BUF_STRLEN) { + cacep_msg__free_unpacked(msg, NULL); + return -1; + } + strcpy(info->comp_name, msg->comp_name); strcpy(info->protocol, msg->protocol); diff --git a/src/lib/config.h.in b/src/lib/config.h.in index 69e7f4b0..e8cfeba3 100644 --- a/src/lib/config.h.in +++ b/src/lib/config.h.in @@ -28,6 +28,7 @@ #cmakedefine SHM_RBUFF_LOCKLESS #cmakedefine SHM_RDRB_MULTI_BLOCK +#cmakedefine QOS_DISABLE_CRC #define SHM_RBUFF_PREFIX "@SHM_RBUFF_PREFIX@" #define SHM_LOCKFILE_NAME "@SHM_LOCKFILE_NAME@" diff --git a/src/lib/dev.c b/src/lib/dev.c index e69fec26..2a5c3f83 100644 --- a/src/lib/dev.c +++ 
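read_msg() in cacep.c now rejects a connection message whose comp_name would not fit the fixed-size conn_info buffer before doing the strcpy(). The same guard as a stand-alone sketch; CACEP_BUF_STRLEN's real value is not visible in this diff, so a placeholder is used, and the conn_info stand-in only has the one field of interest.

#include <stdio.h>
#include <string.h>

#define CACEP_BUF_STRLEN 63  /* placeholder; the real value is defined elsewhere */

struct conn_info_sketch {
    char comp_name[CACEP_BUF_STRLEN + 1];
};

static int copy_comp_name(struct conn_info_sketch * info, const char * comp_name)
{
    if (strlen(comp_name) > CACEP_BUF_STRLEN)
        return -1;  /* reject instead of overflowing the destination */

    strcpy(info->comp_name, comp_name);

    return 0;
}

int main(void)
{
    struct conn_info_sketch info;

    printf("short name accepted: %d\n", copy_comp_name(&info, "dt"));

    return 0;
}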
b/src/lib/dev.c @@ -20,9 +20,14 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200809L +#endif + #include <ouroboros/endian.h> -#define _POSIX_C_SOURCE 200809L #include "config.h" #include <ouroboros/hash.h> @@ -39,7 +44,6 @@ #include <ouroboros/shm_rbuff.h> #include <ouroboros/utils.h> #include <ouroboros/fqueue.h> -#include <ouroboros/qoscube.h> #include <stdlib.h> #include <string.h> @@ -56,6 +60,8 @@ #define NO_PART -1 #define DONE_PART -2 +#define CRCLEN (sizeof(uint32_t)) + struct flow_set { size_t idx; }; @@ -89,9 +95,8 @@ struct flow { struct shm_rbuff * rx_rb; struct shm_rbuff * tx_rb; struct shm_flow_set * set; - int port_id; + int flow_id; int oflags; - qoscube_t cube; qosspec_t spec; ssize_t part_idx; @@ -166,12 +171,12 @@ static void port_set_state(struct port * p, pthread_mutex_unlock(&p->state_lock); } -static enum port_state port_wait_assign(int port_id) +static enum port_state port_wait_assign(int flow_id) { enum port_state state; struct port * p; - p = &ai.ports[port_id]; + p = &ai.ports[flow_id]; pthread_mutex_lock(&p->state_lock); @@ -230,17 +235,16 @@ static void flow_clear(int fd) { memset(&ai.flows[fd], 0, sizeof(ai.flows[fd])); - ai.flows[fd].port_id = -1; + ai.flows[fd].flow_id = -1; ai.flows[fd].pid = -1; - ai.flows[fd].cube = QOS_CUBE_BE; } static void flow_fini(int fd) { assert(fd >= 0 && fd < SYS_MAX_FLOWS); - if (ai.flows[fd].port_id != -1) { - port_destroy(&ai.ports[ai.flows[fd].port_id]); + if (ai.flows[fd].flow_id != -1) { + port_destroy(&ai.ports[ai.flows[fd].flow_id]); bmp_release(ai.fds, fd); } @@ -256,7 +260,7 @@ static void flow_fini(int fd) if (ai.flows[fd].set != NULL) { shm_flow_set_notify(ai.flows[fd].set, - ai.flows[fd].port_id, + ai.flows[fd].flow_id, FLOW_DEALLOC); shm_flow_set_close(ai.flows[fd].set); } @@ -267,9 +271,9 @@ static void flow_fini(int fd) flow_clear(fd); } -static int flow_init(int port_id, +static int flow_init(int flow_id, pid_t pid, - qoscube_t qc) + qosspec_t qs) { int fd; int err = -ENOMEM; @@ -282,11 +286,11 @@ static int flow_init(int port_id, goto fail_fds; } - ai.flows[fd].rx_rb = shm_rbuff_open(ai.pid, port_id); + ai.flows[fd].rx_rb = shm_rbuff_open(ai.pid, flow_id); if (ai.flows[fd].rx_rb == NULL) goto fail; - ai.flows[fd].tx_rb = shm_rbuff_open(pid, port_id); + ai.flows[fd].tx_rb = shm_rbuff_open(pid, flow_id); if (ai.flows[fd].tx_rb == NULL) goto fail; @@ -294,16 +298,15 @@ static int flow_init(int port_id, if (ai.flows[fd].set == NULL) goto fail; - ai.flows[fd].port_id = port_id; + ai.flows[fd].flow_id = flow_id; ai.flows[fd].oflags = FLOWFDEFAULT; ai.flows[fd].pid = pid; - ai.flows[fd].cube = qc; - ai.flows[fd].spec = qos_cube_to_spec(qc); ai.flows[fd].part_idx = NO_PART; + ai.flows[fd].spec = qs; - ai.ports[port_id].fd = fd; + ai.ports[flow_id].fd = fd; - port_set_state(&ai.ports[port_id], PORT_ID_ASSIGNED); + port_set_state(&ai.ports[flow_id], PORT_ID_ASSIGNED); pthread_rwlock_unlock(&ai.lock); @@ -447,7 +450,7 @@ static void fini(void) pthread_rwlock_wrlock(&ai.lock); for (i = 0; i < PROG_MAX_FLOWS; ++i) { - if (ai.flows[i].port_id != -1) { + if (ai.flows[i].flow_id != -1) { ssize_t idx; shm_rbuff_set_acl(ai.flows[i].rx_rb, ACL_FLOWDOWN); while ((idx = shm_rbuff_read(ai.flows[i].rx_rb)) >= 0) @@ -493,7 +496,6 @@ int flow_accept(qosspec_t * qs, irm_msg_t msg = IRM_MSG__INIT; irm_msg_t * recv_msg; int fd; - qoscube_t qc; msg.code = IRM_MSG_CODE__IRM_FLOW_ACCEPT; msg.has_pid = true; @@ -521,15 
+523,14 @@ int flow_accept(qosspec_t * qs, return res; } - if (!recv_msg->has_pid || !recv_msg->has_port_id || - !recv_msg->has_qoscube) { + if (!recv_msg->has_pid || !recv_msg->has_flow_id || + recv_msg->qosspec == NULL) { irm_msg__free_unpacked(recv_msg, NULL); return -EIRMD; } - qc = recv_msg->qoscube; - - fd = flow_init(recv_msg->port_id, recv_msg->pid, recv_msg->qoscube); + fd = flow_init(recv_msg->flow_id, recv_msg->pid, + msg_to_spec(recv_msg->qosspec)); irm_msg__free_unpacked(recv_msg, NULL); @@ -538,12 +539,10 @@ int flow_accept(qosspec_t * qs, pthread_rwlock_wrlock(&ai.lock); - /* FIXME: check if FRCT is needed based on qc? */ - assert(ai.flows[fd].frcti == NULL); - if (qc != QOS_CUBE_RAW) { - ai.flows[fd].frcti = frcti_create(fd, qc); + if (ai.flows[fd].spec.in_order != 0) { + ai.flows[fd].frcti = frcti_create(fd); if (ai.flows[fd].frcti == NULL) { flow_fini(fd); pthread_rwlock_unlock(&ai.lock); @@ -563,21 +562,21 @@ int flow_alloc(const char * dst, qosspec_t * qs, const struct timespec * timeo) { - irm_msg_t msg = IRM_MSG__INIT; - irm_msg_t * recv_msg; - qoscube_t qc = QOS_CUBE_RAW; - int fd; - - msg.code = IRM_MSG_CODE__IRM_FLOW_ALLOC; - msg.dst = (char *) dst; - msg.has_pid = true; - msg.has_qoscube = true; - msg.pid = ai.pid; + irm_msg_t msg = IRM_MSG__INIT; + qosspec_msg_t qs_msg = QOSSPEC_MSG__INIT; + irm_msg_t * recv_msg; + int fd; +#ifdef QOS_DISABLE_CRC if (qs != NULL) - qc = qos_spec_to_cube(*qs); - - msg.qoscube = qc; + qs->ber = 1; +#endif + msg.code = IRM_MSG_CODE__IRM_FLOW_ALLOC; + msg.dst = (char *) dst; + msg.has_pid = true; + msg.pid = ai.pid; + qs_msg = spec_to_msg(qs); + msg.qosspec = &qs_msg; if (timeo != NULL) { msg.has_timeo_sec = true; @@ -601,12 +600,13 @@ int flow_alloc(const char * dst, return res; } - if (!recv_msg->has_pid || !recv_msg->has_port_id) { + if (!recv_msg->has_pid || !recv_msg->has_flow_id) { irm_msg__free_unpacked(recv_msg, NULL); return -EIRMD; } - fd = flow_init(recv_msg->port_id, recv_msg->pid, qc); + fd = flow_init(recv_msg->flow_id, recv_msg->pid, + qs == NULL ? qos_raw : *qs); irm_msg__free_unpacked(recv_msg, NULL); @@ -617,8 +617,8 @@ int flow_alloc(const char * dst, assert(ai.flows[fd].frcti == NULL); - if (qc != QOS_CUBE_RAW) { - ai.flows[fd].frcti = frcti_create(fd, qc); + if (ai.flows[fd].spec.in_order != 0) { + ai.flows[fd].frcti = frcti_create(fd); if (ai.flows[fd].frcti == NULL) { flow_fini(fd); pthread_rwlock_unlock(&ai.lock); @@ -640,15 +640,15 @@ int flow_dealloc(int fd) return -EINVAL; msg.code = IRM_MSG_CODE__IRM_FLOW_DEALLOC; - msg.has_port_id = true; + msg.has_flow_id = true; msg.has_pid = true; msg.pid = ai.pid; pthread_rwlock_rdlock(&ai.lock); - assert(ai.flows[fd].port_id >= 0); + assert(ai.flows[fd].flow_id >= 0); - msg.port_id = ai.flows[fd].port_id; + msg.flow_id = ai.flows[fd].flow_id; pthread_rwlock_unlock(&ai.lock); @@ -676,7 +676,6 @@ int fccntl(int fd, int cmd, ...) 
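The new QOS_DISABLE_CRC build option ties into the checksum logic further down: flow_alloc() forces qs->ber to 1 when the option is set, and the CRC trailer is only added and verified on flows whose spec has ber == 0, so the per-packet checksum is effectively compiled out. A tiny sketch of that decision; the qosspec stand-in and the simplification of folding the ber override into one predicate are this example's own assumptions.

#include <stdint.h>
#include <stdio.h>

struct qosspec_sketch { uint32_t ber; };  /* stand-in for qosspec_t */

static int want_crc(const struct qosspec_sketch * qs)
{
#ifdef QOS_DISABLE_CRC
    (void) qs;
    return 0;             /* ber was forced to 1 in flow_alloc() */
#else
    return qs->ber == 0;  /* same test as flow_write()/flow_read() */
#endif
}

int main(void)
{
    struct qosspec_sketch qs = { 0 };

    printf("CRC trailer in use: %d\n", want_crc(&qs));

    return 0;
}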
{ - uint16_t sflags; uint32_t * fflags; uint16_t * cflags; va_list l; @@ -696,7 +695,7 @@ int fccntl(int fd, pthread_rwlock_wrlock(&ai.lock); - if (flow->port_id < 0) { + if (flow->flow_id < 0) { pthread_rwlock_unlock(&ai.lock); va_end(l); return -ENOTALLOC; @@ -768,13 +767,13 @@ int fccntl(int fd, rx_acl |= ACL_FLOWDOWN; tx_acl |= ACL_FLOWDOWN; shm_flow_set_notify(flow->set, - flow->port_id, + flow->flow_id, FLOW_DOWN); } else { rx_acl &= ~ACL_FLOWDOWN; tx_acl &= ~ACL_FLOWDOWN; shm_flow_set_notify(flow->set, - flow->port_id, + flow->flow_id, FLOW_UP); } @@ -788,11 +787,6 @@ int fccntl(int fd, goto einval; *fflags = flow->oflags; break; - case FRCTSFLAGS: - sflags = (uint16_t) va_arg(l, int); - if (flow->frcti == NULL || frcti_setconf(flow->frcti, sflags)) - goto eperm; - break; case FRCTGFLAGS: cflags = (uint16_t *) va_arg(l, int *); if (cflags == NULL) @@ -824,16 +818,40 @@ int fccntl(int fd, return -EPERM; } +static int chk_crc(struct shm_du_buff * sdb) +{ + uint32_t crc; + uint8_t * head = shm_du_buff_head(sdb); + uint8_t * tail = shm_du_buff_tail_release(sdb, CRCLEN); + + mem_hash(HASH_CRC32, &crc, head, tail - head); + + return !(crc == *((uint32_t *) tail)); +} + +static int add_crc(struct shm_du_buff * sdb) +{ + uint8_t * head = shm_du_buff_head(sdb); + uint8_t * tail = shm_du_buff_tail_alloc(sdb, CRCLEN); + if (tail == NULL) + return -1; + + mem_hash(HASH_CRC32, tail, head, tail - head); + + return 0; +} + ssize_t flow_write(int fd, const void * buf, size_t count) { - struct flow * flow; - ssize_t idx; - int ret; - int flags; - struct timespec abs; - struct timespec * abstime = NULL; + struct flow * flow; + ssize_t idx; + int ret; + int flags; + struct timespec abs; + struct timespec * abstime = NULL; + struct shm_du_buff * sdb; if (buf == NULL) return 0; @@ -847,7 +865,7 @@ ssize_t flow_write(int fd, pthread_rwlock_rdlock(&ai.lock); - if (flow->port_id < 0) { + if (flow->flow_id < 0) { pthread_rwlock_unlock(&ai.lock); return -ENOTALLOC; } @@ -880,18 +898,25 @@ ssize_t flow_write(int fd, if (idx < 0) return idx; - if (frcti_snd(flow->frcti, shm_rdrbuff_get(ai.rdrb, idx)) < 0) { + sdb = shm_rdrbuff_get(ai.rdrb, idx); + + if (frcti_snd(flow->frcti, sdb) < 0) { + shm_rdrbuff_remove(ai.rdrb, idx); + return -ENOMEM; + } + + if (flow->spec.ber == 0 && add_crc(sdb) != 0) { shm_rdrbuff_remove(ai.rdrb, idx); return -ENOMEM; } pthread_rwlock_rdlock(&ai.lock); - ret = shm_rbuff_write(ai.flows[fd].tx_rb, idx); + ret = shm_rbuff_write(flow->tx_rb, idx); if (ret < 0) shm_rdrbuff_remove(ai.rdrb, idx); else - shm_flow_set_notify(flow->set, flow->port_id, FLOW_PKT); + shm_flow_set_notify(flow->set, flow->flow_id, FLOW_PKT); pthread_rwlock_unlock(&ai.lock); @@ -906,7 +931,7 @@ ssize_t flow_read(int fd, { ssize_t idx; ssize_t n; - uint8_t * sdu; + uint8_t * packet; struct shm_rbuff * rb; struct shm_du_buff * sdb; struct timespec abs; @@ -929,7 +954,7 @@ ssize_t flow_read(int fd, pthread_rwlock_rdlock(&ai.lock); - if (flow->port_id < 0) { + if (flow->flow_id < 0) { pthread_rwlock_unlock(&ai.lock); return -ENOTALLOC; } @@ -955,23 +980,25 @@ ssize_t flow_read(int fd, if (idx < 0) return idx; sdb = shm_rdrbuff_get(ai.rdrb, idx); + if (flow->spec.ber == 0 && chk_crc(sdb) != 0) + continue; } while (frcti_rcv(flow->frcti, sdb) != 0); } } - n = shm_rdrbuff_read(&sdu, ai.rdrb, idx); + n = shm_rdrbuff_read(&packet, ai.rdrb, idx); assert(n >= 0); if (n <= (ssize_t) count) { - memcpy(buf, sdu, n); + memcpy(buf, packet, n); shm_rdrbuff_remove(ai.rdrb, idx); flow->part_idx = (partrd && n == (ssize_t) count) ? 
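Aside (illustrative sketch, not part of the commit): the new add_crc()/chk_crc() helpers above trail each packet with a 32-bit CRC, computed via the library's mem_hash(HASH_CRC32, ...) between the shm_du_buff head and tail, and flow_write()/flow_read() apply them only when the flow's qosspec has ber == 0. The same trailer technique in a self-contained form, with a local CRC-32 routine and hypothetical pkt_* helpers standing in for the shared-memory buffer plumbing:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Plain bitwise CRC-32 (IEEE 802.3 polynomial), so the sketch stands alone. */
static uint32_t crc32_ieee(const uint8_t * buf, size_t len)
{
        uint32_t crc = 0xFFFFFFFF;
        size_t   i;
        int      j;

        for (i = 0; i < len; ++i) {
                crc ^= buf[i];
                for (j = 0; j < 8; ++j)
                        crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
        }

        return crc ^ 0xFFFFFFFF;
}

/* Append a 4-byte trailer; pkt must have sizeof(uint32_t) spare bytes after len. */
static void pkt_add_crc(uint8_t * pkt, size_t len)
{
        uint32_t crc = crc32_ieee(pkt, len);

        memcpy(pkt + len, &crc, sizeof(crc)); /* memcpy avoids alignment traps */
}

/* Return 0 if the trailer matches the payload, non-zero otherwise. */
static int pkt_chk_crc(const uint8_t * pkt, size_t len)
{
        uint32_t crc;

        memcpy(&crc, pkt + len, sizeof(crc));

        return crc32_ieee(pkt, len) != crc;
}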
DONE_PART : NO_PART; return n; } else { if (partrd) { - memcpy(buf, sdu, count); + memcpy(buf, packet, count); sdb = shm_rdrbuff_get(ai.rdrb, idx); shm_du_buff_head_release(sdb, n); flow->part_idx = idx; @@ -1053,7 +1080,7 @@ int fset_add(struct flow_set * set, int fd) { int ret; - size_t sdus; + size_t packets; size_t i; if (set == NULL || fd < 0 || fd > SYS_MAX_FLOWS) @@ -1061,11 +1088,11 @@ int fset_add(struct flow_set * set, pthread_rwlock_wrlock(&ai.lock); - ret = shm_flow_set_add(ai.fqset, set->idx, ai.flows[fd].port_id); + ret = shm_flow_set_add(ai.fqset, set->idx, ai.flows[fd].flow_id); - sdus = shm_rbuff_queued(ai.flows[fd].rx_rb); - for (i = 0; i < sdus; i++) - shm_flow_set_notify(ai.fqset, ai.flows[fd].port_id, FLOW_PKT); + packets = shm_rbuff_queued(ai.flows[fd].rx_rb); + for (i = 0; i < packets; i++) + shm_flow_set_notify(ai.fqset, ai.flows[fd].flow_id, FLOW_PKT); pthread_rwlock_unlock(&ai.lock); @@ -1080,8 +1107,8 @@ void fset_del(struct flow_set * set, pthread_rwlock_wrlock(&ai.lock); - if (ai.flows[fd].port_id >= 0) - shm_flow_set_del(ai.fqset, set->idx, ai.flows[fd].port_id); + if (ai.flows[fd].flow_id >= 0) + shm_flow_set_del(ai.fqset, set->idx, ai.flows[fd].flow_id); pthread_rwlock_unlock(&ai.lock); } @@ -1096,12 +1123,12 @@ bool fset_has(const struct flow_set * set, pthread_rwlock_rdlock(&ai.lock); - if (ai.flows[fd].port_id < 0) { + if (ai.flows[fd].flow_id < 0) { pthread_rwlock_unlock(&ai.lock); return false; } - ret = (shm_flow_set_has(ai.fqset, set->idx, ai.flows[fd].port_id) == 1); + ret = (shm_flow_set_has(ai.fqset, set->idx, ai.flows[fd].flow_id) == 1); pthread_rwlock_unlock(&ai.lock); @@ -1177,35 +1204,35 @@ int fevent(struct flow_set * set, /* ipcp-dev functions. */ int np1_flow_alloc(pid_t n_pid, - int port_id, - qoscube_t qc) + int flow_id, + qosspec_t qs) { - return flow_init(port_id, n_pid, qc); + return flow_init(flow_id, n_pid, qs); } -int np1_flow_dealloc(int port_id) +int np1_flow_dealloc(int flow_id) { int fd; pthread_rwlock_rdlock(&ai.lock); - fd = ai.ports[port_id].fd; + fd = ai.ports[flow_id].fd; pthread_rwlock_unlock(&ai.lock); return fd; } -int np1_flow_resp(int port_id) +int np1_flow_resp(int flow_id) { int fd; - if (port_wait_assign(port_id) != PORT_ID_ASSIGNED) + if (port_wait_assign(flow_id) != PORT_ID_ASSIGNED) return -1; pthread_rwlock_rdlock(&ai.lock); - fd = ai.ports[port_id].fd; + fd = ai.ports[flow_id].fd; pthread_rwlock_unlock(&ai.lock); @@ -1243,29 +1270,29 @@ int ipcp_create_r(pid_t pid, int ipcp_flow_req_arr(pid_t pid, const uint8_t * dst, size_t len, - qoscube_t qc) + qosspec_t qs) { - irm_msg_t msg = IRM_MSG__INIT; - irm_msg_t * recv_msg; - int fd; + irm_msg_t msg = IRM_MSG__INIT; + irm_msg_t * recv_msg; + qosspec_msg_t qs_msg; + int fd; assert(dst != NULL); - msg.code = IRM_MSG_CODE__IPCP_FLOW_REQ_ARR; - msg.has_pid = true; - msg.pid = pid; - msg.has_hash = true; - msg.hash.len = len; - msg.hash.data = (uint8_t *) dst; - msg.has_qoscube = true; - msg.qoscube = qc; + msg.code = IRM_MSG_CODE__IPCP_FLOW_REQ_ARR; + msg.has_pid = true; + msg.pid = pid; + msg.has_hash = true; + msg.hash.len = len; + msg.hash.data = (uint8_t *) dst; + qs_msg = spec_to_msg(&qs); + msg.qosspec = &qs_msg; recv_msg = send_recv_irm_msg(&msg); - if (recv_msg == NULL) return -EIRMD; - if (!recv_msg->has_port_id || !recv_msg->has_pid) { + if (!recv_msg->has_flow_id || !recv_msg->has_pid) { irm_msg__free_unpacked(recv_msg, NULL); return -1; } @@ -1275,7 +1302,7 @@ int ipcp_flow_req_arr(pid_t pid, return -1; } - fd = flow_init(recv_msg->port_id, recv_msg->pid, 
qc); + fd = flow_init(recv_msg->flow_id, recv_msg->pid, qs); irm_msg__free_unpacked(recv_msg, NULL); @@ -1292,11 +1319,11 @@ int ipcp_flow_alloc_reply(int fd, assert(fd >= 0 && fd < SYS_MAX_FLOWS); msg.code = IRM_MSG_CODE__IPCP_FLOW_ALLOC_REPLY; - msg.has_port_id = true; + msg.has_flow_id = true; pthread_rwlock_rdlock(&ai.lock); - msg.port_id = ai.flows[fd].port_id; + msg.flow_id = ai.flows[fd].flow_id; pthread_rwlock_unlock(&ai.lock); @@ -1333,7 +1360,7 @@ int ipcp_flow_read(int fd, pthread_rwlock_rdlock(&ai.lock); - assert(flow->port_id >= 0); + assert(flow->flow_id >= 0); rb = flow->rx_rb; @@ -1352,6 +1379,8 @@ int ipcp_flow_read(int fd, if (idx < 0) return idx; *sdb = shm_rdrbuff_get(ai.rdrb, idx); + if (flow->spec.ber == 0 && chk_crc(*sdb) != 0) + continue; } while (frcti_rcv(flow->frcti, *sdb) != 0); return 0; @@ -1371,7 +1400,7 @@ int ipcp_flow_write(int fd, pthread_rwlock_rdlock(&ai.lock); - assert(flow->port_id >= 0); + assert(flow->flow_id >= 0); if ((flow->oflags & FLOWFACCMODE) == FLOWFRDONLY) { pthread_rwlock_unlock(&ai.lock); @@ -1387,9 +1416,15 @@ int ipcp_flow_write(int fd, return -ENOMEM; } + if (flow->spec.ber == 0 && add_crc(sdb) != 0) { + pthread_rwlock_unlock(&ai.lock); + shm_rdrbuff_remove(ai.rdrb, idx); + return -ENOMEM; + } + ret = shm_rbuff_write(flow->tx_rb, idx); if (ret == 0) - shm_flow_set_notify(flow->set, flow->port_id, FLOW_PKT); + shm_flow_set_notify(flow->set, flow->flow_id, FLOW_PKT); pthread_rwlock_unlock(&ai.lock); @@ -1435,7 +1470,7 @@ void ipcp_flow_fini(int fd) shm_rbuff_set_acl(ai.flows[fd].tx_rb, ACL_FLOWDOWN); shm_flow_set_notify(ai.flows[fd].set, - ai.flows[fd].port_id, + ai.flows[fd].flow_id, FLOW_DEALLOC); rx_rb = ai.flows[fd].rx_rb; @@ -1455,9 +1490,9 @@ int ipcp_flow_get_qoscube(int fd, pthread_rwlock_rdlock(&ai.lock); - assert(ai.flows[fd].port_id >= 0); + assert(ai.flows[fd].flow_id >= 0); - *cube = ai.flows[fd].cube; + *cube = qos_spec_to_cube(ai.flows[fd].spec); pthread_rwlock_unlock(&ai.lock); @@ -1491,14 +1526,14 @@ int local_flow_write(int fd, pthread_rwlock_rdlock(&ai.lock); - if (flow->port_id < 0) { + if (flow->flow_id < 0) { pthread_rwlock_unlock(&ai.lock); return -ENOTALLOC; } ret = shm_rbuff_write(flow->tx_rb, idx); if (ret == 0) - shm_flow_set_notify(flow->set, flow->port_id, FLOW_PKT); + shm_flow_set_notify(flow->set, flow->flow_id, FLOW_PKT); pthread_rwlock_unlock(&ai.lock); diff --git a/src/lib/frct.c b/src/lib/frct.c index 0f3173c5..db3572e3 100644 --- a/src/lib/frct.c +++ b/src/lib/frct.c @@ -31,16 +31,13 @@ #define TW_RESOLUTION 1 /* ms */ #define FRCT_PCILEN (sizeof(struct frct_pci)) -#define FRCT_CRCLEN (sizeof(uint32_t)) struct frct_cr { - bool drf; uint32_t lwe; uint32_t rwe; - uint32_t seqno; - bool conf; uint8_t cflags; + uint32_t seqno; time_t rto; /* ms */ time_t act; /* s */ @@ -67,15 +64,12 @@ enum frct_flags { FRCT_ACK = 0x03, /* ACK field valid */ FRCT_FC = 0x08, /* FC window valid */ FRCT_RDVZ = 0x10, /* Rendez-vous */ - FRCT_CFG = 0x20, /* Configuration */ - FRCT_MFGM = 0x40, /* More fragments */ - FRCT_CRC = 0x80, /* CRC present */ + FRCT_MFGM = 0x20, /* More fragments */ + FRCT_CRC = 0x40, /* CRC present */ }; struct frct_pci { - uint8_t flags; - - uint8_t cflags; + uint16_t flags; uint16_t window; @@ -85,8 +79,7 @@ struct frct_pci { #include <rxmwheel.c> -static struct frcti * frcti_create(int fd, - qoscube_t qc) +static struct frcti * frcti_create(int fd) { struct frcti * frcti; time_t delta_t; @@ -114,16 +107,15 @@ static struct frcti * frcti_create(int fd, delta_t = (frcti->mpl + frcti->a + frcti->r) / 
1000; - if (qc == QOS_CUBE_DATA) - frcti->snd_cr.cflags |= FRCTFRTX; - - frcti->snd_cr.conf = true; - frcti->snd_cr.inact = 3 * delta_t + 1; + frcti->snd_cr.inact = 3 * delta_t; frcti->snd_cr.act = now.tv_sec - (frcti->snd_cr.inact + 1); /* Initial rto. FIXME: recalc using Karn algorithm. */ frcti->snd_cr.rto = 120; - frcti->rcv_cr.inact = 2 * delta_t + 1; + if (ai.flows[fd].spec.loss == 0) + frcti->snd_cr.cflags |= FRCTFRTX; + + frcti->rcv_cr.inact = 2 * delta_t; frcti->rcv_cr.act = now.tv_sec - (frcti->rcv_cr.inact + 1); return frcti; @@ -138,7 +130,7 @@ static void frcti_destroy(struct frcti * frcti) { /* * FIXME: In case of reliable transmission we should - * make sure everything is acked. + * make sure everything we sent is acked. */ rxmwheel_clear(frcti->fd); @@ -148,24 +140,6 @@ static void frcti_destroy(struct frcti * frcti) free(frcti); } -static int frcti_setconf(struct frcti * frcti, - uint16_t flags) -{ - assert(frcti); - - pthread_rwlock_wrlock(&frcti->lock); - - if (frcti->snd_cr.cflags != flags) { - frcti->snd_cr.cflags = flags; - frcti->snd_cr.conf = true; - frcti->snd_cr.drf = true; - } - - pthread_rwlock_unlock(&frcti->lock); - - return 0; -} - static uint16_t frcti_getconf(struct frcti * frcti) { uint16_t ret; @@ -203,14 +177,6 @@ static ssize_t __frcti_queued_pdu(struct frcti * frcti) pos = frcti->rcv_cr.lwe & (RQ_SIZE - 1); idx = frcti->rq[pos]; if (idx != -1) { - struct shm_du_buff * sdb; - struct frct_pci * pci; - - sdb = shm_rdrbuff_get(ai.rdrb, idx); - pci = (struct frct_pci *) shm_du_buff_head(sdb) - 1; - if (pci->flags & FRCT_CFG) - frcti->rcv_cr.cflags = pci->cflags; - ++frcti->rcv_cr.lwe; frcti->rq[pos] = -1; } @@ -220,22 +186,6 @@ static ssize_t __frcti_queued_pdu(struct frcti * frcti) return idx; } -static int frct_chk_crc(uint8_t * head, - uint8_t * tail) -{ - uint32_t crc; - - mem_hash(HASH_CRC32, &crc, head, tail - head); - - return crc == *((uint32_t *) tail); -} - -static void frct_add_crc(uint8_t * head, - uint8_t * tail) -{ - mem_hash(HASH_CRC32, tail, head, tail - head); -} - static struct frct_pci * frcti_alloc_head(struct shm_du_buff * sdb) { struct frct_pci * pci; @@ -272,38 +222,20 @@ static int __frcti_snd(struct frcti * frcti, pci->flags |= FRCT_DATA; - if (snd_cr->cflags & FRCTFERRCHCK) { - uint8_t * tail = shm_du_buff_tail_alloc(sdb, FRCT_CRCLEN); - if (tail == NULL) { - pthread_rwlock_unlock(&frcti->lock); - return -1; - } - - frct_add_crc((uint8_t *) pci, tail); - - pci->flags |= FRCT_CRC; - } - /* Set DRF if there are no unacknowledged packets. */ if (snd_cr->seqno == snd_cr->lwe) pci->flags |= FRCT_DRF; - if (snd_cr->conf) { - /* FIXME: This packet must be acked! */ - pci->flags |= FRCT_CFG; - pci->cflags = snd_cr->cflags; - } - /* Choose a new sequence number if sender inactivity expired. */ if (now.tv_sec - snd_cr->act > snd_cr->inact) { /* There are no unacknowledged packets. */ assert(snd_cr->seqno == snd_cr->lwe); #ifdef CONFIG_OUROBOROS_DEBUG - frcti->snd_cr.seqno = 0; + snd_cr->seqno = 0; #else random_buffer(&snd_cr->seqno, sizeof(snd_cr->seqno)); #endif - frcti->snd_cr.lwe = frcti->snd_cr.seqno; + frcti->snd_cr.lwe = snd_cr->seqno; } pci->seqno = hton32(snd_cr->seqno); @@ -317,14 +249,13 @@ static int __frcti_snd(struct frcti * frcti, snd_cr->seqno++; snd_cr->act = now.tv_sec; - snd_cr->conf = false; pthread_rwlock_unlock(&frcti->lock); return 0; } -/* Returns 0 when idx contains an SDU for the application. */ +/* Returns 0 when idx contains a packet for the application. 
*/ static int __frcti_rcv(struct frcti * frcti, struct shm_du_buff * sdb) { @@ -349,43 +280,33 @@ static int __frcti_rcv(struct frcti * frcti, idx = shm_du_buff_get_idx(sdb); - /* PDU may be corrupted. */ - if (pci->flags & FRCT_CRC) { - uint8_t * tail = shm_du_buff_tail_release(sdb, FRCT_CRCLEN); - if (frct_chk_crc((uint8_t *) pci, tail)) - goto drop_packet; - } - seqno = ntoh32(pci->seqno); /* Check if receiver inactivity is true. */ if (now.tv_sec - rcv_cr->act > rcv_cr->inact) { /* Inactive receiver, check for DRF. */ if (pci->flags & FRCT_DRF) /* New run. */ - rcv_cr->lwe = seqno - 1; + rcv_cr->seqno = seqno; else goto drop_packet; } - if (seqno == rcv_cr->lwe + 1) { - rcv_cr->lwe = seqno; - /* Check for online reconfiguration. */ - if (pci->flags & FRCT_CFG) - rcv_cr->cflags = pci->cflags; + if (seqno == rcv_cr->seqno) { + ++rcv_cr->seqno; } else { /* Out of order. */ - if ((int32_t)(seqno - rcv_cr->lwe) <= 0) /* Duplicate. */ + if ((int32_t)(seqno - rcv_cr->seqno) < 0) /* Duplicate. */ goto drop_packet; if (rcv_cr->cflags & FRCTFRTX) { size_t pos = seqno & (RQ_SIZE - 1); - if ((seqno - rcv_cr->lwe) > RQ_SIZE /* Out of rq. */ + if ((seqno - rcv_cr->seqno) > RQ_SIZE /* Out of rq. */ || frcti->rq[pos] != -1) /* Duplicate in rq. */ goto drop_packet; /* Queue. */ frcti->rq[pos] = idx; ret = -EAGAIN; } else { - rcv_cr->lwe = seqno; + rcv_cr->seqno = seqno; } } @@ -398,18 +319,18 @@ static int __frcti_rcv(struct frcti * frcti, rcv_cr->act = now.tv_sec; + pthread_rwlock_unlock(&frcti->lock); + if (!(pci->flags & FRCT_DATA)) shm_rdrbuff_remove(ai.rdrb, idx); - pthread_rwlock_unlock(&frcti->lock); - rxmwheel_move(); return ret; drop_packet: - shm_rdrbuff_remove(ai.rdrb, idx); pthread_rwlock_unlock(&frcti->lock); + shm_rdrbuff_remove(ai.rdrb, idx); rxmwheel_move(); return -EAGAIN; } diff --git a/src/lib/hash.c b/src/lib/hash.c index 9b74967b..10e10c13 100644 --- a/src/lib/hash.c +++ b/src/lib/hash.c @@ -23,6 +23,10 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#endif + #include "config.h" #include <ouroboros/hash.h> diff --git a/src/lib/hashtable.c b/src/lib/hashtable.c index be5c3ffd..68a0f545 100644 --- a/src/lib/hashtable.c +++ b/src/lib/hashtable.c @@ -20,6 +20,10 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#endif + #include <ouroboros/hashtable.h> #include <ouroboros/list.h> #include <ouroboros/errno.h> diff --git a/src/lib/ipcpd_messages.proto b/src/lib/ipcpd_messages.proto index 454af0dc..ae1014ac 100644 --- a/src/lib/ipcpd_messages.proto +++ b/src/lib/ipcpd_messages.proto @@ -23,6 +23,7 @@ syntax = "proto2"; import "ipcp_config.proto"; +import "qosspec.proto"; enum ipcp_msg_code { IPCP_BOOTSTRAP = 1; @@ -41,9 +42,9 @@ enum ipcp_msg_code { message ipcp_msg { required ipcp_msg_code code = 1; optional bytes hash = 2; - optional int32 port_id = 3; + optional int32 flow_id = 3; optional string dst = 4; - optional uint32 qoscube = 5; + optional qosspec_msg qosspec = 5; optional ipcp_config_msg conf = 6; optional int32 pid = 7; optional layer_info_msg layer_info = 8; diff --git a/src/lib/irm.c b/src/lib/irm.c index 6a9f837e..d88475c4 100644 --- a/src/lib/irm.c +++ b/src/lib/irm.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200809L +#endif #include <ouroboros/errno.h> #include <ouroboros/hash.h> @@ -315,10 +319,10 @@ static int check_prog(const char * prog) static int check_prog_path(char ** prog) { - char * path = getenv("PATH"); - char * path_end = path + strlen(path) + 1; + char * path; + char * path_end; char * pstart; - char * pstop = path; + char * pstop; char * tmp; char * tstop; char * tstart; @@ -327,9 +331,15 @@ static int check_prog_path(char ** prog) assert(prog); - if (*prog == NULL || path == NULL) + if (*prog == NULL) return -EINVAL; + path = getenv("PATH"); + if (path == NULL) + return -ENOENT; + + pstop = path; + path_end = path + strlen(path) + 1; if (!strlen(path) || strchr(*prog, '/') != NULL) { if ((ret = check_prog(*prog)) < 0) return ret; diff --git a/src/lib/irmd_messages.proto b/src/lib/irmd_messages.proto index 16dfe828..351b4a8e 100644 --- a/src/lib/irmd_messages.proto +++ b/src/lib/irmd_messages.proto @@ -23,6 +23,7 @@ syntax = "proto2"; import "ipcp_config.proto"; +import "qosspec.proto"; enum irm_msg_code { IRM_CREATE_IPCP = 1; @@ -66,8 +67,8 @@ message irm_msg { optional sint32 response = 8; optional string dst = 9; optional bytes hash = 10; - optional sint32 port_id = 11; - optional sint32 qoscube = 12; + optional sint32 flow_id = 11; + optional qosspec_msg qosspec = 12; optional ipcp_config_msg conf = 13; optional uint32 opts = 14; repeated ipcp_info_msg ipcps = 15; diff --git a/src/lib/md5.c b/src/lib/md5.c index 6f2b2e36..959865fe 100644 --- a/src/lib/md5.c +++ b/src/lib/md5.c @@ -40,6 +40,10 @@ * or FITNESS FOR A PARTICULAR PURPOSE. Use this program at your own risk! */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#endif + #include <ouroboros/endian.h> #include <ouroboros/md5.h> diff --git a/src/lib/qos.c b/src/lib/qos.c index bee6ed71..8607031e 100644 --- a/src/lib/qos.c +++ b/src/lib/qos.c @@ -28,66 +28,61 @@ #include <string.h> qosspec_t qos_raw = { - .delay = UINT32_MAX, - .bandwidth = 0, - .availability = 0, - .loss = 1, - .in_order = 0, - .maximum_interruption = UINT32_MAX + .delay = UINT32_MAX, + .bandwidth = 0, + .availability = 0, + .loss = 1, + .ber = 1, + .in_order = 0, + .max_gap = UINT32_MAX +}; + +qosspec_t qos_raw_no_errors = { + .delay = UINT32_MAX, + .bandwidth = 0, + .availability = 0, + .loss = 1, + .ber = 0, + .in_order = 0, + .max_gap = UINT32_MAX }; qosspec_t qos_best_effort = { - .delay = UINT32_MAX, - .bandwidth = 0, - .availability = 0, - .loss = 1, - .in_order = 1, - .maximum_interruption = UINT32_MAX + .delay = UINT32_MAX, + .bandwidth = 0, + .availability = 0, + .loss = 1, + .ber = 0, + .in_order = 1, + .max_gap = UINT32_MAX }; -qosspec_t qos_video = { - .delay = 100, - .bandwidth = UINT64_MAX, - .availability = 3, - .loss = 1, - .in_order = 1, - .maximum_interruption = 100 +qosspec_t qos_video = { + .delay = 100, + .bandwidth = UINT64_MAX, + .availability = 3, + .loss = 1, + .ber = 0, + .in_order = 1, + .max_gap = 100 }; qosspec_t qos_voice = { - .delay = 50, - .bandwidth = 100000, - .availability = 5, - .loss = 1, - .in_order = 1, - .maximum_interruption = 50 + .delay = 50, + .bandwidth = 100000, + .availability = 5, + .loss = 1, + .ber = 0, + .in_order = 1, + .max_gap = 50 }; qosspec_t qos_data = { - .delay = 1000, - .bandwidth = 0, - .availability = 0, - .in_order = 1, - .loss = 0, - .maximum_interruption = 2000 + .delay = 1000, + .bandwidth = 0, + .availability = 0, + .loss = 0, + .ber = 0, + .in_order = 1, + 
.max_gap = 2000 }; - -int qosspec_init(qosspec_t * qs) -{ - if (qs == NULL) - return -EINVAL; - - *qs = qos_best_effort; - - return 0; -} - -int qosspec_fini(qosspec_t * qs) -{ - if (qs == NULL) - return -EINVAL; - - memset(qs, 0, sizeof(*qs)); - - return 0; -} diff --git a/src/lib/qoscube.c b/src/lib/qoscube.c index 5dfa35ad..efca0e42 100644 --- a/src/lib/qoscube.c +++ b/src/lib/qoscube.c @@ -25,38 +25,20 @@ #include <string.h> + + qoscube_t qos_spec_to_cube(qosspec_t qs) { - if (qs.loss == 0) - return QOS_CUBE_DATA; - else if (qs.delay <= qos_voice.delay && + if (qs.delay <= qos_voice.delay && qs.bandwidth <= qos_voice.bandwidth && qs.availability >= qos_voice.availability && - qs.maximum_interruption <= qos_voice.maximum_interruption) + qs.max_gap <= qos_voice.max_gap) return QOS_CUBE_VOICE; else if (qs.delay <= qos_video.delay && qs.bandwidth <= qos_video.bandwidth && qs.availability >= qos_video.availability && - qs.maximum_interruption <= qos_video.maximum_interruption) + qs.max_gap <= qos_video.max_gap) return QOS_CUBE_VIDEO; - else if (qs.in_order == 1) - return QOS_CUBE_BE; else - return QOS_CUBE_RAW; -} - -qosspec_t qos_cube_to_spec(qoscube_t qc) -{ - switch (qc) { - case QOS_CUBE_VOICE: - return qos_voice; - case QOS_CUBE_VIDEO: - return qos_video; - case QOS_CUBE_BE: - return qos_best_effort; - case QOS_CUBE_DATA: - return qos_data; - default: - return qos_raw; - } + return QOS_CUBE_BE; } diff --git a/src/lib/qosspec.proto b/src/lib/qosspec.proto new file mode 100644 index 00000000..f355e345 --- /dev/null +++ b/src/lib/qosspec.proto @@ -0,0 +1,33 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2018 + * + * QoS specification message + * + * Dimitri Staessens <dimitri.staessens@ugent.be> + * Sander Vrijders <sander.vrijders@ugent.be> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * version 2.1 as published by the Free Software Foundation. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +syntax = "proto2"; + +message qosspec_msg { + required uint32 delay = 1; /* In ms */ + required uint64 bandwidth = 2; /* In bits/s */ + required uint32 availability = 3; /* Class of 9s */ + required uint32 loss = 4; /* Packet loss */ + required uint32 ber = 5; /* Bit error rate, ppb */ + required uint32 in_order = 6; /* In-order delivery */ + required uint32 max_gap = 7; /* In ms */ +}; diff --git a/src/lib/rib.c b/src/lib/rib.c index 685575e5..88db9ed8 100644 --- a/src/lib/rib.c +++ b/src/lib/rib.c @@ -101,6 +101,9 @@ static int rib_read(const char * path, char comp[RIB_PATH_LEN + 1]; char * c; + if (strlen(path) > RIB_PATH_LEN) + return -1; + strcpy(comp, path + 1); c = strstr(comp, "/"); @@ -183,6 +186,9 @@ static size_t __getattr(const char * path, char comp[RIB_PATH_LEN + 1]; char * c; + if (strlen(path) > RIB_PATH_LEN) + return -1; + strcpy(comp, path + 1); c = strstr(comp, "/"); @@ -282,7 +288,8 @@ int rib_init(const char * mountpt) if (stat(rib.mnt, &st) == -1) switch(errno) { case ENOENT: - mkdir(rib.mnt, 0777); + if (mkdir(rib.mnt, 0777)) + return -1; break; case ENOTCONN: fuse_unmount(rib.mnt, rib.ch); @@ -385,6 +392,12 @@ int rib_reg(const char * path, return -ENOMEM; } + if (strlen(path) > RIB_PATH_LEN) { + pthread_rwlock_unlock(&rib.lock); + free(rc); + return -1; + } + strcpy(rc->path, path); rc->ops = ops; diff --git a/src/lib/rxmwheel.c b/src/lib/rxmwheel.c index e5891081..697c6a48 100644 --- a/src/lib/rxmwheel.c +++ b/src/lib/rxmwheel.c @@ -192,7 +192,7 @@ static int rxmwheel_move(void) continue; } - shm_flow_set_notify(f->set, f->port_id, FLOW_PKT); + shm_flow_set_notify(f->set, f->flow_id, FLOW_PKT); /* Reschedule. */ shm_du_buff_wait_ack(sdb); diff --git a/src/lib/sha3.c b/src/lib/sha3.c index 6179af22..f6a82c57 100644 --- a/src/lib/sha3.c +++ b/src/lib/sha3.c @@ -42,6 +42,10 @@ * or FITNESS FOR A PARTICULAR PURPOSE. Use this program at your own risk! 
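Aside (illustrative sketch, not part of the commit): the qosspec_msg added above is normally embedded in irm_msg/ipcp_msg, but protobuf-c also emits stand-alone (un)packing routines for it. A sketch of serializing one to a byte buffer, assuming the conventional generated header name qosspec.pb-c.h and the qosspec_msg_t typedef that dev.c and sockets.c use elsewhere in this diff; a receiver would mirror this with qosspec_msg__unpack() and qosspec_msg__free_unpacked().

#include <stdint.h>
#include <stdlib.h>

#include "qosspec.pb-c.h" /* assumed: protobuf-c output for qosspec.proto */

/* Pack a qosspec_msg into a freshly allocated buffer; the caller frees it. */
static uint8_t * qosspec_msg_serialize(qosspec_msg_t * msg,
                                       size_t *        len)
{
        uint8_t * buf;

        *len = qosspec_msg__get_packed_size(msg);

        buf = malloc(*len);
        if (buf == NULL)
                return NULL;

        qosspec_msg__pack(msg, buf);

        return buf;
}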
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#endif + #include <ouroboros/endian.h> #include <ouroboros/sha3.h> diff --git a/src/lib/shm_flow_set.c b/src/lib/shm_flow_set.c index bb9e3caa..1c94c599 100644 --- a/src/lib/shm_flow_set.c +++ b/src/lib/shm_flow_set.c @@ -64,7 +64,7 @@ #define fqueue_ptr(fs, idx) (fs->fqueues + (SHM_BUFFER_SIZE) * idx) struct portevent { - int port_id; + int flow_id; int event; }; @@ -98,17 +98,14 @@ struct shm_flow_set * shm_flow_set_create() mask = umask(0); shm_fd = shm_open(fn, O_CREAT | O_RDWR, 0666); - if (shm_fd == -1) { - free(set); - return NULL; - } + if (shm_fd == -1) + goto fail_shm_open; umask(mask); if (ftruncate(shm_fd, SHM_FLOW_SET_FILE_SIZE - 1) < 0) { - free(set); close(shm_fd); - return NULL; + goto fail_shm_open; } shm_base = mmap(NULL, @@ -120,11 +117,8 @@ struct shm_flow_set * shm_flow_set_create() close(shm_fd); - if (shm_base == MAP_FAILED) { - shm_unlink(fn); - free(set); - return NULL; - } + if (shm_base == MAP_FAILED) + goto fail_mmap; set->mtable = shm_base; set->heads = (size_t *) (set->mtable + SYS_MAX_FLOWS); @@ -133,21 +127,27 @@ struct shm_flow_set * shm_flow_set_create() set->lock = (pthread_mutex_t *) (set->fqueues + PROG_MAX_FQUEUES * (SHM_BUFFER_SIZE)); - pthread_mutexattr_init(&mattr); + if (pthread_mutexattr_init(&mattr)) + goto fail_mmap; + #ifdef HAVE_ROBUST_MUTEX - pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST); + if (pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST)) + goto fail_mmap; #endif - pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED); - pthread_mutex_init(set->lock, &mattr); + if (pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED) || + pthread_mutex_init(set->lock, &mattr) || + pthread_condattr_init(&cattr) || + pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED)) + goto fail_mmap; - pthread_condattr_init(&cattr); - pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED); #ifndef __APPLE__ - pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK); + if (pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK)) + goto fail_mmap; #endif for (i = 0; i < PROG_MAX_FQUEUES; ++i) { set->heads[i] = 0; - pthread_cond_init(&set->conds[i], &cattr); + if (pthread_cond_init(&set->conds[i], &cattr)) + goto fail_mmap; } for (i = 0; i < SYS_MAX_FLOWS; ++i) @@ -156,6 +156,12 @@ struct shm_flow_set * shm_flow_set_create() set->pid = getpid(); return set; + + fail_mmap: + shm_unlink(fn); + fail_shm_open: + free(set); + return NULL; } struct shm_flow_set * shm_flow_set_open(pid_t pid) @@ -262,20 +268,20 @@ void shm_flow_set_zero(struct shm_flow_set * set, int shm_flow_set_add(struct shm_flow_set * set, size_t idx, - int port_id) + int flow_id) { assert(set); - assert(!(port_id < 0) && port_id < SYS_MAX_FLOWS); + assert(!(flow_id < 0) && flow_id < SYS_MAX_FLOWS); assert(idx < PROG_MAX_FQUEUES); pthread_mutex_lock(set->lock); - if (set->mtable[port_id] != -1) { + if (set->mtable[flow_id] != -1) { pthread_mutex_unlock(set->lock); return -EPERM; } - set->mtable[port_id] = idx; + set->mtable[flow_id] = idx; pthread_mutex_unlock(set->lock); @@ -284,33 +290,33 @@ int shm_flow_set_add(struct shm_flow_set * set, void shm_flow_set_del(struct shm_flow_set * set, size_t idx, - int port_id) + int flow_id) { assert(set); - assert(!(port_id < 0) && port_id < SYS_MAX_FLOWS); + assert(!(flow_id < 0) && flow_id < SYS_MAX_FLOWS); assert(idx < PROG_MAX_FQUEUES); pthread_mutex_lock(set->lock); - if (set->mtable[port_id] == (ssize_t) idx) - set->mtable[port_id] = -1; + if 
(set->mtable[flow_id] == (ssize_t) idx) + set->mtable[flow_id] = -1; pthread_mutex_unlock(set->lock); } int shm_flow_set_has(struct shm_flow_set * set, size_t idx, - int port_id) + int flow_id) { int ret = 0; assert(set); - assert(!(port_id < 0) && port_id < SYS_MAX_FLOWS); + assert(!(flow_id < 0) && flow_id < SYS_MAX_FLOWS); assert(idx < PROG_MAX_FQUEUES); pthread_mutex_lock(set->lock); - if (set->mtable[port_id] == (ssize_t) idx) + if (set->mtable[flow_id] == (ssize_t) idx) ret = 1; pthread_mutex_unlock(set->lock); @@ -319,25 +325,25 @@ int shm_flow_set_has(struct shm_flow_set * set, } void shm_flow_set_notify(struct shm_flow_set * set, - int port_id, + int flow_id, int event) { assert(set); - assert(!(port_id < 0) && port_id < SYS_MAX_FLOWS); + assert(!(flow_id < 0) && flow_id < SYS_MAX_FLOWS); pthread_mutex_lock(set->lock); - if (set->mtable[port_id] == -1) { + if (set->mtable[flow_id] == -1) { pthread_mutex_unlock(set->lock); return; } - (fqueue_ptr(set, set->mtable[port_id]) + - (set->heads[set->mtable[port_id]]))->port_id = port_id; - (fqueue_ptr(set, set->mtable[port_id]) + - (set->heads[set->mtable[port_id]])++)->event = event; + (fqueue_ptr(set, set->mtable[flow_id]) + + (set->heads[set->mtable[flow_id]]))->flow_id = flow_id; + (fqueue_ptr(set, set->mtable[flow_id]) + + (set->heads[set->mtable[flow_id]])++)->event = event; - pthread_cond_signal(&set->conds[set->mtable[port_id]]); + pthread_cond_signal(&set->conds[set->mtable[flow_id]]); pthread_mutex_unlock(set->lock); } diff --git a/src/lib/shm_rbuff.c b/src/lib/shm_rbuff.c index 453f5183..a6eab699 100644 --- a/src/lib/shm_rbuff.c +++ b/src/lib/shm_rbuff.c @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * Ring buffer implementations for incoming SDUs + * Ring buffer implementations for incoming packets * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -63,10 +63,10 @@ struct shm_rbuff { size_t * tail; /* start of ringbuffer tail */ size_t * acl; /* access control */ pthread_mutex_t * lock; /* lock all free space in shm */ - pthread_cond_t * add; /* SDU arrived */ - pthread_cond_t * del; /* SDU removed */ + pthread_cond_t * add; /* packet arrived */ + pthread_cond_t * del; /* packet removed */ pid_t pid; /* pid of the owner */ - int port_id; /* port_id of the flow */ + int flow_id; /* flow_id of the flow */ }; void shm_rbuff_close(struct shm_rbuff * rb) @@ -81,7 +81,7 @@ void shm_rbuff_close(struct shm_rbuff * rb) #define MM_FLAGS (PROT_READ | PROT_WRITE) struct shm_rbuff * rbuff_create(pid_t pid, - int port_id, + int flow_id, int flags) { struct shm_rbuff * rb; @@ -89,7 +89,7 @@ struct shm_rbuff * rbuff_create(pid_t pid, ssize_t * shm_base; char fn[FN_MAX_CHARS]; - sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", pid, port_id); + sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", pid, flow_id); rb = malloc(sizeof(*rb)); if (rb == NULL) @@ -116,7 +116,7 @@ struct shm_rbuff * rbuff_create(pid_t pid, rb->add = (pthread_cond_t *) (rb->lock + 1); rb->del = rb->add + 1; rb->pid = pid; - rb->port_id = port_id; + rb->flow_id = flow_id; return rb; @@ -131,7 +131,7 @@ struct shm_rbuff * rbuff_create(pid_t pid, } struct shm_rbuff * shm_rbuff_create(pid_t pid, - int port_id) + int flow_id) { struct shm_rbuff * rb; pthread_mutexattr_t mattr; @@ -140,7 +140,7 @@ struct shm_rbuff * shm_rbuff_create(pid_t pid, mask = umask(0); - rb = rbuff_create(pid, port_id, O_CREAT | O_EXCL | O_RDWR); + rb = rbuff_create(pid, flow_id, O_CREAT | O_EXCL | O_RDWR); umask(mask); @@ -175,7 +175,7 @@ struct shm_rbuff * 
shm_rbuff_create(pid_t pid, *rb->tail = 0; rb->pid = pid; - rb->port_id = port_id; + rb->flow_id = flow_id; pthread_mutexattr_destroy(&mattr); pthread_condattr_destroy(&cattr); @@ -197,9 +197,9 @@ struct shm_rbuff * shm_rbuff_create(pid_t pid, } struct shm_rbuff * shm_rbuff_open(pid_t pid, - int port_id) + int flow_id) { - return rbuff_create(pid, port_id, O_RDWR); + return rbuff_create(pid, flow_id, O_RDWR); } #if (defined(SHM_RBUFF_LOCKLESS) && \ diff --git a/src/lib/shm_rbuff_ll.c b/src/lib/shm_rbuff_ll.c index c488f274..0fc9ae7b 100644 --- a/src/lib/shm_rbuff_ll.c +++ b/src/lib/shm_rbuff_ll.c @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * Lockless ring buffer for incoming SDUs + * Lockless ring buffer for incoming packets * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -29,7 +29,7 @@ void shm_rbuff_destroy(struct shm_rbuff * rb) assert(rb); - sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->port_id); + sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->flow_id); shm_rbuff_close(rb); diff --git a/src/lib/shm_rbuff_pthr.c b/src/lib/shm_rbuff_pthr.c index 3b7ea2d4..51d801f6 100644 --- a/src/lib/shm_rbuff_pthr.c +++ b/src/lib/shm_rbuff_pthr.c @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * Ring buffer for incoming SDUs + * Ring buffer for incoming packets * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -33,7 +33,7 @@ void shm_rbuff_destroy(struct shm_rbuff * rb) pthread_mutex_unlock(rb->lock); #endif - sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->port_id); + sprintf(fn, SHM_RBUFF_PREFIX "%d.%d", rb->pid, rb->flow_id); shm_rbuff_close(rb); diff --git a/src/lib/shm_rdrbuff.c b/src/lib/shm_rdrbuff.c index 182ad084..31d9f2b6 100644 --- a/src/lib/shm_rdrbuff.c +++ b/src/lib/shm_rdrbuff.c @@ -82,7 +82,7 @@ struct shm_rdrbuff { size_t * tail; /* start of ringbuffer tail */ pthread_mutex_t * lock; /* lock all free space in shm */ pthread_cond_t * full; /* flag when full */ - pthread_cond_t * healthy; /* flag when SDU is read */ + pthread_cond_t * healthy; /* flag when packet is read */ pid_t * pid; /* pid of the irmd owner */ }; diff --git a/src/lib/sockets.c b/src/lib/sockets.c index b148b7ca..85726783 100644 --- a/src/lib/sockets.c +++ b/src/lib/sockets.c @@ -165,3 +165,38 @@ char * ipcp_sock_path(pid_t pid) return full_name; } + +qosspec_msg_t spec_to_msg(qosspec_t * qs) +{ + qosspec_t spec; + qosspec_msg_t msg = QOSSPEC_MSG__INIT; + + spec = (qs == NULL ? 
qos_raw : *qs); + + msg.delay = spec.delay; + msg.bandwidth = spec.bandwidth; + msg.availability = spec.availability; + msg.loss = spec.loss; + msg.ber = spec.ber; + msg.in_order = spec.in_order; + msg.max_gap = spec.max_gap; + + return msg; +} + +qosspec_t msg_to_spec(qosspec_msg_t * msg) +{ + qosspec_t spec; + + assert(msg); + + spec.delay = msg->delay; + spec.bandwidth = msg->bandwidth; + spec.availability = msg->availability; + spec.loss = msg->loss; + spec.ber = msg->ber; + spec.in_order = msg->in_order; + spec.max_gap = msg->max_gap; + + return spec; +} diff --git a/src/tools/irm/irm_ipcp_bootstrap.c b/src/tools/irm/irm_ipcp_bootstrap.c index e1f75956..3d9386ad 100644 --- a/src/tools/irm/irm_ipcp_bootstrap.c +++ b/src/tools/irm/irm_ipcp_bootstrap.c @@ -287,10 +287,15 @@ int do_bootstrap_ipcp(int argc, if (autobind && conf.type != IPCP_NORMAL) { printf("Can only bind normal IPCPs, " - "autobind disabled.\n"); + "autobind disabled.\n\n"); autobind = false; } + if (strlen(layer) > LAYER_NAME_SIZE) { + printf("Layer name too big.\n\n"); + goto fail_usage; + } + strcpy(conf.layer_info.layer_name, layer); if (conf.type != IPCP_UDP) conf.layer_info.dir_hash_algo = hash_algo; diff --git a/src/tools/irm/irm_ipcp_destroy.c b/src/tools/irm/irm_ipcp_destroy.c index cb86b167..2d5ed983 100644 --- a/src/tools/irm/irm_ipcp_destroy.c +++ b/src/tools/irm/irm_ipcp_destroy.c @@ -89,6 +89,7 @@ int do_destroy_ipcp(int argc, break; } + free(ipcps); return 0; fail_destroy: diff --git a/src/tools/irm/irm_unregister.c b/src/tools/irm/irm_unregister.c index 52491b42..137bc7e9 100644 --- a/src/tools/irm/irm_unregister.c +++ b/src/tools/irm/irm_unregister.c @@ -69,7 +69,7 @@ int do_unregister(int argc, char ** argv) char * ipcp[MAX_IPCPS]; size_t ipcp_len = 0; struct ipcp_info * ipcps; - size_t len; + ssize_t len; size_t i; while (argc > 0) { @@ -103,7 +103,10 @@ int do_unregister(int argc, char ** argv) } len = irm_list_ipcps(&ipcps); - for (i = 0; i < len; ++i) { + if (len < 0) + return -1; + + for (i = 0; i < (size_t) len; ++i) { size_t j; for (j = 0; j < layers_len; j++) { if (wildcard_match(ipcps[i].layer, layers[j]) == 0) { diff --git a/src/tools/ocbr/ocbr.c b/src/tools/ocbr/ocbr.c index e2bd84af..12983da3 100644 --- a/src/tools/ocbr/ocbr.c +++ b/src/tools/ocbr/ocbr.c @@ -60,7 +60,7 @@ struct s { static void usage(void) { printf("Usage: cbr [OPTION]...\n" - "Sends SDUs from client to server at a constant bit rate.\n\n" + "Sends packets from client to server at a constant bit rate.\n\n" " -l, --listen Run in server mode\n" "\n" "Server options:\n" @@ -70,10 +70,10 @@ static void usage(void) "Client options:\n" " -n, --server_apn Specify the name of the server.\n" " -d, --duration Duration for sending (s)\n" - " -f, --flood Send SDUs as fast as possible\n" - " -s, --size SDU size (B, max %ld B)\n" + " -f, --flood Send packets as fast as possible\n" + " -s, --size packet size (B, max %ld B)\n" " -r, --rate Rate (b/s)\n" - " --sleep Sleep in between sending SDUs\n" + " --sleep Sleep in between sending packets\n" "\n\n" " --help Display this help text and exit\n", BUF_SIZE); @@ -82,7 +82,7 @@ static void usage(void) int main(int argc, char ** argv) { int duration = 60; /* One minute test */ - int size = 1000; /* 1000 byte SDUs */ + int size = 1000; /* 1000 byte packets */ long rate = 1000000; /* 1 Mb/s */ bool flood = false; bool sleep = false; diff --git a/src/tools/ocbr/ocbr_client.c b/src/tools/ocbr/ocbr_client.c index 026ab001..63b43721 100644 --- a/src/tools/ocbr/ocbr_client.c +++ 
b/src/tools/ocbr/ocbr_client.c @@ -155,7 +155,7 @@ int client_main(char * server, ms = ts_diff_ms(&start, &end); printf("sent statistics: " - "%9ld SDUs, %12ld bytes in %9d ms, %4.4f Mb/s\n", + "%9ld packets, %12ld bytes in %9d ms, %4.4f Mb/s\n", seqnr, seqnr * size, ms, (seqnr / (ms * 1000.0)) * size * 8.0); flow_dealloc(fd); diff --git a/src/tools/ocbr/ocbr_server.c b/src/tools/ocbr/ocbr_server.c index 4f080eff..75983201 100644 --- a/src/tools/ocbr/ocbr_server.c +++ b/src/tools/ocbr/ocbr_server.c @@ -90,8 +90,8 @@ static void handle_flow(int fd) bool stop = false; - long sdus = 0; - long sdus_intv = 0; + long packets = 0; + long packets_intv = 0; long bytes_read = 0; long bytes_read_intv = 0; @@ -109,7 +109,7 @@ static void handle_flow(int fd) if (count > 0) { clock_gettime(CLOCK_REALTIME, &alive); - sdus++; + packets++; bytes_read += count; } @@ -121,17 +121,18 @@ static void handle_flow(int fd) if (stop || ts_diff_ms(&now, &iv_end) < 0) { long us = ts_diff_us(&iv_start, &now); - printf("Flow %4d: %9ld SDUs (%12ld bytes) in %9ld ms" - " => %9.4f p/s, %9.4f Mb/s\n", + printf("Flow %4d: %9ld packets (%12ld bytes) in %9ld ms" + " => %9.4f pps, %9.4f Mbps\n", fd, - sdus - sdus_intv, + packets - packets_intv, bytes_read - bytes_read_intv, us / 1000, - ((sdus - sdus_intv) / (double) us) * MILLION, + ((packets - packets_intv) / (double) us) + * MILLION, 8 * ((bytes_read - bytes_read_intv) / (double)(us))); iv_start = iv_end; - sdus_intv = sdus; + packets_intv = packets; bytes_read_intv = bytes_read; ts_add(&iv_start, &intv, &iv_end); } diff --git a/src/tools/oecho/oecho.c b/src/tools/oecho/oecho.c index cc173988..b6a74aa5 100644 --- a/src/tools/oecho/oecho.c +++ b/src/tools/oecho/oecho.c @@ -72,7 +72,7 @@ static int server_main(void) count = flow_read(fd, &buf, BUF_SIZE); if (count < 0) { - printf("Failed to read SDU.\n"); + printf("Failed to read packet.\n"); flow_dealloc(fd); continue; } @@ -80,7 +80,7 @@ static int server_main(void) printf("Message from client is %.*s.\n", (int) count, buf); if (flow_write(fd, buf, count) == -1) { - printf("Failed to write SDU.\n"); + printf("Failed to write packet.\n"); flow_dealloc(fd); continue; } @@ -105,14 +105,14 @@ static int client_main(void) } if (flow_write(fd, message, strlen(message) + 1) < 0) { - printf("Failed to write SDU.\n"); + printf("Failed to write packet.\n"); flow_dealloc(fd); return -1; } count = flow_read(fd, buf, BUF_SIZE); if (count < 0) { - printf("Failed to read SDU.\n"); + printf("Failed to read packet.\n"); flow_dealloc(fd); return -1; } diff --git a/src/tools/operf/operf.c b/src/tools/operf/operf.c index 137e8647..92555c23 100644 --- a/src/tools/operf/operf.c +++ b/src/tools/operf/operf.c @@ -119,8 +119,8 @@ static void usage(void) " -d, --duration Test duration (default 60s)\n" " -r, --rate Rate (b/s)\n" " -s, --size Payload size (B, default 1500)\n" - " -f, --flood Send SDUs as fast as possible\n" - " --sleep Sleep in between sending SDUs\n" + " -f, --flood Send packets as fast as possible\n" + " --sleep Sleep in between sending packets\n" "\n" " --help Display this help text and exit\n"); } diff --git a/src/tools/operf/operf_client.c b/src/tools/operf/operf_client.c index c8873c54..6862944e 100644 --- a/src/tools/operf/operf_client.c +++ b/src/tools/operf/operf_client.c @@ -120,11 +120,12 @@ void * writer(void * o) msg = (struct msg *) buf; if (client.flood) - printf("Flooding %s with %d byte SDUs for %d seconds.\n\n", + printf("Flooding %s with %d byte packets for %d seconds.\n\n", client.server_name, client.size, 
client.duration / 1000); else - printf("Sending %d byte SDUs for %d s to %s at %.3lf Mb/s.\n\n", + printf("Sending %d byte packets for %d s to %s " + "at %.3lf Mb/s.\n\n", client.size, client.duration / 1000, client.server_name, client.rate / (double) MILLION); @@ -141,7 +142,7 @@ void * writer(void * o) msg->id = client.sent; if (flow_write(*fdp, buf, client.size) == -1) { - printf("Failed to send SDU.\n"); + printf("Failed to send packet.\n"); flow_dealloc(*fdp); free(buf); return (void *) -1; @@ -225,7 +226,7 @@ int client_main(void) printf("\n"); printf("--- %s perf statistics ---\n", client.server_name); - printf("%ld SDUs transmitted, ", client.sent); + printf("%ld packets transmitted, ", client.sent); printf("%ld received, ", client.rcvd); printf("%ld%% packet loss, ", client.sent == 0 ? 0 : 100 - ((100 * client.rcvd) / client.sent)); diff --git a/src/tools/oping/oping_client.c b/src/tools/oping/oping_client.c index 0f7695b5..a978e659 100644 --- a/src/tools/oping/oping_client.c +++ b/src/tools/oping/oping_client.c @@ -176,7 +176,7 @@ void * writer(void * o) msg->tv_nsec = now.tv_nsec; if (flow_write(*fdp, buf, client.size) == -1) { - printf("Failed to send SDU.\n"); + printf("Failed to send packet.\n"); flow_dealloc(*fdp); free(buf); return (void *) -1; @@ -253,7 +253,7 @@ static int client_main(void) printf("\n"); printf("--- %s ping statistics ---\n", client.s_apn); - printf("%d SDUs transmitted, ", client.sent); + printf("%d packets transmitted, ", client.sent); printf("%d received, ", client.rcvd); printf("%zd out-of-order, ", client.ooo); printf("%.0lf%% packet loss, ", client.sent == 0 ? 0 : |