Diffstat (limited to 'src/ipcpd')
31 files changed, 826 insertions, 738 deletions
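One note for orientation before the per-file hunks: the dt.c changes further down fold the former dt_pci module into dt.c, so the data-transfer header layout is now defined there. A minimal sketch of that layout follows, with field widths taken from the dt.c hunk; the helper name dt_head_size and the standalone form are illustrative only, not part of the patch.

#include <stddef.h>
#include <stdint.h>

/* Fixed field widths, as defined in the dt.c hunk below. */
#define QOS_LEN 1
#define TTL_LEN 1

/*
 * DT header layout, matching the offsets computed in dt_init():
 *
 *   offset 0                   dst_addr  (addr_size bytes)
 *   qc_o  = addr_size          qos cube  (QOS_LEN bytes)
 *   ttl_o = qc_o + QOS_LEN     ttl       (TTL_LEN bytes)
 *   eid_o = ttl_o + TTL_LEN    dst eid   (eid_size bytes)
 *
 * head_size = eid_o + eid_size is what dt_pci_ser() allocates with
 * shm_du_buff_head_alloc() and dt_pci_shrink() releases again.
 */
static size_t dt_head_size(uint8_t addr_size, uint8_t eid_size)
{
        return (size_t) addr_size + QOS_LEN + TTL_LEN + eid_size;
}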
diff --git a/src/ipcpd/config.h.in b/src/ipcpd/config.h.in index b9961b01..afce5e86 100644 --- a/src/ipcpd/config.h.in +++ b/src/ipcpd/config.h.in @@ -63,3 +63,5 @@ #cmakedefine HAVE_NETMAP #cmakedefine HAVE_BPF #cmakedefine HAVE_RAW_SOCKETS +#define IPCP_ETH_RD_THR @IPCP_ETH_RD_THR@ +#define IPCP_ETH_WR_THR @IPCP_ETH_WR_THR@ diff --git a/src/ipcpd/eth/CMakeLists.txt b/src/ipcpd/eth/CMakeLists.txt index 6b8d1a77..6672f93c 100644 --- a/src/ipcpd/eth/CMakeLists.txt +++ b/src/ipcpd/eth/CMakeLists.txt @@ -14,30 +14,31 @@ include_directories(${CMAKE_BINARY_DIR}/include) find_path(NETMAP_C_INCLUDE_DIR net/netmap_user.h - HINTS /usr/include /usr/local/include -) + HINTS /usr/include /usr/local/include) mark_as_advanced(NETMAP_C_INCLUDE_DIR) +# Check for raw sockets if (CMAKE_SYSTEM_NAME STREQUAL "Linux") set(DISABLE_RAW_SOCKETS FALSE CACHE BOOL "Disable raw socket support for Ethernet IPCPs") if (NOT DISABLE_RAW_SOCKETS) message(STATUS "Raw socket support for Ethernet IPCPs enabled") set(HAVE_RAW_SOCKETS TRUE PARENT_SCOPE) + set(HAVE_RAW_SOCKETS TRUE) set(HAVE_ETH TRUE) else () message(STATUS "Raw socket support for Ethernet IPCPs disabled by user") + unset(HAVE_RAW_SOCKETS PARENT_SCOPE) unset(HAVE_RAW_SOCKETS) - unset(HAVE_ETH) endif () endif () +# Check for BPF if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") find_path(BPF_C_INCLUDE_DIR - net/bpf.h - HINTS /usr/include /usr/local/include - ) + net/bpf.h + HINTS /usr/include /usr/local/include) mark_as_advanced(BPF_C_INCLUDE_DIR) @@ -46,46 +47,51 @@ if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") "Disable Berkeley Packet Filter support for Ethernet IPCPs") if (NOT DISABLE_BPF) message(STATUS "Berkeley Packet Filter support " - "for Ethernet IPCPs enabled") + "for Ethernet IPCPs enabled") set(HAVE_BPF TRUE PARENT_SCOPE) + set(HAVE_BPF TRUE) set(HAVE_ETH TRUE) else () message(STATUS "Berkeley Packet Filter support " - "for Ethernet IPCPs disabled by user") + "for Ethernet IPCPs disabled by user") + unset(HAVE_BPF PARENT_SCOPE) unset(HAVE_BPF) - unset(HAVE_ETH) endif () endif () endif () -if (NETMAP_C_INCLUDE_DIR) +# Check for netmap exclusively +if (NOT HAVE_RAW_SOCKETS AND NOT HAVE_BPF AND NETMAP_C_INCLUDE_DIR) set(DISABLE_NETMAP FALSE CACHE BOOL - "Disable netmap support for ETH IPCPs") + "Disable netmap support for ETH IPCPs") if (NOT DISABLE_NETMAP) message(STATUS "Netmap support for Ethernet IPCPs enabled") - set(HAVE_NETMAP TRUE PARENT_SCOPE) test_and_set_c_compiler_flag_global(-std=c99) + set(HAVE_NETMAP TRUE PARENT_SCOPE) set(HAVE_ETH TRUE) else () message(STATUS "Netmap support for Ethernet IPCPs disabled by user") - unset(HAVE_NETMAP) - unset(HAVE_ETH) - unset(IPCP_ETH_TARGET CACHE) + unset(HAVE_NETMAP PARENT_SCOPE) endif () endif () if (HAVE_ETH) message(STATUS "Supported raw packet API found, building eth-llc and eth-dix") + set(IPCP_ETH_RD_THR 3 CACHE STRING + "Number of reader threads in Ethernet IPCP") + set(IPCP_ETH_WR_THR 3 CACHE STRING + "Number of writer threads in Ethernet IPCP") + set(ETH_LLC_SOURCES # Add source files here ${CMAKE_CURRENT_SOURCE_DIR}/llc.c - ) + ) set(ETH_DIX_SOURCES # Add source files here ${CMAKE_CURRENT_SOURCE_DIR}/dix.c - ) + ) set(IPCP_ETH_LLC_TARGET ipcpd-eth-llc CACHE INTERNAL "") set(IPCP_ETH_DIX_TARGET ipcpd-eth-dix CACHE INTERNAL "") diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c index 443f3fdb..1bbfac5b 100644 --- a/src/ipcpd/eth/eth.c +++ b/src/ipcpd/eth/eth.c @@ -29,6 +29,8 @@ #define _DARWIN_C_SOURCE #elif defined(__FreeBSD__) #define __BSD_VISIBLE 1 +#elif defined (__linux__) || defined 
(__CYGWIN__) +#define _DEFAULT_SOURCE #else #define _POSIX_C_SOURCE 200112L #endif @@ -121,7 +123,7 @@ #define DIX_HEADER_SIZE (DIX_EID_SIZE + DIX_LENGTH_SIZE) #define ETH_HEADER_TOT_SIZE (ETH_HEADER_SIZE + DIX_HEADER_SIZE) #define MAX_EIDS (1 << (8 * DIX_EID_SIZE)) -#define ETH_MAX_SDU_SIZE (ETH_MTU - DIX_HEADER_SIZE) +#define ETH_MAX_PACKET_SIZE (ETH_MTU - DIX_HEADER_SIZE) #define ETH_FRAME_SIZE (ETH_HEADER_SIZE + ETH_MTU_MAX) #elif defined(BUILD_ETH_LLC) #define THIS_TYPE IPCP_ETH_LLC @@ -129,7 +131,7 @@ #define LLC_HEADER_SIZE 3 #define ETH_HEADER_TOT_SIZE (ETH_HEADER_SIZE + LLC_HEADER_SIZE) #define MAX_SAPS 64 -#define ETH_MAX_SDU_SIZE (ETH_MTU - LLC_HEADER_SIZE) +#define ETH_MAX_PACKET_SIZE (ETH_MTU - LLC_HEADER_SIZE) #define ETH_FRAME_SIZE (ETH_HEADER_SIZE + ETH_MTU_MAX) #endif @@ -144,15 +146,27 @@ #define NAME_QUERY_REPLY 3 struct mgmt_msg { - uint8_t code; #if defined(BUILD_ETH_DIX) uint16_t seid; uint16_t deid; #elif defined(BUILD_ETH_LLC) uint8_t ssap; uint8_t dsap; + /* QoS here for alignment */ + uint8_t code; + uint8_t availability; +#endif + /* QoS parameters from spec, aligned */ + uint32_t loss; + uint64_t bandwidth; + uint32_t ber; + uint32_t max_gap; + uint32_t delay; + uint8_t in_order; +#if defined (BUILD_ETH_DIX) + uint8_t code; + uint8_t availability; #endif - uint8_t qoscube; int8_t response; } __attribute__((packed)); @@ -192,6 +206,7 @@ struct { struct shim_data * shim_data; #ifdef __linux__ int mtu; + int if_idx; #endif #if defined(HAVE_NETMAP) struct nm_desc * nmd; @@ -213,11 +228,10 @@ struct { #endif struct ef * fd_to_ef; fset_t * np1_flows; - fqueue_t * fq; pthread_rwlock_t flows_lock; - pthread_t sdu_writer; - pthread_t sdu_reader; + pthread_t packet_writer[IPCP_ETH_WR_THR]; + pthread_t packet_reader[IPCP_ETH_RD_THR]; #ifdef __linux__ pthread_t if_monitor; @@ -258,10 +272,6 @@ static int eth_data_init(void) if (eth_data.np1_flows == NULL) goto fail_np1_flows; - eth_data.fq = fqueue_create(); - if (eth_data.fq == NULL) - goto fail_fq; - for (i = 0; i < SYS_MAX_FLOWS; ++i) { #if defined(BUILD_ETH_DIX) eth_data.fd_to_ef[i].r_eid = -1; @@ -309,8 +319,6 @@ static int eth_data_init(void) fail_flows_lock: shim_data_destroy(eth_data.shim_data); fail_shim_data: - fqueue_destroy(eth_data.fq); - fail_fq: fset_destroy(eth_data.np1_flows); fail_np1_flows: #ifdef BUILD_ETH_LLC @@ -337,7 +345,6 @@ static void eth_data_fini(void) pthread_mutex_destroy(ð_data.mgmt_lock); pthread_rwlock_destroy(ð_data.flows_lock); shim_data_destroy(eth_data.shim_data); - fqueue_destroy(eth_data.fq); fset_destroy(eth_data.np1_flows); #ifdef BUILD_ETH_LLC bmp_destroy(eth_data.saps); @@ -376,7 +383,7 @@ static int eth_ipcp_send_frame(const uint8_t * dst_addr, assert(frame); - if (len > (size_t) ETH_MAX_SDU_SIZE) + if (len > (size_t) ETH_MAX_PACKET_SIZE) return -1; e_frame = (struct eth_frame *) frame; @@ -438,7 +445,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, uint8_t ssap, #endif const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { uint8_t * buf; struct mgmt_msg * msg; @@ -458,7 +465,14 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr, #elif defined(BUILD_ETH_LLC) msg->ssap = ssap; #endif - msg->qoscube = cube; + + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); memcpy(msg + 1, hash, ipcp_dir_hash_len()); @@ -528,7 +542,7 @@ static int eth_ipcp_req(uint8_t * r_addr, uint8_t r_sap, 
#endif const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, ALLOC_TIMEO * MILLION}; struct timespec abstime; @@ -552,7 +566,7 @@ static int eth_ipcp_req(uint8_t * r_addr, } /* reply to IRM, called under lock to prevent race */ - fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -692,11 +706,20 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf, uint8_t * r_addr) { struct mgmt_msg * msg; + qosspec_t qs; msg = (struct mgmt_msg *) buf; switch (msg->code) { case FLOW_REQ: + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); + if (shim_data_reg_has(eth_data.shim_data, buf + sizeof(*msg))) { eth_ipcp_req(r_addr, @@ -706,7 +729,7 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf, msg->ssap, #endif buf + sizeof(*msg), - msg->qoscube); + qs); } break; case FLOW_REPLY: @@ -785,7 +808,7 @@ static void * eth_ipcp_mgmt_handler(void * o) return (void *) 0; } -static void * eth_ipcp_sdu_reader(void * o) +static void * eth_ipcp_packet_reader(void * o) { uint8_t br_addr[MAC_SIZE]; #if defined(BUILD_ETH_DIX) @@ -890,7 +913,9 @@ static void * eth_ipcp_sdu_reader(void * o) if (deid == MGMT_EID) { #elif defined (BUILD_ETH_LLC) if (length > 0x05FF) {/* DIX */ +#ifndef HAVE_NETMAP ipcp_sdb_release(sdb); +#endif continue; } @@ -911,8 +936,8 @@ static void * eth_ipcp_sdu_reader(void * o) memcpy(frame->buf, &e_frame->payload, length); memcpy(frame->r_addr, e_frame->src_hwaddr, MAC_SIZE); - pthread_mutex_unlock(ð_data.mgmt_lock); + pthread_mutex_lock(ð_data.mgmt_lock); list_add(&frame->next, ð_data.mgmt_frames); pthread_cond_signal(ð_data.mgmt_cond); pthread_mutex_unlock(ð_data.mgmt_lock); @@ -962,7 +987,12 @@ static void * eth_ipcp_sdu_reader(void * o) return (void *) 0; } -static void * eth_ipcp_sdu_writer(void * o) +static void cleanup_writer(void * o) +{ + fqueue_destroy((fqueue_t *) o); +} + +static void * eth_ipcp_packet_writer(void * o) { int fd; struct shm_du_buff * sdb; @@ -975,15 +1005,21 @@ static void * eth_ipcp_sdu_writer(void * o) #endif uint8_t r_addr[MAC_SIZE]; + fqueue_t * fq; + + fq = fqueue_create(); + if (fq == NULL) + return (void *) -1; + (void) o; + pthread_cleanup_push(cleanup_writer, fq); + ipcp_lock_to_core(); while (true) { - fevent(eth_data.np1_flows, eth_data.fq, NULL); - - pthread_rwlock_rdlock(ð_data.flows_lock); - while ((fd = fqueue_next(eth_data.fq)) >= 0) { + fevent(eth_data.np1_flows, fq, NULL); + while ((fd = fqueue_next(fq)) >= 0) { if (ipcp_flow_read(fd, &sdb)) { log_dbg("Bad read from fd %d.", fd); continue; @@ -996,6 +1032,8 @@ static void * eth_ipcp_sdu_writer(void * o) log_dbg("Failed to allocate header."); ipcp_sdb_release(sdb); } + + pthread_rwlock_rdlock(ð_data.flows_lock); #if defined(BUILD_ETH_DIX) deid = eth_data.fd_to_ef[fd].r_eid; #elif defined(BUILD_ETH_LLC) @@ -1006,6 +1044,8 @@ static void * eth_ipcp_sdu_writer(void * o) eth_data.fd_to_ef[fd].r_addr, MAC_SIZE); + pthread_rwlock_unlock(ð_data.flows_lock); + eth_ipcp_send_frame(r_addr, #if defined(BUILD_ETH_DIX) deid, @@ -1016,9 +1056,10 @@ static void * eth_ipcp_sdu_writer(void * o) len); ipcp_sdb_release(sdb); } - pthread_rwlock_unlock(ð_data.flows_lock); } + pthread_cleanup_pop(true); + return (void *) 1; } @@ 
-1129,7 +1170,7 @@ static void * eth_ipcp_if_monitor(void * o) ifi = NLMSG_DATA(h); /* Not our interface */ - if (ifi->ifi_index != eth_data.device.sll_ifindex) + if (ifi->ifi_index != eth_data.if_idx) continue; if (ifi->ifi_flags & IFF_UP) { @@ -1189,6 +1230,10 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) #ifndef SHM_RDRB_MULTI_BLOCK size_t maxsz; #endif +#if defined(HAVE_RAW_SOCKETS) + int qdisc_bypass = 1; + int flags; +#endif assert(conf); assert(conf->type == THIS_TYPE); @@ -1198,7 +1243,7 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) } memset(&ifr, 0, sizeof(ifr)); - memcpy(ifr.ifr_name, conf->dev, strlen(conf->dev)); + memcpy(ifr.ifr_name, conf->dev, IFNAMSIZ); #ifdef BUILD_ETH_DIX if (conf->ethertype < 0x0600 || conf->ethertype == 0xFFFF) { @@ -1274,9 +1319,9 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) idx = if_nametoindex(conf->dev); if (idx == 0) { log_err("Failed to retrieve interface index."); - close(skfd); return -1; } + eth_data.if_idx = idx; #endif /* __FreeBSD__ */ #if defined(HAVE_NETMAP) @@ -1354,16 +1399,32 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) return -1; } + flags = fcntl(eth_data.s_fd, F_GETFL, 0); + if (flags < 0) { + log_err("Failed to get flags."); + goto fail_device; + } + + if (fcntl(eth_data.s_fd, F_SETFL, flags | O_NONBLOCK)) { + log_err("Failed to set socket non-blocking."); + goto fail_device; + } + + if (setsockopt(eth_data.s_fd, SOL_PACKET, PACKET_QDISC_BYPASS, + &qdisc_bypass, sizeof(qdisc_bypass))) { + log_info("Qdisc bypass not supported."); + } + if (bind(eth_data.s_fd, (struct sockaddr *) ð_data.device, sizeof(eth_data.device))) { - log_err("Failed to bind socket to interface"); + log_err("Failed to bind socket to interface."); goto fail_device; } #endif /* HAVE_NETMAP */ ipcp_set_state(IPCP_OPERATIONAL); -#ifdef __linux__ +#if defined(__linux__) if (pthread_create(ð_data.if_monitor, NULL, eth_ipcp_if_monitor, @@ -1381,20 +1442,24 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) goto fail_mgmt_handler; } - if (pthread_create(ð_data.sdu_reader, - NULL, - eth_ipcp_sdu_reader, - NULL)) { - ipcp_set_state(IPCP_INIT); - goto fail_sdu_reader; + for (idx = 0; idx < IPCP_ETH_RD_THR; ++idx) { + if (pthread_create(ð_data.packet_reader[idx], + NULL, + eth_ipcp_packet_reader, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_packet_reader; + } } - if (pthread_create(ð_data.sdu_writer, - NULL, - eth_ipcp_sdu_writer, - NULL)) { - ipcp_set_state(IPCP_INIT); - goto fail_sdu_writer; + for (idx = 0; idx < IPCP_ETH_WR_THR; ++idx) { + if (pthread_create(ð_data.packet_writer[idx], + NULL, + eth_ipcp_packet_writer, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_packet_writer; + } } #if defined(BUILD_ETH_DIX) @@ -1407,10 +1472,17 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) return 0; - fail_sdu_writer: - pthread_cancel(eth_data.sdu_reader); - pthread_join(eth_data.sdu_reader, NULL); - fail_sdu_reader: + fail_packet_writer: + while (idx > 0) { + pthread_cancel(eth_data.packet_writer[--idx]); + pthread_join(eth_data.packet_writer[idx], NULL); + } + idx = IPCP_ETH_RD_THR; + fail_packet_reader: + while (idx > 0) { + pthread_cancel(eth_data.packet_reader[--idx]); + pthread_join(eth_data.packet_reader[idx], NULL); + } pthread_cancel(eth_data.mgmt_handler); pthread_join(eth_data.mgmt_handler, NULL); fail_mgmt_handler: @@ -1418,7 +1490,7 @@ static int eth_ipcp_bootstrap(const struct ipcp_config * conf) pthread_cancel(eth_data.if_monitor); 
pthread_join(eth_data.if_monitor, NULL); #endif -#if !defined(HAVE_NETMAP) +#if defined(__linux__) || !defined(HAVE_NETMAP) fail_device: #endif #if defined(HAVE_NETMAP) @@ -1509,7 +1581,7 @@ static int eth_ipcp_query(const uint8_t * hash) static int eth_ipcp_flow_alloc(int fd, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { #ifdef BUILD_ETH_LLC uint8_t ssap = 0; @@ -1521,11 +1593,6 @@ static int eth_ipcp_flow_alloc(int fd, assert(hash); - if (cube > QOS_CUBE_DATA) { - log_dbg("Unsupported QoS requested."); - return -1; - } - if (!shim_data_dir_has(eth_data.shim_data, hash)) { log_err("Destination unreachable."); return -1; @@ -1553,7 +1620,7 @@ static int eth_ipcp_flow_alloc(int fd, #elif defined(BUILD_ETH_LLC) ssap, #endif - hash, cube) < 0) { + hash, qs) < 0) { #ifdef BUILD_ETH_LLC pthread_rwlock_wrlock(ð_data.flows_lock); bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); @@ -1696,6 +1763,8 @@ static struct ipcp_ops eth_ops = { int main(int argc, char * argv[]) { + int i; + if (ipcp_init(argc, argv, ð_ops) < 0) goto fail_init; @@ -1722,14 +1791,20 @@ int main(int argc, ipcp_shutdown(); if (ipcp_get_state() == IPCP_SHUTDOWN) { - pthread_cancel(eth_data.sdu_writer); - pthread_cancel(eth_data.sdu_reader); + for (i = 0; i < IPCP_ETH_WR_THR; ++i) + pthread_cancel(eth_data.packet_writer[i]); + for (i = 0; i < IPCP_ETH_RD_THR; ++i) + pthread_cancel(eth_data.packet_reader[i]); + pthread_cancel(eth_data.mgmt_handler); #ifdef __linux__ pthread_cancel(eth_data.if_monitor); #endif - pthread_join(eth_data.sdu_writer, NULL); - pthread_join(eth_data.sdu_reader, NULL); + for (i = 0; i < IPCP_ETH_WR_THR; ++i) + pthread_join(eth_data.packet_writer[i], NULL); + for (i = 0; i < IPCP_ETH_RD_THR; ++i) + pthread_join(eth_data.packet_reader[i], NULL); + pthread_join(eth_data.mgmt_handler, NULL); #ifdef __linux__ pthread_join(eth_data.if_monitor, NULL); diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c index 0b5ddf11..f8df5640 100644 --- a/src/ipcpd/ipcp.c +++ b/src/ipcpd/ipcp.c @@ -20,13 +20,24 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else +#define _POSIX_C_SOURCE 200112L +#define __XSI_VISIBLE 500 +#endif + #if defined(__linux__) && !defined(DISABLE_CORE_LOCK) #define _GNU_SOURCE #define NPROC (sysconf(_SC_NPROCESSORS_ONLN)) #endif +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L #define __XSI_VISIBLE 500 +#endif #include "config.h" @@ -194,6 +205,7 @@ static void * mainloop(void * o) layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT; int fd = -1; struct cmd * cmd; + qosspec_t qs; ret_msg.code = IPCP_MSG_CODE__IPCP_REPLY; @@ -418,12 +430,13 @@ static void * mainloop(void * o) break; } + qs = msg_to_spec(msg->qosspec); fd = np1_flow_alloc(msg->pid, - msg->port_id, - msg->qoscube); + msg->flow_id, + qs); if (fd < 0) { - log_err("Failed allocating fd on port_id %d.", - msg->port_id); + log_err("Failed allocating fd on flow_id %d.", + msg->flow_id); ret_msg.result = -1; break; } @@ -431,7 +444,7 @@ static void * mainloop(void * o) ret_msg.result = ipcpi.ops->ipcp_flow_alloc(fd, msg->hash.data, - msg->qoscube); + qs); break; case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP: ret_msg.has_result = true; @@ -448,10 +461,10 @@ static void * mainloop(void * o) } if (!msg->response) { - fd = np1_flow_resp(msg->port_id); + fd = np1_flow_resp(msg->flow_id); if (fd < 0) { log_warn("Port_id %d is not known.", - msg->port_id); + msg->flow_id); ret_msg.result = -1; break; } @@ -475,10 +488,10 @@ static void * mainloop(void * o) break; } - fd = np1_flow_dealloc(msg->port_id); + fd = np1_flow_dealloc(msg->flow_id); if (fd < 0) { - log_warn("Could not deallocate port_id %d.", - msg->port_id); + log_warn("Could not deallocate flow_id %d.", + msg->flow_id); ret_msg.result = -1; break; } diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h index 5417fc74..1d25fb3f 100644 --- a/src/ipcpd/ipcp.h +++ b/src/ipcpd/ipcp.h @@ -26,7 +26,6 @@ #include <ouroboros/hash.h> #include <ouroboros/ipcp.h> #include <ouroboros/list.h> -#include <ouroboros/qoscube.h> #include <ouroboros/sockets.h> #include <ouroboros/tpm.h> @@ -60,7 +59,7 @@ struct ipcp_ops { int (* ipcp_flow_alloc)(int fd, const uint8_t * dst, - qoscube_t qos); + qosspec_t qs); int (* ipcp_flow_alloc_resp)(int fd, int response); diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c index 358f6388..ab43f1f8 100644 --- a/src/ipcpd/local/main.c +++ b/src/ipcpd/local/main.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -55,7 +59,7 @@ struct { fqueue_t * fq; pthread_rwlock_t lock; - pthread_t sduloop; + pthread_t packet_loop; } local_data; static int local_data_init(void) @@ -93,7 +97,7 @@ static void local_data_fini(void){ pthread_rwlock_destroy(&local_data.lock); } -static void * ipcp_local_sdu_loop(void * o) +static void * ipcp_local_packet_loop(void * o) { (void) o; @@ -135,8 +139,8 @@ static int ipcp_local_bootstrap(const struct ipcp_config * conf) ipcp_set_state(IPCP_OPERATIONAL); - if (pthread_create(&local_data.sduloop, NULL, - ipcp_local_sdu_loop, NULL)) { + if (pthread_create(&local_data.packet_loop, NULL, + ipcp_local_packet_loop, NULL)) { ipcp_set_state(IPCP_INIT); return -1; } @@ -179,7 +183,7 @@ static int ipcp_local_query(const uint8_t * hash) static int ipcp_local_flow_alloc(int fd, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, ALLOC_TIMEOUT * MILLION}; struct timespec abstime; @@ -208,7 +212,7 @@ static int ipcp_local_flow_alloc(int fd, assert(ipcpi.alloc_id == -1); - out_fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + out_fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (out_fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_dbg("Flow allocation failed: %d", out_fd); @@ -360,8 +364,8 @@ int main(int argc, ipcp_shutdown(); if (ipcp_get_state() == IPCP_SHUTDOWN) { - pthread_cancel(local_data.sduloop); - pthread_join(local_data.sduloop, NULL); + pthread_cancel(local_data.packet_loop); + pthread_join(local_data.packet_loop, NULL); } local_data_fini(); diff --git a/src/ipcpd/normal/CMakeLists.txt b/src/ipcpd/normal/CMakeLists.txt index 6dd68385..d1585395 100644 --- a/src/ipcpd/normal/CMakeLists.txt +++ b/src/ipcpd/normal/CMakeLists.txt @@ -37,13 +37,12 @@ set(SOURCE_FILES dht.c dir.c dt.c - dt_pci.c enroll.c fa.c main.c pff.c routing.c - sdu_sched.c + psched.c # Add policies last pol/alternate_pff.c pol/flat.c diff --git a/src/ipcpd/normal/connmgr.c b/src/ipcpd/normal/connmgr.c index bf07ebc4..7b71761f 100644 --- a/src/ipcpd/normal/connmgr.c +++ b/src/ipcpd/normal/connmgr.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "connection-manager" diff --git a/src/ipcpd/normal/dht.c b/src/ipcpd/normal/dht.c index a6f1928b..4064bf5c 100644 --- a/src/ipcpd/normal/dht.c +++ b/src/ipcpd/normal/dht.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -58,21 +62,21 @@ typedef KadContactMsg kad_contact_msg_t; #define CLOCK_REALTIME_COARSE CLOCK_REALTIME #endif -#define DHT_MAX_REQS 2048 /* KAD recommends rnd(), bmp can be changed. */ -#define KAD_ALPHA 3 /* Parallel factor, proven optimal value. */ -#define KAD_K 8 /* Replication factor, MDHT value. */ -#define KAD_T_REPL 900 /* Replication time, tied to k. MDHT value. */ -#define KAD_T_REFR 900 /* Refresh time stale bucket, MDHT value. */ -#define KAD_T_JOIN 8 /* Response time to wait for a join. */ -#define KAD_T_RESP 5 /* Response time to wait for a response. */ -#define KAD_R_PING 2 /* Ping retries before declaring peer dead. */ -#define KAD_QUEER 15 /* Time to declare peer questionable. 
*/ -#define KAD_BETA 8 /* Bucket split factor, must be 1, 2, 4 or 8. */ -#define KAD_RESP_RETR 6 /* Number of retries on sending a response. */ -#define KAD_JOIN_RETR 8 /* Number of retries sending a join. */ -#define KAD_JOIN_INTV 1 /* Time (seconds) between join retries. */ -#define HANDLE_TIMEO 1000 /* Timeout for dht_handle_sdu tpm check (ms) */ -#define DHT_RETR_ADDR 1 /* Number of addresses to return on retrieve */ +#define DHT_MAX_REQS 2048 /* KAD recommends rnd(), bmp can be changed. */ +#define KAD_ALPHA 3 /* Parallel factor, proven optimal value. */ +#define KAD_K 8 /* Replication factor, MDHT value. */ +#define KAD_T_REPL 900 /* Replication time, tied to k. MDHT value. */ +#define KAD_T_REFR 900 /* Refresh time stale bucket, MDHT value. */ +#define KAD_T_JOIN 8 /* Response time to wait for a join. */ +#define KAD_T_RESP 5 /* Response time to wait for a response. */ +#define KAD_R_PING 2 /* Ping retries before declaring peer dead. */ +#define KAD_QUEER 15 /* Time to declare peer questionable. */ +#define KAD_BETA 8 /* Bucket split factor, must be 1, 2, 4 or 8. */ +#define KAD_RESP_RETR 6 /* Number of retries on sending a response. */ +#define KAD_JOIN_RETR 8 /* Number of retries sending a join. */ +#define KAD_JOIN_INTV 1 /* Time (seconds) between join retries. */ +#define HANDLE_TIMEO 1000 /* Timeout for dht_handle_packet tpm check (ms) */ +#define DHT_RETR_ADDR 1 /* Number of addresses to return on retrieve */ enum dht_state { DHT_INIT = 0, @@ -247,7 +251,7 @@ struct join_info { uint64_t addr; }; -struct sdu_info { +struct packet_info { struct dht * dht; struct shm_du_buff * sdb; }; @@ -1485,7 +1489,7 @@ static int send_msg(struct dht * dht, kad_msg__pack(msg, shm_du_buff_head(sdb)); - if (dt_write_sdu(addr, QOS_CUBE_BE, dht->fd, sdb) == 0) + if (dt_write_packet(addr, QOS_CUBE_BE, dht->fd, sdb) == 0) break; ipcp_sdb_release(sdb); @@ -2396,7 +2400,7 @@ uint64_t dht_query(struct dht * dht, return 0; } -static void * dht_handle_sdu(void * o) +static void * dht_handle_packet(void * o) { struct dht * dht = (struct dht *) o; @@ -2580,8 +2584,8 @@ static void * dht_handle_sdu(void * o) return (void *) 0; } -static void dht_post_sdu(void * comp, - struct shm_du_buff * sdb) +static void dht_post_packet(void * comp, + struct shm_du_buff * sdb) { struct cmd * cmd; struct dht * dht = (struct dht *) comp; @@ -2796,19 +2800,19 @@ struct dht * dht_create(uint64_t addr) dht->addr = addr; dht->id = NULL; #ifndef __DHT_TEST__ - dht->tpm = tpm_create(2, 1, dht_handle_sdu, dht); + dht->tpm = tpm_create(2, 1, dht_handle_packet, dht); if (dht->tpm == NULL) goto fail_tpm_create; if (tpm_start(dht->tpm)) goto fail_tpm_start; - dht->fd = dt_reg_comp(dht, &dht_post_sdu, DHT); + dht->fd = dt_reg_comp(dht, &dht_post_packet, DHT); notifier_reg(handle_event, dht); #else (void) handle_event; - (void) dht_handle_sdu; - (void) dht_post_sdu; + (void) dht_handle_packet; + (void) dht_post_packet; #endif dht->state = DHT_INIT; diff --git a/src/ipcpd/normal/dir.c b/src/ipcpd/normal/dir.c index 345d220d..a195f016 100644 --- a/src/ipcpd/normal/dir.c +++ b/src/ipcpd/normal/dir.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "directory" diff --git a/src/ipcpd/normal/dt.c b/src/ipcpd/normal/dt.c index b9d8934e..dc7343f1 100644 --- a/src/ipcpd/normal/dt.c +++ b/src/ipcpd/normal/dt.c @@ -20,15 +20,17 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" #define DT "dt" #define OUROBOROS_PREFIX DT -/* FIXME: fix #defines and remove endian.h include. */ -#include <ouroboros/endian.h> #include <ouroboros/bitmap.h> #include <ouroboros/errno.h> #include <ouroboros/logs.h> @@ -42,10 +44,9 @@ #include "connmgr.h" #include "ipcp.h" #include "dt.h" -#include "dt_pci.h" #include "pff.h" #include "routing.h" -#include "sdu_sched.h" +#include "psched.h" #include "comp.h" #include "fa.h" @@ -64,13 +65,96 @@ #endif struct comp_info { - void (* post_sdu)(void * comp, struct shm_du_buff * sdb); + void (* post_packet)(void * comp, struct shm_du_buff * sdb); void * comp; char * name; }; +/* Abstract syntax */ +enum dtp_fields { + DTP_DST = 0, /* DST ADDRESS */ + DTP_QOS, /* QOS ID */ + DTP_DEID, /* DST Endpoint ID */ + DTP_TTL, /* TTL FIELD */ + DTP_NUM_FIELDS /* Number of fields */ +}; + +/* Fixed field lengths */ +#define TTL_LEN 1 +#define QOS_LEN 1 + +struct dt_pci { + uint64_t dst_addr; + qoscube_t qc; + uint8_t ttl; + uint32_t eid; +}; + +struct { + uint8_t addr_size; + uint8_t eid_size; + size_t head_size; + + /* Offsets */ + size_t qc_o; + size_t ttl_o; + size_t eid_o; + + /* Initial TTL value */ + uint8_t max_ttl; +} dt_pci_info; + +static int dt_pci_ser(struct shm_du_buff * sdb, + struct dt_pci * dt_pci) +{ + uint8_t * head; + uint8_t ttl = dt_pci_info.max_ttl; + + assert(sdb); + assert(dt_pci); + + head = shm_du_buff_head_alloc(sdb, dt_pci_info.head_size); + if (head == NULL) + return -EPERM; + + /* FIXME: Add check and operations for Big Endian machines. */ + memcpy(head, &dt_pci->dst_addr, dt_pci_info.addr_size); + memcpy(head + dt_pci_info.qc_o, &dt_pci->qc, QOS_LEN); + memcpy(head + dt_pci_info.ttl_o, &ttl, TTL_LEN); + memcpy(head + dt_pci_info.eid_o, &dt_pci->eid, dt_pci_info.eid_size); + + return 0; +} + +static void dt_pci_des(struct shm_du_buff * sdb, + struct dt_pci * dt_pci) +{ + uint8_t * head; + + assert(sdb); + assert(dt_pci); + + head = shm_du_buff_head(sdb); + + /* Decrease TTL */ + --*(head + dt_pci_info.ttl_o); + + /* FIXME: Add check and operations for Big Endian machines. 
*/ + memcpy(&dt_pci->dst_addr, head, dt_pci_info.addr_size); + memcpy(&dt_pci->qc, head + dt_pci_info.qc_o, QOS_LEN); + memcpy(&dt_pci->ttl, head + dt_pci_info.ttl_o, TTL_LEN); + memcpy(&dt_pci->eid, head + dt_pci_info.eid_o, dt_pci_info.eid_size); +} + +static void dt_pci_shrink(struct shm_du_buff * sdb) +{ + assert(sdb); + + shm_du_buff_head_release(sdb, dt_pci_info.head_size); +} + struct { - struct sdu_sched * sdu_sched; + struct psched * psched; struct pff * pff[QOS_CUBE_MAX]; struct routing_i * routing[QOS_CUBE_MAX]; @@ -337,24 +421,25 @@ static void handle_event(void * self, #ifdef IPCP_FLOW_STATS stat_used(c->flow_info.fd, c->conn_info.addr); #endif - sdu_sched_add(dt.sdu_sched, c->flow_info.fd); - log_dbg("Added fd %d to SDU scheduler.", c->flow_info.fd); + psched_add(dt.psched, c->flow_info.fd); + log_dbg("Added fd %d to packet scheduler.", c->flow_info.fd); break; case NOTIFY_DT_CONN_DEL: #ifdef IPCP_FLOW_STATS stat_used(c->flow_info.fd, INVALID_ADDR); #endif - sdu_sched_del(dt.sdu_sched, c->flow_info.fd); - log_dbg("Removed fd %d from SDU scheduler.", c->flow_info.fd); + psched_del(dt.psched, c->flow_info.fd); + log_dbg("Removed fd %d from " + "packet scheduler.", c->flow_info.fd); break; default: break; } } -static void sdu_handler(int fd, - qoscube_t qc, - struct shm_du_buff * sdb) +static void packet_handler(int fd, + qoscube_t qc, + struct shm_du_buff * sdb) { struct dt_pci dt_pci; int ret; @@ -407,7 +492,7 @@ static void sdu_handler(int fd, ret = ipcp_flow_write(ofd, sdb); if (ret < 0) { - log_dbg("Failed to write SDU to fd %d.", ofd); + log_dbg("Failed to write packet to fd %d.", ofd); if (ret == -EFLOWDOWN) notifier_event(NOTIFY_DT_FLOW_DOWN, &ofd); ipcp_sdb_release(sdb); @@ -476,7 +561,7 @@ static void sdu_handler(int fd, return; } - if (dt.comps[dt_pci.eid].post_sdu == NULL) { + if (dt.comps[dt_pci.eid].post_packet == NULL) { log_err("No registered component on eid %d.", dt_pci.eid); ipcp_sdb_release(sdb); @@ -512,7 +597,8 @@ static void sdu_handler(int fd, pthread_mutex_unlock(&dt.stat[dt_pci.eid].lock); #endif - dt.comps[dt_pci.eid].post_sdu(dt.comps[dt_pci.eid].comp, sdb); + dt.comps[dt_pci.eid].post_packet(dt.comps[dt_pci.eid].comp, + sdb); } } @@ -555,10 +641,14 @@ int dt_init(enum pol_routing pr, info.pref_syntax = PROTO_FIXED; info.addr = ipcpi.dt_addr; - if (dt_pci_init(addr_size, eid_size, max_ttl)) { - log_err("Failed to init shm dt_pci."); - goto fail_pci_init; - } + dt_pci_info.addr_size = addr_size; + dt_pci_info.eid_size = eid_size; + dt_pci_info.max_ttl = max_ttl; + + dt_pci_info.qc_o = dt_pci_info.addr_size; + dt_pci_info.ttl_o = dt_pci_info.qc_o + QOS_LEN; + dt_pci_info.eid_o = dt_pci_info.ttl_o + TTL_LEN; + dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size; if (notifier_reg(handle_event, NULL)) { log_err("Failed to register with notifier."); @@ -642,8 +732,6 @@ int dt_init(enum pol_routing pr, fail_connmgr_comp_init: notifier_unreg(&handle_event); fail_notifier_reg: - dt_pci_fini(); - fail_pci_init: return -1; } @@ -671,21 +759,19 @@ void dt_fini(void) connmgr_comp_fini(COMPID_DT); notifier_unreg(&handle_event); - - dt_pci_fini(); } int dt_start(void) { - dt.sdu_sched = sdu_sched_create(sdu_handler); - if (dt.sdu_sched == NULL) { - log_err("Failed to create N-1 SDU scheduler."); + dt.psched = psched_create(packet_handler); + if (dt.psched == NULL) { + log_err("Failed to create N-1 packet scheduler."); return -1; } if (pthread_create(&dt.listener, NULL, dt_conn_handle, NULL)) { log_err("Failed to create listener thread."); - 
sdu_sched_destroy(dt.sdu_sched); + psched_destroy(dt.psched); return -1; } @@ -696,7 +782,7 @@ void dt_stop(void) { pthread_cancel(dt.listener); pthread_join(dt.listener, NULL); - sdu_sched_destroy(dt.sdu_sched); + psched_destroy(dt.psched); } int dt_reg_comp(void * comp, @@ -716,11 +802,11 @@ int dt_reg_comp(void * comp, return -EBADF; } - assert(dt.comps[res_fd].post_sdu == NULL); + assert(dt.comps[res_fd].post_packet == NULL); assert(dt.comps[res_fd].comp == NULL); assert(dt.comps[res_fd].name == NULL); - dt.comps[res_fd].post_sdu = func; + dt.comps[res_fd].post_packet = func; dt.comps[res_fd].comp = comp; dt.comps[res_fd].name = name; @@ -731,10 +817,10 @@ int dt_reg_comp(void * comp, return res_fd; } -int dt_write_sdu(uint64_t dst_addr, - qoscube_t qc, - int np1_fd, - struct shm_du_buff * sdb) +int dt_write_packet(uint64_t dst_addr, + qoscube_t qc, + int np1_fd, + struct shm_du_buff * sdb) { int fd; struct dt_pci dt_pci; @@ -779,7 +865,7 @@ int dt_write_sdu(uint64_t dst_addr, #endif ret = ipcp_flow_write(fd, sdb); if (ret < 0) { - log_dbg("Failed to write SDU to fd %d.", fd); + log_dbg("Failed to write packet to fd %d.", fd); if (ret == -EFLOWDOWN) notifier_event(NOTIFY_DT_FLOW_DOWN, &fd); goto fail_write; diff --git a/src/ipcpd/normal/dt.h b/src/ipcpd/normal/dt.h index e0bbe3f3..b74e84b0 100644 --- a/src/ipcpd/normal/dt.h +++ b/src/ipcpd/normal/dt.h @@ -24,10 +24,9 @@ #define OUROBOROS_IPCPD_NORMAL_DT_H #include <ouroboros/ipcp.h> +#include <ouroboros/qoscube.h> #include <ouroboros/shm_rdrbuff.h> -#include "dt_pci.h" - #define DT_COMP "Data Transfer" #define DT_PROTO "dtp" #define INVALID_ADDR 0 @@ -49,9 +48,9 @@ int dt_reg_comp(void * comp, void (* func)(void * comp, struct shm_du_buff * sdb), char * name); -int dt_write_sdu(uint64_t dst_addr, - qoscube_t qc, - int res_fd, - struct shm_du_buff * sdb); +int dt_write_packet(uint64_t dst_addr, + qoscube_t qc, + int res_fd, + struct shm_du_buff * sdb); #endif /* OUROBOROS_IPCPD_NORMAL_DT_H */ diff --git a/src/ipcpd/normal/dt_const.h b/src/ipcpd/normal/dt_const.h deleted file mode 100644 index fb005f06..00000000 --- a/src/ipcpd/normal/dt_const.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2018 - * - * Data Transfer Constants for the IPCP - * - * Dimitri Staessens <dimitri.staessens@ugent.be> - * Sander Vrijders <sander.vrijders@ugent.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. 
- */ - -#ifndef OUROBOROS_IPCPD_NORMAL_DT_CONST_H -#define OUROBOROS_IPCPD_NORMAL_DT_CONST_H - -#include <stdint.h> -#include <stdbool.h> - -struct dt_const { - uint8_t addr_size; - uint8_t cep_id_size; - uint8_t seqno_size; - bool has_ttl; - bool has_chk; - uint32_t min_pdu_size; - uint32_t max_pdu_size; -}; - -#endif /* OUROBOROS_IPCPD_NORMAL_DT_CONST_H */ diff --git a/src/ipcpd/normal/dt_pci.c b/src/ipcpd/normal/dt_pci.c deleted file mode 100644 index 76304668..00000000 --- a/src/ipcpd/normal/dt_pci.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2018 - * - * Protocol Control Information of Data Transfer Component - * - * Dimitri Staessens <dimitri.staessens@ugent.be> - * Sander Vrijders <sander.vrijders@ugent.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. - */ - -#include <ouroboros/errno.h> - -#include "dt_pci.h" - -#include <stdlib.h> -#include <string.h> -#include <assert.h> - -struct { - uint8_t addr_size; - uint8_t eid_size; - size_t head_size; - - /* Offsets */ - size_t qc_o; - size_t ttl_o; - size_t eid_o; - - /* Initial TTL value */ - uint8_t max_ttl; -} dt_pci_info; - -int dt_pci_init(uint8_t addr_size, - uint8_t eid_size, - uint8_t max_ttl) -{ - dt_pci_info.addr_size = addr_size; - dt_pci_info.eid_size = eid_size; - dt_pci_info.max_ttl = max_ttl; - - dt_pci_info.qc_o = dt_pci_info.addr_size; - dt_pci_info.ttl_o = dt_pci_info.qc_o + QOS_LEN; - dt_pci_info.eid_o = dt_pci_info.ttl_o + TTL_LEN; - dt_pci_info.head_size = dt_pci_info.eid_o + dt_pci_info.eid_size; - - return 0; -} - -void dt_pci_fini(void) { - return; -} - -int dt_pci_ser(struct shm_du_buff * sdb, - struct dt_pci * dt_pci) -{ - uint8_t * head; - uint8_t ttl = dt_pci_info.max_ttl; - - assert(sdb); - assert(dt_pci); - - head = shm_du_buff_head_alloc(sdb, dt_pci_info.head_size); - if (head == NULL) - return -EPERM; - - /* FIXME: Add check and operations for Big Endian machines. */ - memcpy(head, &dt_pci->dst_addr, dt_pci_info.addr_size); - memcpy(head + dt_pci_info.qc_o, &dt_pci->qc, QOS_LEN); - memcpy(head + dt_pci_info.ttl_o, &ttl, TTL_LEN); - memcpy(head + dt_pci_info.eid_o, &dt_pci->eid, dt_pci_info.eid_size); - - return 0; -} - -void dt_pci_des(struct shm_du_buff * sdb, - struct dt_pci * dt_pci) -{ - uint8_t * head; - - assert(sdb); - assert(dt_pci); - - head = shm_du_buff_head(sdb); - - /* Decrease TTL */ - --*(head + dt_pci_info.ttl_o); - - /* FIXME: Add check and operations for Big Endian machines. 
*/ - memcpy(&dt_pci->dst_addr, head, dt_pci_info.addr_size); - memcpy(&dt_pci->qc, head + dt_pci_info.qc_o, QOS_LEN); - memcpy(&dt_pci->ttl, head + dt_pci_info.ttl_o, TTL_LEN); - memcpy(&dt_pci->eid, head + dt_pci_info.eid_o, dt_pci_info.eid_size); -} - -void dt_pci_shrink(struct shm_du_buff * sdb) -{ - assert(sdb); - - shm_du_buff_head_release(sdb, dt_pci_info.head_size); -} diff --git a/src/ipcpd/normal/dt_pci.h b/src/ipcpd/normal/dt_pci.h deleted file mode 100644 index 8022f84a..00000000 --- a/src/ipcpd/normal/dt_pci.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Ouroboros - Copyright (C) 2016 - 2018 - * - * Protocol Control Information of Data Transfer Component - * - * Dimitri Staessens <dimitri.staessens@ugent.be> - * Sander Vrijders <sander.vrijders@ugent.be> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., http://www.fsf.org/about/contact/. - */ - -#ifndef OUROBOROS_IPCPD_NORMAL_DT_PCI_H -#define OUROBOROS_IPCPD_NORMAL_DT_PCI_H - -#include <ouroboros/shm_du_buff.h> -#include <ouroboros/proto.h> -#include <ouroboros/qoscube.h> - -#include <stdint.h> -#include <stdbool.h> - -/* Abstract syntax */ -enum dtp_fields { - DTP_DST = 0, /* DST ADDRESS */ - DTP_QOS, /* QOS ID */ - DTP_DEID, /* DST Endpoint ID */ - DTP_TTL, /* TTL FIELD */ - DTP_NUM_FIELDS /* Number of fields */ -}; - -/* Fixed field lengths */ -#define TTL_LEN 1 -#define QOS_LEN 1 - -struct dt_pci { - uint64_t dst_addr; - qoscube_t qc; - uint8_t ttl; - uint32_t eid; -}; - -int dt_pci_init(uint8_t addr_size, - uint8_t eid_size, - uint8_t max_ttl); - -void dt_pci_fini(void); - -int dt_pci_ser(struct shm_du_buff * sdb, - struct dt_pci * dt_pci); - -void dt_pci_des(struct shm_du_buff * sdb, - struct dt_pci * dt_pci); - -void dt_pci_shrink(struct shm_du_buff * sdb); - -#endif /* OUROBOROS_IPCPD_NORMAL_DT_PCI_H */ diff --git a/src/ipcpd/normal/enroll.c b/src/ipcpd/normal/enroll.c index a321db2b..78305ff0 100644 --- a/src/ipcpd/normal/enroll.c +++ b/src/ipcpd/normal/enroll.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 199309L +#endif #define OUROBOROS_PREFIX "enrollment" diff --git a/src/ipcpd/normal/fa.c b/src/ipcpd/normal/fa.c index 067a6e73..027223b7 100644 --- a/src/ipcpd/normal/fa.c +++ b/src/ipcpd/normal/fa.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -34,9 +38,8 @@ #include <ouroboros/ipcp-dev.h> #include "dir.h" -#include "dt_pci.h" #include "fa.h" -#include "sdu_sched.h" +#include "psched.h" #include "ipcp.h" #include "dt.h" @@ -54,8 +57,15 @@ struct fa_msg { uint32_t r_eid; uint32_t s_eid; uint8_t code; - uint8_t qc; int8_t response; + /* QoS parameters from spec, aligned */ + uint8_t availability; + uint8_t in_order; + uint32_t delay; + uint64_t bandwidth; + uint32_t loss; + uint32_t ber; + uint32_t max_gap; } __attribute__((packed)); struct { @@ -64,19 +74,19 @@ struct { uint64_t r_addr[PROG_MAX_FLOWS]; int fd; - struct sdu_sched * sdu_sched; + struct psched * psched; } fa; -static void sdu_handler(int fd, - qoscube_t qc, - struct shm_du_buff * sdb) +static void packet_handler(int fd, + qoscube_t qc, + struct shm_du_buff * sdb) { pthread_rwlock_rdlock(&fa.flows_lock); - if (dt_write_sdu(fa.r_addr[fd], qc, fa.r_eid[fd], sdb)) { + if (dt_write_packet(fa.r_addr[fd], qc, fa.r_eid[fd], sdb)) { pthread_rwlock_unlock(&fa.flows_lock); ipcp_sdb_release(sdb); - log_warn("Failed to forward SDU."); + log_warn("Failed to forward packet."); return; } @@ -89,7 +99,7 @@ static void destroy_conn(int fd) fa.r_addr[fd] = INVALID_ADDR; } -static void fa_post_sdu(void * comp, +static void fa_post_packet(void * comp, struct shm_du_buff * sdb) { struct timespec ts = {0, TIMEOUT * 1000}; @@ -97,6 +107,7 @@ static void fa_post_sdu(void * comp, int fd; uint8_t * buf; struct fa_msg * msg; + qosspec_t qs; (void) comp; @@ -139,10 +150,18 @@ static void fa_post_sdu(void * comp, assert(ipcpi.alloc_id == -1); + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); + fd = ipcp_flow_req_arr(getpid(), (uint8_t *) (msg + 1), ipcp_dir_hash_len(), - msg->qc); + qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Failed to get fd for flow."); @@ -152,8 +171,8 @@ static void fa_post_sdu(void * comp, pthread_rwlock_wrlock(&fa.flows_lock); - fa.r_eid[fd] = msg->s_eid; - fa.r_addr[fd] = msg->s_addr; + fa.r_eid[fd] = ntoh32(msg->s_eid); + fa.r_addr[fd] = ntoh64(msg->s_addr); pthread_rwlock_unlock(&fa.flows_lock); @@ -166,14 +185,14 @@ static void fa_post_sdu(void * comp, case FLOW_REPLY: pthread_rwlock_wrlock(&fa.flows_lock); - fa.r_eid[msg->r_eid] = msg->s_eid; + fa.r_eid[ntoh32(msg->r_eid)] = ntoh32(msg->s_eid); - ipcp_flow_alloc_reply(msg->r_eid, msg->response); + ipcp_flow_alloc_reply(ntoh32(msg->r_eid), msg->response); if (msg->response < 0) - destroy_conn(msg->r_eid); + destroy_conn(ntoh32(msg->r_eid)); else - sdu_sched_add(fa.sdu_sched, msg->r_eid); + psched_add(fa.psched, ntoh32(msg->r_eid)); pthread_rwlock_unlock(&fa.flows_lock); @@ -196,7 +215,7 @@ int fa_init(void) if (pthread_rwlock_init(&fa.flows_lock, NULL)) return -1; - fa.fd = dt_reg_comp(&fa, &fa_post_sdu, FA); + fa.fd = dt_reg_comp(&fa, &fa_post_packet, FA); return 0; } @@ -208,9 +227,9 @@ void fa_fini(void) int fa_start(void) { - fa.sdu_sched = sdu_sched_create(sdu_handler); - if (fa.sdu_sched == NULL) { - log_err("Failed to create SDU scheduler."); + fa.psched = psched_create(packet_handler); + if (fa.psched == NULL) { + log_err("Failed to create packet scheduler."); return -1; } @@ -219,16 +238,17 @@ int fa_start(void) void fa_stop(void) { - 
sdu_sched_destroy(fa.sdu_sched); + psched_destroy(fa.psched); } int fa_alloc(int fd, const uint8_t * dst, - qoscube_t qc) + qosspec_t qs) { struct fa_msg * msg; uint64_t addr; struct shm_du_buff * sdb; + qoscube_t qc; addr = dir_query(dst); if (addr == 0) @@ -237,15 +257,23 @@ int fa_alloc(int fd, if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len())) return -1; - msg = (struct fa_msg *) shm_du_buff_head(sdb); - msg->code = FLOW_REQ; - msg->qc = qc; - msg->s_eid = fd; - msg->s_addr = ipcpi.dt_addr; + msg = (struct fa_msg *) shm_du_buff_head(sdb); + msg->code = FLOW_REQ; + msg->s_eid = hton32(fd); + msg->s_addr = hton64(ipcpi.dt_addr); + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); memcpy(msg + 1, dst, ipcp_dir_hash_len()); - if (dt_write_sdu(addr, qc, fa.fd, sdb)) { + qc = qos_spec_to_cube(qs); + + if (dt_write_packet(addr, qc, fa.fd, sdb)) { ipcp_sdb_release(sdb); return -1; } @@ -299,22 +327,22 @@ int fa_alloc_resp(int fd, msg = (struct fa_msg *) shm_du_buff_head(sdb); msg->code = FLOW_REPLY; - msg->r_eid = fa.r_eid[fd]; - msg->s_eid = fd; + msg->r_eid = hton32(fa.r_eid[fd]); + msg->s_eid = hton32(fd); msg->response = response; if (response < 0) { destroy_conn(fd); ipcp_sdb_release(sdb); } else { - sdu_sched_add(fa.sdu_sched, fd); + psched_add(fa.psched, fd); } ipcp_flow_get_qoscube(fd, &qc); assert(qc >= 0 && qc < QOS_CUBE_MAX); - if (dt_write_sdu(fa.r_addr[fd], qc, fa.fd, sdb)) { + if (dt_write_packet(fa.r_addr[fd], qc, fa.fd, sdb)) { destroy_conn(fd); pthread_rwlock_unlock(&fa.flows_lock); ipcp_sdb_release(sdb); @@ -332,7 +360,7 @@ int fa_dealloc(int fd) pthread_rwlock_wrlock(&fa.flows_lock); - sdu_sched_del(fa.sdu_sched, fd); + psched_del(fa.psched, fd); destroy_conn(fd); diff --git a/src/ipcpd/normal/fa.h b/src/ipcpd/normal/fa.h index 87819d6f..6a836e17 100644 --- a/src/ipcpd/normal/fa.h +++ b/src/ipcpd/normal/fa.h @@ -23,7 +23,7 @@ #ifndef OUROBOROS_IPCPD_NORMAL_FA_H #define OUROBOROS_IPCPD_NORMAL_FA_H -#include <ouroboros/qoscube.h> +#include <ouroboros/qos.h> #include <ouroboros/utils.h> int fa_init(void); @@ -36,7 +36,7 @@ void fa_stop(void); int fa_alloc(int fd, const uint8_t * dst, - qoscube_t qos); + qosspec_t qs); int fa_alloc_resp(int fd, int response); diff --git a/src/ipcpd/normal/main.c b/src/ipcpd/normal/main.c index b131bbb6..3f05f421 100644 --- a/src/ipcpd/normal/main.c +++ b/src/ipcpd/normal/main.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200809L +#endif #include "config.h" diff --git a/src/ipcpd/normal/pol/flat.c b/src/ipcpd/normal/pol/flat.c index cab74159..89b7fff6 100644 --- a/src/ipcpd/normal/pol/flat.c +++ b/src/ipcpd/normal/pol/flat.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "flat-addr-auth" diff --git a/src/ipcpd/normal/pol/graph.c b/src/ipcpd/normal/pol/graph.c index f3c053ab..ec0917c5 100644 --- a/src/ipcpd/normal/pol/graph.c +++ b/src/ipcpd/normal/pol/graph.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #define OUROBOROS_PREFIX "graph" @@ -547,27 +551,6 @@ static int graph_routing_table_simple(struct graph * graph, return -1; } -int graph_routing_table(struct graph * graph, - uint64_t s_addr, - struct list_head * table) -{ - int ret = 0; - int * dist; - - assert(graph); - assert(table); - - pthread_mutex_lock(&graph->lock); - - ret = graph_routing_table_simple(graph, s_addr, table, &dist); - - free(dist); - - pthread_mutex_unlock(&graph->lock); - - return ret; -} - static int add_lfa_to_table(struct list_head * table, uint64_t addr, uint64_t lfa) @@ -595,9 +578,10 @@ static int add_lfa_to_table(struct list_head * table, return -1; } -int graph_routing_table_lfa(struct graph * graph, - uint64_t s_addr, - struct list_head * table) +int graph_routing_table(struct graph * graph, + enum routing_algo algo, + uint64_t s_addr, + struct list_head * table) { int * s_dist; int * n_dist[PROG_MAX_FLOWS]; @@ -617,66 +601,82 @@ int graph_routing_table_lfa(struct graph * graph, pthread_mutex_lock(&graph->lock); - for (j = 0; j < PROG_MAX_FLOWS; j++) { - n_dist[j] = NULL; - n_index[j] = -1; - addrs[j] = -1; - } - /* Get the normal next hops routing table. */ if (graph_routing_table_simple(graph, s_addr, table, &s_dist)) goto fail_table_simple; - list_for_each(p, &graph->vertices) { - v = list_entry(p, struct vertex, next); + /* Possibly augment the routing table. */ + switch (algo) { + case ROUTING_SIMPLE: + break; + case ROUTING_LFA: + for (j = 0; j < PROG_MAX_FLOWS; j++) { + n_dist[j] = NULL; + n_index[j] = -1; + addrs[j] = -1; + } - if (v->addr != s_addr) - continue; + list_for_each(p, &graph->vertices) { + v = list_entry(p, struct vertex, next); - /* Get the distances for every neighbor of the source. */ - list_for_each(q, &v->edges) { - e = list_entry(q, struct edge, next); + if (v->addr != s_addr) + continue; - addrs[i] = e->nb->addr; - n_index[i] = e->nb->index; - if (dijkstra(graph, e->nb->addr, - &nhops, &(n_dist[i++]))) - goto fail_dijkstra; + /* + * Get the distances for every neighbor + * of the source. + */ + list_for_each(q, &v->edges) { + e = list_entry(q, struct edge, next); - free(nhops); - } + addrs[i] = e->nb->addr; + n_index[i] = e->nb->index; + if (dijkstra(graph, e->nb->addr, + &nhops, &(n_dist[i++]))) + goto fail_dijkstra; - break; - } + free(nhops); + } - /* Loop though all nodes to see if we have a LFA for them. */ - list_for_each(p, &graph->vertices) { - v = list_entry(p, struct vertex, next); + break; + } - if (v->addr == s_addr) - continue; + /* Loop though all nodes to see if we have a LFA for them. */ + list_for_each(p, &graph->vertices) { + v = list_entry(p, struct vertex, next); - /* - * Check for every neighbor if dist(neighbor, destination) < - * dist(neighbor, source) + dist(source, destination). - */ - for (j = 0; j < i; j++) { - /* Exclude ourselves. */ - if (addrs[j] == v->addr) + if (v->addr == s_addr) continue; - if (n_dist[j][v->index] < - s_dist[n_index[j]] + s_dist[v->index]) - if (add_lfa_to_table(table, v->addr, addrs[j])) - goto fail_add_lfa; + /* + * Check for every neighbor if + * dist(neighbor, destination) < + * dist(neighbor, source) + dist(source, destination). + */ + for (j = 0; j < i; j++) { + /* Exclude ourselves. 
*/ + if (addrs[j] == v->addr) + continue; + + if (n_dist[j][v->index] < + s_dist[n_index[j]] + s_dist[v->index]) + if (add_lfa_to_table(table, v->addr, + addrs[j])) + goto fail_add_lfa; + } } + + for (j = 0; j < i; j++) + free(n_dist[j]); + + break; + default: + log_err("Unsupported algorithm."); + goto fail_algo; } pthread_mutex_unlock(&graph->lock); - for (j = 0; j < i; j++) - free(n_dist[j]); - free(s_dist); return 0; @@ -686,6 +686,7 @@ int graph_routing_table_lfa(struct graph * graph, free(n_dist[k]); fail_dijkstra: free_routing_table(table); + fail_algo: free(s_dist); fail_table_simple: pthread_mutex_unlock(&graph->lock); diff --git a/src/ipcpd/normal/pol/graph.h b/src/ipcpd/normal/pol/graph.h index 13657fd0..7cd14ad6 100644 --- a/src/ipcpd/normal/pol/graph.h +++ b/src/ipcpd/normal/pol/graph.h @@ -28,6 +28,11 @@ #include <inttypes.h> +enum routing_algo { + ROUTING_SIMPLE = 0, + ROUTING_LFA +}; + struct nhop { struct list_head next; uint64_t nhop; @@ -53,13 +58,10 @@ int graph_del_edge(struct graph * graph, uint64_t d_addr); int graph_routing_table(struct graph * graph, + enum routing_algo algo, uint64_t s_addr, struct list_head * table); -int graph_routing_table_lfa(struct graph * graph, - uint64_t s_addr, - struct list_head * table); - void graph_free_routing_table(struct graph * graph, struct list_head * table); diff --git a/src/ipcpd/normal/pol/link_state.c b/src/ipcpd/normal/pol/link_state.c index 1c418ffc..e2e9eab5 100644 --- a/src/ipcpd/normal/pol/link_state.c +++ b/src/ipcpd/normal/pol/link_state.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -101,30 +105,26 @@ struct nb { enum nb_type type; }; -typedef int (* rtable_fn_t)(struct graph * graph, - uint64_t s_addr, - struct list_head * table); - struct { - struct list_head nbs; - size_t nbs_len; - fset_t * mgmt_set; + struct list_head nbs; + size_t nbs_len; + fset_t * mgmt_set; - struct list_head db; - size_t db_len; + struct list_head db; + size_t db_len; - pthread_rwlock_t db_lock; + pthread_rwlock_t db_lock; - struct graph * graph; + struct graph * graph; - pthread_t lsupdate; - pthread_t lsreader; - pthread_t listener; + pthread_t lsupdate; + pthread_t lsreader; + pthread_t listener; - struct list_head routing_instances; - pthread_mutex_t routing_i_lock; + struct list_head routing_instances; + pthread_mutex_t routing_i_lock; - rtable_fn_t rtable; + enum routing_algo routing_algo; } ls; struct pol_routing_ops link_state_ops = { @@ -500,7 +500,8 @@ static void calculate_pff(struct routing_i * instance) struct list_head * q; int fds[PROG_MAX_FLOWS]; - if (ls.rtable(ls.graph, ipcpi.dt_addr, &table)) + if (graph_routing_table(ls.graph, ls.routing_algo, + ipcpi.dt_addr, &table)) return; pff_lock(instance->pff); @@ -902,11 +903,11 @@ int link_state_init(enum pol_routing pr) switch (pr) { case ROUTING_LINK_STATE: log_dbg("Using link state routing policy."); - ls.rtable = graph_routing_table; + ls.routing_algo = ROUTING_SIMPLE; break; case ROUTING_LINK_STATE_LFA: log_dbg("Using Loop-Free Alternates policy."); - ls.rtable = graph_routing_table_lfa; + ls.routing_algo = ROUTING_LFA; break; default: goto fail_graph; diff --git a/src/ipcpd/normal/pol/tests/graph_test.c b/src/ipcpd/normal/pol/tests/graph_test.c index d226398c..8050f73a 100644 --- a/src/ipcpd/normal/pol/tests/graph_test.c +++ b/src/ipcpd/normal/pol/tests/graph_test.c @@ -39,7 +39,7 @@ int 
graph_test_entries(int entries) struct list_head * p; int i = 0; - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } @@ -63,7 +63,7 @@ int graph_test_double_link(void) struct list_head * p; int i = 0; - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } @@ -101,7 +101,7 @@ int graph_test_single_link(void) struct list_head * p; int i = 0; - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } @@ -243,7 +243,7 @@ int graph_test(int argc, return -1; } - if (graph_routing_table(graph, 1, &table)) { + if (graph_routing_table(graph, ROUTING_SIMPLE, 1, &table)) { printf("Failed to get routing table.\n"); return -1; } diff --git a/src/ipcpd/normal/sdu_sched.c b/src/ipcpd/normal/psched.c index 6ce18ed5..27e5f1de 100644 --- a/src/ipcpd/normal/sdu_sched.c +++ b/src/ipcpd/normal/psched.c @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * SDU scheduler component + * Packet scheduler component * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -28,7 +32,7 @@ #include <ouroboros/notifier.h> #include "ipcp.h" -#include "sdu_sched.h" +#include "psched.h" #include "connmgr.h" #include <assert.h> @@ -45,15 +49,15 @@ static int qos_prio [] = { QOS_PRIO_DATA }; -struct sdu_sched { - fset_t * set[QOS_CUBE_MAX]; - next_sdu_fn_t callback; - pthread_t readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; +struct psched { + fset_t * set[QOS_CUBE_MAX]; + next_packet_fn_t callback; + pthread_t readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; }; struct sched_info { - struct sdu_sched * sch; - qoscube_t qc; + struct psched * sch; + qoscube_t qc; }; static void cleanup_reader(void * o) @@ -61,13 +65,13 @@ static void cleanup_reader(void * o) fqueue_destroy((fqueue_t *) o); } -static void * sdu_reader(void * o) +static void * packet_reader(void * o) { - struct sdu_sched * sched; - struct shm_du_buff * sdb; - int fd; - fqueue_t * fq; - qoscube_t qc; + struct psched * sched; + struct shm_du_buff * sdb; + int fd; + fqueue_t * fq; + qoscube_t qc; sched = ((struct sched_info *) o)->sch; qc = ((struct sched_info *) o)->qc; @@ -115,26 +119,26 @@ static void * sdu_reader(void * o) return (void *) 0; } -struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback) +struct psched * psched_create(next_packet_fn_t callback) { - struct sdu_sched * sdu_sched; - struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; - int i; - int j; + struct psched * psched; + struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL]; + int i; + int j; assert(callback); - sdu_sched = malloc(sizeof(*sdu_sched)); - if (sdu_sched == NULL) + psched = malloc(sizeof(*psched)); + if (psched == NULL) goto fail_malloc; - sdu_sched->callback = callback; + psched->callback = callback; for (i = 0; i < QOS_CUBE_MAX; ++i) { - sdu_sched->set[i] = fset_create(); - if (sdu_sched->set[i] == NULL) { + psched->set[i] = fset_create(); + if (psched->set[i] == NULL) { for (j = 0; j < i; ++j) - fset_destroy(sdu_sched->set[j]); + fset_destroy(psched->set[j]); goto fail_flow_set; } 
} @@ -146,17 +150,17 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback) free(infos[j]); goto fail_infos; } - infos[i]->sch = sdu_sched; + infos[i]->sch = psched; infos[i]->qc = i % QOS_CUBE_MAX; } for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) { - if (pthread_create(&sdu_sched->readers[i], NULL, - sdu_reader, infos[i])) { + if (pthread_create(&psched->readers[i], NULL, + packet_reader, infos[i])) { for (j = 0; j < i; ++j) - pthread_cancel(sdu_sched->readers[j]); + pthread_cancel(psched->readers[j]); for (j = 0; j < i; ++j) - pthread_join(sdu_sched->readers[j], NULL); + pthread_join(psched->readers[j], NULL); for (j = i; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j) free(infos[i]); goto fail_infos; @@ -177,61 +181,61 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback) par.sched_priority = min + (qos_prio[i % QOS_CUBE_MAX] * (max - min) / 99); - if (pthread_setschedparam(sdu_sched->readers[i], pol, &par)) + if (pthread_setschedparam(psched->readers[i], pol, &par)) goto fail_sched; } - return sdu_sched; + return psched; fail_sched: for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j) - pthread_cancel(sdu_sched->readers[j]); + pthread_cancel(psched->readers[j]); for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j) - pthread_join(sdu_sched->readers[j], NULL); + pthread_join(psched->readers[j], NULL); fail_infos: for (j = 0; j < QOS_CUBE_MAX; ++j) - fset_destroy(sdu_sched->set[j]); + fset_destroy(psched->set[j]); fail_flow_set: - free(sdu_sched); + free(psched); fail_malloc: return NULL; } -void sdu_sched_destroy(struct sdu_sched * sdu_sched) +void psched_destroy(struct psched * psched) { int i; - assert(sdu_sched); + assert(psched); for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) { - pthread_cancel(sdu_sched->readers[i]); - pthread_join(sdu_sched->readers[i], NULL); + pthread_cancel(psched->readers[i]); + pthread_join(psched->readers[i], NULL); } for (i = 0; i < QOS_CUBE_MAX; ++i) - fset_destroy(sdu_sched->set[i]); + fset_destroy(psched->set[i]); - free(sdu_sched); + free(psched); } -void sdu_sched_add(struct sdu_sched * sdu_sched, - int fd) +void psched_add(struct psched * psched, + int fd) { qoscube_t qc; - assert(sdu_sched); + assert(psched); ipcp_flow_get_qoscube(fd, &qc); - fset_add(sdu_sched->set[qc], fd); + fset_add(psched->set[qc], fd); } -void sdu_sched_del(struct sdu_sched * sdu_sched, - int fd) +void psched_del(struct psched * psched, + int fd) { qoscube_t qc; - assert(sdu_sched); + assert(psched); ipcp_flow_get_qoscube(fd, &qc); - fset_del(sdu_sched->set[qc], fd); + fset_del(psched->set[qc], fd); } diff --git a/src/ipcpd/normal/sdu_sched.h b/src/ipcpd/normal/psched.h index cdbda272..137c8fd1 100644 --- a/src/ipcpd/normal/sdu_sched.h +++ b/src/ipcpd/normal/psched.h @@ -1,7 +1,7 @@ /* * Ouroboros - Copyright (C) 2016 - 2018 * - * SDU scheduler component + * Packet scheduler component * * Dimitri Staessens <dimitri.staessens@ugent.be> * Sander Vrijders <sander.vrijders@ugent.be> @@ -20,24 +20,24 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
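The psched calls above keep the create/add/del/destroy lifecycle of the old SDU scheduler under the new packet-oriented names. A minimal consumer sketch, assuming only the psched API shown in this diff; the callback body and the start/stop wrappers are hypothetical:

#include "psched.h"

/* Hypothetical callback: hand each scheduled packet to the next
 * processing stage (the body here is only a placeholder). */
static void next_packet(int                  fd,
                        qoscube_t            qc,
                        struct shm_du_buff * sdb)
{
        (void) fd;
        (void) qc;

        ipcp_sdb_release(sdb); /* placeholder: drop the packet */
}

static struct psched * sched;

/* Hypothetical wrappers showing the lifecycle. */
static int start_sched(int fd)
{
        sched = psched_create(next_packet);
        if (sched == NULL)
                return -1;

        psched_add(sched, fd);  /* start scheduling reads on this flow */

        return 0;
}

static void stop_sched(int fd)
{
        psched_del(sched, fd);
        psched_destroy(sched);
}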
*/ -#ifndef OUROBOROS_IPCPD_NORMAL_SDU_SCHED_H -#define OUROBOROS_IPCPD_NORMAL_SDU_SCHED_H +#ifndef OUROBOROS_IPCPD_NORMAL_PSCHED_H +#define OUROBOROS_IPCPD_NORMAL_PSCHED_H #include <ouroboros/ipcp-dev.h> #include <ouroboros/fqueue.h> -typedef void (* next_sdu_fn_t)(int fd, - qoscube_t qc, - struct shm_du_buff * sdb); +typedef void (* next_packet_fn_t)(int fd, + qoscube_t qc, + struct shm_du_buff * sdb); -struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback); +struct psched * psched_create(next_packet_fn_t callback); -void sdu_sched_destroy(struct sdu_sched * sdu_sched); +void psched_destroy(struct psched * psched); -void sdu_sched_add(struct sdu_sched * sdu_sched, - int fd); +void psched_add(struct psched * psched, + int fd); -void sdu_sched_del(struct sdu_sched * sdu_sched, - int fd); +void psched_del(struct psched * psched, + int fd); -#endif /* OUROBOROS_IPCPD_NORMAL_SDU_SCHED_H */ +#endif /* OUROBOROS_IPCPD_NORMAL_PSCHED_H */ diff --git a/src/ipcpd/raptor/CMakeLists.txt b/src/ipcpd/raptor/CMakeLists.txt index 06e6ee29..1883d9bb 100644 --- a/src/ipcpd/raptor/CMakeLists.txt +++ b/src/ipcpd/raptor/CMakeLists.txt @@ -16,6 +16,7 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Linux") find_path(RAPTOR_KERNEL_MODULE NAMES raptor.ko.gz + raptor.ko.xz HINTS /lib/modules/${CMAKE_SYSTEM_VERSION}/extra ) diff --git a/src/ipcpd/raptor/main.c b/src/ipcpd/raptor/main.c index 4f0099b3..8f578611 100644 --- a/src/ipcpd/raptor/main.c +++ b/src/ipcpd/raptor/main.c @@ -64,9 +64,9 @@ #include <sys/mman.h> #define THIS_TYPE IPCP_RAPTOR -#define MGMT_SAP 0x01 +#define MGMT_EID 0x01 #define MAC_SIZE 6 -#define MAX_SAPS 64 +#define MAX_EIDS 64 #define EVENT_WAIT_TIMEOUT 100 /* us */ #define NAME_QUERY_TIMEOUT 2000 /* ms */ @@ -90,16 +90,23 @@ #define NAME_QUERY_REPLY 3 struct mgmt_msg { - uint8_t code; - uint8_t ssap; - uint8_t dsap; - uint8_t qoscube; - int8_t response; + uint8_t code; + uint8_t seid; + uint8_t deid; + int8_t response; + /* QoS parameters from spec, aligned */ + uint32_t loss; + uint64_t bandwidth; + uint32_t ber; + uint32_t max_gap; + uint32_t delay; + uint8_t in_order; + uint8_t availability; } __attribute__((packed)); struct ef { - int8_t sap; - int8_t r_sap; + int8_t eid; + int8_t r_eid; }; struct mgmt_frame { @@ -113,7 +120,7 @@ struct { int ioctl_fd; - struct bmp * saps; + struct bmp * eids; fset_t * np1_flows; fqueue_t * fq; int * ef_to_fd; @@ -145,13 +152,13 @@ static int raptor_data_init(void) goto fail_fd_to_ef; raptor_data.ef_to_fd = - malloc(sizeof(*raptor_data.ef_to_fd) * MAX_SAPS); + malloc(sizeof(*raptor_data.ef_to_fd) * MAX_EIDS); if (raptor_data.ef_to_fd == NULL) goto fail_ef_to_fd; - raptor_data.saps = bmp_create(MAX_SAPS, 2); - if (raptor_data.saps == NULL) - goto fail_saps; + raptor_data.eids = bmp_create(MAX_EIDS, 2); + if (raptor_data.eids == NULL) + goto fail_eids; raptor_data.np1_flows = fset_create(); if (raptor_data.np1_flows == NULL) @@ -161,12 +168,12 @@ static int raptor_data_init(void) if (raptor_data.fq == NULL) goto fail_fq; - for (i = 0; i < MAX_SAPS; ++i) + for (i = 0; i < MAX_EIDS; ++i) raptor_data.ef_to_fd[i] = -1; for (i = 0; i < SYS_MAX_FLOWS; ++i) { - raptor_data.fd_to_ef[i].sap = -1; - raptor_data.fd_to_ef[i].r_sap = -1; + raptor_data.fd_to_ef[i].eid = -1; + raptor_data.fd_to_ef[i].r_eid = -1; } raptor_data.shim_data = shim_data_create(); @@ -210,8 +217,8 @@ static int raptor_data_init(void) fail_fq: fset_destroy(raptor_data.np1_flows); fail_np1_flows: - bmp_destroy(raptor_data.saps); - fail_saps: + bmp_destroy(raptor_data.eids); + fail_eids: 
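raptor_data_init() above creates the EID bitmap with an offset of 2, so the dynamically allocated endpoint IDs presumably start above the reserved values 0 and MGMT_EID (1). A small sketch of the allocate/check/release pattern used throughout this file, assuming the bmp API exactly as it appears in the diff; the wrapper names are made up and locking is omitted:

/* Hypothetical wrappers around the raptor EID bitmap (locking omitted). */
static int grab_eid(void)
{
        int eid;

        eid = bmp_allocate(raptor_data.eids);
        if (!bmp_is_id_valid(raptor_data.eids, eid))
                return -1;              /* no free EIDs left */

        return eid;
}

static void drop_eid(int eid)
{
        bmp_release(raptor_data.eids, eid);
}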
free(raptor_data.ef_to_fd); fail_ef_to_fd: free(raptor_data.fd_to_ef); @@ -227,13 +234,13 @@ static void raptor_data_fini(void) pthread_rwlock_destroy(&raptor_data.flows_lock); fqueue_destroy(raptor_data.fq); fset_destroy(raptor_data.np1_flows); - bmp_destroy(raptor_data.saps); + bmp_destroy(raptor_data.eids); free(raptor_data.fd_to_ef); free(raptor_data.ef_to_fd); } static int raptor_send_frame(struct shm_du_buff * sdb, - uint8_t dsap) + uint8_t deid) { uint8_t * frame; size_t frame_len; @@ -263,7 +270,7 @@ static int raptor_send_frame(struct shm_du_buff * sdb, frame[0] = (frame_len & 0x00FF) >> 0; frame[1] = (frame_len & 0xFF00) >> 8; - frame[2] = dsap; + frame[2] = deid; memcpy(&frame[RAPTOR_HEADER], payload, len); @@ -276,9 +283,9 @@ static int raptor_send_frame(struct shm_du_buff * sdb, return 0; } -static int raptor_sap_alloc(uint8_t ssap, +static int raptor_eid_alloc(uint8_t seid, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { struct mgmt_msg * msg; struct shm_du_buff * sdb; @@ -288,14 +295,20 @@ static int raptor_sap_alloc(uint8_t ssap, return -1; } - msg = (struct mgmt_msg *) shm_du_buff_head(sdb); - msg->code = FLOW_REQ; - msg->ssap = ssap; - msg->qoscube = cube; + msg = (struct mgmt_msg *) shm_du_buff_head(sdb); + msg->code = FLOW_REQ; + msg->seid = seid; + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); memcpy(msg + 1, hash, ipcp_dir_hash_len()); - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -306,25 +319,25 @@ static int raptor_sap_alloc(uint8_t ssap, return 0; } -static int raptor_sap_alloc_resp(uint8_t ssap, - uint8_t dsap, - int response) +static int raptor_eid_alloc_resp(uint8_t seid, + uint8_t deid, + int response) { - struct mgmt_msg * msg; + struct mgmt_msg * msg; struct shm_du_buff * sdb; if (ipcp_sdb_reserve(&sdb, sizeof(*msg)) < 0) { - log_err("failed to reserve sdb for management frame."); + log_err("Failed to reserve sdb for management frame."); return -1; } msg = (struct mgmt_msg *) shm_du_buff_head(sdb); msg->code = FLOW_REPLY; - msg->ssap = ssap; - msg->dsap = dsap; + msg->seid = seid; + msg->deid = deid; msg->response = response; - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -335,9 +348,9 @@ static int raptor_sap_alloc_resp(uint8_t ssap, return 0; } -static int raptor_sap_req(uint8_t r_sap, +static int raptor_eid_req(uint8_t r_eid, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000}; struct timespec abstime; @@ -361,7 +374,7 @@ static int raptor_sap_req(uint8_t r_sap, } /* reply to IRM, called under lock to prevent race */ - fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -370,7 +383,7 @@ static int raptor_sap_req(uint8_t r_sap, pthread_rwlock_wrlock(&raptor_data.flows_lock); - raptor_data.fd_to_ef[fd].r_sap = r_sap; + raptor_data.fd_to_ef[fd].r_eid = r_eid; ipcpi.alloc_id = fd; pthread_cond_broadcast(&ipcpi.alloc_cond); @@ -378,13 +391,13 @@ static int raptor_sap_req(uint8_t r_sap, 
pthread_rwlock_unlock(&raptor_data.flows_lock); pthread_mutex_unlock(&ipcpi.alloc_lock); - log_dbg("New flow request, fd %d, remote SAP %d.", fd, r_sap); + log_dbg("New flow request, fd %d, remote EID %d.", fd, r_eid); return 0; } -static int raptor_sap_alloc_reply(uint8_t ssap, - int dsap, +static int raptor_eid_alloc_reply(uint8_t seid, + int deid, int response) { int ret = 0; @@ -392,21 +405,21 @@ static int raptor_sap_alloc_reply(uint8_t ssap, pthread_rwlock_wrlock(&raptor_data.flows_lock); - fd = raptor_data.ef_to_fd[dsap]; + fd = raptor_data.ef_to_fd[deid]; if (fd < 0) { pthread_rwlock_unlock(& raptor_data.flows_lock); - log_err("No flow found with that SAP."); + log_err("No flow found with that EID."); return -1; /* -EFLOWNOTFOUND */ } if (response) - bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap); + bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid); else - raptor_data.fd_to_ef[fd].r_sap = ssap; + raptor_data.fd_to_ef[fd].r_eid = seid; pthread_rwlock_unlock(&raptor_data.flows_lock); - log_dbg("Flow reply, fd %d, SSAP %d, DSAP %d.", fd, ssap, dsap); + log_dbg("Flow reply, fd %d, SEID %d, DEID %d.", fd, seid, deid); if ((ret = ipcp_flow_alloc_reply(fd, response)) < 0) return -1; @@ -424,16 +437,16 @@ static int raptor_name_query_req(const uint8_t * hash) return 0; if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()) < 0) { - log_err("failed to reserve sdb for management frame."); + log_err("Failed to reserve sdb for management frame."); return -1; } - msg = (struct mgmt_msg *) shm_du_buff_head(sdb); - msg->code = NAME_QUERY_REPLY; + msg = (struct mgmt_msg *) shm_du_buff_head(sdb); + msg->code = NAME_QUERY_REPLY; memcpy(msg + 1, hash, ipcp_dir_hash_len()); - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -456,8 +469,9 @@ static int raptor_name_query_reply(const uint8_t * hash) static int raptor_mgmt_frame(const uint8_t * buf, size_t len) { - struct mgmt_msg * msg = (struct mgmt_msg *) buf; - uint8_t * hash = (uint8_t *) (msg + 1); + struct mgmt_msg * msg = (struct mgmt_msg *) buf; + uint8_t * hash = (uint8_t *) (msg + 1); + qosspec_t qs; switch (msg->code) { case FLOW_REQ: @@ -466,8 +480,16 @@ static int raptor_mgmt_frame(const uint8_t * buf, return -1; } + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); + if (shim_data_reg_has(raptor_data.shim_data, hash)) - raptor_sap_req(msg->ssap, hash, msg->qoscube); + raptor_eid_req(msg->seid, hash, qs); break; case FLOW_REPLY: if (len != sizeof(*msg)) { @@ -475,7 +497,7 @@ static int raptor_mgmt_frame(const uint8_t * buf, return -1; } - raptor_sap_alloc_reply(msg->ssap, msg->dsap, msg->response); + raptor_eid_alloc_reply(msg->seid, msg->deid, msg->response); break; case NAME_QUERY_REQ: if (len != sizeof(*msg) + ipcp_dir_hash_len()) { @@ -552,7 +574,7 @@ static void * raptor_mgmt_handler(void * o) static void raptor_recv_frame(uint8_t * frame) { - uint8_t dsap; + uint8_t deid; uint8_t * payload; size_t frame_len; size_t length; @@ -577,14 +599,14 @@ static void raptor_recv_frame(uint8_t * frame) return; } - dsap = frame[2]; + deid = frame[2]; payload = &frame[RAPTOR_HEADER]; length = frame_len - RAPTOR_HEADER; shm_du_buff_head_release(sdb, RAPTOR_HEADER); shm_du_buff_tail_release(sdb, RAPTOR_PAGE - frame_len); - 
if (dsap == MGMT_SAP) { + if (deid == MGMT_EID) { pthread_mutex_lock(&raptor_data.mgmt_lock); mgmt_frame = malloc(sizeof(*mgmt_frame)); @@ -604,7 +626,7 @@ static void raptor_recv_frame(uint8_t * frame) } else { pthread_rwlock_rdlock(&raptor_data.flows_lock); - fd = raptor_data.ef_to_fd[dsap]; + fd = raptor_data.ef_to_fd[deid]; if (fd < 0) { pthread_rwlock_unlock(&raptor_data.flows_lock); ipcp_sdb_release(sdb); @@ -647,7 +669,7 @@ static void * raptor_send_thread(void * o) struct timespec timeout = {0, EVENT_WAIT_TIMEOUT * 1000}; int fd; struct shm_du_buff * sdb; - uint8_t dsap; + uint8_t deid; (void) o; @@ -662,9 +684,9 @@ static void * raptor_send_thread(void * o) continue; } - dsap = raptor_data.fd_to_ef[fd].r_sap; + deid = raptor_data.fd_to_ef[fd].r_eid; - raptor_send_frame(sdb, dsap); + raptor_send_frame(sdb, deid); } pthread_rwlock_unlock(&raptor_data.flows_lock); } @@ -886,7 +908,7 @@ static int raptor_query(const uint8_t * hash) return -1; } - if (raptor_send_frame(sdb, MGMT_SAP)) { + if (raptor_send_frame(sdb, MGMT_EID)) { log_err("Failed to send management frame."); ipcp_sdb_release(sdb); return -1; @@ -901,19 +923,14 @@ static int raptor_query(const uint8_t * hash) static int raptor_flow_alloc(int fd, const uint8_t * hash, - qoscube_t cube) + qosspec_t qs) { - uint8_t ssap = 0; + uint8_t seid = 0; log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(hash)); assert(hash); - if (cube != QOS_CUBE_BE) { - log_dbg("Unsupported QoS requested."); - return -1; - } - if (!shim_data_dir_has(raptor_data.shim_data, hash)) { log_err("Destination unreachable."); return -1; @@ -921,29 +938,29 @@ static int raptor_flow_alloc(int fd, pthread_rwlock_wrlock(&raptor_data.flows_lock); - ssap = bmp_allocate(raptor_data.saps); - if (!bmp_is_id_valid(raptor_data.saps, ssap)) { + seid = bmp_allocate(raptor_data.eids); + if (!bmp_is_id_valid(raptor_data.eids, seid)) { pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } - raptor_data.fd_to_ef[fd].sap = ssap; - raptor_data.ef_to_fd[ssap] = fd; + raptor_data.fd_to_ef[fd].eid = seid; + raptor_data.ef_to_fd[seid] = fd; pthread_rwlock_unlock(&raptor_data.flows_lock); - if (raptor_sap_alloc(ssap, hash, cube) < 0) { + if (raptor_eid_alloc(seid, hash, qs) < 0) { pthread_rwlock_wrlock(&raptor_data.flows_lock); - bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap); - raptor_data.fd_to_ef[fd].sap = -1; - raptor_data.ef_to_fd[ssap] = -1; + bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid); + raptor_data.fd_to_ef[fd].eid = -1; + raptor_data.ef_to_fd[seid] = -1; pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } fset_add(raptor_data.np1_flows, fd); - log_dbg("Pending flow with fd %d on SAP %d.", fd, ssap); + log_dbg("Pending flow with fd %d on EID %d.", fd, seid); return 0; } @@ -953,8 +970,8 @@ static int raptor_flow_alloc_resp(int fd, { struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000}; struct timespec abstime; - uint8_t ssap = 0; - uint8_t r_sap = 0; + uint8_t seid = 0; + uint8_t r_eid = 0; clock_gettime(PTHREAD_COND_CLOCK, &abstime); @@ -979,35 +996,35 @@ static int raptor_flow_alloc_resp(int fd, pthread_rwlock_wrlock(&raptor_data.flows_lock); - ssap = bmp_allocate(raptor_data.saps); - if (!bmp_is_id_valid(raptor_data.saps, ssap)) { + seid = bmp_allocate(raptor_data.eids); + if (!bmp_is_id_valid(raptor_data.eids, seid)) { pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } - raptor_data.fd_to_ef[fd].sap = ssap; - r_sap = raptor_data.fd_to_ef[fd].r_sap; - raptor_data.ef_to_fd[ssap] = fd; + raptor_data.fd_to_ef[fd].eid 
= seid; + r_eid = raptor_data.fd_to_ef[fd].r_eid; + raptor_data.ef_to_fd[seid] = fd; pthread_rwlock_unlock(&raptor_data.flows_lock); - if (raptor_sap_alloc_resp(ssap, r_sap, response) < 0) { + if (raptor_eid_alloc_resp(seid, r_eid, response) < 0) { pthread_rwlock_wrlock(&raptor_data.flows_lock); - bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap); + bmp_release(raptor_data.eids, raptor_data.fd_to_ef[fd].eid); pthread_rwlock_unlock(&raptor_data.flows_lock); return -1; } fset_add(raptor_data.np1_flows, fd); - log_dbg("Accepted flow, fd %d, SAP %d.", fd, (uint8_t)ssap); + log_dbg("Accepted flow, fd %d, EID %d.", fd, (uint8_t)seid); return 0; } static int raptor_flow_dealloc(int fd) { - uint8_t sap; + uint8_t eid; ipcp_flow_fini(fd); @@ -1015,12 +1032,12 @@ static int raptor_flow_dealloc(int fd) fset_del(raptor_data.np1_flows, fd); - sap = raptor_data.fd_to_ef[fd].sap; - bmp_release(raptor_data.saps, sap); - raptor_data.fd_to_ef[fd].sap = -1; - raptor_data.fd_to_ef[fd].r_sap = -1; + eid = raptor_data.fd_to_ef[fd].eid; + bmp_release(raptor_data.eids, eid); + raptor_data.fd_to_ef[fd].eid = -1; + raptor_data.fd_to_ef[fd].r_eid = -1; - raptor_data.ef_to_fd[sap] = -1; + raptor_data.ef_to_fd[eid] = -1; pthread_rwlock_unlock(&raptor_data.flows_lock); diff --git a/src/ipcpd/shim-data.c b/src/ipcpd/shim-data.c index 27b98171..6c28c79f 100644 --- a/src/ipcpd/shim-data.c +++ b/src/ipcpd/shim-data.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. */ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" diff --git a/src/ipcpd/shim-data.h b/src/ipcpd/shim-data.h index 336ffa35..7a8c01c4 100644 --- a/src/ipcpd/shim-data.h +++ b/src/ipcpd/shim-data.h @@ -23,7 +23,6 @@ #ifndef OUROBOROS_IPCPD_IPCP_DATA_H #define OUROBOROS_IPCPD_IPCP_DATA_H -#include <ouroboros/qoscube.h> #include <ouroboros/list.h> #include <sys/types.h> diff --git a/src/ipcpd/udp/main.c b/src/ipcpd/udp/main.c index 2e59e1a5..a1af1e85 100644 --- a/src/ipcpd/udp/main.c +++ b/src/ipcpd/udp/main.c @@ -20,7 +20,11 @@ * Foundation, Inc., http://www.fsf.org/about/contact/. 
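The same feature-test guard now appears at the top of shim-data.c and the UDP IPCP: on glibc and Cygwin, _DEFAULT_SOURCE exposes the extended (BSD/SysV) declarations these files rely on, while other platforms keep the plain POSIX 2001 level. One point worth illustrating is that the guard only takes effect if it precedes every system header; a minimal, self-contained sketch of the pattern (the includes below are just an example, not taken from this code):

/* Feature-test macros must be defined before the first system header. */
#if defined(__linux__) || defined(__CYGWIN__)
#define _DEFAULT_SOURCE
#else
#define _POSIX_C_SOURCE 200112L
#endif

#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        return 0;
}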
*/ +#if defined(__linux__) || defined(__CYGWIN__) +#define _DEFAULT_SOURCE +#else #define _POSIX_C_SOURCE 200112L +#endif #include "config.h" @@ -50,27 +54,34 @@ #include <sys/wait.h> #include <fcntl.h> -#define FLOW_REQ 1 -#define FLOW_REPLY 2 +#define FLOW_REQ 1 +#define FLOW_REPLY 2 -#define THIS_TYPE IPCP_UDP -#define LISTEN_PORT htons(0x0D1F) -#define SHIM_UDP_BUF_SIZE 256 -#define SHIM_UDP_MSG_SIZE 256 -#define SHIM_UDP_MAX_SDU_SIZE 8980 -#define DNS_TTL 86400 -#define FD_UPDATE_TIMEOUT 100 /* microseconds */ +#define THIS_TYPE IPCP_UDP +#define LISTEN_PORT htons(0x0D1F) +#define SHIM_UDP_BUF_SIZE 256 +#define SHIM_UDP_MSG_SIZE 256 +#define SHIM_UDP_MAX_PACKET_SIZE 8980 +#define DNS_TTL 86400 +#define FD_UPDATE_TIMEOUT 100 /* microseconds */ -#define local_ip (udp_data.s_saddr.sin_addr.s_addr) +#define local_ip (udp_data.s_saddr.sin_addr.s_addr) -#define UDP_MAX_PORTS 0xFFFF +#define UDP_MAX_PORTS 0xFFFF struct mgmt_msg { uint16_t src_udp_port; uint16_t dst_udp_port; uint8_t code; - uint8_t qoscube; uint8_t response; + /* QoS parameters from spec, aligned */ + uint8_t availability; + uint8_t in_order; + uint32_t delay; + uint64_t bandwidth; + uint32_t loss; + uint32_t ber; + uint32_t max_gap; } __attribute__((packed)); struct uf { @@ -95,9 +106,9 @@ struct { struct uf fd_to_uf[SYS_MAX_FLOWS]; pthread_rwlock_t flows_lock; - pthread_t sduloop; + pthread_t packet_loop; pthread_t handler; - pthread_t sdu_reader; + pthread_t packet_reader; bool fd_set_mod; pthread_cond_t fd_set_cond; @@ -108,6 +119,15 @@ static int udp_data_init(void) { int i; + if (pthread_rwlock_init(&udp_data.flows_lock, NULL)) + return -1; + + if (pthread_cond_init(&udp_data.fd_set_cond, NULL)) + goto fail_set_cond; + + if (pthread_mutex_init(&udp_data.fd_set_lock, NULL)) + goto fail_set_lock; + for (i = 0; i < FD_SETSIZE; ++i) udp_data.uf_to_fd[i] = -1; @@ -120,26 +140,28 @@ static int udp_data_init(void) udp_data.np1_flows = fset_create(); if (udp_data.np1_flows == NULL) - return -ENOMEM; + goto fail_fset; udp_data.fq = fqueue_create(); - if (udp_data.fq == NULL) { - fset_destroy(udp_data.np1_flows); - return -ENOMEM; - } + if (udp_data.fq == NULL) + goto fail_fqueue; udp_data.shim_data = shim_data_create(); - if (udp_data.shim_data == NULL) { - fqueue_destroy(udp_data.fq); - fset_destroy(udp_data.np1_flows); - return -ENOMEM; - } - - pthread_rwlock_init(&udp_data.flows_lock, NULL); - pthread_cond_init(&udp_data.fd_set_cond, NULL); - pthread_mutex_init(&udp_data.fd_set_lock, NULL); + if (udp_data.shim_data == NULL) + goto fail_data; return 0; + fail_data: + fqueue_destroy(udp_data.fq); + fail_fqueue: + fset_destroy(udp_data.np1_flows); + fail_fset: + pthread_mutex_destroy(&udp_data.fd_set_lock); + fail_set_lock: + pthread_cond_destroy(&udp_data.fd_set_cond); + fail_set_cond: + pthread_rwlock_destroy(&udp_data.flows_lock); + return -1; } static void udp_data_fini(void) @@ -204,7 +226,7 @@ static int send_shim_udp_msg(uint8_t * buf, static int ipcp_udp_port_alloc(uint32_t dst_ip_addr, uint16_t src_udp_port, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { uint8_t * buf; struct mgmt_msg * msg; @@ -220,7 +242,13 @@ static int ipcp_udp_port_alloc(uint32_t dst_ip_addr, msg = (struct mgmt_msg *) buf; msg->code = FLOW_REQ; msg->src_udp_port = src_udp_port; - msg->qoscube = cube; + msg->delay = hton32(qs.delay); + msg->bandwidth = hton64(qs.bandwidth); + msg->availability = qs.availability; + msg->loss = hton32(qs.loss); + msg->ber = hton32(qs.ber); + msg->in_order = qs.in_order; + msg->max_gap = hton32(qs.max_gap); 
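Both the raptor and UDP management messages now carry the full qosspec_t across the wire, multi-byte fields in network byte order via the hton32()/hton64() helpers seen above. A hedged sketch of that symmetry as a pair of helpers (the functions themselves are not in the source; field names follow the UDP IPCP's struct mgmt_msg):

/* Hypothetical helpers making the wire (de)serialisation explicit. */
static void qs_to_msg(const qosspec_t * qs,
                      struct mgmt_msg * msg)
{
        msg->delay        = hton32(qs->delay);
        msg->bandwidth    = hton64(qs->bandwidth);
        msg->availability = qs->availability;
        msg->loss         = hton32(qs->loss);
        msg->ber          = hton32(qs->ber);
        msg->in_order     = qs->in_order;
        msg->max_gap      = hton32(qs->max_gap);
}

static void msg_to_qs(const struct mgmt_msg * msg,
                      qosspec_t *             qs)
{
        qs->delay        = ntoh32(msg->delay);
        qs->bandwidth    = ntoh64(msg->bandwidth);
        qs->availability = msg->availability;
        qs->loss         = ntoh32(msg->loss);
        qs->ber          = ntoh32(msg->ber);
        qs->in_order     = msg->in_order;
        qs->max_gap      = ntoh32(msg->max_gap);
}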
memcpy(msg + 1, dst, ipcp_dir_hash_len()); @@ -257,7 +285,7 @@ static int ipcp_udp_port_alloc_resp(uint32_t dst_ip_addr, static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000}; struct timespec abstime; @@ -311,11 +339,12 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr, if (ipcp_get_state() != IPCP_OPERATIONAL) { log_dbg("Won't allocate over non-operational IPCP."); pthread_mutex_unlock(&ipcpi.alloc_lock); + close(skfd); return -1; } /* reply to IRM */ - fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube); + fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs); if (fd < 0) { pthread_mutex_unlock(&ipcpi.alloc_lock); log_err("Could not get new flow from IRMd."); @@ -371,6 +400,11 @@ static int ipcp_udp_port_alloc_reply(uint16_t src_udp_port, pthread_rwlock_rdlock(&udp_data.flows_lock); fd = udp_port_to_fd(dst_udp_port); + if (fd < 0) { + pthread_rwlock_unlock(&udp_data.flows_lock); + return -1; + } + skfd = udp_data.fd_to_uf[fd].skfd; pthread_rwlock_unlock(&udp_data.flows_lock); @@ -415,11 +449,11 @@ static void * ipcp_udp_listener(void * o) while (true) { struct mgmt_msg * msg = NULL; - + qosspec_t qs; memset(&buf, 0, SHIM_UDP_MSG_SIZE); - n = sizeof(c_saddr); n = recvfrom(sfd, buf, SHIM_UDP_MSG_SIZE, 0, - (struct sockaddr *) &c_saddr, (unsigned *) &n); + (struct sockaddr *) &c_saddr, + (socklen_t *) sizeof(c_saddr)); if (n < 0) continue; @@ -434,9 +468,16 @@ static void * ipcp_udp_listener(void * o) switch (msg->code) { case FLOW_REQ: c_saddr.sin_port = msg->src_udp_port; + qs.delay = ntoh32(msg->delay); + qs.bandwidth = ntoh64(msg->bandwidth); + qs.availability = msg->availability; + qs.loss = ntoh32(msg->loss); + qs.ber = ntoh32(msg->ber); + qs.in_order = msg->in_order; + qs.max_gap = ntoh32(msg->max_gap); ipcp_udp_port_req(&c_saddr, (uint8_t *) (msg + 1), - msg->qoscube); + qs); break; case FLOW_REPLY: ipcp_udp_port_alloc_reply(msg->src_udp_port, @@ -454,13 +495,13 @@ static void * ipcp_udp_listener(void * o) return 0; } -static void * ipcp_udp_sdu_reader(void * o) +static void * ipcp_udp_packet_reader(void * o) { ssize_t n; int skfd; int fd; /* FIXME: avoid this copy */ - char buf[SHIM_UDP_MAX_SDU_SIZE]; + char buf[SHIM_UDP_MAX_PACKET_SIZE]; struct sockaddr_in r_saddr; struct timeval tv = {0, FD_UPDATE_TIMEOUT}; fd_set read_fds; @@ -492,7 +533,7 @@ static void * ipcp_udp_sdu_reader(void * o) n = sizeof(r_saddr); if ((n = recvfrom(skfd, &buf, - SHIM_UDP_MAX_SDU_SIZE, + SHIM_UDP_MAX_PACKET_SIZE, 0, (struct sockaddr *) &r_saddr, (unsigned *) &n)) <= 0) @@ -511,7 +552,7 @@ static void * ipcp_udp_sdu_reader(void * o) return (void *) 0; } -static void * ipcp_udp_sdu_loop(void * o) +static void * ipcp_udp_packet_loop(void * o) { int fd; struct shm_du_buff * sdb; @@ -534,13 +575,14 @@ static void * ipcp_udp_sdu_loop(void * o) pthread_rwlock_unlock(&udp_data.flows_lock); - pthread_cleanup_push((void (*)(void *)) ipcp_sdb_release, + pthread_cleanup_push((void (*)(void *)) + ipcp_sdb_release, (void *) sdb); if (send(fd, shm_du_buff_head(sdb), shm_du_buff_tail(sdb) - shm_du_buff_head(sdb), 0) < 0) - log_err("Failed to send SDU."); + log_err("Failed to send PACKET."); pthread_cleanup_pop(true); } @@ -624,20 +666,20 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf) goto fail_bind; } - if (pthread_create(&udp_data.sdu_reader, + if (pthread_create(&udp_data.packet_reader, NULL, - ipcp_udp_sdu_reader, + ipcp_udp_packet_reader, NULL)) { 
ipcp_set_state(IPCP_INIT); - goto fail_sdu_reader; + goto fail_packet_reader; } - if (pthread_create(&udp_data.sduloop, + if (pthread_create(&udp_data.packet_loop, NULL, - ipcp_udp_sdu_loop, + ipcp_udp_packet_loop, NULL)) { ipcp_set_state(IPCP_INIT); - goto fail_sduloop; + goto fail_packet_loop; } log_dbg("Bootstrapped IPCP over UDP with pid %d.", getpid()); @@ -646,10 +688,10 @@ static int ipcp_udp_bootstrap(const struct ipcp_config * conf) return 0; - fail_sduloop: - pthread_cancel(udp_data.sdu_reader); - pthread_join(udp_data.sdu_reader, NULL); - fail_sdu_reader: + fail_packet_loop: + pthread_cancel(udp_data.packet_reader); + pthread_join(udp_data.packet_reader, NULL); + fail_packet_reader: pthread_cancel(udp_data.handler); pthread_join(udp_data.handler, NULL); fail_bind: @@ -753,7 +795,8 @@ static uint32_t ddns_resolve(char * name, close(pipe_fd[0]); waitpid(pid, &wstatus, 0); - if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) + if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0 && + count != SHIM_UDP_BUF_SIZE) log_dbg("Succesfully communicated with nslookup."); else log_err("Failed to resolve DNS address."); @@ -946,7 +989,7 @@ static int ipcp_udp_query(const uint8_t * hash) static int ipcp_udp_flow_alloc(int fd, const uint8_t * dst, - qoscube_t cube) + qosspec_t qs) { struct sockaddr_in r_saddr; /* server address */ struct sockaddr_in f_saddr; /* flow */ @@ -956,14 +999,13 @@ static int ipcp_udp_flow_alloc(int fd, log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(dst)); - assert(dst); + (void) qs; - if (cube > QOS_CUBE_DATA) { - log_dbg("Unsupported QoS requested."); - return -1; - } + assert(dst); skfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + if (skfd < 0) + return -1; /* this socket is for the flow */ memset((char *) &f_saddr, 0, sizeof(f_saddr)); @@ -1010,7 +1052,7 @@ static int ipcp_udp_flow_alloc(int fd, pthread_rwlock_unlock(&udp_data.flows_lock); - if (ipcp_udp_port_alloc(ip_addr, f_saddr.sin_port, dst, cube) < 0) { + if (ipcp_udp_port_alloc(ip_addr, f_saddr.sin_port, dst, qs) < 0) { pthread_rwlock_wrlock(&udp_data.flows_lock); udp_data.fd_to_uf[fd].udp = -1; @@ -1180,13 +1222,13 @@ int main(int argc, ipcp_shutdown(); if (ipcp_get_state() == IPCP_SHUTDOWN) { - pthread_cancel(udp_data.sduloop); + pthread_cancel(udp_data.packet_loop); pthread_cancel(udp_data.handler); - pthread_cancel(udp_data.sdu_reader); + pthread_cancel(udp_data.packet_reader); - pthread_join(udp_data.sduloop, NULL); + pthread_join(udp_data.packet_loop, NULL); pthread_join(udp_data.handler, NULL); - pthread_join(udp_data.sdu_reader, NULL); + pthread_join(udp_data.packet_reader, NULL); } udp_data_fini(); |
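A pattern worth noting across this change set: udp_data_init(), raptor_data_init() and psched_create() all unwind partially built state through a chain of fail_ labels in reverse order of construction, and the bootstrap and shutdown paths above cancel and join threads in the same reverse order. A condensed, self-contained sketch of the idiom (the resource names are placeholders, not the real fields):

#include <pthread.h>

#include <ouroboros/fqueue.h>

static pthread_rwlock_t lock;
static pthread_cond_t   cond;
static fset_t *         set;
static fqueue_t *       fq;

static int example_init(void)
{
        if (pthread_rwlock_init(&lock, NULL))
                return -1;

        if (pthread_cond_init(&cond, NULL))
                goto fail_cond;

        set = fset_create();
        if (set == NULL)
                goto fail_set;

        fq = fqueue_create();
        if (fq == NULL)
                goto fail_fq;

        return 0;
 fail_fq:
        fset_destroy(set);
 fail_set:
        pthread_cond_destroy(&cond);
 fail_cond:
        pthread_rwlock_destroy(&lock);
        return -1;
}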