From 2936dd5e4232f7d7d9c720d89bb4a696abcd1830 Mon Sep 17 00:00:00 2001 From: Dimitri Staessens Date: Sat, 10 Mar 2018 10:09:08 +0100 Subject: ipcpd: Add IPCP over DIX Ethernet This adds an IPC Process that uses DIX Ethernet with an Ethertype that is configurable at bootstrap. This allows parallel DIX layers over the same Ethernet network with different Ethertypes (and one LLC layer). It allows jumbo frames in the future, and should avoid the problems we have with some routers not handling LLC traffic very well. The destination endpoint ID is sent as a 16 bit integer, so the maximum payload is 1498 bytes in standard Ethernet, and 8998 bytes when Jumbo frames are used. The implementation is very similar to the Ethernet LLC IPCP, so it is implemented using preprocessor macros in the single source instead of duplicating code. Signed-off-by: Dimitri Staessens Signed-off-by: Sander Vrijders --- src/ipcpd/eth/CMakeLists.txt | 119 +++ src/ipcpd/eth/dix.c | 26 + src/ipcpd/eth/eth.c | 1640 ++++++++++++++++++++++++++++++++++++++++++ src/ipcpd/eth/llc.c | 26 + 4 files changed, 1811 insertions(+) create mode 100644 src/ipcpd/eth/CMakeLists.txt create mode 100644 src/ipcpd/eth/dix.c create mode 100644 src/ipcpd/eth/eth.c create mode 100644 src/ipcpd/eth/llc.c (limited to 'src/ipcpd/eth') diff --git a/src/ipcpd/eth/CMakeLists.txt b/src/ipcpd/eth/CMakeLists.txt new file mode 100644 index 00000000..6b8d1a77 --- /dev/null +++ b/src/ipcpd/eth/CMakeLists.txt @@ -0,0 +1,119 @@ +get_filename_component(CURRENT_SOURCE_PARENT_DIR + ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY) +get_filename_component(CURRENT_BINARY_PARENT_DIR + ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +include_directories(${CURRENT_SOURCE_PARENT_DIR}) +include_directories(${CURRENT_BINARY_PARENT_DIR}) + +include_directories(${CMAKE_SOURCE_DIR}/include) +include_directories(${CMAKE_BINARY_DIR}/include) + +find_path(NETMAP_C_INCLUDE_DIR + net/netmap_user.h + HINTS /usr/include /usr/local/include +) + +mark_as_advanced(NETMAP_C_INCLUDE_DIR) + +if (CMAKE_SYSTEM_NAME STREQUAL "Linux") + set(DISABLE_RAW_SOCKETS FALSE CACHE BOOL + "Disable raw socket support for Ethernet IPCPs") + if (NOT DISABLE_RAW_SOCKETS) + message(STATUS "Raw socket support for Ethernet IPCPs enabled") + set(HAVE_RAW_SOCKETS TRUE PARENT_SCOPE) + set(HAVE_ETH TRUE) + else () + message(STATUS "Raw socket support for Ethernet IPCPs disabled by user") + unset(HAVE_RAW_SOCKETS) + unset(HAVE_ETH) + endif () +endif () + +if (NOT CMAKE_SYSTEM_NAME STREQUAL "Linux") + find_path(BPF_C_INCLUDE_DIR + net/bpf.h + HINTS /usr/include /usr/local/include + ) + + mark_as_advanced(BPF_C_INCLUDE_DIR) + + if (BPF_C_INCLUDE_DIR) + set(DISABLE_BPF FALSE CACHE BOOL + "Disable Berkeley Packet Filter support for Ethernet IPCPs") + if (NOT DISABLE_BPF) + message(STATUS "Berkeley Packet Filter support " + "for Ethernet IPCPs enabled") + set(HAVE_BPF TRUE PARENT_SCOPE) + set(HAVE_ETH TRUE) + else () + message(STATUS "Berkeley Packet Filter support " + "for Ethernet IPCPs disabled by user") + unset(HAVE_BPF) + unset(HAVE_ETH) + endif () + endif () +endif () + +if (NETMAP_C_INCLUDE_DIR) + set(DISABLE_NETMAP FALSE CACHE BOOL + "Disable netmap support for ETH IPCPs") + if (NOT DISABLE_NETMAP) + message(STATUS "Netmap support for Ethernet IPCPs enabled") + set(HAVE_NETMAP TRUE PARENT_SCOPE) + test_and_set_c_compiler_flag_global(-std=c99) + set(HAVE_ETH TRUE) + else () + message(STATUS "Netmap support for Ethernet IPCPs 
disabled by user") + unset(HAVE_NETMAP) + unset(HAVE_ETH) + unset(IPCP_ETH_TARGET CACHE) + endif () +endif () + +if (HAVE_ETH) + message(STATUS "Supported raw packet API found, building eth-llc and eth-dix") + + set(ETH_LLC_SOURCES + # Add source files here + ${CMAKE_CURRENT_SOURCE_DIR}/llc.c + ) + + set(ETH_DIX_SOURCES + # Add source files here + ${CMAKE_CURRENT_SOURCE_DIR}/dix.c + ) + + set(IPCP_ETH_LLC_TARGET ipcpd-eth-llc CACHE INTERNAL "") + set(IPCP_ETH_DIX_TARGET ipcpd-eth-dix CACHE INTERNAL "") + + add_executable(ipcpd-eth-llc ${ETH_LLC_SOURCES} ${IPCP_SOURCES}) + add_executable(ipcpd-eth-dix ${ETH_DIX_SOURCES} ${IPCP_SOURCES}) + + if (HAVE_BPF AND NOT APPLE) + target_include_directories(ipcpd-eth-llc PUBLIC ${BPF_C_INCLUDE_DIR}) + target_include_directories(ipcpd-eth-dix PUBLIC ${BPF_C_INCLUDE_DIR}) + endif () + + if (HAVE_NETMAP AND NOT APPLE) + target_include_directories(ipcpd-eth-llc PUBLIC + ${NETMAP_C_INCLUDE_DIR}) + target_include_directories(ipcpd-eth-dix PUBLIC + ${NETMAP_C_INCLUDE_DIR}) + endif () + + target_link_libraries(ipcpd-eth-llc LINK_PUBLIC ouroboros-dev) + target_link_libraries(ipcpd-eth-dix LINK_PUBLIC ouroboros-dev) + + include(AddCompileFlags) + if (CMAKE_BUILD_TYPE MATCHES "Debug*") + add_compile_flags(ipcpd-eth-llc -DCONFIG_OUROBOROS_DEBUG) + add_compile_flags(ipcpd-eth-dix -DCONFIG_OUROBOROS_DEBUG) + endif () + + install(TARGETS ipcpd-eth-llc ipcpd-eth-dix RUNTIME DESTINATION + ${CMAKE_INSTALL_SBINDIR}) +endif () diff --git a/src/ipcpd/eth/dix.c b/src/ipcpd/eth/dix.c new file mode 100644 index 00000000..dfa92ea3 --- /dev/null +++ b/src/ipcpd/eth/dix.c @@ -0,0 +1,26 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2018 + * + * IPC processes over Ethernet - DIX + * + * Dimitri Staessens + * Sander Vrijders + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#define BUILD_ETH_DIX +#define OUROBOROS_PREFIX "ipcpd/eth-dix" + +#include "eth.c" diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c new file mode 100644 index 00000000..ce806dc2 --- /dev/null +++ b/src/ipcpd/eth/eth.c @@ -0,0 +1,1640 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2018 + * + * IPC processes over Ethernet + * + * Dimitri Staessens + * Sander Vrijders + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. 
+ */ + +#if !defined(BUILD_ETH_DIX) && !defined(BUILD_ETH_LLC) +#error Define BUILD_ETH_DIX or BUILD_ETH_LLC to build an Ethernet IPCP +#endif + +#if defined(__APPLE__) +#define _BSD_SOURCE +#define _DARWIN_C_SOURCE +#elif defined(__FreeBSD__) +#define __BSD_VISIBLE 1 +#else +#define _POSIX_C_SOURCE 200112L +#endif + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipcp.h" +#include "shim-data.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef __linux__ +#include +#include +#include +#include +#endif + +#ifdef __FreeBSD__ +#include +#include +#include +#endif + +#ifdef __APPLE__ +#include +#include +#endif + +#include +#include + +#if defined(HAVE_NETMAP) +#define NETMAP_WITH_LIBS +#include +#elif defined(HAVE_BPF) +#define BPF_DEV_MAX 256 +#define BPF_BLEN sysconf(_SC_PAGESIZE) +#include +#endif + +#define MAC_SIZE 6 +#define ETH_TYPE_LENGTH_SIZE 2 +#define ETH_HEADER_SIZE (2 * MAC_SIZE + ETH_TYPE_LENGTH_SIZE) + +#if defined(BUILD_ETH_DIX) +#define THIS_TYPE IPCP_ETH_DIX +#define MGMT_EID 0 +#define DIX_HEADER_SIZE 2 +#define MAX_EIDS (1 << (8 * DIX_HEADER_SIZE)) +#define ETH_MAX_SDU_SIZE (1500 - DIX_HEADER_SIZE) +#define ETH_FRAME_SIZE (ETH_HEADER_SIZE + DIX_HEADER_SIZE \ + + ETH_MAX_SDU_SIZE) +#elif defined(BUILD_ETH_LLC) +#define THIS_TYPE IPCP_ETH_LLC +#define MGMT_SAP 0x01 +#define LLC_HEADER_SIZE 3 +#define MAX_SAPS 64 +#define ETH_MAX_SDU_SIZE (1500 - LLC_HEADER_SIZE) +#define ETH_FRAME_SIZE (ETH_HEADER_SIZE + LLC_HEADER_SIZE \ + + ETH_MAX_SDU_SIZE) +#endif +#define ALLOC_TIMEO 10 /* ms */ +#define NAME_QUERY_TIMEO 2000 /* ms */ +#define MGMT_TIMEO 100 /* ms */ + +#define FLOW_REQ 0 +#define FLOW_REPLY 1 +#define NAME_QUERY_REQ 2 +#define NAME_QUERY_REPLY 3 + +struct mgmt_msg { + uint8_t code; +#if defined(BUILD_ETH_DIX) + uint16_t seid; + uint16_t deid; +#elif defined(BUILD_ETH_LLC) + uint8_t ssap; + uint8_t dsap; +#endif + uint8_t qoscube; + int8_t response; +} __attribute__((packed)); + +struct eth_frame { + uint8_t dst_hwaddr[MAC_SIZE]; + uint8_t src_hwaddr[MAC_SIZE]; +#if defined(BUILD_ETH_DIX) + uint8_t ethertype[ETH_TYPE_LENGTH_SIZE]; + uint8_t eid[DIX_HEADER_SIZE]; +#elif defined(BUILD_ETH_LLC) + uint8_t length[ETH_TYPE_LENGTH_SIZE]; + uint8_t dsap; + uint8_t ssap; + uint8_t cf; +#endif + uint8_t payload; +} __attribute__((packed)); + +struct ef { +#if defined(BUILD_ETH_DIX) + int32_t r_eid; +#elif defined(BUILD_ETH_LLC) + int8_t sap; + int8_t r_sap; +#endif + uint8_t r_addr[MAC_SIZE]; +}; + +struct mgmt_frame { + struct list_head next; + uint8_t r_addr[MAC_SIZE]; + uint8_t buf[ETH_FRAME_SIZE]; +}; + +struct { + struct shim_data * shim_data; + +#if defined(HAVE_NETMAP) + struct nm_desc * nmd; + uint8_t hw_addr[MAC_SIZE]; + struct pollfd poll_in; + struct pollfd poll_out; +#elif defined(HAVE_BPF) + int bpf; + uint8_t hw_addr[MAC_SIZE]; +#elif defined(HAVE_RAW_SOCKETS) + int s_fd; + struct sockaddr_ll device; +#endif /* HAVE_NETMAP */ +#if defined (BUILD_ETH_DIX) + uint16_t ethertype; +#elif defined(BUILD_ETH_LLC) + struct bmp * saps; + int * ef_to_fd; +#endif + struct ef * fd_to_ef; + fset_t * np1_flows; + fqueue_t * fq; + pthread_rwlock_t flows_lock; + + pthread_t sdu_writer; + pthread_t sdu_reader; + +#ifdef __linux__ + pthread_t if_monitor; +#endif + + /* Handle mgmt frames in a different thread */ + pthread_t mgmt_handler; + pthread_mutex_t mgmt_lock; + pthread_cond_t mgmt_cond; + struct list_head 
mgmt_frames; +} eth_data; + +static int eth_data_init(void) +{ + int i; + int ret = -ENOMEM; + pthread_condattr_t cattr; + + eth_data.fd_to_ef = + malloc(sizeof(*eth_data.fd_to_ef) * SYS_MAX_FLOWS); + if (eth_data.fd_to_ef == NULL) + goto fail_fd_to_ef; + +#ifdef BUILD_ETH_LLC + eth_data.ef_to_fd = + malloc(sizeof(*eth_data.ef_to_fd) * MAX_SAPS); + if (eth_data.ef_to_fd == NULL) + goto fail_ef_to_fd; + + for (i = 0; i < MAX_SAPS; ++i) + eth_data.ef_to_fd[i] = -1; + + eth_data.saps = bmp_create(MAX_SAPS, 2); + if (eth_data.saps == NULL) + goto fail_saps; +#endif + eth_data.np1_flows = fset_create(); + if (eth_data.np1_flows == NULL) + goto fail_np1_flows; + + eth_data.fq = fqueue_create(); + if (eth_data.fq == NULL) + goto fail_fq; + + for (i = 0; i < SYS_MAX_FLOWS; ++i) { +#if defined(BUILD_ETH_DIX) + eth_data.fd_to_ef[i].r_eid = -1; +#elif defined(BUILD_ETH_LLC) + eth_data.fd_to_ef[i].sap = -1; + eth_data.fd_to_ef[i].r_sap = -1; +#endif + memset(&eth_data.fd_to_ef[i].r_addr, 0, MAC_SIZE); + } + + eth_data.shim_data = shim_data_create(); + if (eth_data.shim_data == NULL) + goto fail_shim_data; + + ret = -1; + + if (pthread_rwlock_init(&eth_data.flows_lock, NULL)) + goto fail_flows_lock; + + if (pthread_mutex_init(&eth_data.mgmt_lock, NULL)) + goto fail_mgmt_lock; + + if (pthread_condattr_init(&cattr)) + goto fail_condattr; + +#ifndef __APPLE__ + pthread_condattr_setclock(&cattr, PTHREAD_COND_CLOCK); +#endif + + if (pthread_cond_init(&eth_data.mgmt_cond, &cattr)) + goto fail_mgmt_cond; + + pthread_condattr_destroy(&cattr); + + list_head_init(&eth_data.mgmt_frames); + + return 0; + + fail_mgmt_cond: + pthread_condattr_destroy(&cattr); + fail_condattr: + pthread_mutex_destroy(&eth_data.mgmt_lock); + fail_mgmt_lock: + pthread_rwlock_destroy(&eth_data.flows_lock); + fail_flows_lock: + shim_data_destroy(eth_data.shim_data); + fail_shim_data: + fqueue_destroy(eth_data.fq); + fail_fq: + fset_destroy(eth_data.np1_flows); + fail_np1_flows: +#ifdef BUILD_ETH_LLC + bmp_destroy(eth_data.saps); + fail_saps: + free(eth_data.ef_to_fd); + fail_ef_to_fd: +#endif + free(eth_data.fd_to_ef); + fail_fd_to_ef: + return ret; +} + +static void eth_data_fini(void) +{ +#if defined(HAVE_NETMAP) + nm_close(eth_data.nmd); +#elif defined(HAVE_BPF) + close(eth_data.bpf); +#elif defined(HAVE_RAW_SOCKETS) + close(eth_data.s_fd); +#endif + pthread_cond_destroy(&eth_data.mgmt_cond); + pthread_mutex_destroy(&eth_data.mgmt_lock); + pthread_rwlock_destroy(&eth_data.flows_lock); + shim_data_destroy(eth_data.shim_data); + fqueue_destroy(eth_data.fq); + fset_destroy(eth_data.np1_flows); +#ifdef BUILD_ETH_LLC + bmp_destroy(eth_data.saps); + free(eth_data.ef_to_fd); +#endif + free(eth_data.fd_to_ef); +} + +#ifdef BUILD_ETH_LLC +static uint8_t reverse_bits(uint8_t b) +{ + b = (b & 0xF0) >> 4 | (b & 0x0F) << 4; + b = (b & 0xCC) >> 2 | (b & 0x33) << 2; + b = (b & 0xAA) >> 1 | (b & 0x55) << 1; + + return b; +} +#endif + +static int eth_ipcp_send_frame(const uint8_t * dst_addr, +#if defined(BUILD_ETH_DIX) + uint16_t deid, +#elif defined(BUILD_ETH_LLC) + uint8_t dsap, + uint8_t ssap, +#endif + const uint8_t * payload, + size_t len) +{ + uint32_t frame_len = 0; +#ifdef BUILD_ETH_LLC + uint8_t cf = 0x03; + uint16_t length; +#endif + uint8_t frame[ETH_FRAME_SIZE]; /* Ethernet + DIX/LLC header + max SDU */ + struct eth_frame * e_frame; + + if (payload == NULL) { + log_err("Payload was NULL."); + return -1; + } + + if (len > ETH_MAX_SDU_SIZE) + return -1; + + e_frame = (struct eth_frame *) frame; + + memcpy(e_frame->dst_hwaddr, dst_addr, MAC_SIZE); + memcpy(e_frame->src_hwaddr, +#if defined(HAVE_NETMAP) ||
defined(HAVE_BPF) + eth_data.hw_addr, +#elif defined(HAVE_RAW_SOCKETS) + eth_data.device.sll_addr, +#endif /* HAVE_NETMAP */ + MAC_SIZE); +#if defined(BUILD_ETH_DIX) + memcpy(&e_frame->ethertype, ð_data.ethertype, ETH_TYPE_LENGTH_SIZE); + deid = htons(deid); + memcpy(&e_frame->eid, &deid, DIX_HEADER_SIZE); + frame_len = ETH_HEADER_SIZE + DIX_HEADER_SIZE + len; +#elif defined(BUILD_ETH_LLC) + length = htons(LLC_HEADER_SIZE + len); + memcpy(&e_frame->length, &length, sizeof(length)); + e_frame->dsap = dsap; + e_frame->ssap = ssap; + e_frame->cf = cf; + frame_len = ETH_HEADER_SIZE + LLC_HEADER_SIZE + len; +#endif + memcpy(&e_frame->payload, payload, len); + +#if defined(HAVE_NETMAP) + if (poll(ð_data.poll_out, 1, -1) < 0) + return -1; + + if (nm_inject(eth_data.nmd, frame, frame_len) != (int) frame_len) { + log_dbg("Failed to send message."); + return -1; + } +#elif defined(HAVE_BPF) + if (write(eth_data.bpf, frame, frame_len) < 0) { + log_dbg("Failed to send message."); + return -1; + } + +#elif defined(HAVE_RAW_SOCKETS) + if (sendto(eth_data.s_fd, + frame, + frame_len, + 0, + (struct sockaddr *) ð_data.device, + sizeof(eth_data.device)) <= 0) { + log_dbg("Failed to send message."); + return -1; + } +#endif /* HAVE_NETMAP */ + return 0; +} + +static int eth_ipcp_alloc(const uint8_t * dst_addr, +#if defined(BUILD_ETH_DIX) + uint16_t eid, +#elif defined(BUILD_ETH_LLC) + uint8_t ssap, +#endif + const uint8_t * hash, + qoscube_t cube) +{ + uint8_t * buf; + struct mgmt_msg * msg; + size_t len; + int ret; + + len = sizeof(*msg) + ipcp_dir_hash_len(); + + buf = malloc(len); + if (buf == NULL) + return -1; + + msg = (struct mgmt_msg *) buf; + msg->code = FLOW_REQ; +#if defined(BUILD_ETH_DIX) + msg->seid = htons(eid); +#elif defined(BUILD_ETH_LLC) + msg->ssap = ssap; +#endif + msg->qoscube = cube; + + memcpy(msg + 1, hash, ipcp_dir_hash_len()); + + ret = eth_ipcp_send_frame(dst_addr, +#if defined(BUILD_ETH_DIX) + MGMT_EID, +#elif defined(BUILD_ETH_LLC) + reverse_bits(MGMT_SAP), + reverse_bits(MGMT_SAP), +#endif + buf, len); + free(buf); + + return ret; +} + +static int eth_ipcp_alloc_resp(uint8_t * dst_addr, +#if defined(BUILD_ETH_DIX) + uint16_t seid, + uint16_t deid, +#elif defined(BUILD_ETH_LLC) + uint8_t ssap, + uint8_t dsap, +#endif + int response) +{ + struct mgmt_msg msg; + + msg.code = FLOW_REPLY; +#if defined(BUILD_ETH_DIX) + msg.seid = htons(seid); + msg.deid = htons(deid); +#elif defined(BUILD_ETH_LLC) + msg.ssap = ssap; + msg.dsap = dsap; +#endif + msg.response = response; + + return eth_ipcp_send_frame(dst_addr, +#if defined(BUILD_ETH_DIX) + MGMT_EID, +#elif defined(BUILD_ETH_LLC) + reverse_bits(MGMT_SAP), + reverse_bits(MGMT_SAP), +#endif + (uint8_t *) &msg, sizeof(msg)); +} + +static int eth_ipcp_req(uint8_t * r_addr, +#if defined(BUILD_ETH_DIX) + uint16_t r_eid, +#elif defined(BUILD_ETH_LLC) + uint8_t r_sap, +#endif + const uint8_t * dst, + qoscube_t cube) +{ + struct timespec ts = {0, ALLOC_TIMEO * MILLION}; + struct timespec abstime; + int fd; + + clock_gettime(PTHREAD_COND_CLOCK, &abstime); + + pthread_mutex_lock(&ipcpi.alloc_lock); + + while (ipcpi.alloc_id != -1 && ipcp_get_state() == IPCP_OPERATIONAL) { + ts_add(&abstime, &ts, &abstime); + pthread_cond_timedwait(&ipcpi.alloc_cond, + &ipcpi.alloc_lock, + &abstime); + } + + if (ipcp_get_state() != IPCP_OPERATIONAL) { + log_dbg("Won't allocate over non-operational IPCP."); + pthread_mutex_unlock(&ipcpi.alloc_lock); + return -1; + } + + /* reply to IRM, called under lock to prevent race */ + fd = ipcp_flow_req_arr(getpid(), dst, 
ipcp_dir_hash_len(), cube); + if (fd < 0) { + pthread_mutex_unlock(&ipcpi.alloc_lock); + log_err("Could not get new flow from IRMd."); + return -1; + } + + pthread_rwlock_wrlock(ð_data.flows_lock); +#if defined(BUILD_ETH_DIX) + eth_data.fd_to_ef[fd].r_eid = r_eid; +#elif defined(BUILD_ETH_LLC) + eth_data.fd_to_ef[fd].r_sap = r_sap; +#endif + memcpy(eth_data.fd_to_ef[fd].r_addr, r_addr, MAC_SIZE); + + pthread_rwlock_unlock(ð_data.flows_lock); + + ipcpi.alloc_id = fd; + pthread_cond_broadcast(&ipcpi.alloc_cond); + + pthread_mutex_unlock(&ipcpi.alloc_lock); + +#if defined(BUILD_ETH_DIX) + log_dbg("New flow request, fd %d, remote endpoint %d.", fd, r_eid); +#elif defined(BUILD_ETH_LLC) + log_dbg("New flow request, fd %d, remote SAP %d.", fd, r_sap); +#endif + + return 0; +} + +static int eth_ipcp_alloc_reply(uint8_t * r_addr, +#if defined(BUILD_ETH_DIX) + uint16_t seid, + uint16_t deid, +#elif defined(BUILD_ETH_LLC) + uint8_t ssap, + int dsap, +#endif + int response) +{ + int ret = 0; + int fd = -1; + + pthread_rwlock_wrlock(ð_data.flows_lock); + +#if defined(BUILD_ETH_DIX) + fd = deid; +#elif defined(BUILD_ETH_LLC) + fd = eth_data.ef_to_fd[dsap]; +#endif + if (fd < 0) { + pthread_rwlock_unlock(& eth_data.flows_lock); + log_err("No flow found with that SAP."); + return -1; /* -EFLOWNOTFOUND */ + } + + if (response) { +#ifdef BUILD_ETH_LLC + bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); +#endif + } else { +#if defined(BUILD_ETH_DIX) + eth_data.fd_to_ef[fd].r_eid = seid; +#elif defined(BUILD_ETH_LLC) + eth_data.fd_to_ef[fd].r_sap = ssap; +#endif + memcpy(eth_data.fd_to_ef[fd].r_addr, r_addr, MAC_SIZE); + } + + pthread_rwlock_unlock(ð_data.flows_lock); + +#if defined(BUILD_ETH_DIX) + log_dbg("Flow reply, fd %d, src eid %d, dst eid %d.", fd, seid, deid); +#elif defined(BUILD_ETH_LLC) + log_dbg("Flow reply, fd %d, SSAP %d, DSAP %d.", fd, ssap, dsap); +#endif + if ((ret = ipcp_flow_alloc_reply(fd, response)) < 0) + return -1; + + return ret; + +} + +static int eth_ipcp_name_query_req(const uint8_t * hash, + uint8_t * r_addr) +{ + uint8_t * buf; + struct mgmt_msg * msg; + size_t len; + + if (shim_data_reg_has(eth_data.shim_data, hash)) { + len = sizeof(*msg) + ipcp_dir_hash_len(); + + buf = malloc(len); + if (buf == NULL) + return -1; + + msg = (struct mgmt_msg *) buf; + msg->code = NAME_QUERY_REPLY; + + memcpy(msg + 1, hash, ipcp_dir_hash_len()); + + if (eth_ipcp_send_frame(r_addr, +#if defined(BUILD_ETH_DIX) + MGMT_EID, +#elif defined(BUILD_ETH_LLC) + reverse_bits(MGMT_SAP), + reverse_bits(MGMT_SAP), +#endif + buf, len)) { + log_err("Failed to send management frame."); + free(buf); + return -1; + } + + free(buf); + } + + return 0; +} + +static int eth_ipcp_name_query_reply(const uint8_t * hash, + uint8_t * r_addr) +{ + uint64_t address = 0; + + memcpy(&address, r_addr, MAC_SIZE); + + shim_data_dir_add_entry(eth_data.shim_data, hash, address); + + shim_data_dir_query_respond(eth_data.shim_data, hash); + + return 0; +} + +static int eth_ipcp_mgmt_frame(const uint8_t * buf, + uint8_t * r_addr) +{ + struct mgmt_msg * msg; + + msg = (struct mgmt_msg *) buf; + + switch (msg->code) { + case FLOW_REQ: + if (shim_data_reg_has(eth_data.shim_data, + buf + sizeof(*msg))) { + eth_ipcp_req(r_addr, +#if defined(BUILD_ETH_DIX) + ntohs(msg->seid), +#elif defined(BUILD_ETH_LLC) + msg->ssap, +#endif + buf + sizeof(*msg), + msg->qoscube); + } + break; + case FLOW_REPLY: + eth_ipcp_alloc_reply(r_addr, +#if defined(BUILD_ETH_DIX) + ntohs(msg->seid), + ntohs(msg->deid), +#elif defined(BUILD_ETH_LLC) + msg->ssap, 
+ msg->dsap, +#endif + msg->response); + break; + case NAME_QUERY_REQ: + eth_ipcp_name_query_req(buf + sizeof(*msg), r_addr); + break; + case NAME_QUERY_REPLY: + eth_ipcp_name_query_reply(buf + sizeof(*msg), r_addr); + break; + default: + log_err("Unknown message received %d.", msg->code); + return -1; + } + + return 0; +} + +static void * eth_ipcp_mgmt_handler(void * o) +{ + int ret; + struct timespec timeout = {(MGMT_TIMEO / 1000), + (MGMT_TIMEO % 1000) * MILLION}; + struct timespec abstime; + struct mgmt_frame * frame; + + (void) o; + + pthread_cleanup_push((void (*)(void *)) pthread_mutex_unlock, + (void *) ð_data.mgmt_lock); + + while (true) { + ret = 0; + + clock_gettime(PTHREAD_COND_CLOCK, &abstime); + ts_add(&abstime, &timeout, &abstime); + + pthread_mutex_lock(ð_data.mgmt_lock); + + while (list_is_empty(ð_data.mgmt_frames) && + ret != -ETIMEDOUT) + ret = -pthread_cond_timedwait(ð_data.mgmt_cond, + ð_data.mgmt_lock, + &abstime); + + if (ret == -ETIMEDOUT) { + pthread_mutex_unlock(ð_data.mgmt_lock); + continue; + } + + frame = list_first_entry((ð_data.mgmt_frames), + struct mgmt_frame, next); + if (frame == NULL) { + pthread_mutex_unlock(ð_data.mgmt_lock); + continue; + } + + list_del(&frame->next); + pthread_mutex_unlock(ð_data.mgmt_lock); + + eth_ipcp_mgmt_frame(frame->buf, frame->r_addr); + free(frame); + } + + pthread_cleanup_pop(false); + + return (void *) 0; +} + +static void * eth_ipcp_sdu_reader(void * o) +{ + uint8_t br_addr[MAC_SIZE]; +#if defined(BUILD_ETH_DIX) + uint16_t deid; +#elif defined(BUILD_ETH_LLC) + uint8_t dsap; + uint8_t ssap; +#endif + uint16_t length; + int fd; +#if defined(HAVE_NETMAP) + uint8_t * buf; + struct nm_pkthdr hdr; +#elif defined(HAVE_BPF) + uint8_t buf[BPF_BLEN]; +#elif defined(HAVE_RAW_SOCKETS) + uint8_t buf[ETH_FRAME_SIZE]; +#endif + int frame_len = 0; + struct eth_frame * e_frame; + struct mgmt_frame * frame; + + (void) o; + + memset(br_addr, 0xff, MAC_SIZE * sizeof(uint8_t)); + + while (true) { +#if defined(HAVE_NETMAP) + if (poll(ð_data.poll_in, 1, -1) < 0) + continue; + if (eth_data.poll_in.revents == 0) /* TIMED OUT */ + continue; + + buf = nm_nextpkt(eth_data.nmd, &hdr); + if (buf == NULL) { + log_err("Bad read from netmap device."); + continue; + } +#elif defined(HAVE_BPF) + frame_len = read(eth_data.bpf, buf, BPF_BLEN); +#elif defined(HAVE_RAW_SOCKETS) + frame_len = recv(eth_data.s_fd, buf, + ETH_MAX_SDU_SIZE, 0); +#endif + if (frame_len <= 0) + continue; + +#if defined(HAVE_BPF) && !defined(HAVE_NETMAP) + e_frame = (struct eth_frame *) + (buf + ((struct bpf_hdr *) buf)->bh_hdrlen); +#else + e_frame = (struct eth_frame *) buf; +#endif + assert(e_frame->dst_hwaddr); + +#if !defined(HAVE_BPF) + #if defined(HAVE_NETMAP) + if (memcmp(eth_data.hw_addr, + #elif defined(HAVE_RAW_SOCKETS) + if (memcmp(eth_data.device.sll_addr, + #endif /* HAVE_NETMAP */ + e_frame->dst_hwaddr, + MAC_SIZE) && + memcmp(br_addr, e_frame->dst_hwaddr, MAC_SIZE)) { + } +#endif + +#if defined(BUILD_ETH_DIX) + length = frame_len - ETH_HEADER_SIZE - DIX_HEADER_SIZE; + memcpy(&deid, &e_frame->eid, sizeof(deid)); + deid = ntohs(deid); + + if (deid == MGMT_EID) { +#elif defined (BUILD_ETH_LLC) + memcpy(&length, &e_frame->length, sizeof(length)); + length = ntohs(length); + + if (length > 0x05FF) /* DIX */ + continue; + + length -= LLC_HEADER_SIZE; + + dsap = reverse_bits(e_frame->dsap); + ssap = reverse_bits(e_frame->ssap); + + if (ssap == MGMT_SAP && dsap == MGMT_SAP) { +#endif + pthread_mutex_lock(ð_data.mgmt_lock); + + frame = malloc(sizeof(*frame)); + if (frame == 
NULL) { + pthread_mutex_unlock(ð_data.mgmt_lock); + continue; + } + + memcpy(frame->buf, &e_frame->payload, length); + memcpy(frame->r_addr, e_frame->src_hwaddr, MAC_SIZE); + list_add(&frame->next, ð_data.mgmt_frames); + pthread_cond_signal(ð_data.mgmt_cond); + pthread_mutex_unlock(ð_data.mgmt_lock); + } else { + pthread_rwlock_rdlock(ð_data.flows_lock); + +#if defined(BUILD_ETH_DIX) + fd = deid; +#elif defined(BUILD_ETH_LLC) + fd = eth_data.ef_to_fd[dsap]; +#endif + if (fd < 0) { + pthread_rwlock_unlock(ð_data.flows_lock); + continue; + } + +#ifdef BUILD_ETH_LLC + if (eth_data.fd_to_ef[fd].r_sap != ssap + || memcmp(eth_data.fd_to_ef[fd].r_addr, + e_frame->src_hwaddr, MAC_SIZE)) { + pthread_rwlock_unlock(ð_data.flows_lock); + continue; + } +#endif + pthread_rwlock_unlock(ð_data.flows_lock); + + flow_write(fd, &e_frame->payload, length); + } + } + + return (void *) 0; +} + +static void * eth_ipcp_sdu_writer(void * o) +{ + int fd; + struct shm_du_buff * sdb; +#if defined(BUILD_ETH_DIX) + uint16_t deid; +#elif defined(BUILD_ETH_LLC) + uint8_t dsap; + uint8_t ssap; +#endif + uint8_t r_addr[MAC_SIZE]; + + (void) o; + + while (true) { + fevent(eth_data.np1_flows, eth_data.fq, NULL); + + pthread_rwlock_rdlock(ð_data.flows_lock); + while ((fd = fqueue_next(eth_data.fq)) >= 0) { + if (ipcp_flow_read(fd, &sdb)) { + log_err("Bad read from fd %d.", fd); + continue; + } + +#if defined(BUILD_ETH_DIX) + deid = eth_data.fd_to_ef[fd].r_eid; +#elif defined(BUILD_ETH_LLC) + dsap = reverse_bits(eth_data.fd_to_ef[fd].r_sap); + ssap = reverse_bits(eth_data.fd_to_ef[fd].sap); +#endif + memcpy(r_addr, + eth_data.fd_to_ef[fd].r_addr, + MAC_SIZE); + + eth_ipcp_send_frame(r_addr, +#if defined(BUILD_ETH_DIX) + deid, +#elif defined(BUILD_ETH_LLC) + dsap, ssap, +#endif + shm_du_buff_head(sdb), + shm_du_buff_tail(sdb) + - shm_du_buff_head(sdb)); + ipcp_sdb_release(sdb); + } + pthread_rwlock_unlock(ð_data.flows_lock); + } + + return (void *) 1; +} + +#ifdef __linux__ +static int open_netlink_socket(void) +{ + struct sockaddr_nl sa; + int fd; + + memset(&sa, 0, sizeof(sa)); + sa.nl_family = AF_NETLINK; + sa.nl_pid = getpid(); + sa.nl_groups = RTMGRP_LINK; + + fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (fd < 0) + return -1; + + if (bind(fd, (struct sockaddr *) &sa, sizeof(sa))) { + close(fd); + return -1; + } + + return fd; +} + +static void change_flows_state(bool up) +{ + int i; + uint32_t flags; + + pthread_rwlock_rdlock(ð_data.flows_lock); + +#if defined(BUILD_ETH_DIX) + for (i = 0; i < SYS_MAX_FLOWS; ++i) + if (eth_data.fd_to_ef[i].r_eid != -1) { + fccntl(i, FLOWGFLAGS, &flags); + if (up) + fccntl(i, FLOWSFLAGS, flags & ~FLOWFDOWN); + else + fccntl(i, FLOWSFLAGS, flags | FLOWFDOWN); + } +#elif defined(BUILD_ETH_LLC) + for (i = 0; i < MAX_SAPS; i++) + if (eth_data.ef_to_fd[i] != -1) { + fccntl(eth_data.ef_to_fd[i], FLOWGFLAGS, &flags); + if (up) + fccntl(eth_data.ef_to_fd[i], + FLOWSFLAGS, flags & ~FLOWFDOWN); + else + fccntl(eth_data.ef_to_fd[i], + FLOWSFLAGS, flags | FLOWFDOWN); + } +#endif + + pthread_rwlock_unlock(ð_data.flows_lock); +} + +static void close_ptr(void * o) +{ + close(*((int *) o)); +} + + +static void * eth_ipcp_if_monitor(void * o) +{ + int fd; + int status; + char buf[4096]; + struct iovec iov = {buf, sizeof(buf)}; + struct sockaddr_nl snl; + struct msghdr msg = {(void *) &snl, sizeof(snl), + &iov, 1, NULL, 0, 0}; + struct nlmsghdr * h; + struct ifinfomsg * ifi; + + (void ) o; + + fd = open_netlink_socket(); + if (fd < 0) { + log_err("Failed to open socket."); + return (void *) -1; + } 
+ + pthread_cleanup_push(close_ptr, &fd); + + while (true) { + status = recvmsg(fd, &msg, 0); + if (status < 0) + continue; + + for (h = (struct nlmsghdr *) buf; + NLMSG_OK(h, (unsigned int) status); + h = NLMSG_NEXT(h, status)) { + + /* Finish reading */ + if (h->nlmsg_type == NLMSG_DONE) + break; + + /* Message is some kind of error */ + if (h->nlmsg_type == NLMSG_ERROR) + continue; + + /* Only interested in link up/down */ + if (h->nlmsg_type != RTM_NEWLINK) + continue; + + ifi = NLMSG_DATA(h); + + /* Not our interface */ + if (ifi->ifi_index != eth_data.device.sll_ifindex) + continue; + + if (ifi->ifi_flags & IFF_UP) { + change_flows_state(true); + log_dbg("Interface up."); + } else { + change_flows_state(false); + log_dbg("Interface down."); + } + } + } + + pthread_cleanup_pop(true); + + return (void *) 0; +} +#endif + +#if defined (HAVE_BPF) && !defined(HAVE_NETMAP) +static int open_bpf_device(void) +{ + char dev[32]; + size_t i = 0; + + for (i = 0; i < BPF_DEV_MAX; i++) { + int fd = -1; + + snprintf(dev, sizeof(dev), "/dev/bpf%zu", i); + + fd = open(dev, O_RDWR); + if (fd > -1) + return fd; + } + + return -1; +} +#endif + +static int eth_ipcp_bootstrap(const struct ipcp_config * conf) +{ + int idx; + struct ifreq ifr; +#if defined(HAVE_NETMAP) + char ifn[IFNAMSIZ]; +#elif defined(HAVE_BPF) + int enable = 1; + int disable = 0; + int blen; +#endif /* HAVE_NETMAP */ + +#if defined(__FreeBSD__) || defined(__APPLE__) + struct ifaddrs * ifaddr; + struct ifaddrs * ifa; +#elif defined(__linux__) + int skfd; +#endif + assert(conf); + assert(conf->type == THIS_TYPE); + + if (conf->dev == NULL) { + log_err("Device name is NULL."); + return -1; + } + + memset(&ifr, 0, sizeof(ifr)); + memcpy(ifr.ifr_name, conf->dev, strlen(conf->dev)); + +#ifdef BUILD_ETH_DIX + eth_data.ethertype = htons(conf->ethertype); +#endif + +#if defined(__FreeBSD__) || defined(__APPLE__) + if (getifaddrs(&ifaddr) < 0) { + log_err("Could not get interfaces."); + return -1; + } + + for (ifa = ifaddr, idx = 0; ifa != NULL; ifa = ifa->ifa_next, ++idx) { + if (strcmp(ifa->ifa_name, conf->dev)) + continue; + log_dbg("Interface %s found.", conf->dev); + + #if defined(HAVE_NETMAP) || defined(HAVE_BPF) + memcpy(eth_data.hw_addr, + LLADDR((struct sockaddr_dl *) (ifa)->ifa_addr), + MAC_SIZE); + #elif defined (HAVE_RAW_SOCKETS) + memcpy(&ifr.ifr_addr, ifa->ifa_addr, sizeof(*ifa->ifa_addr)); + #endif + break; + } + + freeifaddrs(ifaddr); + + if (ifa == NULL) { + log_err("Interface not found."); + return -1; + } + +#elif defined(__linux__) + skfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (skfd < 0) { + log_err("Failed to open socket."); + return -1; + } + + if (ioctl(skfd, SIOCGIFHWADDR, &ifr)) { + log_err("Failed to ioctl."); + close(skfd); + return -1; + } + + close(skfd); + + idx = if_nametoindex(conf->dev); + if (idx == 0) { + log_err("Failed to retrieve interface index."); + close(skfd); + return -1; + } +#endif /* __FreeBSD__ */ + +#if defined(HAVE_NETMAP) + strcpy(ifn, "netmap:"); + strcat(ifn, conf->dev); + + eth_data.nmd = nm_open(ifn, NULL, 0, NULL); + if (eth_data.nmd == NULL) { + log_err("Failed to open netmap device."); + return -1; + } + + memset(ð_data.poll_in, 0, sizeof(eth_data.poll_in)); + memset(ð_data.poll_out, 0, sizeof(eth_data.poll_out)); + + eth_data.poll_in.fd = NETMAP_FD(eth_data.nmd); + eth_data.poll_in.events = POLLIN; + eth_data.poll_out.fd = NETMAP_FD(eth_data.nmd); + eth_data.poll_out.events = POLLOUT; + + log_info("Using netmap device."); +#elif defined(HAVE_BPF) /* !HAVE_NETMAP */ + eth_data.bpf = 
open_bpf_device(); + if (eth_data.bpf < 0) { + log_err("Failed to open bpf device."); + return -1; + } + + ioctl(eth_data.bpf, BIOCGBLEN, &blen); + if (BPF_BLEN < blen) { + log_err("BPF buffer too small (is: %ld must be: %d).", + BPF_BLEN, blen); + goto fail_device; + } + + if (ioctl(eth_data.bpf, BIOCSETIF, &ifr) < 0) { + log_err("Failed to set interface."); + goto fail_device; + } + + if (ioctl(eth_data.bpf, BIOCSHDRCMPLT, &enable) < 0) { + log_err("Failed to set BIOCSHDRCMPLT."); + goto fail_device; + } + + if (ioctl(eth_data.bpf, BIOCSSEESENT, &disable) < 0) { + log_err("Failed to set BIOCSSEESENT."); + goto fail_device; + } + + if (ioctl(eth_data.bpf, BIOCIMMEDIATE, &enable) < 0) { + log_err("Failed to set BIOCIMMEDIATE."); + goto fail_device; + } + + log_info("Using Berkeley Packet Filter."); +#elif defined(HAVE_RAW_SOCKETS) + memset(&(eth_data.device), 0, sizeof(eth_data.device)); + eth_data.device.sll_ifindex = idx; + eth_data.device.sll_family = AF_PACKET; + memcpy(eth_data.device.sll_addr, ifr.ifr_hwaddr.sa_data, MAC_SIZE); + eth_data.device.sll_halen = MAC_SIZE; + eth_data.device.sll_protocol = htons(ETH_P_ALL); + + #if defined (BUILD_ETH_DIX) + eth_data.s_fd = socket(AF_PACKET, SOCK_RAW, eth_data.ethertype); + #elif defined (BUILD_ETH_LLC) + eth_data.s_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_802_2)); + #endif + + log_info("Using raw socket device."); + + if (eth_data.s_fd < 0) { + log_err("Failed to create socket."); + return -1; + } + + if (bind(eth_data.s_fd, (struct sockaddr *) ð_data.device, + sizeof(eth_data.device))) { + log_err("Failed to bind socket to interface"); + goto fail_device; + } + +#endif /* HAVE_NETMAP */ + ipcp_set_state(IPCP_OPERATIONAL); + +#ifdef __linux__ + if (pthread_create(ð_data.if_monitor, + NULL, + eth_ipcp_if_monitor, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_device; + } +#endif + + if (pthread_create(ð_data.mgmt_handler, + NULL, + eth_ipcp_mgmt_handler, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_mgmt_handler; + } + + if (pthread_create(ð_data.sdu_reader, + NULL, + eth_ipcp_sdu_reader, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_sdu_reader; + } + + if (pthread_create(ð_data.sdu_writer, + NULL, + eth_ipcp_sdu_writer, + NULL)) { + ipcp_set_state(IPCP_INIT); + goto fail_sdu_writer; + } + +#if defined(BUILD_ETH_DIX) + log_dbg("Bootstrapped IPCP over DIX Ethernet with pid %d " + "and Ethertype 0x%X.", getpid(), conf->ethertype); +#elif defined(BUILD_ETH_LLC) + log_dbg("Bootstrapped IPCP over Ethernet with LLC with pid %d.", + getpid()); +#endif + + return 0; + + fail_sdu_writer: + pthread_cancel(eth_data.sdu_reader); + pthread_join(eth_data.sdu_reader, NULL); + fail_sdu_reader: + pthread_cancel(eth_data.mgmt_handler); + pthread_join(eth_data.mgmt_handler, NULL); + fail_mgmt_handler: +#if defined(__linux__) + pthread_cancel(eth_data.if_monitor); + pthread_join(eth_data.if_monitor, NULL); +#endif +#if !defined(HAVE_NETMAP) + fail_device: +#endif +#if defined(HAVE_NETMAP) + nm_close(eth_data.nmd); +#elif defined(HAVE_BPF) + close(eth_data.bpf); +#elif defined(HAVE_RAW_SOCKETS) + close(eth_data.s_fd); +#endif + return -1; +} + +static int eth_ipcp_reg(const uint8_t * hash) +{ + if (shim_data_reg_add_entry(eth_data.shim_data, hash)) { + log_err("Failed to add " HASH_FMT " to local registry.", + HASH_VAL(hash)); + return -1; + } + + log_dbg("Registered " HASH_FMT ".", HASH_VAL(hash)); + + return 0; +} + +static int eth_ipcp_unreg(const uint8_t * hash) +{ + shim_data_reg_del_entry(eth_data.shim_data, hash); + + return 0; +} + 
+static int eth_ipcp_query(const uint8_t * hash) +{ + uint8_t r_addr[MAC_SIZE]; + struct timespec timeout = {(NAME_QUERY_TIMEO / 1000), + (NAME_QUERY_TIMEO % 1000) * MILLION}; + struct dir_query * query; + int ret; + uint8_t * buf; + struct mgmt_msg * msg; + size_t len; + + if (shim_data_dir_has(eth_data.shim_data, hash)) + return 0; + + len = sizeof(*msg) + ipcp_dir_hash_len(); + + buf = malloc(len); + if (buf == NULL) + return -1; + + msg = (struct mgmt_msg *) buf; + msg->code = NAME_QUERY_REQ; + + memcpy(buf + sizeof(*msg), hash, ipcp_dir_hash_len()); + + memset(r_addr, 0xff, MAC_SIZE); + + query = shim_data_dir_query_create(eth_data.shim_data, hash); + if (query == NULL) { + free(buf); + return -1; + } + + if (eth_ipcp_send_frame(r_addr, +#if defined(BUILD_ETH_DIX) + MGMT_EID, +#elif defined(BUILD_ETH_LLC) + reverse_bits(MGMT_SAP), + reverse_bits(MGMT_SAP), +#endif + buf, len)) { + log_err("Failed to send management frame."); + shim_data_dir_query_destroy(eth_data.shim_data, query); + free(buf); + return -1; + } + + free(buf); + + ret = shim_data_dir_query_wait(query, &timeout); + + shim_data_dir_query_destroy(eth_data.shim_data, query); + + return ret; +} + +static int eth_ipcp_flow_alloc(int fd, + const uint8_t * hash, + qoscube_t cube) +{ +#ifdef BUILD_ETH_LLC + uint8_t ssap = 0; +#endif + uint8_t r_addr[MAC_SIZE]; + uint64_t addr = 0; + + log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(hash)); + + assert(hash); + + if (cube != QOS_CUBE_BE) { + log_dbg("Unsupported QoS requested."); + return -1; + } + + if (!shim_data_dir_has(eth_data.shim_data, hash)) { + log_err("Destination unreachable."); + return -1; + } + addr = shim_data_dir_get_addr(eth_data.shim_data, hash); + + pthread_rwlock_wrlock(ð_data.flows_lock); +#ifdef BUILD_ETH_LLC + ssap = bmp_allocate(eth_data.saps); + if (!bmp_is_id_valid(eth_data.saps, ssap)) { + pthread_rwlock_unlock(ð_data.flows_lock); + return -1; + } + + eth_data.fd_to_ef[fd].sap = ssap; + eth_data.ef_to_fd[ssap] = fd; +#endif + pthread_rwlock_unlock(ð_data.flows_lock); + + memcpy(r_addr, &addr, MAC_SIZE); + + if (eth_ipcp_alloc(r_addr, +#if defined(BUILD_ETH_DIX) + fd, +#elif defined(BUILD_ETH_LLC) + ssap, +#endif + hash, cube) < 0) { +#ifdef BUILD_ETH_LLC + pthread_rwlock_wrlock(ð_data.flows_lock); + bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); + eth_data.fd_to_ef[fd].sap = -1; + eth_data.ef_to_fd[ssap] = -1; + pthread_rwlock_unlock(ð_data.flows_lock); +#endif + return -1; + } + + fset_add(eth_data.np1_flows, fd); +#if defined(BUILD_ETH_DIX) + log_dbg("Pending flow with fd %d.", fd); +#elif defined(BUILD_ETH_LLC) + log_dbg("Pending flow with fd %d on SAP %d.", fd, ssap); +#endif + return 0; +} + +static int eth_ipcp_flow_alloc_resp(int fd, + int response) +{ + struct timespec ts = {0, ALLOC_TIMEO * MILLION}; + struct timespec abstime; +#if defined(BUILD_ETH_DIX) + uint16_t r_eid; +#elif defined(BUILD_ETH_LLC) + uint8_t ssap; + uint8_t r_sap; +#endif + uint8_t r_addr[MAC_SIZE]; + + clock_gettime(PTHREAD_COND_CLOCK, &abstime); + + pthread_mutex_lock(&ipcpi.alloc_lock); + + while (ipcpi.alloc_id != fd && ipcp_get_state() == IPCP_OPERATIONAL) { + ts_add(&abstime, &ts, &abstime); + pthread_cond_timedwait(&ipcpi.alloc_cond, + &ipcpi.alloc_lock, + &abstime); + } + + if (ipcp_get_state() != IPCP_OPERATIONAL) { + pthread_mutex_unlock(&ipcpi.alloc_lock); + return -1; + } + + ipcpi.alloc_id = -1; + pthread_cond_broadcast(&ipcpi.alloc_cond); + + pthread_mutex_unlock(&ipcpi.alloc_lock); + + pthread_rwlock_wrlock(ð_data.flows_lock); +#if 
defined(BUILD_ETH_DIX) + r_eid = eth_data.fd_to_ef[fd].r_eid; +#elif defined(BUILD_ETH_LLC) + ssap = bmp_allocate(eth_data.saps); + if (!bmp_is_id_valid(eth_data.saps, ssap)) { + pthread_rwlock_unlock(&eth_data.flows_lock); + return -1; + } + + eth_data.fd_to_ef[fd].sap = ssap; + r_sap = eth_data.fd_to_ef[fd].r_sap; + eth_data.ef_to_fd[ssap] = fd; +#endif + memcpy(r_addr, eth_data.fd_to_ef[fd].r_addr, MAC_SIZE); + + pthread_rwlock_unlock(&eth_data.flows_lock); + + if (eth_ipcp_alloc_resp(r_addr, +#if defined(BUILD_ETH_DIX) + fd, r_eid, +#elif defined(BUILD_ETH_LLC) + ssap, r_sap, +#endif + response) < 0) { +#ifdef BUILD_ETH_LLC + pthread_rwlock_wrlock(&eth_data.flows_lock); + bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap); + pthread_rwlock_unlock(&eth_data.flows_lock); +#endif + return -1; + } + + fset_add(eth_data.np1_flows, fd); +#if defined(BUILD_ETH_DIX) + log_dbg("Accepted flow, fd %d.", fd); +#elif defined(BUILD_ETH_LLC) + log_dbg("Accepted flow, fd %d, SAP %d.", fd, (uint8_t)ssap); +#endif + return 0; +} + +static int eth_ipcp_flow_dealloc(int fd) +{ +#ifdef BUILD_ETH_LLC + uint8_t sap; +#endif + ipcp_flow_fini(fd); + + pthread_rwlock_wrlock(&eth_data.flows_lock); + + fset_del(eth_data.np1_flows, fd); + +#if defined(BUILD_ETH_DIX) + eth_data.fd_to_ef[fd].r_eid = -1; +#elif defined BUILD_ETH_LLC + sap = eth_data.fd_to_ef[fd].sap; + bmp_release(eth_data.saps, sap); + eth_data.fd_to_ef[fd].sap = -1; + eth_data.fd_to_ef[fd].r_sap = -1; + eth_data.ef_to_fd[sap] = -1; +#endif + memset(&eth_data.fd_to_ef[fd].r_addr, 0, MAC_SIZE); + + pthread_rwlock_unlock(&eth_data.flows_lock); + + flow_dealloc(fd); + + log_dbg("Flow with fd %d deallocated.", fd); + + return 0; +} + +static struct ipcp_ops eth_ops = { + .ipcp_bootstrap = eth_ipcp_bootstrap, + .ipcp_enroll = NULL, + .ipcp_connect = NULL, + .ipcp_disconnect = NULL, + .ipcp_reg = eth_ipcp_reg, + .ipcp_unreg = eth_ipcp_unreg, + .ipcp_query = eth_ipcp_query, + .ipcp_flow_alloc = eth_ipcp_flow_alloc, + .ipcp_flow_alloc_resp = eth_ipcp_flow_alloc_resp, + .ipcp_flow_dealloc = eth_ipcp_flow_dealloc +}; + +int main(int argc, + char * argv[]) +{ + if (ipcp_init(argc, argv, &eth_ops) < 0) + goto fail_init; + + if (eth_data_init() < 0) { +#if defined(BUILD_ETH_DIX) + log_err("Failed to init eth-dix data."); +#elif defined(BUILD_ETH_LLC) + log_err("Failed to init eth-llc data."); +#endif + goto fail_data_init; + } + + if (ipcp_boot() < 0) { + log_err("Failed to boot IPCP."); + goto fail_boot; + } + + if (ipcp_create_r(getpid(), 0)) { + log_err("Failed to notify IRMd we are initialized."); + ipcp_set_state(IPCP_NULL); + goto fail_create_r; + } + + ipcp_shutdown(); + + if (ipcp_get_state() == IPCP_SHUTDOWN) { + pthread_cancel(eth_data.sdu_writer); + pthread_cancel(eth_data.sdu_reader); + pthread_cancel(eth_data.mgmt_handler); +#ifdef __linux__ + pthread_cancel(eth_data.if_monitor); +#endif + pthread_join(eth_data.sdu_writer, NULL); + pthread_join(eth_data.sdu_reader, NULL); + pthread_join(eth_data.mgmt_handler, NULL); +#ifdef __linux__ + pthread_join(eth_data.if_monitor, NULL); +#endif + } + + eth_data_fini(); + + ipcp_fini(); + + exit(EXIT_SUCCESS); + + fail_create_r: + ipcp_shutdown(); + fail_boot: + eth_data_fini(); + fail_data_init: + ipcp_fini(); + fail_init: + ipcp_create_r(getpid(), -1); + exit(EXIT_FAILURE); +} diff --git a/src/ipcpd/eth/llc.c b/src/ipcpd/eth/llc.c new file mode 100644 index 00000000..fa332189 --- /dev/null +++ b/src/ipcpd/eth/llc.c @@ -0,0 +1,26 @@ +/* + * Ouroboros - Copyright (C) 2016 - 2018 + * + * IPC processes over Ethernet - LLC + * + *
Dimitri Staessens + * Sander Vrijders + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., http://www.fsf.org/about/contact/. + */ + +#define BUILD_ETH_LLC +#define OUROBOROS_PREFIX "ipcpd/eth-llc" + +#include "eth.c" -- cgit v1.2.3
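
Editor's note on the payload figures in the commit message: the 16-bit destination endpoint ID is carried inside the Ethernet payload, so it costs 2 bytes of whatever MTU the link offers, which is where 1498 (standard 1500-byte MTU) and 8998 (assuming a 9000-byte jumbo MTU) come from. The following is a minimal standalone C sketch, not part of the patch; the struct and constants are local to this example and merely mirror the DIX variant of struct eth_frame defined in eth.c above.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define MAC_SIZE      6
#define DIX_EID_SIZE  2     /* 16-bit destination endpoint ID */
#define STD_MTU       1500  /* standard Ethernet payload */
#define JUMBO_MTU     9000  /* assumed jumbo frame payload */

/* Mirrors the DIX layout of struct eth_frame (names local to this
 * sketch): dst/src/ethertype form the 14-byte Ethernet header, which
 * does not count against the MTU; the 2-byte eid is the DIX
 * adaptation header carried inside the payload. */
struct dix_frame_hdr {
        uint8_t dst_hwaddr[MAC_SIZE];
        uint8_t src_hwaddr[MAC_SIZE];
        uint8_t ethertype[2];          /* configurable at bootstrap */
        uint8_t eid[DIX_EID_SIZE];     /* destination endpoint ID */
} __attribute__((packed));

int main(void)
{
        struct dix_frame_hdr hdr;
        uint16_t             eid = htons(42); /* arbitrary example EID */

        memset(&hdr, 0, sizeof(hdr));
        memcpy(hdr.eid, &eid, DIX_EID_SIZE);   /* network byte order, as in the patch */

        assert(sizeof(hdr) == 2 * MAC_SIZE + 2 + DIX_EID_SIZE); /* 16 bytes on the wire */

        /* Only the endpoint ID eats into the payload MTU. */
        printf("max SDU, standard MTU: %d\n", STD_MTU - DIX_EID_SIZE);   /* 1498 */
        printf("max SDU, jumbo MTU:    %d\n", JUMBO_MTU - DIX_EID_SIZE); /* 8998 */

        return 0;
}

Built with GCC or Clang (the packed attribute is compiler-specific), this prints 1498 and 8998, matching the figures given in the commit message.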