summary | refs | log | tree | commit | diff
path: root/src/ipcpd
diff options
context:
space:
mode:
author: Dimitri Staessens <dimitri.staessens@ugent.be> 2018-10-04 18:06:32 +0200
committer: Sander Vrijders <sander.vrijders@ugent.be> 2018-10-05 09:07:47 +0200
commit: b802b25ddfe6f1b6ecabe3ba70e3dac2e99e7a50 (patch)
tree: 94e787f0f0ca1f0254b3728b0156b2e3283d8518 /src/ipcpd
parent: 937adca2a718b160b6d42bb8a3f28d96321fdb49 (diff)
downloadouroboros-b802b25ddfe6f1b6ecabe3ba70e3dac2e99e7a50.tar.gz
ouroboros-b802b25ddfe6f1b6ecabe3ba70e3dac2e99e7a50.zip
lib: Pass qosspec at flow allocation
The flow allocator now passes the full qos specification to the endpoint, instead of just a cube. This is a more flexible architecture, as it makes QoS cubes internal to the layers. Adds endianness transforms for the flow allocator protocol in the normal IPCP.

Signed-off-by: Dimitri Staessens <dimitri.staessens@ugent.be>
Signed-off-by: Sander Vrijders <sander.vrijders@ugent.be>
Diffstat (limited to 'src/ipcpd')
-rw-r--r--  src/ipcpd/eth/eth.c             51
-rw-r--r--  src/ipcpd/ipcp.c                13
-rw-r--r--  src/ipcpd/ipcp.h                 2
-rw-r--r--  src/ipcpd/local/main.c           4
-rw-r--r--  src/ipcpd/normal/dt.c            2
-rw-r--r--  src/ipcpd/normal/fa.c           57
-rw-r--r--  src/ipcpd/normal/fa.h            2
-rw-r--r--  src/ipcpd/raptor/CMakeLists.txt  1
-rw-r--r--  src/ipcpd/raptor/main.c         77
-rw-r--r--  src/ipcpd/udp/main.c            48
10 files changed, 174 insertions, 83 deletions
diff --git a/src/ipcpd/eth/eth.c b/src/ipcpd/eth/eth.c
index 44ef3756..6fd7b805 100644
--- a/src/ipcpd/eth/eth.c
+++ b/src/ipcpd/eth/eth.c
@@ -146,15 +146,27 @@
#define NAME_QUERY_REPLY 3
struct mgmt_msg {
- uint8_t code;
#if defined(BUILD_ETH_DIX)
uint16_t seid;
uint16_t deid;
#elif defined(BUILD_ETH_LLC)
uint8_t ssap;
uint8_t dsap;
+ /* QoS here for alignment */
+ uint8_t code;
+ uint8_t availability;
+#endif
+ /* QoS parameters from spec, aligned */
+ uint32_t loss;
+ uint64_t bandwidth;
+ uint32_t ber;
+ uint32_t max_gap;
+ uint32_t delay;
+ uint8_t in_order;
+#if defined (BUILD_ETH_DIX)
+ uint8_t code;
+ uint8_t availability;
#endif
- uint8_t qoscube;
int8_t response;
} __attribute__((packed));
@@ -433,7 +445,7 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
uint8_t ssap,
#endif
const uint8_t * hash,
- qoscube_t cube)
+ qosspec_t qs)
{
uint8_t * buf;
struct mgmt_msg * msg;
@@ -453,7 +465,14 @@ static int eth_ipcp_alloc(const uint8_t * dst_addr,
#elif defined(BUILD_ETH_LLC)
msg->ssap = ssap;
#endif
- msg->qoscube = cube;
+
+ msg->delay = hton32(qs.delay);
+ msg->bandwidth = hton64(qs.bandwidth);
+ msg->availability = qs.availability;
+ msg->loss = hton32(qs.loss);
+ msg->ber = hton32(qs.ber);
+ msg->in_order = qs.in_order;
+ msg->max_gap = hton32(qs.max_gap);
memcpy(msg + 1, hash, ipcp_dir_hash_len());
@@ -523,7 +542,7 @@ static int eth_ipcp_req(uint8_t * r_addr,
uint8_t r_sap,
#endif
const uint8_t * dst,
- qoscube_t cube)
+ qosspec_t qs)
{
struct timespec ts = {0, ALLOC_TIMEO * MILLION};
struct timespec abstime;
@@ -547,7 +566,7 @@ static int eth_ipcp_req(uint8_t * r_addr,
}
/* reply to IRM, called under lock to prevent race */
- fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube);
+ fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs);
if (fd < 0) {
pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Could not get new flow from IRMd.");
@@ -687,11 +706,20 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
uint8_t * r_addr)
{
struct mgmt_msg * msg;
+ qosspec_t qs;
msg = (struct mgmt_msg *) buf;
switch (msg->code) {
case FLOW_REQ:
+ qs.delay = ntoh32(msg->delay);
+ qs.bandwidth = ntoh64(msg->bandwidth);
+ qs.availability = msg->availability;
+ qs.loss = ntoh32(msg->loss);
+ qs.ber = ntoh32(msg->ber);
+ qs.in_order = msg->in_order;
+ qs.max_gap = ntoh32(msg->max_gap);
+
if (shim_data_reg_has(eth_data.shim_data,
buf + sizeof(*msg))) {
eth_ipcp_req(r_addr,
@@ -701,7 +729,7 @@ static int eth_ipcp_mgmt_frame(const uint8_t * buf,
msg->ssap,
#endif
buf + sizeof(*msg),
- msg->qoscube);
+ qs);
}
break;
case FLOW_REPLY:
@@ -1553,7 +1581,7 @@ static int eth_ipcp_query(const uint8_t * hash)
static int eth_ipcp_flow_alloc(int fd,
const uint8_t * hash,
- qoscube_t cube)
+ qosspec_t qs)
{
#ifdef BUILD_ETH_LLC
uint8_t ssap = 0;
@@ -1565,11 +1593,6 @@ static int eth_ipcp_flow_alloc(int fd,
assert(hash);
- if (cube > QOS_CUBE_DATA) {
- log_dbg("Unsupported QoS requested.");
- return -1;
- }
-
if (!shim_data_dir_has(eth_data.shim_data, hash)) {
log_err("Destination unreachable.");
return -1;
@@ -1597,7 +1620,7 @@ static int eth_ipcp_flow_alloc(int fd,
#elif defined(BUILD_ETH_LLC)
ssap,
#endif
- hash, cube) < 0) {
+ hash, qs) < 0) {
#ifdef BUILD_ETH_LLC
pthread_rwlock_wrlock(&eth_data.flows_lock);
bmp_release(eth_data.saps, eth_data.fd_to_ef[fd].sap);
diff --git a/src/ipcpd/ipcp.c b/src/ipcpd/ipcp.c
index 5ea54533..e415bbd9 100644
--- a/src/ipcpd/ipcp.c
+++ b/src/ipcpd/ipcp.c
@@ -20,6 +20,13 @@
* Foundation, Inc., http://www.fsf.org/about/contact/.
*/
+#if defined(__linux__) || defined(__CYGWIN__)
+#define _DEFAULT_SOURCE
+#else
+#define _POSIX_C_SOURCE 200112L
+#define __XSI_VISIBLE 500
+#endif
+
#if defined(__linux__) && !defined(DISABLE_CORE_LOCK)
#define _GNU_SOURCE
#define NPROC (sysconf(_SC_NPROCESSORS_ONLN))
@@ -198,6 +205,7 @@ static void * mainloop(void * o)
layer_info_msg_t layer_info = LAYER_INFO_MSG__INIT;
int fd = -1;
struct cmd * cmd;
+ qosspec_t qs;
ret_msg.code = IPCP_MSG_CODE__IPCP_REPLY;
@@ -422,9 +430,10 @@ static void * mainloop(void * o)
break;
}
+ qs = msg_to_spec(msg->qosspec);
fd = np1_flow_alloc(msg->pid,
msg->port_id,
- msg->qoscube);
+ qs);
if (fd < 0) {
log_err("Failed allocating fd on port_id %d.",
msg->port_id);
@@ -435,7 +444,7 @@ static void * mainloop(void * o)
ret_msg.result =
ipcpi.ops->ipcp_flow_alloc(fd,
msg->hash.data,
- msg->qoscube);
+ qs);
break;
case IPCP_MSG_CODE__IPCP_FLOW_ALLOC_RESP:
ret_msg.has_result = true;
diff --git a/src/ipcpd/ipcp.h b/src/ipcpd/ipcp.h
index 5417fc74..13751b6d 100644
--- a/src/ipcpd/ipcp.h
+++ b/src/ipcpd/ipcp.h
@@ -60,7 +60,7 @@ struct ipcp_ops {
int (* ipcp_flow_alloc)(int fd,
const uint8_t * dst,
- qoscube_t qos);
+ qosspec_t qs);
int (* ipcp_flow_alloc_resp)(int fd,
int response);
diff --git a/src/ipcpd/local/main.c b/src/ipcpd/local/main.c
index c83f85fe..8eae7503 100644
--- a/src/ipcpd/local/main.c
+++ b/src/ipcpd/local/main.c
@@ -183,7 +183,7 @@ static int ipcp_local_query(const uint8_t * hash)
static int ipcp_local_flow_alloc(int fd,
const uint8_t * dst,
- qoscube_t cube)
+ qosspec_t qs)
{
struct timespec ts = {0, ALLOC_TIMEOUT * MILLION};
struct timespec abstime;
@@ -212,7 +212,7 @@ static int ipcp_local_flow_alloc(int fd,
assert(ipcpi.alloc_id == -1);
- out_fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube);
+ out_fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs);
if (out_fd < 0) {
pthread_mutex_unlock(&ipcpi.alloc_lock);
log_dbg("Flow allocation failed: %d", out_fd);
diff --git a/src/ipcpd/normal/dt.c b/src/ipcpd/normal/dt.c
index c3f8f198..a350e4be 100644
--- a/src/ipcpd/normal/dt.c
+++ b/src/ipcpd/normal/dt.c
@@ -31,8 +31,6 @@
#define DT "dt"
#define OUROBOROS_PREFIX DT
-/* FIXME: fix #defines and remove endian.h include. */
-#include <ouroboros/endian.h>
#include <ouroboros/bitmap.h>
#include <ouroboros/errno.h>
#include <ouroboros/logs.h>
diff --git a/src/ipcpd/normal/fa.c b/src/ipcpd/normal/fa.c
index 10f0a863..4c82e0e0 100644
--- a/src/ipcpd/normal/fa.c
+++ b/src/ipcpd/normal/fa.c
@@ -57,8 +57,15 @@ struct fa_msg {
uint32_t r_eid;
uint32_t s_eid;
uint8_t code;
- uint8_t qc;
int8_t response;
+ /* QoS parameters from spec, aligned */
+ uint8_t availability;
+ uint8_t in_order;
+ uint32_t delay;
+ uint64_t bandwidth;
+ uint32_t loss;
+ uint32_t ber;
+ uint32_t max_gap;
} __attribute__((packed));
struct {
@@ -100,6 +107,7 @@ static void fa_post_sdu(void * comp,
int fd;
uint8_t * buf;
struct fa_msg * msg;
+ qosspec_t qs;
(void) comp;
@@ -142,10 +150,18 @@ static void fa_post_sdu(void * comp,
assert(ipcpi.alloc_id == -1);
+ qs.delay = ntoh32(msg->delay);
+ qs.bandwidth = ntoh64(msg->bandwidth);
+ qs.availability = msg->availability;
+ qs.loss = ntoh32(msg->loss);
+ qs.ber = ntoh32(msg->ber);
+ qs.in_order = msg->in_order;
+ qs.max_gap = ntoh32(msg->max_gap);
+
fd = ipcp_flow_req_arr(getpid(),
(uint8_t *) (msg + 1),
ipcp_dir_hash_len(),
- msg->qc);
+ qs);
if (fd < 0) {
pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Failed to get fd for flow.");
@@ -155,8 +171,8 @@ static void fa_post_sdu(void * comp,
pthread_rwlock_wrlock(&fa.flows_lock);
- fa.r_eid[fd] = msg->s_eid;
- fa.r_addr[fd] = msg->s_addr;
+ fa.r_eid[fd] = ntoh32(msg->s_eid);
+ fa.r_addr[fd] = ntoh64(msg->s_addr);
pthread_rwlock_unlock(&fa.flows_lock);
@@ -169,14 +185,14 @@ static void fa_post_sdu(void * comp,
case FLOW_REPLY:
pthread_rwlock_wrlock(&fa.flows_lock);
- fa.r_eid[msg->r_eid] = msg->s_eid;
+ fa.r_eid[ntoh32(msg->r_eid)] = ntoh32(msg->s_eid);
- ipcp_flow_alloc_reply(msg->r_eid, msg->response);
+ ipcp_flow_alloc_reply(ntoh32(msg->r_eid), msg->response);
if (msg->response < 0)
- destroy_conn(msg->r_eid);
+ destroy_conn(ntoh32(msg->r_eid));
else
- sdu_sched_add(fa.sdu_sched, msg->r_eid);
+ sdu_sched_add(fa.sdu_sched, ntoh32(msg->r_eid));
pthread_rwlock_unlock(&fa.flows_lock);
@@ -227,11 +243,12 @@ void fa_stop(void)
int fa_alloc(int fd,
const uint8_t * dst,
- qoscube_t qc)
+ qosspec_t qs)
{
struct fa_msg * msg;
uint64_t addr;
struct shm_du_buff * sdb;
+ qoscube_t qc;
addr = dir_query(dst);
if (addr == 0)
@@ -240,14 +257,22 @@ int fa_alloc(int fd,
if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()))
return -1;
- msg = (struct fa_msg *) shm_du_buff_head(sdb);
- msg->code = FLOW_REQ;
- msg->qc = qc;
- msg->s_eid = fd;
- msg->s_addr = ipcpi.dt_addr;
+ msg = (struct fa_msg *) shm_du_buff_head(sdb);
+ msg->code = FLOW_REQ;
+ msg->s_eid = hton32(fd);
+ msg->s_addr = hton64(ipcpi.dt_addr);
+ msg->delay = hton32(qs.delay);
+ msg->bandwidth = hton64(qs.bandwidth);
+ msg->availability = qs.availability;
+ msg->loss = hton32(qs.loss);
+ msg->ber = hton32(qs.ber);
+ msg->in_order = qs.in_order;
+ msg->max_gap = hton32(qs.max_gap);
memcpy(msg + 1, dst, ipcp_dir_hash_len());
+ qc = qos_spec_to_cube(qs);
+
if (dt_write_sdu(addr, qc, fa.fd, sdb)) {
ipcp_sdb_release(sdb);
return -1;
@@ -302,8 +327,8 @@ int fa_alloc_resp(int fd,
msg = (struct fa_msg *) shm_du_buff_head(sdb);
msg->code = FLOW_REPLY;
- msg->r_eid = fa.r_eid[fd];
- msg->s_eid = fd;
+ msg->r_eid = hton32(fa.r_eid[fd]);
+ msg->s_eid = hton32(fd);
msg->response = response;
if (response < 0) {
diff --git a/src/ipcpd/normal/fa.h b/src/ipcpd/normal/fa.h
index 87819d6f..a98d834a 100644
--- a/src/ipcpd/normal/fa.h
+++ b/src/ipcpd/normal/fa.h
@@ -36,7 +36,7 @@ void fa_stop(void);
int fa_alloc(int fd,
const uint8_t * dst,
- qoscube_t qos);
+ qosspec_t qs);
int fa_alloc_resp(int fd,
int response);
diff --git a/src/ipcpd/raptor/CMakeLists.txt b/src/ipcpd/raptor/CMakeLists.txt
index 06e6ee29..1883d9bb 100644
--- a/src/ipcpd/raptor/CMakeLists.txt
+++ b/src/ipcpd/raptor/CMakeLists.txt
@@ -16,6 +16,7 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
find_path(RAPTOR_KERNEL_MODULE
NAMES
raptor.ko.gz
+ raptor.ko.xz
HINTS
/lib/modules/${CMAKE_SYSTEM_VERSION}/extra
)
diff --git a/src/ipcpd/raptor/main.c b/src/ipcpd/raptor/main.c
index 4f0099b3..a01889ec 100644
--- a/src/ipcpd/raptor/main.c
+++ b/src/ipcpd/raptor/main.c
@@ -90,11 +90,18 @@
#define NAME_QUERY_REPLY 3
struct mgmt_msg {
- uint8_t code;
- uint8_t ssap;
- uint8_t dsap;
- uint8_t qoscube;
- int8_t response;
+ uint8_t code;
+ uint8_t ssap;
+ uint8_t dsap;
+ int8_t response;
+ /* QoS parameters from spec, aligned */
+ uint32_t loss;
+ uint64_t bandwidth;
+ uint32_t ber;
+ uint32_t max_gap;
+ uint32_t delay;
+ uint8_t in_order;
+ uint8_t availability;
} __attribute__((packed));
struct ef {
@@ -278,7 +285,7 @@ static int raptor_send_frame(struct shm_du_buff * sdb,
static int raptor_sap_alloc(uint8_t ssap,
const uint8_t * hash,
- qoscube_t cube)
+ qosspec_t qs)
{
struct mgmt_msg * msg;
struct shm_du_buff * sdb;
@@ -288,10 +295,16 @@ static int raptor_sap_alloc(uint8_t ssap,
return -1;
}
- msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
- msg->code = FLOW_REQ;
- msg->ssap = ssap;
- msg->qoscube = cube;
+ msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
+ msg->code = FLOW_REQ;
+ msg->ssap = ssap;
+ msg->delay = hton32(qs.delay);
+ msg->bandwidth = hton64(qs.bandwidth);
+ msg->availability = qs.availability;
+ msg->loss = hton32(qs.loss);
+ msg->ber = hton32(qs.ber);
+ msg->in_order = qs.in_order;
+ msg->max_gap = hton32(qs.max_gap);
memcpy(msg + 1, hash, ipcp_dir_hash_len());
@@ -306,15 +319,15 @@ static int raptor_sap_alloc(uint8_t ssap,
return 0;
}
-static int raptor_sap_alloc_resp(uint8_t ssap,
- uint8_t dsap,
- int response)
+static int raptor_sap_alloc_resp(uint8_t ssap,
+ uint8_t dsap,
+ int response)
{
- struct mgmt_msg * msg;
+ struct mgmt_msg * msg;
struct shm_du_buff * sdb;
if (ipcp_sdb_reserve(&sdb, sizeof(*msg)) < 0) {
- log_err("failed to reserve sdb for management frame.");
+ log_err("Failed to reserve sdb for management frame.");
return -1;
}
@@ -337,7 +350,7 @@ static int raptor_sap_alloc_resp(uint8_t ssap,
static int raptor_sap_req(uint8_t r_sap,
const uint8_t * dst,
- qoscube_t cube)
+ qosspec_t qs)
{
struct timespec ts = {0, EVENT_WAIT_TIMEOUT * 1000};
struct timespec abstime;
@@ -361,7 +374,7 @@ static int raptor_sap_req(uint8_t r_sap,
}
/* reply to IRM, called under lock to prevent race */
- fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube);
+ fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs);
if (fd < 0) {
pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Could not get new flow from IRMd.");
@@ -424,12 +437,12 @@ static int raptor_name_query_req(const uint8_t * hash)
return 0;
if (ipcp_sdb_reserve(&sdb, sizeof(*msg) + ipcp_dir_hash_len()) < 0) {
- log_err("failed to reserve sdb for management frame.");
+ log_err("Failed to reserve sdb for management frame.");
return -1;
}
- msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
- msg->code = NAME_QUERY_REPLY;
+ msg = (struct mgmt_msg *) shm_du_buff_head(sdb);
+ msg->code = NAME_QUERY_REPLY;
memcpy(msg + 1, hash, ipcp_dir_hash_len());
@@ -456,8 +469,9 @@ static int raptor_name_query_reply(const uint8_t * hash)
static int raptor_mgmt_frame(const uint8_t * buf,
size_t len)
{
- struct mgmt_msg * msg = (struct mgmt_msg *) buf;
- uint8_t * hash = (uint8_t *) (msg + 1);
+ struct mgmt_msg * msg = (struct mgmt_msg *) buf;
+ uint8_t * hash = (uint8_t *) (msg + 1);
+ qosspec_t qs;
switch (msg->code) {
case FLOW_REQ:
@@ -466,8 +480,16 @@ static int raptor_mgmt_frame(const uint8_t * buf,
return -1;
}
+ qs.delay = ntoh32(msg->delay);
+ qs.bandwidth = ntoh64(msg->bandwidth);
+ qs.availability = msg->availability;
+ qs.loss = ntoh32(msg->loss);
+ qs.ber = ntoh32(msg->ber);
+ qs.in_order = msg->in_order;
+ qs.max_gap = ntoh32(msg->max_gap);
+
if (shim_data_reg_has(raptor_data.shim_data, hash))
- raptor_sap_req(msg->ssap, hash, msg->qoscube);
+ raptor_sap_req(msg->ssap, hash, qs);
break;
case FLOW_REPLY:
if (len != sizeof(*msg)) {
@@ -901,7 +923,7 @@ static int raptor_query(const uint8_t * hash)
static int raptor_flow_alloc(int fd,
const uint8_t * hash,
- qoscube_t cube)
+ qosspec_t qs)
{
uint8_t ssap = 0;
@@ -909,11 +931,6 @@ static int raptor_flow_alloc(int fd,
assert(hash);
- if (cube != QOS_CUBE_BE) {
- log_dbg("Unsupported QoS requested.");
- return -1;
- }
-
if (!shim_data_dir_has(raptor_data.shim_data, hash)) {
log_err("Destination unreachable.");
return -1;
@@ -932,7 +949,7 @@ static int raptor_flow_alloc(int fd,
pthread_rwlock_unlock(&raptor_data.flows_lock);
- if (raptor_sap_alloc(ssap, hash, cube) < 0) {
+ if (raptor_sap_alloc(ssap, hash, qs) < 0) {
pthread_rwlock_wrlock(&raptor_data.flows_lock);
bmp_release(raptor_data.saps, raptor_data.fd_to_ef[fd].sap);
raptor_data.fd_to_ef[fd].sap = -1;
diff --git a/src/ipcpd/udp/main.c b/src/ipcpd/udp/main.c
index 6a350da0..96820662 100644
--- a/src/ipcpd/udp/main.c
+++ b/src/ipcpd/udp/main.c
@@ -73,8 +73,15 @@ struct mgmt_msg {
uint16_t src_udp_port;
uint16_t dst_udp_port;
uint8_t code;
- uint8_t qoscube;
uint8_t response;
+ /* QoS parameters from spec, aligned */
+ uint8_t availability;
+ uint8_t in_order;
+ uint32_t delay;
+ uint64_t bandwidth;
+ uint32_t loss;
+ uint32_t ber;
+ uint32_t max_gap;
} __attribute__((packed));
struct uf {
@@ -219,7 +226,7 @@ static int send_shim_udp_msg(uint8_t * buf,
static int ipcp_udp_port_alloc(uint32_t dst_ip_addr,
uint16_t src_udp_port,
const uint8_t * dst,
- qoscube_t cube)
+ qosspec_t qs)
{
uint8_t * buf;
struct mgmt_msg * msg;
@@ -235,7 +242,13 @@ static int ipcp_udp_port_alloc(uint32_t dst_ip_addr,
msg = (struct mgmt_msg *) buf;
msg->code = FLOW_REQ;
msg->src_udp_port = src_udp_port;
- msg->qoscube = cube;
+ msg->delay = hton32(qs.delay);
+ msg->bandwidth = hton64(qs.bandwidth);
+ msg->availability = qs.availability;
+ msg->loss = hton32(qs.loss);
+ msg->ber = hton32(qs.ber);
+ msg->in_order = qs.in_order;
+ msg->max_gap = hton32(qs.max_gap);
memcpy(msg + 1, dst, ipcp_dir_hash_len());
@@ -272,7 +285,7 @@ static int ipcp_udp_port_alloc_resp(uint32_t dst_ip_addr,
static int ipcp_udp_port_req(struct sockaddr_in * c_saddr,
const uint8_t * dst,
- qoscube_t cube)
+ qosspec_t qs)
{
struct timespec ts = {0, FD_UPDATE_TIMEOUT * 1000};
struct timespec abstime;
@@ -331,7 +344,7 @@ static int ipcp_udp_port_req(struct sockaddr_in * c_saddr,
}
/* reply to IRM */
- fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), cube);
+ fd = ipcp_flow_req_arr(getpid(), dst, ipcp_dir_hash_len(), qs);
if (fd < 0) {
pthread_mutex_unlock(&ipcpi.alloc_lock);
log_err("Could not get new flow from IRMd.");
@@ -436,7 +449,7 @@ static void * ipcp_udp_listener(void * o)
while (true) {
struct mgmt_msg * msg = NULL;
-
+ qosspec_t qs;
memset(&buf, 0, SHIM_UDP_MSG_SIZE);
n = recvfrom(sfd, buf, SHIM_UDP_MSG_SIZE, 0,
(struct sockaddr *) &c_saddr,
@@ -455,9 +468,16 @@ static void * ipcp_udp_listener(void * o)
switch (msg->code) {
case FLOW_REQ:
c_saddr.sin_port = msg->src_udp_port;
+ qs.delay = ntoh32(msg->delay);
+ qs.bandwidth = ntoh64(msg->bandwidth);
+ qs.availability = msg->availability;
+ qs.loss = ntoh32(msg->loss);
+ qs.ber = ntoh32(msg->ber);
+ qs.in_order = msg->in_order;
+ qs.max_gap = ntoh32(msg->max_gap);
ipcp_udp_port_req(&c_saddr,
(uint8_t *) (msg + 1),
- msg->qoscube);
+ qs);
break;
case FLOW_REPLY:
ipcp_udp_port_alloc_reply(msg->src_udp_port,
@@ -555,7 +575,8 @@ static void * ipcp_udp_sdu_loop(void * o)
pthread_rwlock_unlock(&udp_data.flows_lock);
- pthread_cleanup_push((void (*)(void *)) ipcp_sdb_release,
+ pthread_cleanup_push((void (*)(void *))
+ ipcp_sdb_release,
(void *) sdb);
if (send(fd, shm_du_buff_head(sdb),
@@ -968,7 +989,7 @@ static int ipcp_udp_query(const uint8_t * hash)
static int ipcp_udp_flow_alloc(int fd,
const uint8_t * dst,
- qoscube_t cube)
+ qosspec_t qs)
{
struct sockaddr_in r_saddr; /* server address */
struct sockaddr_in f_saddr; /* flow */
@@ -978,12 +999,9 @@ static int ipcp_udp_flow_alloc(int fd,
log_dbg("Allocating flow to " HASH_FMT ".", HASH_VAL(dst));
- assert(dst);
+ (void) qs;
- if (cube > QOS_CUBE_DATA) {
- log_dbg("Unsupported QoS requested.");
- return -1;
- }
+ assert(dst);
skfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (skfd < 0)
@@ -1034,7 +1052,7 @@ static int ipcp_udp_flow_alloc(int fd,
pthread_rwlock_unlock(&udp_data.flows_lock);
- if (ipcp_udp_port_alloc(ip_addr, f_saddr.sin_port, dst, cube) < 0) {
+ if (ipcp_udp_port_alloc(ip_addr, f_saddr.sin_port, dst, qs) < 0) {
pthread_rwlock_wrlock(&udp_data.flows_lock);
udp_data.fd_to_uf[fd].udp = -1;