author    dimitri staessens <dimitri.staessens@ugent.be>  2017-09-25 17:10:11 +0200
committer dimitri staessens <dimitri.staessens@ugent.be>  2017-09-25 17:10:11 +0200
commit    b349b4693b71f2ab82efe1e8b6aa83db2f8e36ed (patch)
tree      f80744a3b81f79eb24a83167b96209d4b640d58a /src
parent    baa9da56af12d14d63d504101c7efeb20da71a78 (diff)
ipcpd: Add multiplier to scale scheduler threads
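
The reader pool is now sized QOS_CUBE_MAX * IPCP_SCHED_THR_MUL, and
reader i serves QoS cube i % QOS_CUBE_MAX, so every cube gets exactly
IPCP_SCHED_THR_MUL dedicated scheduler threads. A minimal sketch of
that mapping (the constant values here are illustrative, not the
project's build configuration):

    #include <stdio.h>

    /* Illustrative stand-ins for the build-time constants. */
    #define QOS_CUBE_MAX       3 /* e.g. best effort, video, voice */
    #define IPCP_SCHED_THR_MUL 2 /* scheduler threads per QoS cube */

    int main(void)
    {
            int i;

            /* Reader i always serves cube i % QOS_CUBE_MAX, so each
             * cube ends up with exactly IPCP_SCHED_THR_MUL readers. */
            for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i)
                    printf("reader %d -> qos cube %d\n",
                           i, i % QOS_CUBE_MAX);

            return 0;
    }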
Diffstat (limited to 'src')
 src/ipcpd/CMakeLists.txt     |  2 ++
 src/ipcpd/config.h.in        |  1 +
 src/ipcpd/normal/sdu_sched.c | 21 +++++++++++----------
 3 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/src/ipcpd/CMakeLists.txt b/src/ipcpd/CMakeLists.txt
index a71c4e98..a1559b4d 100644
--- a/src/ipcpd/CMakeLists.txt
+++ b/src/ipcpd/CMakeLists.txt
@@ -10,6 +10,8 @@ set(IPCP_MIN_THREADS 4 CACHE STRING
"Minimum number of worker threads in the IPCP")
set(IPCP_ADD_THREADS 4 CACHE STRING
"Number of extra threads to start when an IPCP faces thread starvation")
+set(IPCP_SCHED_THR_MUL 2 CACHE STRING
+ "Number of scheduler threads per QoS cube")
if ((IPCP_QOS_CUBE_BE_PRIO LESS 0) OR (IPCP_QOS_CUBE_BE_PRIO GREATER 99))
message(FATAL_ERROR "Invalid priority for best effort QoS cube")
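
Being a CMake cache variable, the multiplier can be overridden at
configure time in the usual way, e.g. cmake -DIPCP_SCHED_THR_MUL=4;
the default of 2 starts two reader threads per QoS cube.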
diff --git a/src/ipcpd/config.h.in b/src/ipcpd/config.h.in
index 04be22ba..f09c3c2c 100644
--- a/src/ipcpd/config.h.in
+++ b/src/ipcpd/config.h.in
@@ -39,6 +39,7 @@
#define QOS_PRIO_BE @IPCP_QOS_CUBE_BE_PRIO@
#define QOS_PRIO_VIDEO @IPCP_QOS_CUBE_VIDEO_PRIO@
#define QOS_PRIO_VOICE @IPCP_QOS_CUBE_VOICE_PRIO@
+#define IPCP_SCHED_THR_MUL @IPCP_SCHED_THR_MUL@
#define PFT_SIZE @PFT_SIZE@
/* shim-udp */
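
CMake substitutes the @IPCP_SCHED_THR_MUL@ placeholder when it
generates config.h from this template, turning the cache variable into
a preprocessor constant; that is what lets the thread arrays in
sdu_sched.c be sized at compile time. Since a value of 0 would silently
produce empty arrays, a guard along these lines could reject a bad
configuration early (hypothetical, not part of this patch):

    /* Hypothetical guard, not part of this patch: the macro sizes
     * fixed-length arrays, so a multiplier below 1 must not build. */
    #if IPCP_SCHED_THR_MUL < 1
    #error "IPCP_SCHED_THR_MUL must be at least 1"
    #endif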
diff --git a/src/ipcpd/normal/sdu_sched.c b/src/ipcpd/normal/sdu_sched.c
index e5f2c701..18855319 100644
--- a/src/ipcpd/normal/sdu_sched.c
+++ b/src/ipcpd/normal/sdu_sched.c
@@ -43,7 +43,7 @@ static int qos_prio [] = {
struct sdu_sched {
fset_t * set[QOS_CUBE_MAX];
next_sdu_fn_t callback;
- pthread_t readers[QOS_CUBE_MAX];
+ pthread_t readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
};
struct sched_info {
@@ -96,7 +96,7 @@ static void * sdu_reader(void * o)
struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
{
struct sdu_sched * sdu_sched;
- struct sched_info * infos[QOS_CUBE_MAX];
+ struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
int i;
int j;
@@ -117,7 +117,7 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
}
}
- for (i = 0; i < QOS_CUBE_MAX; ++i) {
+ for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
infos[i] = malloc(sizeof(*infos[i]));
if (infos[i] == NULL) {
for (j = 0; j < i; ++j)
@@ -125,10 +125,10 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
goto fail_infos;
}
infos[i]->sch = sdu_sched;
- infos[i]->qc = i;
+ infos[i]->qc = i % QOS_CUBE_MAX;
}
- for (i = 0; i < QOS_CUBE_MAX; ++i) {
+ for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
if (pthread_create(&sdu_sched->readers[i], NULL,
sdu_reader, infos[i])) {
for (j = 0; j < i; ++j)
@@ -139,7 +139,7 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
}
}
- for (i = 0; i < QOS_CUBE_MAX; ++i) {
+ for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
struct sched_param par;
int pol = SCHED_RR;
int min;
@@ -150,7 +150,8 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
min = (max - min) / 2;
- par.sched_priority = min + (qos_prio[i] * (max - min) / 99);
+ par.sched_priority = min +
+ (qos_prio[i % QOS_CUBE_MAX] * (max - min) / 99);
if (pthread_setschedparam(sdu_sched->readers[i], pol, &par))
goto fail_sched;
}
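
The priority computation first moves min to the midpoint of the
real-time range and then maps the cube's qos_prio (0..99) linearly
into the upper half; indexing the table with i % QOS_CUBE_MAX gives
all readers of the same cube the same priority. A worked example,
assuming the usual Linux SCHED_RR range of 1..99 (an assumption; the
real values come from sched_get_priority_min()/max()):

    #include <stdio.h>

    int main(void)
    {
            int min = 1;       /* assumed sched_get_priority_min(SCHED_RR) */
            int max = 99;      /* assumed sched_get_priority_max(SCHED_RR) */
            int qos_prio = 50; /* hypothetical cube priority (0..99) */

            min = (max - min) / 2; /* midpoint: 49 */

            /* 49 + 50 * (99 - 49) / 99 = 74 */
            printf("sched_priority = %d\n",
                   min + qos_prio * (max - min) / 99);

            return 0;
    }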
@@ -158,12 +159,12 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
return sdu_sched;
fail_sched:
- for (j = 0; j < QOS_CUBE_MAX; ++j)
+ for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
pthread_cancel(sdu_sched->readers[j]);
- for (j = 0; j < QOS_CUBE_MAX; ++j)
+ for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
pthread_join(sdu_sched->readers[j], NULL);
fail_pthr:
- for (j = 0; j < QOS_CUBE_MAX; ++j)
+ for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
free(infos[j]);
fail_infos:
for (j = 0; j < QOS_CUBE_MAX; ++j)
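
On failure the teardown now walks the full
QOS_CUBE_MAX * IPCP_SCHED_THR_MUL range: one pass cancels every reader
so they all wind down concurrently, a second pass joins them, and a
third frees the per-thread info blocks. Only the flow-set cleanup
under fail_infos stays at QOS_CUBE_MAX, since the fsets are per cube
rather than per thread. A self-contained sketch of that
cancel-then-join idiom (the thread count and the thread body are
illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define N 6 /* illustrative: QOS_CUBE_MAX * IPCP_SCHED_THR_MUL */

    static void * reader(void * o)
    {
            (void) o;
            for (;;)
                    pause(); /* cancellation point; the real readers
                              * block waiting for SDUs instead */
            return NULL;
    }

    int main(void)
    {
            pthread_t readers[N];
            int j;

            for (j = 0; j < N; ++j)
                    pthread_create(&readers[j], NULL, reader, NULL);

            /* Cancel in one pass so all threads stop concurrently,
             * then join in a second pass to reap each of them. */
            for (j = 0; j < N; ++j)
                    pthread_cancel(readers[j]);
            for (j = 0; j < N; ++j)
                    pthread_join(readers[j], NULL);

            printf("all %d readers reaped\n", N);

            return 0;
    }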