Diffstat (limited to 'src/ipcpd')
-rw-r--r--   src/ipcpd/CMakeLists.txt      |  2
-rw-r--r--   src/ipcpd/config.h.in         |  1
-rw-r--r--   src/ipcpd/normal/dht.c        |  6
-rw-r--r--   src/ipcpd/normal/sdu_sched.c  | 21
4 files changed, 17 insertions, 13 deletions
diff --git a/src/ipcpd/CMakeLists.txt b/src/ipcpd/CMakeLists.txt
index a71c4e98..a1559b4d 100644
--- a/src/ipcpd/CMakeLists.txt
+++ b/src/ipcpd/CMakeLists.txt
@@ -10,6 +10,8 @@ set(IPCP_MIN_THREADS 4 CACHE STRING
   "Minimum number of worker threads in the IPCP")
 set(IPCP_ADD_THREADS 4 CACHE STRING
   "Number of extra threads to start when an IPCP faces thread starvation")
+set(IPCP_SCHED_THR_MUL 2 CACHE STRING
+  "Number of scheduler threads per QoS cube")
 
 if ((IPCP_QOS_CUBE_BE_PRIO LESS 0) OR (IPCP_QOS_CUBE_BE_PRIO GREATER 99))
   message(FATAL_ERROR "Invalid priority for best effort QoS cube")
diff --git a/src/ipcpd/config.h.in b/src/ipcpd/config.h.in
index 04be22ba..f09c3c2c 100644
--- a/src/ipcpd/config.h.in
+++ b/src/ipcpd/config.h.in
@@ -39,6 +39,7 @@
 #define QOS_PRIO_BE    @IPCP_QOS_CUBE_BE_PRIO@
 #define QOS_PRIO_VIDEO @IPCP_QOS_CUBE_VIDEO_PRIO@
 #define QOS_PRIO_VOICE @IPCP_QOS_CUBE_VOICE_PRIO@
+#define IPCP_SCHED_THR_MUL @IPCP_SCHED_THR_MUL@
 #define PFT_SIZE       @PFT_SIZE@
 
 /* shim-udp */
diff --git a/src/ipcpd/normal/dht.c b/src/ipcpd/normal/dht.c
index 4d0cdb02..b06c4480 100644
--- a/src/ipcpd/normal/dht.c
+++ b/src/ipcpd/normal/dht.c
@@ -2320,12 +2320,12 @@ static void * dht_handle_sdu(void * o)
         struct cmd *     cmd;
         int              ret = 0;
 
-        clock_gettime(PTHREAD_COND_CLOCK, &dl);
+        clock_gettime(CLOCK_REALTIME_COARSE, &dl);
 
         ts_add(&dl, &to, &dl);
 
         pthread_mutex_lock(&dht->mtx);
 
-        while(list_is_empty(&dht->cmds) && ret != -ETIMEDOUT)
+        while (list_is_empty(&dht->cmds) && ret != -ETIMEDOUT)
                 ret = -pthread_cond_timedwait(&dht->cond, &dht->mtx, &dl);
 
@@ -2400,7 +2400,7 @@ static void * dht_handle_sdu(void * o)
                                  "DHT enrolment refused.");
                         break;
-                }
+                }
 
                 if (msg->t_refresh != KAD_T_REFR) {
                         log_warn("Refresh time mismatch. "
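The dht.c hunks above only swap the clock constant and tidy whitespace, but the surrounding pattern is worth spelling out: the handler computes an absolute deadline, then loops on pthread_cond_timedwait() until a command arrives or the deadline expires. The following is a minimal, self-contained sketch of that pattern, not the project's code: it assumes CLOCK_REALTIME for portability, uses a local ts_add() as a stand-in for the project's time helper, and reduces the command queue to a plain flag.

/*
 * Sketch: wait for work with an absolute deadline.
 * Assumptions: CLOCK_REALTIME (the commit uses CLOCK_REALTIME_COARSE),
 * ts_add() as a local helper, a bool flag instead of the command list.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t mtx  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool            have_work;

/* res = a + b, normalising the nanosecond field. */
static void ts_add(const struct timespec * a,
                   const struct timespec * b,
                   struct timespec *       res)
{
        res->tv_sec  = a->tv_sec + b->tv_sec;
        res->tv_nsec = a->tv_nsec + b->tv_nsec;
        if (res->tv_nsec >= 1000000000L) {
                ++res->tv_sec;
                res->tv_nsec -= 1000000000L;
        }
}

/* Producer side: publish work and wake a waiter. */
static void post_work(void)
{
        pthread_mutex_lock(&mtx);
        have_work = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&mtx);
}

/* Returns 0 if work became available, -ETIMEDOUT if the deadline passed. */
static int wait_for_work(const struct timespec * to)
{
        struct timespec dl;
        int             ret = 0;

        /* pthread_cond_timedwait() takes an absolute deadline. */
        clock_gettime(CLOCK_REALTIME, &dl);
        ts_add(&dl, to, &dl);

        pthread_mutex_lock(&mtx);

        while (!have_work && ret != -ETIMEDOUT)
                ret = -pthread_cond_timedwait(&cond, &mtx, &dl);

        pthread_mutex_unlock(&mtx);

        return ret;
}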
diff --git a/src/ipcpd/normal/sdu_sched.c b/src/ipcpd/normal/sdu_sched.c
index e5f2c701..18855319 100644
--- a/src/ipcpd/normal/sdu_sched.c
+++ b/src/ipcpd/normal/sdu_sched.c
@@ -43,7 +43,7 @@ static int qos_prio [] = {
 struct sdu_sched {
         fset_t *      set[QOS_CUBE_MAX];
         next_sdu_fn_t callback;
-        pthread_t     readers[QOS_CUBE_MAX];
+        pthread_t     readers[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
 };
 
 struct sched_info {
@@ -96,7 +96,7 @@ static void * sdu_reader(void * o)
 struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
 {
         struct sdu_sched *  sdu_sched;
-        struct sched_info * infos[QOS_CUBE_MAX];
+        struct sched_info * infos[QOS_CUBE_MAX * IPCP_SCHED_THR_MUL];
         int                 i;
         int                 j;
@@ -117,7 +117,7 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
                 }
         }
 
-        for (i = 0; i < QOS_CUBE_MAX; ++i) {
+        for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
                 infos[i] = malloc(sizeof(*infos[i]));
                 if (infos[i] == NULL) {
                         for (j = 0; j < i; ++j)
@@ -125,10 +125,10 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
                         goto fail_infos;
                 }
                 infos[i]->sch = sdu_sched;
-                infos[i]->qc  = i;
+                infos[i]->qc  = i % QOS_CUBE_MAX;
         }
 
-        for (i = 0; i < QOS_CUBE_MAX; ++i) {
+        for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
                 if (pthread_create(&sdu_sched->readers[i], NULL,
                                    sdu_reader, infos[i])) {
                         for (j = 0; j < i; ++j)
@@ -139,7 +139,7 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
                 }
         }
 
-        for (i = 0; i < QOS_CUBE_MAX; ++i) {
+        for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
                 struct sched_param  par;
                 int                 pol = SCHED_RR;
                 int                 min;
@@ -150,7 +150,8 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
 
                 min = (max - min) / 2;
 
-                par.sched_priority = min + (qos_prio[i] * (max - min) / 99);
+                par.sched_priority = min +
+                        (qos_prio[i % QOS_CUBE_MAX] * (max - min) / 99);
                 if (pthread_setschedparam(sdu_sched->readers[i], pol, &par))
                         goto fail_sched;
         }
@@ -158,12 +159,12 @@ struct sdu_sched * sdu_sched_create(next_sdu_fn_t callback)
         return sdu_sched;
 
  fail_sched:
-        for (j = 0; j < QOS_CUBE_MAX; ++j)
+        for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
                 pthread_cancel(sdu_sched->readers[j]);
-        for (j = 0; j < QOS_CUBE_MAX; ++j)
+        for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
                 pthread_join(sdu_sched->readers[j], NULL);
  fail_pthr:
-        for (j = 0; j < QOS_CUBE_MAX; ++j)
+        for (j = 0; j < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++j)
                 free(infos[j]);
  fail_infos:
         for (j = 0; j < QOS_CUBE_MAX; ++j)
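Taken together, the sdu_sched.c changes grow the pool of SDU reader threads from one per QoS cube to IPCP_SCHED_THR_MUL per cube, and map each reader back to its cube with i % QOS_CUBE_MAX so the SCHED_RR priority is still derived from that cube's qos_prio entry. The snippet below sketches that mapping and the priority arithmetic in isolation; QOS_CUBE_MAX, the qos_prio[] values and the multiplier are illustrative assumptions here, not the project's build-time configuration.

/*
 * Sketch: fan out QOS_CUBE_MAX * IPCP_SCHED_THR_MUL readers over the
 * QoS cubes and derive each reader's SCHED_RR priority the same way
 * sdu_sched_create() does.  All constants below are assumed values.
 */
#include <sched.h>
#include <stdio.h>

#define QOS_CUBE_MAX       3   /* assumed number of QoS cubes   */
#define IPCP_SCHED_THR_MUL 2   /* assumed readers per QoS cube  */

static const int qos_prio[QOS_CUBE_MAX] = { 0, 90, 99 }; /* BE, video, voice (assumed) */

int main(void)
{
        int min = sched_get_priority_min(SCHED_RR);
        int max = sched_get_priority_max(SCHED_RR);
        int i;

        /* Re-base the lower bound so readers sit in the upper half of the range. */
        min = (max - min) / 2;

        for (i = 0; i < QOS_CUBE_MAX * IPCP_SCHED_THR_MUL; ++i) {
                int qc   = i % QOS_CUBE_MAX; /* cube served by reader i */
                int prio = min + qos_prio[qc] * (max - min) / 99;

                printf("reader %d -> qos cube %d, rr priority %d\n",
                       i, qc, prio);
        }

        return 0;
}

Since IPCP_SCHED_THR_MUL is declared as a CMake cache variable, the default of 2 should be overridable at configure time, e.g. cmake -DIPCP_SCHED_THR_MUL=4 (value shown for illustration only).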