From 0e9d376bde25a42b15705dc4cb3f746a60e106c9 Mon Sep 17 00:00:00 2001
From: Armin Luntzer <armin.luntzer@univie.ac.at>
Date: Mon, 14 Oct 2019 13:26:56 +0200
Subject: [PATCH] EDF threads need fixed assignments...

---
 arch/sparc/include/asm/spinlock.h |   6 +-
 arch/sparc/kernel/setup.c         |   2 +-
 arch/sparc/kernel/thread.c        |   4 +-
 include/kernel/sched.h            |   3 +-
 init/Kconfig                      |  10 +++
 init/main.c                       |  43 +++++++-----
 kernel/kthread.c                  |   6 +-
 kernel/sched/core.c               |   6 +-
 kernel/sched/edf.c                | 111 ++++++++++++++++++------
 kernel/sched/rr.c                 |  19 +++--
 10 files changed, 128 insertions(+), 82 deletions(-)

diff --git a/arch/sparc/include/asm/spinlock.h b/arch/sparc/include/asm/spinlock.h
index a5ed587..05a642a 100644
--- a/arch/sparc/include/asm/spinlock.h
+++ b/arch/sparc/include/asm/spinlock.h
@@ -141,11 +141,12 @@ static void spin_lock(struct spinlock *lock)
 __attribute__((unused))
 static void spin_lock_raw(struct spinlock *lock)
 {
+#if 0
 	if (unlikely(lock->lock_recursion))
 		return;
 
 	lock->lock_recursion = 1;
-
+#endif
 	__asm__ __volatile__(
 		"1:                     \n\t"
 		"ldstub  [%0], %%g2     \n\t"
@@ -155,8 +156,9 @@ static void spin_lock_raw(struct spinlock *lock)
 		:
 		: "r" (&lock->lock)
 		: "g2", "memory", "cc");
-
+#if 0
 	lock->lock_recursion = 0;
+#endif
 }
 
 /**
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index dc5096d..d1f045e 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -132,7 +132,7 @@ void smp_cpu_entry(void)
 	iowrite32be(0x3, &cpu1_ready);
 	while (ioread32be(&cpu1_ready) != 0x4);
 	while(1) {
-		// printk("x");
+		//printk(".");
 		// cpu_relax();
 	}
 	// printk("1\n");
diff --git a/arch/sparc/kernel/thread.c b/arch/sparc/kernel/thread.c
index 73cef27..2bff101 100644
--- a/arch/sparc/kernel/thread.c
+++ b/arch/sparc/kernel/thread.c
@@ -112,9 +112,9 @@ void arch_promote_to_task(struct task_struct *task)
 	task->data = NULL;
 
 
-	printk(MSG "kernel stack %x %x\n", leon_get_fp(), leon_get_sp());
+	pr_debug(MSG "kernel stack %x %x\n", leon_get_fp(), leon_get_sp());
 
-	printk(MSG "is next at %p stack %p\n", &task->thread_info, task->stack);
+	pr_debug(MSG "is next at %p stack %p\n", &task->thread_info, task->stack);
 
 
 	/* and set the new thread as current */
diff --git a/include/kernel/sched.h b/include/kernel/sched.h
index c080b62..ed0d287 100644
--- a/include/kernel/sched.h
+++ b/include/kernel/sched.h
@@ -6,6 +6,7 @@
 
 #ifndef _KERNEL_SCHED_H_
 #define _KERNEL_SCHED_H_
 
+#include <generated/autoconf.h>
 
 /*XXX */
@@ -46,7 +47,7 @@ struct rq {
 
 struct task_queue {
 	struct list_head new;
-	struct list_head run;
+	struct list_head run[CONFIG_SMP_CPUS_MAX];
 	struct list_head wake;
 	struct list_head dead;
 };
diff --git a/init/Kconfig b/init/Kconfig
index ddc0eeb..9f9165e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -10,6 +10,16 @@ config KERNELVERSION
 
 menu "General Setup"
 
+config SMP_CPUS_MAX
+	int "Number of supported CPUs"
+	default 2
+	range 0 16
+	help
+	  Set the maximum number of CPUs supported at runtime. This is an
+	  upper limit: if the number of CPUs actually present in the system
+	  differs from this value, the smaller of the two determines how
+	  many CPUs are used at runtime.
+
 config KALLSYMS
 	bool "Generate a kernel symbol table"
 	default y
diff --git a/init/main.c b/init/main.c
index 05778b6..a0025a9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -131,7 +131,7 @@ int task0(void *data)
 			printk("%d %d %d %llu irq: %s %d per sec; sched %llu us %llu per call, calls %d cpu %d\n", a, b, c, ktime_get(), buf1, (curr -last), ktime_to_us(sched_last_time), sched_last_time /sched_ev, sched_ev - last_call, leon3_cpuid());
 			last = curr;
 			last_call = sched_ev;
-//			sched_yield();
+			sched_yield();
 		}
 	}
 
@@ -278,50 +278,54 @@ int kernel_main(void)
 	attr.wcet = ms_to_ktime(200);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
+#endif
 
 #if 1
-	t = kthread_create(task3, NULL, KTHREAD_CPU_AFFINITY_NONE, "print2");
+
+	t = kthread_create(task0, NULL, 0, "res");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period = ms_to_ktime(800);
-	attr.deadline_rel = ms_to_ktime(700);
-	attr.wcet = ms_to_ktime(200);
+	attr.period = ms_to_ktime(1000);
+	attr.deadline_rel = ms_to_ktime(900);
+	attr.wcet = ms_to_ktime(800);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
 
 #if 1
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "print3");
+	t = kthread_create(task1, NULL, 0, "task1");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period = ms_to_ktime(400);
-	attr.deadline_rel = ms_to_ktime(200);
-	attr.wcet = ms_to_ktime(100);
+	attr.period = ms_to_ktime(500);
+	attr.deadline_rel = ms_to_ktime(300);
+	attr.wcet = ms_to_ktime(200);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
-#endif
 
 #if 1
-
-	t = kthread_create(task0, NULL, 0, "res");
+	t = kthread_create(task2, NULL, 1, "task2");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period = ms_to_ktime(1000);
-	attr.deadline_rel = ms_to_ktime(900);
-	attr.wcet = ms_to_ktime(800);
+	attr.period = ms_to_ktime(400);
+	attr.deadline_rel = ms_to_ktime(200);
+	attr.wcet = ms_to_ktime(100);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
+
 #if 1
-	t = kthread_create(task1, NULL, 1, "print3");
+	t = kthread_create(task3, NULL, 1, "task3");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period = ms_to_ktime(500);
-	attr.deadline_rel = ms_to_ktime(300);
+	attr.period = ms_to_ktime(800);
+	attr.deadline_rel = ms_to_ktime(700);
 	attr.wcet = ms_to_ktime(200);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
+
+
+
 #endif
@@ -333,6 +337,7 @@ int kernel_main(void)
 	while (ioread32be(&cpu1_ready) != 0x3);
 	iowrite32be(0x4, &cpu1_ready);
 	while(1) {
+//		printk("o");
 #if 0
 		int val = inc;
 		static ktime last;
@@ -375,7 +380,7 @@ int kernel_main(void)
 //		sched_yield();
 
 //		printk("cpu1\n");
-//		cpu_relax();
+		cpu_relax();
 	}
 
 	//printk("%lld\n", buf[i]);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ae16357..c75b680 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -19,6 +19,8 @@
 
 #include <kernel/tick.h>
 
+#include <generated/autoconf.h> /* XXX need common CPU include */
+
 
 #define MSG "KTHREAD: "
 
@@ -41,7 +43,7 @@ static struct spinlock kthread_spinlock;
 
 #include <asm/processor.h>
 
-struct thread_info *current_set[2]; /* XXX */
+struct thread_info *current_set[CONFIG_SMP_CPUS_MAX]; /* XXX */
 
 
 /**
@@ -50,7 +52,7 @@ struct thread_info *current_set[2]; /* XXX */
 
 void kthread_lock(void)
 {
-	spin_lock(&kthread_spinlock);
+	spin_lock_raw(&kthread_spinlock);
 }
 
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 093d17f..65ab90c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -61,7 +61,6 @@ void schedule(void)
 	arch_local_irq_disable();
 //	if (leon3_cpuid() != 0)
 //		printk("cpu %d\n", leon3_cpuid());
-kthread_lock();
 
 	/* get the current task for this CPU */
 	/* XXX leon3_cpuid() should be smp_cpu_id() arch call*/
@@ -92,8 +91,10 @@ retry:
 	/* XXX: for now, try to wake up any threads not running
 	 * this is a waste of cycles and instruction space; should be
 	 * done in the scheduler's code (somewhere) */
+	kthread_lock();
 	list_for_each_entry(sched, &kernel_schedulers, node)
 		sched->wake_next_task(&sched->tq, now);
+	kthread_unlock();
 
 
 	/* XXX need sorted list: highest->lowest scheduler priority, e.g.:
@@ -108,7 +109,6 @@ retry:
 
 	/* if one of the schedulers have a task which needs to run now,
 	 * next is non-NULL */
-retry2:
 	next = sched->pick_next_task(&sched->tq, now);
 
 #if 0
@@ -174,7 +174,6 @@ retry2:
 	if (!next) {
 		/* there is absolutely nothing nothing to do, check again later */
 		tick_set_next_ns(wake_ns);
-		kthread_unlock();
 		goto exit;
 	}
 
@@ -209,7 +208,6 @@ retry2:
 	sched_ev++;
 	sched_last_time = ktime_add(sched_last_time, ktime_delta(ktime_get(), now));
 #endif
-	kthread_unlock();
 
 	prepare_arch_switch(1);
 	switch_to(next);
diff --git a/kernel/sched/edf.c b/kernel/sched/edf.c
index 89c6788..74c0350 100644
--- a/kernel/sched/edf.c
+++ b/kernel/sched/edf.c
@@ -11,7 +11,8 @@
 #include <kernel/string.h>
 #include <kernel/tick.h>
 
-
+#include <generated/autoconf.h> /* XXX need common CPU include */
+#include <asm/processor.h>
 
 void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 {
@@ -23,6 +24,8 @@ void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 	struct task_struct *tsk;
 	struct task_struct *tmp;
 
+	int cpu = leon3_cpuid();
+
 	ktime prev = 0;
 	ktime prevd = 0;
 
@@ -30,7 +33,7 @@ void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 	printk("\nktime: %lld\n", ktime_to_us(now));
 	printk("S\tDeadline\tWakeup\t\tdelta W\tdelta P\tt_rem\ttotal\tslices\tName\twcet\tavg\n");
 	printk("---------------------------------------------------------------------------------------------------------------------------------\n");
-	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
 
 		if (tsk->attr.policy == SCHED_RR)
 			continue;
@@ -194,6 +197,18 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 *
 * -> need hyperperiod factor H = 2
 *
+ *
+ * ####
+ *
+ * Note: EDF is not an optimal algorithm in SMP configurations; deadlines
+ * cannot be guaranteed even for total utilisation values just above 1.0
+ * (Dhall's effect). To mitigate this for EDF tasks with no CPU affinity
+ * set (KTHREAD_CPU_AFFINITY_NONE), we search the per-CPU run queues
+ * until we find one that is below its utilisation limit and force the
+ * affinity of the task to that particular CPU.
+ *
+ *
+ * XXX function needs adaptation
 */
 static int edf_schedulable(struct task_queue *tq, const struct task_struct *task)
@@ -201,36 +216,46 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 	struct task_struct *tsk = NULL;
 	struct task_struct *tmp;
 
+	int cpu;
+
 	double u = 0.0; /* utilisation */
 
 
 	/* add all in new */
 	if (!list_empty(&tq->new)) {
-		list_for_each_entry_safe(tsk, tmp, &tq->new, node)
+		list_for_each_entry_safe(tsk, tmp, &tq->new, node) {
+			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+
+		}
 	}
 
 
 	/* add all in wakeup */
 	if (!list_empty(&tq->wake)) {
-		list_for_each_entry_safe(tsk, tmp, &tq->wake, node)
-			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+		list_for_each_entry_safe(tsk, tmp, &tq->wake, node) {
+			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+		}
+
 	}
 
 
	/* add all running */
-	if (!list_empty(&tq->run)) {
-		list_for_each_entry_safe(tsk, tmp, &tq->run, node)
-			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+
+	for (cpu = 0; cpu < 2; cpu++) {
+		if (!list_empty(&tq->run[cpu])) {
+			list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node)
+				u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+		}
 	}
 
-	if (u > 1.9) {
+	if (u >= 1.9) {
 		printk("I am NOT schedul-ableh: %f ", u);
 		BUG();
 		return -EINVAL;
@@ -252,6 +277,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 {
 	int64_t delta;
 
+	int cpu = leon3_cpuid();
 
 	struct task_struct *tsk;
 	struct task_struct *tmp;
@@ -259,10 +285,10 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 	static int cnt;
 
-	if (list_empty(&tq->run))
+	if (list_empty(&tq->run[cpu]))
 		return NULL;
 
-	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
 
 		/* time to wake up yet? */
 		delta = ktime_delta(tsk->wakeup, now);
@@ -292,7 +318,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 			schedule_edf_reinit_task(tsk, now);
 
 			/* always queue it up at the tail */
-			list_move_tail(&tsk->node, &tq->run);
+			list_move_tail(&tsk->node, &tq->run[cpu]);
 		}
 
 
@@ -300,17 +326,14 @@
 			 * head of the list, we become the new head */
 
-			first = list_first_entry(&tq->run, struct task_struct, node);
+			first = list_first_entry(&tq->run[cpu], struct task_struct, node);
 
-			if (tsk->on_cpu == KTHREAD_CPU_AFFINITY_NONE
-			    || tsk->on_cpu == leon3_cpuid()) {
 			if (ktime_before (tsk->wakeup, now)) {
 				if (ktime_before (tsk->deadline - tsk->runtime, first->deadline)) {
 					tsk->state = TASK_RUN;
-					list_move(&tsk->node, &tq->run);
+					list_move(&tsk->node, &tq->run[cpu]);
 				}
 			}
-			}
 
 			continue;
 		}
 
@@ -324,36 +347,25 @@
 		/* if our deadline is earlier than the deadline at the
 		 * head of the list, move us to top */
 
-		if (tsk->on_cpu == KTHREAD_CPU_AFFINITY_NONE
-		    || tsk->on_cpu == leon3_cpuid()) {
-		first = list_first_entry(&tq->run, struct task_struct, node);
+		first = list_first_entry(&tq->run[cpu], struct task_struct, node);
 
-		if (first->on_cpu != KTHREAD_CPU_AFFINITY_NONE
-		    || tsk->on_cpu != leon3_cpuid()) {
-			list_move(&tsk->node, &tq->run);
-		}
+		list_move(&tsk->node, &tq->run[cpu]);
 
 		if (ktime_before (tsk->deadline - tsk->runtime, first->deadline))
-			list_move(&tsk->node, &tq->run);
-		}
+			list_move(&tsk->node, &tq->run[cpu]);
 
 		continue;
 		}
 	}
 
-/* XXX argh, need cpu affinity run lists */
-	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+	/* XXX argh, need cpu affinity run lists */
+	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
 
 		if (tsk->state == TASK_RUN) {
-			if (tsk->on_cpu == KTHREAD_CPU_AFFINITY_NONE
-			    || tsk->on_cpu == leon3_cpuid()) {
-				tsk->state = TASK_BUSY;
-				return tsk;
-			} else {
-				continue;
-			}
+			tsk->state = TASK_BUSY;
+			return tsk;
 		}
 	}
@@ -376,6 +388,8 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 
 static void edf_wake_next(struct task_queue *tq, ktime now)
 {
+	int cpu = leon3_cpuid();
+
 	ktime last;
 
 	struct task_struct *tmp;
@@ -403,17 +417,21 @@ static void edf_wake_next(struct task_queue *tq, ktime now)
 
 	task = list_entry(tq->wake.next, struct task_struct, node);
 
-	list_for_each_entry_safe(t, tmp, &tq->run, node) {
+	BUG_ON(task->on_cpu == KTHREAD_CPU_AFFINITY_NONE);
+
+	cpu = task->on_cpu;
 
-		first = list_first_entry(&tq->run, struct task_struct, node);
+	list_for_each_entry_safe(t, tmp, &tq->run[cpu], node) {
+
+		first = list_first_entry(&tq->run[cpu], struct task_struct, node);
 
 		if (ktime_before (t->wakeup, now)) {
 			if (ktime_before (t->deadline - t->runtime, first->deadline)) {
-				list_move(&t->node, &tq->run);
+				list_move(&t->node, &tq->run[cpu]);
 			}
 		}
 
-	list_for_each_entry_safe(t, tmp, &tq->run, node) {
+	list_for_each_entry_safe(t, tmp, &tq->run[cpu], node) {
 
 		/* if the relative deadline of task-to-wake can fit in between the unused
 		 * timeslice of this running task, insert after the next wakeup
@@ -446,7 +464,7 @@ static void edf_wake_next(struct task_queue *tq, ktime now)
 
 	task->first_dead = task->deadline;
 
-	list_move_tail(&task->node, &tq->run);
+	list_move_tail(&task->node, &tq->run[cpu]);
 }
 
 
@@ -539,6 +557,8 @@ ktime edf_task_ready_ns(struct task_queue *tq, ktime now)
 {
 	int64_t delta;
 
+	int cpu = leon3_cpuid();
+
 	struct task_struct *first;
 	struct task_struct *tsk;
 	struct task_struct *tmp;
 
 
@@ -547,7 +567,7 @@ ktime edf_task_ready_ns(struct task_queue *tq, ktime now)
 
 
 
-	list_for_each_entry_safe(first, tmp, &tq->run, node) {
+	list_for_each_entry_safe(first, tmp, &tq->run[cpu], node) {
 
 		if (first->state != TASK_RUN)
 			continue;
@@ -555,7 +575,7 @@ ktime edf_task_ready_ns(struct task_queue *tq, ktime now)
 		break;
 	}
 
-	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
 #if 0
 		if (tsk->state == TASK_BUSY)
 			continue; /*meh */
 #endif
@@ -593,12 +613,15 @@ static struct scheduler sched_edf = {
 
 static int sched_edf_init(void)
 {
-	/* XXX */
+	int i;
+
 	INIT_LIST_HEAD(&sched_edf.tq.new);
-	INIT_LIST_HEAD(&sched_edf.tq.run);
 	INIT_LIST_HEAD(&sched_edf.tq.wake);
 	INIT_LIST_HEAD(&sched_edf.tq.dead);
 
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++)
+		INIT_LIST_HEAD(&sched_edf.tq.run[i]);
+
 	sched_register(&sched_edf);
 
 	return 0;
diff --git a/kernel/sched/rr.c b/kernel/sched/rr.c
index 9cd49de..4268be1 100644
--- a/kernel/sched/rr.c
+++ b/kernel/sched/rr.c
@@ -19,10 +19,10 @@ static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
 	struct task_struct *tmp;
 
 
-	if (list_empty(&tq->run))
+	if (list_empty(&tq->run[leon3_cpuid()]))
 		return NULL;
 
 
-	list_for_each_entry_safe(next, tmp, &tq->run, node) {
+	list_for_each_entry_safe(next, tmp, &tq->run[leon3_cpuid()], node) {
 
 		if (next->on_cpu == KTHREAD_CPU_AFFINITY_NONE
@@ -33,7 +33,7 @@ static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
 			 * following a scheduling event. for now, just force
 			 * round robin
 			 */
-			list_move_tail(&next->node, &tq->run);
+			list_move_tail(&next->node, &tq->run[leon3_cpuid()]);
 
 			/* reset runtime */
 			next->runtime = (next->attr.priority * tick_get_period_min_ns());
@@ -43,7 +43,7 @@ static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
 		}
 
 		if (next->state == TASK_IDLE)
-			list_move_tail(&next->node, &tq->run);
+			list_move_tail(&next->node, &tq->run[leon3_cpuid()]);
 
 		if (next->state == TASK_DEAD)
 			list_move_tail(&next->node, &tq->dead);
@@ -81,7 +81,7 @@ static void rr_wake_next(struct task_queue *tq, ktime now)
 	BUG_ON(task->attr.policy != SCHED_RR);
 	/** XXX NO LOCKS */
 	task->state = TASK_RUN;
-	list_move(&task->node, &tq->run);
+	list_move(&task->node, &tq->run[leon3_cpuid()]);
 }
 
 
@@ -91,7 +91,7 @@ static void rr_enqueue(struct task_queue *tq, struct task_struct *task)
 	task->runtime = (task->attr.priority * tick_get_period_min_ns());
 	/** XXX **/
 	if (task->state == TASK_RUN)
-		list_add_tail(&task->node, &tq->run);
+		list_add_tail(&task->node, &tq->run[leon3_cpuid()]);
 	else
 		list_add_tail(&task->node, &tq->wake);
 }
@@ -168,12 +168,17 @@ static struct scheduler sched_rr = {
 
 static int sched_rr_init(void)
 {
+	int i;
+
 	/* XXX */
 	INIT_LIST_HEAD(&sched_rr.tq.new);
-	INIT_LIST_HEAD(&sched_rr.tq.run);
 	INIT_LIST_HEAD(&sched_rr.tq.wake);
 	INIT_LIST_HEAD(&sched_rr.tq.dead);
 
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++)
+		INIT_LIST_HEAD(&sched_rr.tq.run[i]);
+
+
 	sched_register(&sched_rr);
 
 	return 0;
-- 
GitLab
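
Reviewer note on CONFIG_SMP_CPUS_MAX: the behaviour described in the help text
amounts to clamping the compile-time limit against the number of CPUs actually
detected at boot, i.e. the smaller of the two wins. The sketch below only
illustrates that idea and is not code from this patch; the names
smp_cpus_detected and smp_cpus_usable() are made up for illustration.

    #include <generated/autoconf.h>    /* provides CONFIG_SMP_CPUS_MAX */

    /* hypothetical helper: use whichever is smaller, the configured
     * maximum or the number of CPUs found in the system
     */
    static int smp_cpus_usable(int smp_cpus_detected)
    {
            if (smp_cpus_detected < CONFIG_SMP_CPUS_MAX)
                    return smp_cpus_detected;

            return CONFIG_SMP_CPUS_MAX;
    }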
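Reviewer note on the forced-affinity strategy described in the edf.c comment:
pinning an EDF task that was created with KTHREAD_CPU_AFFINITY_NONE to the
first per-CPU run queue that still has utilisation headroom is essentially
first-fit partitioned EDF, which sidesteps Dhall's effect because each queue
can then be admission-tested on its own with the uniprocessor utilisation
bound. The stand-alone sketch below illustrates the idea only; NUM_CPUS,
struct edf_task, cpu_util[] and edf_pick_cpu() are hypothetical stand-ins, and
the strict 1.0 bound used here differs from the 1.9 heuristic currently in
edf_schedulable().

    #include <stdio.h>

    #define NUM_CPUS          2     /* stand-in for CONFIG_SMP_CPUS_MAX */
    #define CPU_AFFINITY_NONE (-1)  /* stand-in for KTHREAD_CPU_AFFINITY_NONE */

    struct edf_task {
            double wcet;    /* worst-case execution time per period */
            double period;
            int    cpu;     /* assigned CPU, or CPU_AFFINITY_NONE */
    };

    /* utilisation already admitted to each per-CPU run queue */
    static double cpu_util[NUM_CPUS];

    /* return the CPU the task was pinned to, or -1 if no queue can take it */
    static int edf_pick_cpu(struct edf_task *t)
    {
            double u = t->wcet / t->period;
            int cpu;

            if (t->cpu != CPU_AFFINITY_NONE)
                    return t->cpu;          /* fixed assignment: honour it */

            for (cpu = 0; cpu < NUM_CPUS; cpu++) {
                    if (cpu_util[cpu] + u <= 1.0) { /* uniprocessor EDF bound */
                            cpu_util[cpu] += u;
                            t->cpu = cpu;           /* force the affinity */
                            return cpu;
                    }
            }

            return -1;      /* no single queue has room for this task */
    }

    int main(void)
    {
            struct edf_task tasks[] = {
                    { 0.8, 1.0, CPU_AFFINITY_NONE }, /* "res":   wcet 800 ms, period 1000 ms */
                    { 0.2, 0.5, CPU_AFFINITY_NONE }, /* "task1": wcet 200 ms, period  500 ms */
                    { 0.1, 0.4, CPU_AFFINITY_NONE }, /* "task2": wcet 100 ms, period  400 ms */
            };
            unsigned int i;

            for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                    printf("task %u -> cpu %d\n", i, edf_pick_cpu(&tasks[i]));

            return 0;
    }

With these example numbers the 0.8-utilisation task lands on CPU 0 and the two
lighter tasks share CPU 1, mirroring the kind of fixed per-CPU assignment the
commit subject refers to.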