Commit fe96b197 authored by Armin Luntzer

some cleanup of sched

parent 2b1b24c6
@@ -55,19 +55,14 @@ void schedule(void)
 	}
 
 	arch_local_irq_disable();
 
-//	if (leon3_cpuid() != 0)
-//		printk("cpu %d\n", leon3_cpuid());
 
 	/* get the current task for this CPU */
 	/* XXX leon3_cpuid() should be smp_cpu_id() arch call */
 	current = current_set[leon3_cpuid()]->task;
 
-//	if (leon3_cpuid() != 0)
-//	if (current)
-//		printk("current %s\n", current->name);
 
 	now = ktime_get();
@@ -80,7 +75,6 @@ void schedule(void)
 	current->total = ktime_add(current->total, rt);
 	current->state = TASK_RUN;
 
-//	current->runtime = 0;
 
 retry:
@@ -107,18 +101,10 @@ retry:
 	 */
 	next = sched->pick_next_task(sched->tq, leon3_cpuid(), now);
 
-#if 0
-	if (next)
-		printk("next %s %llu %llu\n", next->name, next->first_wake, ktime_get());
-	else
-		printk("NULL %llu\n", ktime_get());
-#endif
 
 	/* check if we need to limit the next task's timeslice;
 	 * since our scheduler list is sorted by scheduler priority,
 	 * only update the value if wake_next is not set;
-	 *
+	 * XXX --- wrong description for implementation ---
 	 * because our schedulers are sorted, this means that if next
 	 * is set, the highest priority scheduler will both tell us
 	 * whether it has another task pending soon. If next is not set,
@@ -136,32 +122,8 @@ retry:
 	 */
 	if (next) {
-#if 0
-		if (next->on_cpu != KTHREAD_CPU_AFFINITY_NONE) {
-			if (next->on_cpu != leon3_cpuid()) {
-				// printk("%s on_cpu: %d but am %d\n", next->name, next->on_cpu, leon3_cpuid());
-				if (prev == next)
-					continue;
-
-				prev = next;
-				next = NULL;
-				goto retry2;
-			}
-			// else
-			//	printk("yay %s on_cpu: %d and am %d\n", next->name, next->on_cpu, leon3_cpuid());
-		}
-
-		if (next->sched) {
-#endif
-			slot_ns = next->sched->timeslice_ns(next);
-#if 0
-			if (slot_ns < 0)
-				printk("<0 ! %s\n", next->name);
-		}
-		else continue;
-#endif
 		/* we found something to execute, off we go */
+		slot_ns = next->sched->timeslice_ns(next);
 		break;
 	}
 }
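With the affinity experiments stripped out, the pick loop reduces to: walk the registered schedulers in priority order, take the first task any of them offers, and fetch its timeslice. A minimal standalone sketch of that control flow; all types and helper names here are illustrative stand-ins, not this kernel's actual API:

#include <stddef.h>
#include <stdint.h>

struct task;

struct scheduler {
	struct task *(*pick_next_task)(void);
	int64_t (*timeslice_ns)(struct task *t);
};

/* walk schedulers in priority order; the first hit wins */
static struct task *pick(struct scheduler *sched, size_t n, int64_t *slot_ns)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct task *next = sched[i].pick_next_task();

		if (next) {
			*slot_ns = sched[i].timeslice_ns(next);
			return next;
		}
	}

	return NULL;	/* nothing runnable anywhere */
}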
@@ -173,8 +135,6 @@ retry:
 		goto exit;
 	}
 
-//	if (leon3_cpuid() != 0)
-//		printk("next %s\n", next->name);
 
 	/* see if the remaining runtime in a thread is smaller than the wakeup
 	 * timeout. In this case, we will restrict ourselves to the remaining
 	 * runtime. This is particularly needed for strictly periodic
@@ -182,17 +142,20 @@ retry:
 	 */
 
 	/* XXX should go through sched list in reverse to pick most pressing
-	 * wakeup time */
+	 * wakeup time
+	 */
 
 //	list_for_each_entry(sched, &kernel_schedulers, node) {
 	sched = list_first_entry(&kernel_schedulers, struct scheduler, node);
 
 	wake_ns = sched->task_ready_ns(sched->tq, leon3_cpuid(), now);
 
-	if (wake_ns > 0)
-		if (wake_ns < slot_ns)
-			slot_ns = wake_ns;
+	BUG_ON(wake_ns < 0);
+
+	if (wake_ns < slot_ns)
+		slot_ns = wake_ns;
 
 	/* ALWAYS get current time here */
 	next->exec_start = ktime_get();
+	next->state = TASK_BUSY;
 
 	/* subtract readout overhead */
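With the BUG_ON in place, the slice limiting reduces to taking the minimum of the picked task's slice and the earliest pending wakeup reported by the highest-priority scheduler. A condensed restatement, assuming ktime is a signed nanosecond count as elsewhere in this codebase:

#include <stdint.h>

typedef int64_t ktime;	/* assumption: signed nanosecond count */

/* never run longer than it takes for the next task to become ready */
static ktime clamp_slice(ktime slot_ns, ktime wake_ns)
{
	return (wake_ns < slot_ns) ? wake_ns : slot_ns;
}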
......
@@ -10,9 +10,16 @@
 #include <kernel/printk.h>
 #include <kernel/string.h>
 #include <kernel/tick.h>
+#include <kernel/init.h>
 
 #include <generated/autoconf.h> /* XXX need common CPU include */
 
+#define MSG "SCHED_EDF: "
+
+#define UTIL_MAX 0.95 /* XXX should be config option, also should be adaptive depending on RT load */
+
 
 void sched_print_edf_list_internal(struct task_queue *tq, int cpu, ktime now)
 {
 	char state = 'U';
@@ -105,36 +112,19 @@ static inline bool schedule_edf_can_execute(struct task_struct *tsk, int cpu, ktime now)
 	/* should consider twice the min tick period for overhead */
 	if (tsk->runtime <= (tick_get_period_min_ns() << 1))
-		goto bad;
+		return false;
 
 	to_deadline = ktime_delta(tsk->deadline, now);
 
-//	barrier();
-//	tsk->last_visit = now;
 
 	/* should consider twice the min tick period for overhead */
-	if (to_deadline <= (tick_get_period_min_ns() << 1))
-		goto bad;
+	if (ktime_delta(tsk->deadline, now) <= (tick_get_period_min_ns() << 1))
+		return false;
 
-	barrier();
-good:
-	tsk->last_visit_true = now;
-	tsk->last_visit_false = tsk->runtime;
 	return true;
 
-	barrier();
-bad:
-	return false;
 }
 
-#include <asm/leon.h>
 
 static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 {
 	ktime new_wake;
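The rewritten check is now a pure predicate: a running task may continue only if both its remaining runtime and the time left to its deadline exceed twice the minimum tick period, used as a crude scheduling-overhead margin. Restated in isolation, with stand-in types and the tick period passed in as a parameter:

#include <stdbool.h>
#include <stdint.h>

typedef int64_t ktime;

/* may this task still execute, given remaining runtime and deadline? */
static bool edf_can_execute(ktime runtime, ktime deadline, ktime now,
			    ktime tick_min_ns)
{
	const ktime margin = tick_min_ns << 1;	/* overhead guard */

	if (runtime <= margin)		/* budget effectively exhausted */
		return false;

	if (deadline - now <= margin)	/* deadline too close to make it */
		return false;

	return true;
}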
@@ -148,9 +138,10 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 	new_wake = ktime_add(tsk->wakeup, tsk->attr.period);
 #if 1
-	/* need FDIR procedure for this situation */
+	/* need FDIR procedure for this situation: report and wind
+	 * wakeup/deadline forward */
 	if (ktime_after(now, new_wake)) { /* deadline missed earlier? */
-		// printk("me thinks: sp %x bot %x top %x tsk %x %lld\n", leon_get_sp(), tsk->stack_bottom, tsk->stack_top, (int) &tsk->attr, new_wake);
 		printk("%s violated, rt: %lld, last_visit %lld false %lld, last_adjust %lld next wake: %lld (%lld)\n", tsk->name,
 		       tsk->runtime, tsk->last_visit_true, tsk->last_visit_false, tsk->last_adjust, tsk->wakeup, new_wake);
 		sched_print_edf_list_internal(&tsk->sched->tq[tsk->on_cpu], tsk->on_cpu, now);
@@ -171,18 +162,12 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 }
 
-#include <kernel/init.h>
-
-#define MSG "SCHED_EDF: "
 
 static ktime edf_hyperperiod(struct task_queue tq[], int cpu, const struct task_struct *task)
 {
 	ktime lcm = 1;
 	ktime a, b;
 
-	struct task_struct *t0 = NULL;
 	struct task_struct *tsk;
 	struct task_struct *tmp;
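edf_hyperperiod() folds the task periods into their least common multiple (the hyperperiod), which is why it carries a running lcm and the helpers a and b. The standard gcd-based folding step looks like this; a generic sketch, not the function body from this commit:

#include <stdint.h>

typedef int64_t ktime;

static ktime gcd(ktime a, ktime b)
{
	while (b) {
		ktime t = b;

		b = a % b;
		a = t;
	}

	return a;
}

/* fold one more task period into the running hyperperiod */
static ktime lcm_step(ktime lcm, ktime period)
{
	return lcm / gcd(lcm, period) * period;
}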
@@ -281,7 +266,8 @@ static ktime edf_hyperperiod(struct task_queue tq[], int cpu, const struct task_struct *task)
 	 *
 	 * XXX function needs adaptation
 	 */
-#define UTIL_MAX 0.95
+
 static int edf_schedulable(struct task_queue tq[], const struct task_struct *task)
 {
 	struct task_struct *tsk = NULL;
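UTIL_MAX caps the admissible CPU utilisation at 95%. For single-CPU EDF the classic Liu/Layland criterion admits a task set iff the summed utilisation sum(WCET_i / period_i) stays at or below 1; staying below that bound leaves headroom for tick and context-switch overhead. A toy admission test along those lines, illustrative only; the real edf_schedulable() also handles CPU affinity and deadline-shorter-than-period cases:

#include <stdbool.h>

#define UTIL_MAX 0.95	/* same cap this commit moves to the top of the file */

/* admit the new task iff total utilisation stays under the cap */
static bool edf_admit(const double *wcet, const double *period, int n,
		      double new_wcet, double new_period)
{
	double u = new_wcet / new_period;
	int i;

	for (i = 0; i < n; i++)
		u += wcet[i] / period[i];

	return u <= UTIL_MAX;
}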
@@ -294,12 +280,9 @@ static int edf_schedulable(struct task_queue tq[], const struct task_struct *task)
-	printk("me thinks: sp %x bot %x top %x task %x\n", leon_get_sp(), task->stack_bottom, task->stack_top, (int) task);
 
 	if (task->on_cpu == KTHREAD_CPU_AFFINITY_NONE) {
 		int i;
 		double util_max = 0.0;
-		double util;
 
 		for (i = 0; i < 2; i++) {
 			/* XXX look for best fit */
@@ -405,22 +388,6 @@ if (1)
 	//printk("max UH: %lld, UT: %lld\n", ktime_to_ms(uh), ktime_to_ms(ut));
 
-#if 0
-	/* XXX already implicitly subtracted! */
-	/* subtract longest period thread from head, its slices must always
-	 * be used before the deadline
-	 */
-	sh = h * t0->attr.wcet * t0->attr.deadline_rel / t0->attr.period;
-	if (sh < shmin)
-		shmin = sh;
-	uh = uh - sh;
-
-	//printk("%s UH: %lld, UT: %lld\n", t0->name, ktime_to_ms(uh), ktime_to_ms(ut));
-	//printk("%s SH: %lld, ST: %lld\n", t0->name, ktime_to_ms(sh), ktime_to_ms(st));
-#endif
 
 	/* tasks queued in wakeup */
 	if (!list_empty(&tq[cpu].wake)) {
 		list_for_each_entry_safe(tsk, tmp, &tq[cpu].wake, node) {
@@ -606,42 +573,32 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
 	struct task_struct *tmp;
 	struct task_struct *first;
 
-	static int cnt;
 
 	if (list_empty(&tq[cpu].run))
 		return NULL;
 
-	//kthread_lock();
+	/* XXX need to lock run list for wakeup() */
 
 	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
 
 		/* time to wake up yet? */
 		delta = ktime_delta(tsk->wakeup, now);
 
-		/* not yet XXX min period to variable */
 		if (delta > (tick_get_period_min_ns() << 1))
			continue;
 
-		/* XXX ok: what we need here: are multiple queues: one
-		 * where tasks are stored which are currently able to
-		 * run, here we need one per affinity and one generic one
-		 *
-		 * one where tasks are stored which are currently idle.
-		 * tasks move to the idle queue when they cannot execute anymore
-		 * and are moved from idle to run when their wakeup time has
-		 * passed
-		 */
 
 		/* if it's already running, see if there is time remaining */
 		if (tsk->state == TASK_RUN) {
-#if 0
-			if (cnt++ > 10) {
-				sched_print_edf_list_internal(&tsk->sched->tq, now);
-				BUG();
-			}
-#endif
+			/* If it has to be reinitialised, always queue it
+			 * up at the tail.
+			 */
 			if (!schedule_edf_can_execute(tsk, cpu, now)) {
 				schedule_edf_reinit_task(tsk, now);
-				/* always queue it up at the tail */
 				list_move_tail(&tsk->node, &tq[cpu].run);
 			}
@@ -650,63 +607,45 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
 			 * head of the list, we become the new head
 			 */
-			first = list_first_entry(&tq[cpu].run, struct task_struct, node);
+			first = list_first_entry(&tq[cpu].run,
+						 struct task_struct, node);
 
-			if (ktime_before(tsk->wakeup, now)) {
-				// if (ktime_before(tsk->deadline - tsk->runtime, first->deadline)) {
+			if (ktime_before(now, tsk->wakeup))
+				continue;
+
 			if (ktime_before(tsk->deadline, first->deadline)) {
 				tsk->state = TASK_RUN;
 				list_move(&tsk->node, &tq[cpu].run);
 			}
-			}
 
 			continue;
 		}
 
 		/* time to wake up */
 		if (tsk->state == TASK_IDLE) {
 
-			tsk->state = TASK_RUN;
-			//BUG_ON(ktime_delta(tsk->wakeup, now) > 0);
+			tsk->state = TASK_RUN;
 
 			/* if our deadline is earlier than the deadline at the
 			 * head of the list, move us to top */
 
-			first = list_first_entry(&tq[cpu].run, struct task_struct, node);
+			first = list_first_entry(&tq[cpu].run,
+						 struct task_struct, node);
 
-			// if (ktime_before(tsk->deadline - tsk->runtime, first->deadline))
-			if (ktime_before(tsk->deadline, first->deadline))
+			if (first->state != TASK_RUN) {
 				list_move(&tsk->node, &tq[cpu].run);
-
 				continue;
+			}
 
+			if (ktime_before(tsk->deadline, first->deadline))
+				list_move(&tsk->node, &tq[cpu].run);
 		}
 	}
 
-	/* XXX argh, need cpu affinity run lists */
-	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
-		if (tsk->state == TASK_RUN) {
-			tsk->state = TASK_BUSY;
-			// kthread_unlock();
-			return tsk;
-		}
-	}
 
-#if 0
-	first = list_first_entry(&tq->run, struct task_struct, node);
-	delta = ktime_delta(first->wakeup, now);
-	if (first->state == TASK_RUN) {
-		// printk("c %d\n", leon3_cpuid());
-		first->state = TASK_BUSY;
-		return first;
-	}
-#endif
+	first = list_first_entry(&tq[cpu].run, struct task_struct, node);
+	if (first->state == TASK_RUN)
+		return first;
 
-	// kthread_unlock();
 	return NULL;
 }
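Two behavioural points fall out of this hunk: the run queue is kept ordered so that its head carries the earliest deadline among runnable tasks, and marking the picked task TASK_BUSY now happens in schedule() (the next->state = TASK_BUSY addition in the first file) rather than here. The selection rule itself is plain EDF; over a flat array it would read:

#include <stddef.h>
#include <stdint.h>

typedef int64_t ktime;

struct task {
	ktime deadline;
	int runnable;
};

/* EDF: among runnable tasks, the earliest absolute deadline wins */
static struct task *edf_pick(struct task *t, size_t n)
{
	struct task *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!t[i].runnable)
			continue;

		if (!best || t[i].deadline < best->deadline)
			best = &t[i];
	}

	return best;
}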
@@ -720,7 +659,6 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 	struct task_struct *task;
 	struct task_struct *first = NULL;
 	struct task_struct *t;
-	struct task_struct *prev = NULL;
 
 	ktime max = 0;
@@ -728,9 +666,6 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 	struct task_struct *after = NULL;
 
-	ktime wake;
 
 	if (list_empty(&tq[cpu].wake))
 		return;
@@ -749,12 +684,10 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 	}
 
-	if (after) {
+	if (after)
 		last = ktime_add(after->wakeup, after->attr.period);
-	}
 
-#if 1 /* better */
 	task = list_first_entry(&tq[cpu].wake, struct task_struct, node);
@@ -785,33 +718,25 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 			 * timeslice of this running task, insert after the next wakeup
 			 */
 			if (task->attr.deadline_rel < ktime_sub(t->deadline, t->wakeup)) {
-				//last = ktime_add(t->deadline, t->attr.period);
 				last = t->wakeup;
 				break;
 			}
 
 			if (task->attr.wcet < ktime_sub(t->deadline, t->wakeup)) {
-				//last = ktime_add(t->deadline, t->attr.period);
 				last = t->deadline;
 				break;
 			}
 		}
 	}
-#endif
 
 	task->state = TASK_IDLE;
 
 	/* initially furthest deadline as wakeup */
-	last = ktime_add(last, 3000000000ULL);
+	last = ktime_add(last, 3000000000ULL); /* XXX */
 
 	task->wakeup = ktime_add(last, task->attr.period);
 	task->deadline = ktime_add(task->wakeup, task->attr.deadline_rel);
 
-	// printk("%s now %lld, last %lld, wake at %lld dead at %lld\n", task->name, now, last, task->wakeup, task->deadline);
 
 	list_move_tail(&task->node, &tq[cpu].run);
 
 	kthread_unlock();
 }
@@ -838,16 +763,8 @@ static void edf_enqueue(struct task_queue tq[], struct task_struct *task)
 		return;
 	}
 
 	task->on_cpu = cpu;
 
-#if 0
-	/** XXX **/
-	if (task->state == TASK_RUN)
-		list_move(&task->node, &tq->run);
-	else
-#endif
-#if 1
 	list_add_tail(&task->node, &tq[cpu].wake);
-#endif
 }
@@ -902,19 +819,16 @@ error:
 	return -EINVAL;
 }
 
 /* called after pick_next() */
 ktime edf_task_ready_ns(struct task_queue *tq, int cpu, ktime now)
 {
-	int64_t delta;
+	ktime delta;
+	ktime ready = (unsigned int) ~0 >> 1;
 
-	struct task_struct *first;
 	struct task_struct *tsk;
 	struct task_struct *tmp;
 
-	ktime slice = 12345679123ULL;
-	ktime wake = 123456789123ULL;
 
 	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
@@ -924,19 +838,15 @@ ktime edf_task_ready_ns(struct task_queue *tq, int cpu, ktime now)
 		delta = ktime_delta(tsk->wakeup, now);
 
-		if (delta <= (tick_get_period_min_ns() << 1))
+		if (delta <= (tick_get_period_min_ns() << 1)) /* XXX init once */
 			continue;
 
-		if (wake > delta)
-			wake = delta;
+		if (ready > delta)
+			ready = delta;
 	}
 
-	if (slice > wake)
-		slice = wake;
-
-	BUG_ON(slice <= (tick_get_period_min_ns() << 1));
 
-	return slice;
+	return ready;
 }
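After the cleanup, edf_task_ready_ns() simply returns the smallest wakeup delta above the tick margin, initialised to a large sentinel: (unsigned int) ~0 >> 1 is INT_MAX nanoseconds, roughly 2.1 seconds. Its core loop, extracted with stand-in types:

#include <stdint.h>

typedef int64_t ktime;

/* smallest wakeup delta above the overhead margin, or the sentinel */
static ktime earliest_ready(const ktime *delta, int n, ktime margin)
{
	ktime ready = (unsigned int) ~0 >> 1;	/* INT_MAX ns sentinel */
	int i;

	for (i = 0; i < n; i++) {
		if (delta[i] <= margin)	/* effectively due now; skip */
			continue;

		if (ready > delta[i])
			ready = delta[i];
	}

	return ready;
}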
......