diff --git a/init/main.c b/init/main.c
index 1bd509f45f6912dfeaf3f9dc31a2fcff1dc489c1..27bb193113c0defcca66edcd3ec17e9ccab78b68 100644
--- a/init/main.c
+++ b/init/main.c
@@ -251,7 +251,7 @@ int kernel_main(void)
 	attr.wcet = us_to_ktime(200);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
-
+#if 1
 	t = kthread_create(task3, NULL, KTHREAD_CPU_AFFINITY_NONE, "print2");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
@@ -260,7 +260,8 @@ int kernel_main(void)
 	attr.wcet = us_to_ktime(100);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
-
+#endif
+#if 1
 	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "print3");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
@@ -269,7 +270,8 @@ int kernel_main(void)
 	attr.wcet = us_to_ktime(100);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
-
+#endif
+#if 1
 	t = kthread_create(task0, NULL, KTHREAD_CPU_AFFINITY_NONE, "res");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
@@ -278,7 +280,7 @@ int kernel_main(void)
 	attr.wcet = ms_to_ktime(100);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
-
+#endif
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 90855ea226f106e708c7b497ff740c505b9bd476..43a3ab42ae58fd43f13243bfc77bcaa8edd3a71f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -36,7 +36,9 @@ void schedule(void)
 	struct task_struct *current;
 	int64_t slot_ns;
 
-	int64_t wake_ns = 0;
+	int64_t wake_ns = 1000000000;
+
+	ktime rt;
 
 	static int once;
@@ -55,12 +57,20 @@ void schedule(void)
 	/* get the current task for this CPU */
 	current = current_set[0]->task;
+
+	rt = ktime_sub(ktime_get(), current->exec_start);
+
 	/** XXX need timeslice_update callback for schedulers */
 	/* update remaining runtime of current thread */
-	current->runtime = ktime_sub(current->exec_start, ktime_get());
+
+	current->runtime = ktime_sub(current->runtime, rt);
+	current->total   = ktime_add(current->total, rt);
+
 retry:
+	next = NULL;
+	wake_ns = 1000000000;
 	/* XXX: for now, try to wake up any threads not running
 	 * this is a waste of cycles and instruction space; should be
 	 * done in the scheduler's code (somewhere) */
@@ -82,13 +92,15 @@ retry:
 		 * next is non-NULL
 		 */
 		next = sched->pick_next_task(&sched->tq);
+
 #if 0
 		if (next)
 			printk("next %s %llu %llu\n", next->name, next->first_wake, ktime_get());
 		else
-			printk("NULL\n");
+			printk("NULL %llu\n", ktime_get());
 #endif
+
 		/* check if we need to limit the next tasks timeslice;
 		 * since our scheduler list is sorted by scheduler priority,
 		 * only update the value if wake_next is not set;
@@ -109,22 +121,17 @@ retry:
		 * the corresponding scheduler
		 */

-		if (!wake_ns)
-			wake_ns = sched->task_ready_ns(&sched->tq);
-
-		/* we found something to execute, off we go */
-		if (next)
+		if (next) {
+			slot_ns = next->sched->timeslice_ns(next);
+			/* we found something to execute, off we go */
 			break;
+		}
 	}
 
 	if (!next) {
 		/* there is absolutely nothing nothing to do, check again later */
-		if (wake_ns)
-			tick_set_next_ns(wake_ns);
-		else
-			tick_set_next_ns(1e9);	/* XXX pause for a second, there are no threads in any scheduler */
-
+		tick_set_next_ns(wake_ns);
 		goto exit;
 	}
@@ -134,7 +141,7 @@ retry:
	 * schedulers, e.g. EDF
	 */

-	slot_ns = next->sched->timeslice_ns(next);
+	wake_ns = sched->task_ready_ns(&sched->tq);

 	if (wake_ns < slot_ns)
 		slot_ns = wake_ns;
@@ -152,9 +159,9 @@ retry:
 	/* subtract readout overhead */
 	tick_set_next_ns(ktime_sub(slot_ns, 2000LL));
 #if 1
-	if (slot_ns < 20000UL) {
+	if (slot_ns < 19000UL) {
+		// printk("wake %llu slot %llu %s\n", wake_ns, slot_ns, next->name);
 		goto retry;
-		printk("wake %llu slot %llu %s\n", wake_ns, slot_ns, next->name);
 		BUG();
 	}
 #endif
diff --git a/kernel/sched/edf.c b/kernel/sched/edf.c
index 0b4b0c0241dac48526174d02df2508fa96ed377c..8d57bb71b00b10bf812f1d5b3ccd20d7e87d5ed6 100644
--- a/kernel/sched/edf.c
+++ b/kernel/sched/edf.c
@@ -107,6 +107,7 @@ static inline bool schedule_edf_can_execute(struct task_struct *tsk, ktime now)
 	if (tsk->runtime <= 0)
 		return false;
 	if (ktime_before(tsk->deadline, now)) {
+		sched_print_edf_list_internal(&tsk->sched->tq, ktime_get());
 		printk("%s violated, %lld %lld, dead %lld wake %lld now %lld start %lld\n",
 		       tsk->name, tsk->runtime, ktime_us_delta(tsk->deadline, now),
 		       tsk->deadline, tsk->wakeup, now, tsk->exec_start);
@@ -153,6 +154,8 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 	tsk->deadline = ktime_add(tsk->wakeup, tsk->attr.deadline_rel);
 
 	tsk->runtime = tsk->attr.wcet;
+
+	tsk->slices++;
 }
@@ -242,7 +245,6 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 		return 0;
 	}
 
-	u = (double) (int32_t) task->attr.wcet / (double) (int32_t) task->attr.period;
 
 	return 0;
 }
@@ -346,7 +348,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 			slot = first->runtime;
 	}
 
-#if 0 /* XXX should not be needed, but needs verification! */
+#if 1 /* XXX should not be needed, but needs verification! */
 	if (!go) {
 		list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
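
Note on the core change in schedule() above: the old code computed the remaining runtime as ktime_sub(current->exec_start, ktime_get()), i.e. with the operands inverted and without accumulating anything. The patch instead computes the elapsed slice once (rt = now - exec_start), subtracts it from the task's remaining runtime and adds it to its total execution time. Below is a minimal, stand-alone sketch of that bookkeeping using plain int64_t nanoseconds instead of the kernel's ktime helpers; struct task_acct and account_current() are illustrative names only, not identifiers from these sources.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the per-task accounting fields */
struct task_acct {
	int64_t exec_start;	/* timestamp when the task was dispatched (ns) */
	int64_t runtime;	/* remaining runtime budget in this period (ns) */
	int64_t total;		/* accumulated execution time (ns) */
};

/* models the updated accounting step in schedule():
 * rt = now - exec_start; runtime -= rt; total += rt;
 */
static void account_current(struct task_acct *t, int64_t now)
{
	int64_t rt = now - t->exec_start;

	t->runtime -= rt;
	t->total   += rt;
}

int main(void)
{
	struct task_acct t = { .exec_start = 1000, .runtime = 200000, .total = 0 };

	account_current(&t, 51000);	/* task ran for 50 us */

	printf("remaining %lld ns, total %lld ns\n",
	       (long long) t.runtime, (long long) t.total);
	return 0;
}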