Commit d7b4041d authored by Armin Luntzer

improved, deadline selection still sucks, downtime not properly used

parent 2464b9f2
@@ -58,8 +58,11 @@ int task1(void *data)
int task2(void *data)
{
	while (1) {
-		printk("-");
-		sched_yield();
+		//printk("x %llu\n", ktime_get());
+		printk("_\n");
+		// sched_yield();
+		// printk("-");
+		// sched_yield();
	}
}
static int cnt;
@@ -81,7 +84,9 @@ int task3(void *data)
		// sched_yield();
#endif
-		printk("%llu\n", ktime_get());
+		//printk("y %llu\n", ktime_get());
+		printk(".\n");
+		// sched_yield();
	}
}
@@ -223,12 +228,21 @@ int kernel_main(void)
	kthread_wake_up(t);
#endif

#if 1
-	t = kthread_create(task3, NULL, KTHREAD_CPU_AFFINITY_NONE, "edfprint");
+	t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "print1");
	sched_get_attr(t, &attr);
	attr.policy = SCHED_EDF;
+	attr.period = ms_to_ktime(1000);
+	attr.deadline_rel = ms_to_ktime(900);
+	attr.wcet = ms_to_ktime(200);
+	sched_set_attr(t, &attr);
+	kthread_wake_up(t);
+
+	t = kthread_create(task3, NULL, KTHREAD_CPU_AFFINITY_NONE, "print2");
+	sched_get_attr(t, &attr);
+	attr.policy = SCHED_EDF;
-	attr.period = ms_to_ktime(3000);
-	attr.deadline_rel = ms_to_ktime(2000);
-	attr.wcet = ms_to_ktime(1000);
+	attr.period = ms_to_ktime(1000);
+	attr.deadline_rel = ms_to_ktime(900);
+	attr.wcet = ms_to_ktime(200);
	sched_set_attr(t, &attr);
	kthread_wake_up(t);
#endif
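The two EDF threads above are each given period = 1000 ms, deadline_rel = 900 ms and wcet = 200 ms. A minimal standalone sketch of the implied sanity rule (wcet <= deadline_rel <= period) and the resulting worst-case utilization; struct and function names here are illustrative, not part of this kernel's API:

#include <stdint.h>
#include <stdio.h>

/* plausibility check for an EDF task setting (values in milliseconds) */
struct edf_params {
	int64_t period_ms;
	int64_t deadline_rel_ms;
	int64_t wcet_ms;
};

static int edf_params_sane(const struct edf_params *p)
{
	return p->wcet_ms > 0 &&
	       p->wcet_ms <= p->deadline_rel_ms &&
	       p->deadline_rel_ms <= p->period_ms;
}

int main(void)
{
	/* the two threads configured in the hunk above */
	struct edf_params print1 = {1000, 900, 200};
	struct edf_params print2 = {1000, 900, 200};

	printf("print1 sane: %d, print2 sane: %d\n",
	       edf_params_sane(&print1), edf_params_sane(&print2));

	/* combined worst-case utilization: 200/1000 + 200/1000 = 0.4 */
	printf("U = %.2f\n",
	       (double)print1.wcet_ms / print1.period_ms +
	       (double)print2.wcet_ms / print2.period_ms);
	return 0;
}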
@@ -263,6 +277,7 @@ int kernel_main(void)
		i++;
#endif
		// sched_yield();
+#if 0
		if (cnt > 1023) {
			int i;
			for (i = 1; i < 1024; i++)
@@ -270,10 +285,12 @@ int kernel_main(void)
			// cnt = 0;
			break;
		}
-		printk("xxx %llu\n", ktime_get());
+#endif
+		// printk("xxx %llu\n", ktime_get());
		//printk("%d\n", cnt);
+		printk("o\n");
		// sched_yield();
		// cpu_relax();
......
@@ -34,7 +34,7 @@ void schedule(void)
	struct task_struct *next = NULL;
-	struct task_struct *current = current_set[0]->task;
+	struct task_struct *current;
	int64_t slot_ns;
	int64_t wake_ns = 0;
@@ -52,6 +52,11 @@ void schedule(void)
	arch_local_irq_disable();
	/* kthread_lock(); */

+	/* get the current task for this CPU */
+	current = current_set[0]->task;
+
+	/** XXX need timeslice_update callback for schedulers */
+	/* update remaining runtime of current thread */
+	current->runtime = ktime_sub(current->exec_start, ktime_get());
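Note that ktime_sub(current->exec_start, ktime_get()) computes exec_start - now, i.e. the negative of the elapsed time. A minimal sketch of what the timeslice_update callback from the XXX note above would presumably do, assuming the intent is to charge elapsed time against the remaining budget; all names besides ktime are illustrative:

#include <stdint.h>

typedef int64_t ktime; /* nanoseconds, as elsewhere in this patch */

struct task {
	ktime runtime;    /* remaining runtime budget */
	ktime exec_start; /* timestamp of last dispatch */
};

/* hypothetical timeslice_update: charge the elapsed wall-clock time
 * against the task's remaining budget. Note the operand order:
 * elapsed = now - exec_start; the hunk above computes
 * exec_start - now, which yields a negative value. */
static void timeslice_update(struct task *t, ktime now)
{
	t->runtime -= (now - t->exec_start);
	t->exec_start = now;
}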
@@ -62,77 +67,95 @@ void schedule(void)
		sched->wake_next_task(&sched->tq);
	}

-	current = current_set[0]->task;
	/* XXX need sorted list: highest -> lowest scheduler priority, e.g.:
	 *     EDF -> RMS -> FIFO -> RR
	 * TODO: scheduler priority value
	 */
	list_for_each_entry(sched, &kernel_schedulers, node) {

		// struct task_struct *tmpn;

		/* if one of the schedulers has a task which needs to run now,
		 * next is non-NULL
		 */
		next = sched->pick_next_task(&sched->tq);

#if 0
		if (next)
			printk("next %s %llu %llu\n", next->name, next->first_wake, ktime_get());
		else
			printk("NULL\n");
#endif
		/* Check if we need to limit the next task's timeslice.
		 *
		 * Since our scheduler list is sorted by scheduler priority,
		 * we only update the value if wake_ns is not yet set.
		 *
		 * Because the schedulers are sorted, a non-NULL next means
		 * the highest-priority scheduler has also told us whether it
		 * has another task pending soon. If next is not set, a
		 * lower-priority scheduler may supply the next thread to
		 * run, but we take the smallest timeout from high- to
		 * low-priority schedulers, so we enter this function again
		 * once the timeslice of the next thread is over and can
		 * determine what needs to run in the following scheduling
		 * cycle. This way, we distribute CPU time even to the
		 * lowest-priority scheduler, if one has work available, but
		 * guarantee that the highest-priority threads are always
		 * ranked and executed on time.
		 *
		 * We assume that the timeslice is reasonable; if not, fix it
		 * in the corresponding scheduler.
		 */
		//int64_t tmp;
		if (!wake_ns)
			wake_ns = sched->task_ready_ns(&sched->tq);

#if 0
		if (next)
			printk("next %s\n", next->name);
		printk("wake_ns %llu\n", wake_ns);
#endif

#if 0
		else {
			tmp = sched->task_ready_ns(&sched->tq);
			if (tmp > wake_ns) {
				wake_ns = tmp;
				next = tmpn;
			}
		}

		if (!next)
			next = tmpn;
#endif
		/* we found something to execute, off we go */
		if (next)
			break;
	}
	if (!next) {
-		/* nothing to do, check again later */
-		if (wake_ns) {
+		/* there is absolutely nothing to do, check again later */
+		if (wake_ns)
			tick_set_next_ns(wake_ns);
-		}
		else
-			tick_set_next_ns(1e9); /* XXX pause for a second */
+			tick_set_next_ns(1e9); /* XXX pause for a second, there are no threads in any scheduler */

		goto exit;
	}
-	/* see if we can use a full slice or if we have to wake earlier */
-	slot_ns = wake_ns;

+	/* See if the remaining runtime in a thread is smaller than the wakeup
+	 * timeout. In that case, we will restrict ourselves to the remaining
+	 * runtime. This is particularly needed for strictly periodic
+	 * schedulers, e.g. EDF.
+	 */
+	slot_ns = next->sched->timeslice_ns(next);
+	if (wake_ns < slot_ns)
+		slot_ns = wake_ns;
	/* statistics */
	next->exec_start = ktime_get();
	// printk("at %llu\n", next->exec_start);

	// if (next)
	//	printk("real next %s %llu %llu\n", next->name, next->exec_start, slot_ns);

	/* kthread_unlock(); */

	// printk("wake %llu\n", slot_ns);

	/* subtract readout overhead */
	tick_set_next_ns(ktime_sub(slot_ns, 1000LL));
	// tick_set_next_ns(slot_ns);

#if 0
	if (slot_ns < 20000UL) {
		printk("wake %llu\n", slot_ns);
		BUG();
	}
#endif

	prepare_arch_switch(1);
	switch_to(next);
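A condensed model of the selection logic above: schedulers are iterated from highest to lowest priority, the first task returned wins, wake_ns is taken from the first scheduler that reports a pending wakeup, and the final slot is the smaller of the picked task's timeslice and that wakeup timeout. A standalone sketch with illustrative types, not this kernel's structures:

#include <stdint.h>
#include <stddef.h>

typedef int64_t ktime;

struct sched_candidate {
	const char *task;  /* NULL if this scheduler has nothing runnable */
	ktime ready_ns;    /* when this scheduler next needs the CPU, 0 = never */
	ktime slice_ns;    /* timeslice of the picked task */
};

/* schedulers ordered from highest to lowest priority */
static const char *pick(const struct sched_candidate *s, size_t n, ktime *slot)
{
	ktime wake_ns = 0;
	const char *next = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		/* keep the timeout of the highest-priority scheduler that set one */
		if (!wake_ns)
			wake_ns = s[i].ready_ns;

		if (s[i].task) {
			next = s[i].task;
			*slot = s[i].slice_ns;
			break;
		}
	}

	/* wake earlier if a higher-priority task is due before the slice ends */
	if (next && wake_ns && wake_ns < *slot)
		*slot = wake_ns;

	return next;
}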
......
@@ -148,7 +148,7 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
	tsk->state = TASK_IDLE;

	tsk->wakeup = ktime_add(tsk->wakeup, tsk->attr.period);

-#if 0
+#if 1
	if (ktime_after(now, tsk->wakeup))
		printk("%s delta %lld\n", tsk->name, ktime_us_delta(tsk->wakeup, now));
@@ -583,7 +583,7 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task)
	static int64_t dmin = 0x7fffffffffffffLL;

-	printk("\nvvvv EDF analysis vvvv (%lld) \n\n", p);
+	printk("\nvvvv EDF analysis vvvv (%lld ms) \n\n", ktime_to_ms(p));

	/* list_empty(....) */
@@ -615,12 +615,42 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task)
printk("max UH: %lld, UT: %lld\n", ktime_to_ms(uh), ktime_to_ms(ut));
list_for_each_entry_safe(tsk, tmp, &tq->new, node) {
ktime sh, st;
if (tsk == t0)
/* add all in wakeup */
struct task_struct *tsk2 = NULL;
struct task_struct *tmp2;
if (!list_empty(&tq->wake)) {
list_for_each_entry_safe(tsk2, tmp2, &tq->wake, node) {
printk("%d\n", __LINE__);
if (tsk2->attr.policy != SCHED_EDF)
continue;
u += (double) (int32_t) tsk2->attr.wcet / (double) (int32_t) tsk2->attr.period;
}
}
/* add all running */
if (!list_empty(&tq->run)) {
list_for_each_entry_safe(tsk2, tmp2, &tq->run, node) {
printk("%d\n", __LINE__);
if (tsk2->attr.policy != SCHED_EDF)
continue;
u += (double) (int32_t) tsk2->attr.wcet / (double) (int32_t) tsk2->attr.period;
}
}
//list_for_each_entry_safe(tsk, tmp, &tq->new, node)
{
tsk = t0;
ktime sh, st;
// if (tsk == t0)
// continue;
if (tsk->attr.deadline_rel < t0->attr.deadline_rel) {
@@ -659,16 +689,11 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task)
	u += (double) (int32_t) task->attr.wcet / (double) (int32_t) task->attr.period;

-	list_for_each_entry_safe(tsk, tmp, &tq->new, node) {
-		if (tsk->attr.policy != SCHED_EDF)
-			continue;
-		u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
-	}

	if (u > 1.0) {
-		printk("I am NOT schedulable: %g ", u);
+		printk("I am NOT schedulable: %f ", u);
		return -EINVAL;
		printk("changed task mode to RR\n", u);
	} else {
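The test above is the classic EDF utilization bound: a task set is feasible on one CPU only if U = sum(wcet_i / period_i) <= 1. A standalone example with made-up values (in ms) matching the attributes used earlier in this commit:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t wcet[]   = {200, 200, 1000};
	int64_t period[] = {1000, 1000, 3000};
	double u = 0.0;
	unsigned int i;

	for (i = 0; i < sizeof(wcet) / sizeof(wcet[0]); i++)
		u += (double)wcet[i] / (double)period[i];

	/* 0.2 + 0.2 + 0.333... = 0.733..., so this set is admitted */
	if (u > 1.0)
		printf("NOT schedulable: %f\n", u);
	else
		printf("schedulable: %f\n", u);

	return 0;
}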
@@ -693,7 +718,7 @@ static int64_t slot;
static struct task_struct *edf_pick_next(struct task_queue *tq)
{
-#define SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE 100000LL
+#define SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE 111111LL
	int64_t delta;
@@ -701,16 +726,18 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
	struct task_struct *tsk;
	struct task_struct *tmp;
	ktime now = ktime_get();

-slot = SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE;
+slot = 1000000000000; //SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE;

	// printk("-\n");

	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {

		/* time to wake up yet? */
		delta = ktime_delta(tsk->wakeup, now);

		if (delta >= 0) {
			/* nope, just update minimum runtime for this slot */
			if (delta < slot) {
				slot = delta;
				// printk("d %lld now: %lld \n", ktime_to_us(delta), now);
@@ -736,10 +763,14 @@ slot = SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE
		if (delta < 0) {
			delta = tsk->attr.wcet;
			slot = delta;
+			// printk("NOW!\n");
		}
#endif
-		if (delta < slot)
+		if (delta < slot) {
+			// printk("HERE!\n");
			slot = delta;
+		}

		if (delta < 0)
			printk("delta %lld %lld\n", ktime_to_us(delta), ktime_to_us(tick_get_period_min_ns()));

		BUG_ON(delta < 0);
@@ -748,6 +779,7 @@ slot = SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE
		}

		/* move to top */
+		go = tsk;
		list_move(&tsk->node, &tq->run);
		continue;
	}
@@ -758,38 +790,57 @@ slot = SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE
			/* move to top */
			list_move(&tsk->node, &tq->run);
+			go = tsk;
			break;
		}
	}

#if 0
	/** XXX **/
-	//tsk = list_entry(tq->run.next, struct task_struct, node);
+	tsk = list_entry(tq->run.next, struct task_struct, node);

	if (tsk->state == TASK_RUN)
		return tsk;
	else
#endif
	return go;
}
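The loop above derives the tick slot from the earliest pending wakeup: for every task whose wakeup still lies in the future (delta >= 0), the slot shrinks to the minimum delta, so the scheduler re-enters no later than the next EDF release. A reduced sketch, illustrative only, operating on precomputed deltas in nanoseconds:

#include <stdint.h>
#include <stddef.h>

typedef int64_t ktime;

/* return the smallest non-negative wakeup delta, starting from an
 * initial (over-sized) default slot, mirroring edf_pick_next() above */
static ktime next_slot(const ktime *delta, size_t n, ktime initial)
{
	ktime slot = initial;
	size_t i;

	for (i = 0; i < n; i++) {
		if (delta[i] >= 0 && delta[i] < slot)
			slot = delta[i];
	}
	return slot;
}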
static void edf_wake_next(struct task_queue *tq)
{
	struct task_struct *tsk;
	struct task_struct *tmp;
	struct task_struct *task;
	ktime last = 0;

	if (list_empty(&tq->wake))
		return;

	task = list_entry(tq->wake.next, struct task_struct, node);

+	last = ktime_get();
+
+	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+		if (tsk->deadline > last)
+			last = tsk->deadline;
+	}

	// if (next->attr.policy == SCHED_EDF)
	//	return;

-	/* initially set current time as wakeup */
-	task->wakeup = ktime_add(ktime_get(), task->attr.period);
+	/* initially set the furthest deadline as wakeup */
+	task->wakeup = ktime_add(last, task->attr.period);
+	/* add overhead */
+	task->wakeup = ktime_add(task->wakeup, 2000UL);

	task->deadline = ktime_add(task->wakeup, task->attr.deadline_rel);

	task->first_wake = task->wakeup;
	task->first_dead = task->deadline;
	// printk("---- %s %llu\n", task->name, task->first_wake);

	list_move(&task->node, &tq->run);
}
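The rewritten wakeup placement queues a newly woken task one period after the furthest deadline currently in the run queue, rather than one period after "now", plus a fixed 2000 ns overhead allowance. A minimal sketch of that calculation, assuming nanosecond-based ktime as elsewhere in this patch:

#include <stdint.h>

typedef int64_t ktime;

/* place the first activation after the furthest pending deadline,
 * so the new task does not preempt already-admitted EDF work */
static ktime edf_initial_wakeup(ktime furthest_deadline, ktime period)
{
	ktime wakeup;

	wakeup = furthest_deadline + period;
	wakeup += 2000; /* scheduling overhead allowance, 2000 ns (2 us) */
	return wakeup;
}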
@@ -812,20 +863,23 @@ static void edf_enqueue(struct task_queue *tq, struct task_struct *task)
printk("---- NOT SCHEDUL-ABLE---\n");
return;
}
#if 0
/** XXX **/
if (task->state == TASK_RUN)
list_add_tail(&task->node, &tq->run);
list_move(&task->node, &tq->run);
else
list_add_tail(&task->node, &tq->wake);
#endif
#if 1
list_move(&task->node, &tq->wake);
#endif
}
static ktime edf_timeslice_ns(struct task_struct *task)
{
-	return 0;
+	return (ktime) (task->runtime);
}
static int edf_check_sched_attr(struct sched_attr *attr)
......
@@ -30,6 +30,10 @@ static struct task_struct *rr_pick_next(struct task_queue *tq)
		 * round robin
		 */
		list_move_tail(&next->node, &tq->run);

+		/* reset runtime */
+		next->runtime = (next->attr.priority * tick_get_period_min_ns());

		break;
	}
@@ -68,6 +72,7 @@ static void rr_wake_next(struct task_queue *tq)
static void rr_enqueue(struct task_queue *tq, struct task_struct *task)
{
+	task->runtime = (task->attr.priority * tick_get_period_min_ns());

	/** XXX **/
	if (task->state == TASK_RUN)
		list_add_tail(&task->node, &tq->run);
@@ -89,7 +94,7 @@ static void rr_enqueue(struct task_queue *tq, struct task_struct *task)
static ktime rr_timeslice_ns(struct task_struct *task)
{
-	return (ktime) (task->attr.priority * tick_get_period_min_ns() * 1000);
+	return (ktime) (task->attr.priority * tick_get_period_min_ns());
}
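With this change, a round-robin timeslice is the task's priority times the minimum tick period (the earlier factor of 1000 is gone), and the same value re-initializes task->runtime in rr_enqueue() and rr_pick_next(). A standalone sketch with a made-up 100 us minimum tick period:

#include <stdint.h>

typedef int64_t ktime;

/* illustrative stand-in for tick_get_period_min_ns() */
#define TICK_PERIOD_MIN_NS 100000LL

/* higher-priority RR tasks get proportionally longer slices */
static ktime rr_slice(unsigned int priority)
{
	return (ktime)(priority * TICK_PERIOD_MIN_NS);
}
/* e.g. priority 4 -> 400000 ns = 400 us per slice */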
......