diff --git a/include/asm-generic/thread.h b/include/asm-generic/thread.h
index 02b3ac8592f0c9516b73e65c905f63c8fe2a689e..11b61f11652732b9dda09712b75d980a09829d1b 100644
--- a/include/asm-generic/thread.h
+++ b/include/asm-generic/thread.h
@@ -19,6 +19,8 @@
 
 #include <asm/thread.h>
 
+struct task_struct;
+
 void arch_init_task(struct task_struct *task,
 		    int (*thread_fn)(void *data),
 		    void *data);
diff --git a/include/kernel/time.h b/include/kernel/time.h
index cb74b543269990e5e319d5c2b2be2b2e23ef5eab..5ffde6a5f18cfc6c74efc29968f0d8e4834eacc1 100644
--- a/include/kernel/time.h
+++ b/include/kernel/time.h
@@ -25,7 +25,7 @@
 
 #include <kernel/clocksource.h>
 
-
+#if (INTPTR_MAX == INT32_MAX)
 /* we use the compiler-defined struct timespec at this time, but we can
  * at least verify the size of the types to see if we are compatible
  */
@@ -33,7 +33,7 @@
 
 compile_time_assert((member_size(struct timespec, tv_sec)  == sizeof(int32_t)), TIMESPEC_SEC_SIZE_MISMATCH);
 compile_time_assert((member_size(struct timespec, tv_nsec) == sizeof(int32_t)), TIMESPEC_NSEC_SIZE_MISMATCH);
-
+#endif /* (INTPTR_MAX == INT32_MAX) */
 
 #define MSEC_PER_SEC	1000L
 #define USEC_PER_MSEC	1000L
diff --git a/init/main.c b/init/main.c
index 20288b960707dafe14550c229cbf135803d8f0e9..1bd509f45f6912dfeaf3f9dc31a2fcff1dc489c1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -104,8 +104,8 @@ int task0(void *data)
 		a = xa;
 		b = xb;
 		c = xc;
 
-		printk("%d %d %d\n", a, b, c);
-//		sched_yield();
+		printk("%d %d %d %llu\n", a, b, c, ktime_get());
+		sched_yield();
 	}
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d1e47538b769dc531773368ae11b56fbb60c669e..90855ea226f106e708c7b497ff740c505b9bd476 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -60,6 +60,7 @@ void schedule(void)
 
 	current->runtime = ktime_sub(current->exec_start, ktime_get());
 
+retry:
 	/* XXX: for now, try to wake up any threads not running
 	 * this is a waste of cycles and instruction space; should be
 	 * done in the scheduler's code (somewhere) */
@@ -152,6 +153,7 @@ void schedule(void)
 	tick_set_next_ns(ktime_sub(slot_ns, 2000LL));
 #if 1
 	if (slot_ns < 20000UL) {
+		goto retry;
 		printk("wake %llu slot %llu %s\n", wake_ns, slot_ns, next->name);
 		BUG();
 	}
diff --git a/kernel/sched/edf.c b/kernel/sched/edf.c
index fe5f1959d39a99e95bf9045fda33e6c62124ba7c..e04c07ffed7721c94fd624dea863133a90a5e659 100644
--- a/kernel/sched/edf.c
+++ b/kernel/sched/edf.c
@@ -8,15 +8,7 @@
 #include <kernel/kmem.h>
 #include <kernel/err.h>
 #include <kernel/printk.h>
-
-#include <asm-generic/irqflags.h>
-#include <asm-generic/spinlock.h>
-
-
-#include <asm/switch_to.h>
-
 #include <kernel/string.h>
-
 #include <kernel/tick.h>
 
 
@@ -37,9 +29,9 @@ void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 	ktime prev = 0;
 	ktime prevd = 0;
 
-	printk("\nt: %lld\n", ktime_to_us(now));
-	printk("S\tDeadline\tWakeup\tdelta W\tdelta P\tt_rem\ttotal\tslices\tName\t\tfirstwake, firstdead, execstart\n");
-	printk("----------------------------------------------\n");
+	printk("\nktime: %lld\n", ktime_to_us(now));
+	printk("S\tDeadline\tWakeup\tdelta W\tdelta P\tt_rem\ttotal\tslices\tName\t|\tdeltaw\tdeltad\twake0\tdead0\texecstart\n");
+	printk("---------------------------------------------------------------------------------------------------------------------------------\n");
 
 	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
 		if (tsk->attr.policy == SCHED_RR)
@@ -55,16 +47,20 @@ void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 		if (tsk->state == TASK_RUN)
 			state = 'R';
 
-		printk("%c\t%lld\t\t%lld\t%lld\t%lld\t%lld\t%lld\t%d\t%s %lld | %lld %lld %lld %lld\n",
+		printk("%c\t%lld\t\t%lld\t%lld\t%lld\t%lld\t%lld\t%d\t%s\t|\t%lld\t%lld\t%lld\t%lld\t%lld\n",
 			state, ktime_to_us(tsk->deadline), ktime_to_us(tsk->wakeup),
 			ktime_to_us(rel_wait), ktime_to_us(rel_deadline),
 			ktime_to_us(tsk->runtime), ktime_to_us(tsk->total),
-			tsk->slices, tsk->name, ktime_us_delta(prev, tsk->wakeup), ktime_us_delta(prevd, tsk->deadline),
+			tsk->slices, tsk->name,
+			ktime_us_delta(prev, tsk->wakeup),
+			ktime_us_delta(prevd, tsk->deadline),
 			ktime_to_us(tsk->first_wake), ktime_to_us(tsk->first_dead), ktime_to_us(tsk->exec_start));
 
 		prev = tsk->wakeup;
 		prevd = tsk->deadline;
 	}
+
+	printk("\n\n");
 }
 ktime total;
 ktime times;
@@ -117,9 +113,9 @@ static inline bool schedule_edf_can_execute(struct task_struct *tsk, ktime now)
 	if (tsk->runtime <= 0)
 		return false;
 	if (ktime_before(tsk->deadline, now)) {
-		printk("%s violated, %lld %lld, dead %lld wake %lld now %lld\n", tsk->name,
+		printk("%s violated, %lld %lld, dead %lld wake %lld now %lld start %lld\n", tsk->name,
 		       tsk->runtime, ktime_us_delta(tsk->deadline, now),
-		       tsk->deadline, tsk->wakeup, now);
+		       tsk->deadline, tsk->wakeup, now, tsk->exec_start);
 //		sched_print_edf_list_internal(now);
 		BUG();
 		return false;
@@ -149,8 +145,14 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 	tsk->wakeup = ktime_add(tsk->wakeup, tsk->attr.period);
 
 #if 1
-	if (ktime_after(now, tsk->wakeup))
+	if (ktime_after(now, tsk->wakeup)) {
 		printk("%s delta %lld\n",tsk->name, ktime_us_delta(tsk->wakeup, now));
+		printk("%s violated, %lld %lld, dead %lld wake %lld now %lld start %lld\n", tsk->name,
+		       tsk->runtime, ktime_us_delta(tsk->deadline, now),
+		       tsk->deadline, tsk->wakeup, now, tsk->exec_start);
+	}
+
+	BUG_ON(ktime_after(now, tsk->wakeup)); /* deadline missed earlier? */
 #endif
 
 
@@ -604,6 +606,34 @@ static ktime edf_hyperperiod(struct task_queue *tq)
  * @brief EDF schedulability test
  *
  * @returns 0 if schedulable, <0 otherwise
+ *
+ *
+ * * 1) determine task with longest period
+ *
+ *	T1: (P=50, D=20, R=10)
+ *
+ * 2) calculate unused head and tail (before and after deadline)
+ *
+ *	UH = D1 - R1			(= 20) (Hyperperiod)
+ *	UT = P1 - D1			(= 60)
+ *
+ * 3) loop over other tasks (Period < Deadline of Task 1)
+ *
+ *	calculate slots usage before deadline of Task 1:
+ *
+ *	H * Ri * D1 / Pi		(T2: 10, T5: 10)
+ *
+ *	update head slots UH = UH - 20 = 0 -> all used
+ *
+ *
+ *	calculate slot usage after deadline of Task2:
+ *
+ *	H * Ri * F1 / Pi		(T2: 15, T5: 15)
+ *
+ *	update tail slots: UT = UT - 30 = 30
+ *
+ *	-> need hyperperiod factor H = 2
+ *
  */
 static int edf_schedulable(struct task_queue *tq, const struct task_struct *task)
 {
@@ -616,6 +646,7 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 	ktime uh, ut, f1;
 
+	ktime sh = 0, st = 0;
 	struct task_struct *t0 = NULL;
 	struct task_struct *tsk = NULL;
 	struct task_struct *tmp;
@@ -633,13 +664,28 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 		printk("appears to be empty\n");
 
 	list_for_each_entry_safe(tsk, tmp, &tq->new, node) {
-//		printk("%s\n", tsk->name);
 		if (tsk->attr.period > max) {
 			t0 = tsk;
 			max = tsk->attr.period;
 		}
 	}
 
+	list_for_each_entry_safe(tsk, tmp, &tq->wake, node) {
+		if (tsk->attr.period > max) {
+			t0 = tsk;
+			max = tsk->attr.period;
+		}
+	}
+
+	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+		if (tsk->attr.period > max) {
+			t0 = tsk;
+			max = tsk->attr.period;
+		}
+	}
+
+
 	BUG_ON(!t0);
 
 	BUG_ON(p < t0->attr.period);
@@ -654,35 +700,102 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 
 	f1 = ut/h;
 
-//	printk("max UH: %lld, UT: %lld\n", ktime_to_us(uh), ktime_to_us(ut));
+	printk("max UH: %lld, UT: %lld\n", ktime_to_us(uh), ktime_to_us(ut));
+
+	/* subtract longest period thread from head, its slices must always
+	 * be used before the deadline
+	 */
+	sh = h * t0->attr.wcet * t0->attr.deadline_rel / t0->attr.period;
+	uh = uh - sh;
+	printk("%s UH: %lld, UT: %lld\n", t0->name, ktime_to_us(uh), ktime_to_us(ut));
+	printk("%s SH: %lld, ST: %lld\n", t0->name, ktime_to_us(sh), ktime_to_us(st));
 
 	/* add all in wakeup */
-	struct task_struct *tsk2 = NULL;
 	struct task_struct *tmp2;
 	if (!list_empty(&tq->wake)) {
-		list_for_each_entry_safe(tsk2, tmp2, &tq->wake, node) {
+		list_for_each_entry_safe(tsk, tmp, &tq->wake, node) {
 
-			if (tsk2->attr.policy != SCHED_EDF)
+			if (tsk == t0)
+				continue;
+
+			if (tsk->attr.policy != SCHED_EDF)
 				continue;
 
-			u += (double) (int32_t) tsk2->attr.wcet / (double) (int32_t) tsk2->attr.period;
+			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+
+
+			if (tsk->attr.deadline_rel < t0->attr.deadline_rel) {
+
+				/* slots before deadline of T0 */
+				sh = h * tsk->attr.wcet * t0->attr.deadline_rel / tsk->attr.period;
+
+				if (sh > uh) {
+					printk("NOT SCHEDULABLE in head: %s\n", tsk->name);
+					BUG();
+				}
+				uh = uh - sh;
+
+			}
+
+			/* slots after deadline of T0 */
+			st = h * tsk->attr.wcet * f1 / tsk->attr.period;
+//			printk("%s tail usage: %lld\n", tsk->name, ktime_to_ms(st));
+			if (st > ut) {
+				printk("NOT SCHEDULABLE in tail: %s\n", tsk->name);
+				BUG();
+			}
+			ut = ut - st;
+
+
+			printk("%s UH: %lld, UT: %lld\n", tsk->name, ktime_to_us(uh), ktime_to_us(ut));
+			printk("%s SH: %lld, ST: %lld\n", tsk->name, ktime_to_us(sh), ktime_to_us(st));
 		}
 	}
 
 	/* add all running */
 	if (!list_empty(&tq->run)) {
-		list_for_each_entry_safe(tsk2, tmp2, &tq->run, node) {
+		list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
 
-			if (tsk2->attr.policy != SCHED_EDF)
+			if (tsk == t0)
 				continue;
 
-			u += (double) (int32_t) tsk2->attr.wcet / (double) (int32_t) tsk2->attr.period;
+			if (tsk->attr.policy != SCHED_EDF)
+				continue;
+
+			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+
+
+			if (tsk->attr.deadline_rel < t0->attr.deadline_rel) {
+
+				/* slots before deadline of T0 */
+				sh = h * tsk->attr.wcet * t0->attr.deadline_rel / tsk->attr.period;
+
+				if (sh > uh) {
+					printk("NOT SCHEDULABLE in head: %s\n", tsk->name);
+					BUG();
+				}
+				uh = uh - sh;
+
+			}
+
+			/* slots after deadline of T0 */
+			st = h * tsk->attr.wcet * f1 / tsk->attr.period;
+//			printk("%s tail usage: %lld\n", tsk->name, ktime_to_ms(st));
+			if (st > ut) {
+				printk("NOT SCHEDULABLE in tail: %s\n", tsk->name);
+				BUG();
+			}
+			ut = ut - st;
+
+
+			printk("UH: %lld, UT: %lld\n", ktime_to_us(uh), ktime_to_us(ut));
+			printk("SH: %lld, ST: %lld\n", ktime_to_us(sh), ktime_to_us(st));
 		}
 	}
 
-
+#if 0
 	//list_for_each_entry_safe(tsk, tmp, &tq->new, node) {
 	tsk = t0;
@@ -715,7 +828,8 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 
 	ut = ut - st;
 
-//	printk("UH: %lld, UT: %lld\n", ktime_to_ms(uh), ktime_to_ms(ut));
+	printk("UH: %lld, UT: %lld\n", ktime_to_us(uh), ktime_to_us(ut));
+	printk("SH: %lld, ST: %lld\n", ktime_to_us(sh), ktime_to_us(st));
 
 
 
@@ -727,7 +841,7 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 
 	u += (double) (int32_t) task->attr.wcet / (double) (int32_t) task->attr.period;
 
-
+#endif
 
 
 
@@ -737,10 +851,10 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 			return -EINVAL;
 		printk("changed task mode to RR\n", u);
 	} else {
-		// printk("Utilisation: %g\n", u);
+		printk("Utilisation: %g\n", u);
 		return 0;
 	}
-	}
+//	}
 
 	u = (double) (int32_t) task->attr.wcet / (double) (int32_t) task->attr.period;
@@ -772,10 +886,11 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 
 	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
 
+//		printk("checking %s\n", tsk->name);
 		/* time to wake up yet? */
 		delta = ktime_delta(tsk->wakeup, now);
 
-		if (delta >= 20000) {
+		if (delta > 0) {
 
 			/* nope, just update minimum runtime for this slot */
@@ -783,7 +898,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 				slot = delta;
 			//	printk("d %lld now: %lld \n", ktime_to_us(delta), now);
 			}
-//			printk("delta %llu %llu\n", delta, tsk->wakeup);
+			// printk("delta %llu %llu\n", delta, tsk->wakeup);
 			continue;
 		}
@@ -798,6 +913,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 		if (!schedule_edf_can_execute(tsk, now)) {
 			schedule_edf_reinit_task(tsk, now);
 
+			// printk("reinit %s\n", tsk->name);
 			/* nope, update minimum runtime for this slot */
 			delta = ktime_delta(tsk->wakeup, now);
@@ -813,15 +929,20 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 				printk("delta %lld %lld\n", ktime_to_us(delta), ktime_to_us(tick_get_period_min_ns()));
 				BUG_ON(delta < 0);
 
-			continue;
+			// continue;
 		}
+
+		// if (tsk->runtime < tsk->attr.wcet)
+		//	printk("VVV %s %lld %lld\n", tsk->name, tsk->runtime, tsk->attr.wcet);
+
 		/* if our deadline is earlier than the deadline at the
 		 * head of the list, move us to top */
 
 		first = list_first_entry(&tq->run, struct task_struct, node);
 
 		if (ktime_before (tsk->deadline, first->deadline)) {
+			tsk->state = TASK_RUN;
 			// go = tsk;
 			list_move(&tsk->node, &tq->run);
 			// printk("1 to top! %s\n", tsk->name);
@@ -836,6 +957,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 			tsk->state = TASK_RUN;
 
 			/* move to top */
+			// printk("%s now in state RUN\n", tsk->name);
 
 			/* if our deadline is earlier than the deadline at the
@@ -846,7 +968,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 			if (ktime_before (tsk->deadline, first->deadline)) {
 				// go = tsk;
 				list_move(&tsk->node, &tq->run);
-				// printk("2 to top! %s\n", tsk->name);
+				// printk("%s has earlier deadline, moved to top\n", tsk->name);
 			}
 
 			// printk("2 nope %s\n", tsk->name);
@@ -857,9 +979,11 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 	first = list_first_entry(&tq->run, struct task_struct, node);
 	delta = ktime_delta(first->wakeup, now);
 
-	if (delta <= 0)
-		if (first->state == TASK_RUN)
+//	if (delta <= 0)
+	if (first->state == TASK_RUN) {
 		go = first;
+		slot = first->runtime;
+	}
 
 #if 0
 	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
@@ -871,10 +995,6 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 			);
 	}
 #endif
-
-	if (slot < 20000)
-		printk("BUG %lld\n", slot);
-
 	// if (!go)
 	//	printk("NULL\n");
 	// printk("in %llu\n", ktime_to_ms(slot));
@@ -922,15 +1042,18 @@ static void edf_wake_next(struct task_queue *tq)
 	task->wakeup = ktime_add(last, task->attr.period);
 	/* add overhead */
 	// task->wakeup = ktime_add(task->wakeup, 50000UL);
+#if 0
 	task->wakeup = ktime_add(task->wakeup, per);
+#endif
 	task->deadline = ktime_add(task->wakeup, task->attr.deadline_rel);
 
 	task->first_wake = task->wakeup;
 	task->first_dead = task->deadline;
+	task->state = TASK_IDLE;
 
 	// printk("---- %s %llu\n", task->name, task->first_wake);
-	list_move(&task->node, &tq->run);
+	list_move_tail(&task->node, &tq->run);
 }
@@ -960,7 +1083,7 @@ static void edf_enqueue(struct task_queue *tq, struct task_struct *task)
 #endif
 
 #if 1
-	list_move(&task->node, &tq->wake);
+	list_move_tail(&task->node, &tq->wake);
 #endif
 }
@@ -1015,6 +1138,8 @@ error:
 	return -EINVAL;
 }
 
+/* called after pick_next() */
+
 ktime edf_task_ready_ns(struct task_queue *tq)
 {
@@ -1024,15 +1149,29 @@ ktime edf_task_ready_ns(struct task_queue *tq)
 	struct task_struct *tsk;
 	struct task_struct *tmp;
 	ktime now = ktime_get();
-	ktime wake;
+	ktime wake = 123456789123LL;
 
-	wake = ktime_add(now, slot);
+
+
+//	wake = ktime_add(now, slot);
 
 	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
+#if 0
+		if (tsk->state == TASK_IDLE)
+			continue;
+#endif
+		delta = ktime_delta(now, tsk->wakeup);
+		if (delta <= 0)
+			continue;
+
+		if (wake > delta)
+			wake = delta;
+#if 0
 		/* all currently runnable task are at the top of the list */
 		if (tsk->state != TASK_RUN)
 			break;
-
+#endif
+#if 0
 		if (ktime_before(wake, tsk->wakeup))
 			continue;
@@ -1054,13 +1193,22 @@ ktime edf_task_ready_ns(struct task_queue *tq)
 		// list_move(&tsk->node, &tq->run);
 		BUG_ON(slot <= 0);
 	}
+#endif
 	}
 
 	/* subtract call overhead */
+//	slot = wake;
 	//slot = ktime_sub(slot, 10000ULL);
 	//slot = ktime_sub(slot, 2000ULL);
+	//
+	if (slot > wake) {
+		printk("\nvvvvvvvvvvvvvvv\n");
+		printk("Slice adjusted from %lld to %lld (%lld)\n", ktime_to_us(slot), ktime_to_us(wake), ktime_to_us(wake - slot));
+		printk("\n^^^^^^^^^^^^^^^\n");
+		slot = wake;
-	BUG_ON(slot < 0);
+	}
+	BUG_ON(slot <= 0);
 
 	return slot;
 }
diff --git a/tools/testing/unittest/Makefile b/tools/testing/unittest/Makefile
index 04ee6565e0f7469f33a8f7a4d2541df5e52418c1..484f904a9e2812f70ab3c5a603063af8c1deaba0 100644
--- a/tools/testing/unittest/Makefile
+++ b/tools/testing/unittest/Makefile
@@ -1,4 +1,4 @@
-TARGETS += sysctl
+TARGETS += edf sysctl
 
 #Please keep the TARGETS list alphabetically sorted
 
diff --git a/tools/testing/unittest/edf/Makefile b/tools/testing/unittest/edf/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..5fc31a5a9115392ce8961f8e3811b790218219cb
--- /dev/null
+++ b/tools/testing/unittest/edf/Makefile
@@ -0,0 +1,34 @@
+CPPFLAGS += -DCONFIG_KERNEL_PRINTK
+
+CFLAGS += -g
+CFLAGS += -I.
+CFLAGS += -I../
+CFLAGS += -I../shared
+CFLAGS += -I../../../../include/
+CFLAGS += -I../../../../include/kernel
+CFLAGS += -I../../../../kernel
+
+LDFLAGS += -Wl,--wrap=kmalloc
+LDFLAGS += -Wl,--wrap=ktime_get
+
+TEST_PROGS := edf_test
+
+
+$(TEST_PROGS): edf_test.o \
+	       ../shared/kmalloc_test_wrapper.o \
+	       ../shared/wrap_ktime_get.o \
+	       ../../../../kernel/time.o \
+	       ../../../../kernel/printk.o
+
+
+all: $(TEST_PROGS)
+
+
+include ../lib.mk
+
+clean:
+	$(RM) $(TEST_PROGS) edf_test.o \
+	      ../shared/kmalloc_test_wrapper.o \
+	      ../shared/wrap_ktime_get.o \
+	      ../../../../kernel/time.o \
+	      ../../../../kernel/printk.o
diff --git a/tools/testing/unittest/edf/asm/thread.h b/tools/testing/unittest/edf/asm/thread.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5d53f6bb76493df6767f9e9671ecbe51ec22539
--- /dev/null
+++ b/tools/testing/unittest/edf/asm/thread.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_THREAD_H_
+#define _ASM_THREAD_H_
+
+struct thread_info {
+};
+
+#endif /* _ASM_THREAD_H_ */
diff --git a/tools/testing/unittest/edf/edf_test.c b/tools/testing/unittest/edf/edf_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..2df831c7003bb7d6e7241a7ac257a80d5affdca9
--- /dev/null
+++ b/tools/testing/unittest/edf/edf_test.c
@@ -0,0 +1,221 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include <kselftest.h>
+
+#include <shared.h>
+#include <kmalloc_test_wrapper.h>
+#include <wrap_ktime_get.h>
+
+#include <kernel/kthread.h>
+
+#ifdef printk
+#undef printk
+#define printk(fmt, ...) printk(fmt, ...)
+#endif
+
+
+/* include header + src file for static function testing */
+//#include <kernel/sysctl.h>
+#include <sched/edf.c>
+
+
+static ktime kernel_time;
+static unsigned long tick_period_min_ns = 100000UL;
+
+/* needed dummy functions */
+unsigned long tick_get_period_min_ns(void)
+{
+	return tick_period_min_ns;
+}
+
+int sched_register(struct scheduler *sched)
+{
+}
+
+
+/* tests */
+
+
+/*
+ * @test sched_edf_init_test
+ */
+
+static void sched_edf_init_test(void)
+{
+	KSFT_ASSERT(sched_edf_init() == 0);
+}
+
+
+/*
+ * @test sched_edf_create_tasks_test
+ */
+
+static void sched_edf_create_tasks_test(void)
+{
+	struct task_struct *t;
+	struct sched_attr attr;
+
+
+	/* create task 1 */
+	t = kmalloc(sizeof(struct task_struct));
+	KSFT_ASSERT_PTR_NOT_NULL(t);
+
+	t->name = kmalloc(32);
+	KSFT_ASSERT_PTR_NOT_NULL(t->name);
+
+	snprintf(t->name, 32, "task_1");
+
+	t->sched = &sched_edf;
+	t->attr.policy = SCHED_EDF;
+	t->attr.period       = us_to_ktime(1000);
+	t->attr.deadline_rel = us_to_ktime(900);
+	t->attr.wcet         = us_to_ktime(300);
+	edf_enqueue(&t->sched->tq, t);
+
+
+	/* create task 2 */
+	t = kmalloc(sizeof(struct task_struct));
+	KSFT_ASSERT_PTR_NOT_NULL(t);
+
+	t->name = kmalloc(32);
+	KSFT_ASSERT_PTR_NOT_NULL(t->name);
+
+	snprintf(t->name, 32, "task_2");
+
+	t->sched = &sched_edf;
+	t->attr.policy = SCHED_EDF;
+	t->attr.period       = us_to_ktime(800);
+	t->attr.deadline_rel = us_to_ktime(700);
+	t->attr.wcet         = us_to_ktime(200);
+	edf_enqueue(&t->sched->tq, t);
+
+
+	/* create task 3 */
+	t = kmalloc(sizeof(struct task_struct));
+	KSFT_ASSERT_PTR_NOT_NULL(t);
+
+	t->name = kmalloc(32);
+	KSFT_ASSERT_PTR_NOT_NULL(t->name);
+
+	snprintf(t->name, 32, "task_3");
+
+	t->sched = &sched_edf;
+	t->attr.policy = SCHED_EDF;
+	t->attr.period       = us_to_ktime(300);
+	t->attr.deadline_rel = us_to_ktime(200);
+	t->attr.wcet         = us_to_ktime(100);
+	edf_enqueue(&t->sched->tq, t);
+
+
+	/* create task 4 */
+	t = kmalloc(sizeof(struct task_struct));
+	KSFT_ASSERT_PTR_NOT_NULL(t);
+
+	t->name = kmalloc(32);
+	KSFT_ASSERT_PTR_NOT_NULL(t->name);
+
+	snprintf(t->name, 32, "task_4");
+
+	t->sched = &sched_edf;
+	t->attr.policy = SCHED_EDF;
+	t->attr.period       = us_to_ktime(2000);
+	t->attr.deadline_rel = us_to_ktime(900);
+	t->attr.wcet         = us_to_ktime(100);
+	edf_enqueue(&t->sched->tq, t);
+}
+
+
+/*
+ * @test sched_edf_create_tasks_test
+ */
+
+#define CYCLES 10
+
+static void sched_edf_schedule_test(void)
+{
+	int i;
+	int64_t wake;
+	int64_t slice;
+
+	struct task_struct *next = NULL;
+	struct task_struct *curr = NULL;
+
+
+
+	for (i = 0; i < CYCLES; i++) {
+		curr = next;
+
+		if (curr) {
+			printk("started: %lld now %lld\n", curr->exec_start, ktime_get());
+			/* remove runtime of slice from curr */
+			curr->runtime = ktime_sub(curr->runtime, ktime_sub(ktime_get(), curr->exec_start));
+		}
+
+		edf_wake_next(&sched_edf.tq);
+
+		sched_print_edf_list_internal(&sched_edf.tq, ktime_get());
+
+		next = edf_pick_next(&sched_edf.tq);
+		sched_print_edf_list_internal(&sched_edf.tq, ktime_get());
+
+		if (next) {
+			slice = next->runtime;
+			printk("Next: %s slice %lld\n", next->name, ktime_to_us(slice));
+		} else {
+			slice = 1000000000; /* retry in 1 second */
+			printk("Next: NONE\n");
+		}
+
+		wake = edf_task_ready_ns(&sched_edf.tq);
+		printk("New task ready in %llu\n", ktime_to_us(wake));
+
+		if (wake < slice) {
+			printk("reducing slice from %lld to %lld (%lld)\n", ktime_to_us(slice), ktime_to_us(wake), ktime_to_us(wake - slice));
+			slice = wake;
+		}
+
+
+		/* prepare run: save slice start time */
+		if (next)
+			next->exec_start = ktime_get();
+
+
+
+		/* our timeslice has passed: assume we return in time */
+		kernel_time += slice;
+		printk("\npretending slice of %lld\n", ktime_to_us(slice));
+		ktime_wrap_set_time(kernel_time);
+
+	}
+}
+
+
+
+int main(int argc, char **argv)
+{
+
+	printk("Testing EDF scheduler\n\n");
+
+	/* we need full control over ktime */
+	ktime_get_wrapper(ENABLED);
+
+	KSFT_RUN_TEST("sched_edf_init",
+		      sched_edf_init_test);
+
+	KSFT_RUN_TEST("creating tasks",
+		      sched_edf_create_tasks_test)
+
+	KSFT_RUN_TEST("emulating scheduling cycles",
+		      sched_edf_schedule_test)
+
+
+	printk("\n\nEDF scheduler test complete:\n");
+
+	ksft_print_cnts();
+
+	return ksft_exit_pass();
+}
diff --git a/tools/testing/unittest/shared/wrap_ktime_get.c b/tools/testing/unittest/shared/wrap_ktime_get.c
new file mode 100644
index 0000000000000000000000000000000000000000..26bbbdbb6c22a4e8d560f3bd32ca63f7ae6fc884
--- /dev/null
+++ b/tools/testing/unittest/shared/wrap_ktime_get.c
@@ -0,0 +1,62 @@
+/**
+ * @file    wrap_ktime_get.c
+ * @ingroup mockups
+ * @author  Armin Luntzer (armin.luntzer@univie.ac.at),
+ * @date    2015
+ *
+ * @copyright GPLv2
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <shared.h>
+#include <stdint.h>
+#include <kernel/time.h>
+#include <wrap_ktime_get.h>
+
+
+static ktime kernel_time;
+
+
+/**
+ * @brief sets the internal kernel time
+ */
+
+void ktime_wrap_set_time(ktime time)
+{
+	kernel_time = time;
+}
+
+/**
+ * @brief tracks the functional status of the wrapper
+ */
+
+enum wrap_status ktime_get_wrapper(enum wrap_status ws)
+{
+	static enum wrap_status status = DISABLED;
+
+	if (ws != QUERY)
+		status = ws;
+
+	return status;
+}
+
+
+/**
+ * @brief a wrapper of ktime_get
+ */
+
+ktime __wrap_ktime_get(void)
+{
+	if (ktime_get_wrapper(QUERY) == DISABLED)
+		return __real_ktime_get();
+
+	return kernel_time;
+}
diff --git a/tools/testing/unittest/shared/wrap_ktime_get.h b/tools/testing/unittest/shared/wrap_ktime_get.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb3b081909ae0eab3861cae614615c626595c01f
--- /dev/null
+++ b/tools/testing/unittest/shared/wrap_ktime_get.h
@@ -0,0 +1,34 @@
+/**
+ * @file   wrap_ktime_get.h
+ * @author Armin Luntzer (armin.luntzer@univie.ac.at),
+ * @date   2015
+ *
+ * @copyright GPLv2
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef WRAP_KTIME_GET
+#define WRAP_KTIME_GET
+
+#include <shared.h>
+#include <stdint.h>
+#include <kernel/time.h>
+
+enum wrap_status ktime_get_wrapper(enum wrap_status ws);
+
+ktime __real_ktime_get(void);
+
+ktime __wrap_ktime_get(void);
+
+void ktime_wrap_set_time(ktime time);
+
+
+
+#endif