Commit d0d927b5 authored by Armin Luntzer

saving changes:

	* split EDF scheduler to separate file
	* some cleanup
parent 66fd145d
@@ -37,15 +37,27 @@ extern struct thread_info *current_set[];
/**
* @brief this is a wrapper that actually executes the thread function
*/
#include <kernel/time.h>
static void th_starter(void)
{
struct task_struct *task = current_set[0]->task;
struct timespec ts;
double start;
double stop;
ts = get_uptime();
start = (double) ts.tv_sec + (double) ts.tv_nsec / 1e9;
task->thread_fn(task->data);

arch_local_irq_disable();

ts = get_uptime();
stop = (double) ts.tv_sec + (double) ts.tv_nsec / 1e9;

printk("thread: %p returned after %gs\n", task->stack, stop-start);
task->state = TASK_DEAD;
arch_local_irq_enable();
schedule();
......
@@ -2,25 +2,27 @@
* @file arch/sparc/kernel/traps/data_access_exception_trap.S
* @brief this is a function that is called by a custom trap handler to handle
* an MMU or EDAC trap
*
* @todo this is BCC-specific
*/
#ifdef CONFIG_ARCH_CUSTOM_BOOT_CODE
#include <asm/ttable.h>
#else
/* XXX: BCC */
#define SAVE_ALL_HEAD \
sethi %hi(leonbare_trapsetup), %l4; \
jmpl %l4 + %lo(leonbare_trapsetup), %l6;
#define SAVE_ALL \
SAVE_ALL_HEAD \
nop;
/* All traps low-level code here must end with this macro. */
#define RESTORE_ALL b leonbare_trapreturn; clr %l6;
#endif
#define FW_REGS_SZ 0x90 /* 36*4 */
#define SF_REGS_SZ 0x60 /* 24*4 */
......
@@ -32,7 +32,7 @@ enum sched_policy {
SCHED_OTHER,
};
extern volatile int sched_edf;
struct task_struct {
struct thread_info thread_info;
@@ -69,7 +69,11 @@ struct task_struct {
ktime deadline; /* deadline of current period */
ktime exec_start;
ktime total;
unsigned long slices;
ktime first_wake;
ktime first_dead;
/* Tasks may have a parent and any number of siblings or children.
* If the parent is killed or terminated, so are all siblings and
@@ -96,6 +100,7 @@ void switch_to(struct task_struct *next);
void schedule(void);
void sched_yield(void);
void sched_print_edf_list_internal(ktime now);
void sched_print_edf_list(void);
void kthread_set_sched_edf(struct task_struct *task, unsigned long period_us,
......
@@ -45,160 +45,28 @@
#endif /* GCC_VERSION */
#include <kernel/irq.h>
int task1(void *data)
{
	while (1) {
		printk(".");
//		sched_yield();
	}
}
/**
* @brief do something useless
*/
__attribute__((unused))
static void twiddle(void)
{
static int i;
const char cursor[] = {'/', '-', '\\', '|'};
printk("%c\b\b ", cursor[i]);
i = (i + 1) % ARRAY_SIZE(cursor);
}
#define TREADY 4
#if 0
static volatile int *console = (int *)0x80100100;
#else
static volatile int *console = (int *)0x80000100;
#endif
/* poll the UART status register until the transmitter is ready (TREADY),
 * then write the character to the data register; expand '\n' to CR-LF */
static int putchar(int c)
{
	while (!(console[1] & TREADY));

	console[0] = 0x0ff & c;

	if (c == '\n') {
		while (!(console[1] & TREADY));
		console[0] = (int) '\r';
	}

	return c;
}
int edf1(void *data)
{
struct timespec ts;
static struct timespec t0;
int i;
while (1) {
ts = get_ktime();
#if 0
printk("\tedf1: %g s; delta: %g s (%g Hz)\n",
(double) ts.tv_sec + (double) ts.tv_nsec / 1e9,
difftime(ts, t0), 1.0/difftime(ts, t0));
#endif
for (i = 0; i < 100000; i++)
putchar('.');
t0 = ts;
sched_yield();
printk(".");
// sched_yield();
}
}
int edf2(void *data)
{
while (1)
putchar( *((char *) data) );
}
int edf3(void *data)
{
while (1) {
putchar( *((char *) data) );
putchar( *((char *) data) );
sched_yield();
}
}
int edf4(void *data)
{
	while (1) {
		//sched_print_edf_list();
		putchar('-');
		sched_yield();
	}
}

int task2(void *data)
{
	while (1) {
		printk("-");
//		sched_yield();
	}
}
int add0r(void *data)
{
volatile int *add = (int *) data;
while (1)
add[0]++;
}
int print0r(void *data)
{
int i;
int *s = (int *) data;
static int addx[30];
while (1) {
for (i = 0; i < 30; i++)
addx[i] = s[i];
printk("delta: ");
for (i = 0; i < 30; i++)
printk("%d ", s[i]- addx[i]);
printk("\nabs: ");
for (i = 0; i < 30; i++)
printk("%d ", s[i]);
printk("\n\n");
sched_yield();
}
}
extern struct task_struct *kernel;
int threadx(void *data)
{
char c = (char) (* (char *)data);
int b = 0;
while(1) {
//printk(".");
int i;
for (i = 0; i < (int) c; i++) {
putchar(c);
b++;
}
putchar('\n');
#if 1
if (b > (int) c * (int)c)
break;
#endif
// schedule();putchar( *((char *) data) );
//twiddle();
//cpu_relax();
}
return 0;
}
/**
* @brief kernel initialisation routines
@@ -224,8 +92,8 @@ arch_initcall(kernel_init);
#include <kernel/clockevent.h>
int kernel_main(void)
{
struct task_struct *tasks[MAX_TASKS];
int tcnt = 0;
struct task_struct *t;
#if 0
void *addr;
struct elf_module m;
@@ -280,165 +148,26 @@ int kernel_main(void)
/* elevate boot thread */
kernel = kthread_init_main();
#if 0
{
struct task_struct *t1;
t1 = kthread_create(edf1, NULL, KTHREAD_CPU_AFFINITY_NONE, "Thread2");
kthread_set_sched_edf(t1, 2e5, 1e5);
kthread_wake_up(t1);
}
#endif
#if 0
{
struct task_struct *t2;
struct task_struct *t3;
t2 = kthread_create(edf3, "\n", KTHREAD_CPU_AFFINITY_NONE, "EDF%d", 1);
kthread_set_sched_edf(t2, 1 * USEC_PER_SEC, 40 * USEC_PER_MSEC, 60 * USEC_PER_MSEC + 1);
kthread_wake_up(t2);
t2 = kthread_create(edf3, "?", KTHREAD_CPU_AFFINITY_NONE, "EDF%d", 1);
kthread_set_sched_edf(t2, 1 * USEC_PER_SEC, 40 * USEC_PER_MSEC, 60 * USEC_PER_MSEC + 1);
kthread_wake_up(t2);
t3 = kthread_create(edf4, NULL, KTHREAD_CPU_AFFINITY_NONE, "EDF_other");
kthread_set_sched_edf(t3, 200 * USEC_PER_MSEC, 10 * USEC_PER_MSEC, 10 * USEC_PER_MSEC + 1);
kthread_wake_up(t3);
t3 = kthread_create(edf2, "x", KTHREAD_CPU_AFFINITY_NONE, "EDF_otherx");
kthread_set_sched_edf(t3, 300 * USEC_PER_MSEC, 5 * USEC_PER_MSEC, 5 * USEC_PER_MSEC + 1);
kthread_wake_up(t3);
t3 = kthread_create(edf2, ":", KTHREAD_CPU_AFFINITY_NONE, "EDF_otherx");
kthread_set_sched_edf(t3, 50 * USEC_PER_MSEC, 5 * USEC_PER_MSEC, 5 * USEC_PER_MSEC + 1);
kthread_wake_up(t3);
t3 = kthread_create(edf2, "o", KTHREAD_CPU_AFFINITY_NONE, "EDF_otherx");
kthread_set_sched_edf(t3, 50 * USEC_PER_MSEC, 5 * USEC_PER_MSEC, 5 * USEC_PER_MSEC + 1);
kthread_wake_up(t3);
t3 = kthread_create(edf2, "/", KTHREAD_CPU_AFFINITY_NONE, "EDF_otherx");
kthread_set_sched_edf(t3, 30 * USEC_PER_MSEC, 3 * USEC_PER_MSEC, 3 * USEC_PER_MSEC + 1);
kthread_wake_up(t3);
t3 = kthread_create(edf2, "\\", KTHREAD_CPU_AFFINITY_NONE, "EDF_otherx");
kthread_set_sched_edf(t3, 6 * USEC_PER_MSEC, 2 * USEC_PER_MSEC, 2 * USEC_PER_MSEC + 1);
kthread_wake_up(t3);
}
#endif
#if 1
	{
		t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "print");
		//kthread_set_sched_edf(t, 1000000, 50000, 90000);
		t->priority = 4;
		kthread_wake_up(t);

		t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "print1");
		//kthread_set_sched_edf(t, 1000000, 50000, 90000);
		t->priority = 8;
		kthread_wake_up(t);
	}
#endif
while(1) {
// putchar('o');
#if 0
static ktime t0;
ktime ts;
ts = ktime_get();
printk("now: %llu %llu delta %lld\n\n", ts, t0, ts-t0);
t0 = ts;
#endif
cpu_relax();
}
while(1) {
struct timespec ts;
static struct timespec t0;
ts = get_ktime();
//printk("now: %g s; delta: %g ns (%g Hz)\n", (double) ts.tv_sec + (double) ts.tv_nsec / 1e9, difftime(ts, t0), 1.0/difftime(ts, t0) );
t0 = ts;
cpu_relax();
}
{
static char zzz[] = {':', '/', '\\', '~', '|'};
int i;
for (i = 0; i < ARRAY_SIZE(zzz); i++)
kthread_create(threadx, &zzz[i], KTHREAD_CPU_AFFINITY_NONE, "Thread2");
}
{
static char zzz[] = {':', '/', '\\', '~', '|'};
static int z;
char *buf = NULL;
int i;
struct timespec ts;
ts = get_uptime();
printk("creating tasks at %d s %d ns (%g)\n", ts.tv_sec, ts.tv_nsec, (double) ts.tv_sec + (double) ts.tv_nsec / 1e9);
for (i = 0; i < MAX_TASKS; i++) {
// buf = kmalloc(30);
// BUG_ON(!buf);
// sprintf(buf, "Thread %d", z);
z++;
tasks[tcnt++] = kthread_create(threadx, &zzz[i], KTHREAD_CPU_AFFINITY_NONE, buf);
// kfree(buf);
}
}
{
int i;
struct timespec ts;
ts = get_uptime();
printk("total %d after %d s %d ns (%g)\n", tcnt, ts.tv_sec, ts.tv_nsec, (double) ts.tv_sec + (double) ts.tv_nsec / 1e9);
BUG_ON(tcnt > MAX_TASKS);
for (i = 0; i < tcnt; i++)
kthread_wake_up(tasks[i]);
arch_local_irq_disable();
ts = get_uptime();
printk("all awake after %d s %d ns (%g)\n", ts.tv_sec, ts.tv_nsec, (double) ts.tv_sec + (double) ts.tv_nsec / 1e9);
arch_local_irq_enable();
}
while(1) {
twiddle();
printk("|");
cpu_relax();
}
......
@@ -27,12 +27,12 @@
static struct {
struct list_head new;
struct list_head run;
struct list_head wake;
struct list_head dead;
} _kthreads = {
.new = LIST_HEAD_INIT(_kthreads.new),
.run = LIST_HEAD_INIT(_kthreads.run),
.wake = LIST_HEAD_INIT(_kthreads.wake),
.dead = LIST_HEAD_INIT(_kthreads.dead)
};
@@ -86,238 +86,62 @@ void kthread_cleanup_dead(void)
}
void sched_yield(void)
{
	struct task_struct *tsk;

	tsk = current_set[0]->task;
	if (tsk->policy == SCHED_EDF)
		tsk->runtime = 0;

	schedule();
}

void sched_wake(struct task_struct *next, ktime now, int64_t slot_ns)
{
	struct task_struct *task;

	if (list_empty(&_kthreads.wake))
		return;

	task = list_entry(_kthreads.wake.next, struct task_struct, node);

	if (task->policy == SCHED_EDF) {
		if (next->policy == SCHED_EDF)
			return;

		/* initially set current time as wakeup */
		task->wakeup = ktime_add(now, slot_ns);
		task->deadline = ktime_add(task->wakeup, task->deadline_rel);
		task->first_wake = task->wakeup;
		task->first_dead = task->deadline;

		list_move(&task->node, &_kthreads.run);
	}

	if (task->policy == SCHED_RR) {
		task->state = TASK_RUN;
		list_move(&task->node, &_kthreads.run);
	}
}
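/*
 * note: as reconstructed above, sched_wake() admits at most one thread per
 * scheduling pass: it only inspects the first entry of the wake list, and
 * it defers admission of an EDF thread while the next thread to run is
 * itself an EDF thread
 */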
#define MIN_SLICE 1000000LL /* 1 ms */
#define OVERHEAD 0LL
void schedule(void)
{
struct task_struct *next;
struct task_struct *current;

int64_t slot_ns = MIN_SLICE;

ktime now;
if (list_empty(&_kthreads.run))
@@ -336,24 +160,32 @@ void schedule(void)
arch_local_irq_disable();
kthread_lock();
now = ktime_add(ktime_get(), OVERHEAD);
tick_set_next_ns(1e9);
current = current_set[0]->task;

if (current->policy == SCHED_EDF) {
	ktime d;

	d = ktime_delta(now, current->exec_start);
	BUG_ON(d < 0);

	current->total = ktime_add(current->total, d);
	current->slices++;
	current->runtime = ktime_sub(current->runtime, d);
}
/** XXX not here, add cleanup thread */
kthread_cleanup_dead();
#if 1
{
static int init;
@@ -363,16 +195,25 @@
}
}
#endif
#if 0
slot_ns = schedule_edf(now);
#endif
/* round robin as before */
do {
next = list_entry(_kthreads.run.next, struct task_struct, node);
//printk("[%lld] %s\n", ktime_to_ms(ktime_get()), next->name);
if (!next)
BUG();
BUG_ON(!next);
if (next->state == TASK_RUN) {
list_move_tail(&next->node, &_kthreads.run);
@@ -390,45 +231,28 @@ void schedule(void)
if (next->policy == SCHED_EDF) {
	if (next->runtime <= slot_ns)
		slot_ns = next->runtime; /* XXX must track actual time because of IRQs */
}

if (next->policy == SCHED_RR)
	slot_ns = (ktime) next->priority * MIN_SLICE;

BUG_ON(slot_ns < 18000);

sched_wake(next, now, slot_ns);

next->exec_start = ktime_get();

kthread_unlock();
tick_set_next_ns(slot_ns);
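/*
 * note: with MIN_SLICE at 1 ms, an RR thread's priority scales its
 * timeslice linearly, e.g. the two demo threads created in kernel_main()
 * (priorities 4 and 8) are granted 4 ms and 8 ms slots respectively
 */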
#if 0
#if 0
tick_set_next_ns(30000);
#else
{
static int cnt = 2;
static int sig = 1;
struct timespec now;
now = get_ktime();
now.tv_nsec += 1000000000 / cnt; // 10k Hz
if (cnt > 100)
sig = -1;
if (cnt < 2)
sig = 1;
cnt = cnt + sig;
BUG_ON(tick_set_next_ktime(now) < 0);
}
#endif
#endif
prepare_arch_switch(1);
switch_to(next);
@@ -436,7 +260,7 @@ void schedule(void)
arch_local_irq_enable();
}
__attribute__((unused))
static void kthread_set_sched_policy(struct task_struct *task,
enum sched_policy policy)
{
@@ -448,139 +272,20 @@ static void kthread_set_sched_policy(struct task_struct *task,
}
/**
* t1: | ##d
*
* t2: | #####d
*
* t3: | ############d
* --------------------------------------------------
*
* |...wakeup
* #...wcet
* d...deadline
*/
void kthread_wake_up(struct task_struct *task)
{
// printk("wake thread %p\n", task->stack_top);
arch_local_irq_disable();
kthread_lock();
/* for now, threads are only queued to the wake list here; actual admission
 * to the run queue and the initial wakeup/deadline configuration happen in
 * sched_wake() on the next scheduling pass.
 * note: once we have more proper scheduling, we will want to
 * consider the following: if an EDF task is in paused state (e.g.
 * with a semaphore locked), do the same when the semaphore is unlocked,
 * but set the deadline to now + wcet
 */
BUG_ON(task->state != TASK_NEW);
task->state = TASK_IDLE;
list_move_tail(&task->node, &_kthreads.wake);
kthread_unlock();
arch_local_irq_enable();
schedule();
}
@@ -596,6 +301,7 @@ struct task_struct *kthread_init_main(void)
/* XXX accessors */
task->policy = SCHED_RR; /* default */
task->priority = 1;
arch_promote_to_task(task);
@@ -673,7 +379,10 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
/* XXX accessors */
task->policy = SCHED_RR; /* default */
task->priority = 1;
task->total = 0;
task->slices = 0;
arch_init_task(task, thread_fn, data);
task->state = TASK_NEW;
@@ -686,169 +395,13 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
kthread_unlock();
arch_local_irq_enable();
//printk("%s is next at %p stack %p\n", namefmt, &task->thread_info, task->stack);
//printk("%s\n", namefmt);
return task;
}
/**
* try_to_wake_up - wake up a thread
* @p: the thread to be awakened
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
*
* If (@state & @p->state) @p->state = TASK_RUN.
*
* If the task was not queued/runnable, also place it back on a runqueue.
*
* Atomic against schedule() which would dequeue a task, also see
* set_current_state().
*
* Return: %true if @p->state changes (an actual wakeup was done),
* %false otherwise.
*/
static int
wake_up_thread_internal(struct task_struct *p, unsigned int state, int wake_flags)
{
//unsigned long flags;
//int cpu = 0;
int success = 0;
#if 0
/*
* If we are going to wake up a thread waiting for CONDITION we
* need to ensure that CONDITION=1 done by the caller can not be
* reordered with p->state check below. This pairs with mb() in
* set_current_state() the waiting thread does.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
if (!(p->state & state))
goto out;
trace_sched_waking(p);
/* We're going to change ->state: */
success = 1;
cpu = task_cpu(p);
/*
* Ensure we load p->on_rq _after_ p->state, otherwise it would
* be possible to, falsely, observe p->on_rq == 0 and get stuck
* in smp_cond_load_acquire() below.
*
* sched_ttwu_pending() try_to_wake_up()
* [S] p->on_rq = 1; [L] P->state
* UNLOCK rq->lock -----.
* \
* +--- RMB
* schedule() /
* LOCK rq->lock -----'
* UNLOCK rq->lock
*
* [task p]
* [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
*
* Pairs with the UNLOCK+LOCK on rq->lock from the
* last wakeup of our task and the schedule that got our task
* current.
*/
smp_rmb();
if (p->on_rq && ttwu_remote(p, wake_flags))
goto stat;
#ifdef CONFIG_SMP
/*
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
* possible to, falsely, observe p->on_cpu == 0.
*
* One must be running (->on_cpu == 1) in order to remove oneself
* from the runqueue.
*
* [S] ->on_cpu = 1; [L] ->on_rq
* UNLOCK rq->lock
* RMB
* LOCK rq->lock
* [S] ->on_rq = 0; [L] ->on_cpu
*
* Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
* from the consecutive calls to schedule(); the first switching to our
* task, the second putting it to sleep.
*/
smp_rmb();
/*
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*
* Pairs with the smp_store_release() in finish_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
*/
smp_cond_load_acquire(&p->on_cpu, !VAL);
p->sched_contributes_to_load = !!task_contributes_to_load(p);
p->state = TASK_WAKING;
if (p->in_iowait) {
delayacct_blkio_end(p);
atomic_dec(&task_rq(p)->nr_iowait);
}
cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
if (task_cpu(p) != cpu) {
wake_flags |= WF_MIGRATED;
set_task_cpu(p, cpu);
}
#else /* CONFIG_SMP */
if (p->in_iowait) {
delayacct_blkio_end(p);
atomic_dec(&task_rq(p)->nr_iowait);
}
#endif /* CONFIG_SMP */
ttwu_queue(p, cpu, wake_flags);
stat:
ttwu_stat(p, cpu, wake_flags);
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#endif
return success;
}
/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
* processes.
*
* Return: 1 if the process was woken up, 0 if it was already running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
/* Used in tsk->state: */
int wake_up_thread(struct task_struct *p)
{
return wake_up_thread_internal(p, 0xdead, 0);
}
EXPORT_SYMBOL(wake_up_thread);
/**
*
* @brief create a new kernel thread
*
* @param thread_fn the function to run in the thread
@@ -859,27 +412,6 @@ EXPORT_SYMBOL(wake_up_thread);
* @param name_fmt a printf format string name for the thread
*
* @param ... parameters to the format string
*
* Create a named kernel thread. The thread will be initially stopped.
* Use wake_up_thread to activate it.
*
* If cpu is set to KTHREAD_CPU_AFFINITY_NONE, the thread will be affine to all
* CPUs. If the selected CPU index exceeds the number of available CPUs, it
* will default to KTHREAD_CPU_AFFINITY_NONE, otherwise the thread will be
* bound to that CPU.
*
* The new thread has SCHED_RR policy by default.
*
* If thread is going to be bound on a particular cpu, give its node
* in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
* When woken, the thread will run @threadfn() with @data as its
* argument. @threadfn() can either call do_exit() directly if it is a
* standalone thread for which no one will call kthread_stop(), or
* return when 'kthread_should_stop()' is true (which means
* kthread_stop() has been called). The return value should be zero
* or a negative error number; it will be passed to kthread_stop().
*
* Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
*/
struct task_struct *kthread_create(int (*thread_fn)(void *data),
......
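A minimal usage sketch for the interface above (the thread function and names are hypothetical): kthread_create() allocates the thread in TASK_NEW state, kthread_wake_up() makes it runnable.

int blink(void *data)
{
	while (1) {
		printk(".");
		sched_yield();
	}

	return 0;
}

void blink_demo(void)
{
	struct task_struct *t;

	t = kthread_create(blink, NULL, KTHREAD_CPU_AFFINITY_NONE, "blink%d", 0);
	t->priority = 4;	/* timeslice factor under the default SCHED_RR */
	kthread_wake_up(t);
}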
/**
* @file kernel/sched/edf.c
*/
#include <kernel/kthread.h>
#include <kernel/export.h>
#include <kernel/kmem.h>
#include <kernel/err.h>
#include <kernel/printk.h>
#include <asm-generic/irqflags.h>
#include <asm-generic/spinlock.h>
#include <asm/switch_to.h>
#include <kernel/string.h>
#include <kernel/tick.h>

/* (assumed definitions) scheduler overhead accounting; the listing below
 * uses these in schedule_edf() and sched_print_edf_list() without
 * declaring them */
static ktime total;
static ktime times;
void sched_print_edf_list_internal(ktime now)
{
// ktime now;
char state = 'U';
int64_t rel_deadline;
int64_t rel_wait;
struct task_struct *tsk;
struct task_struct *tmp;
// now = ktime_get();
ktime prev = 0;
ktime prevd = 0;
printk("\nt: %lld\n", ktime_to_us(now));
printk("S\tDeadline\tWakeup\tdelta W\tdelta P\tt_rem\ttotal\tslices\tName\t\tfirstwake, firstdead, execstart\n");
printk("----------------------------------------------\n");
list_for_each_entry_safe(tsk, tmp, &_kthreads.run, node) {
if (tsk->policy == SCHED_RR)
continue;
rel_deadline = ktime_delta(tsk->deadline, now);
rel_wait = ktime_delta(tsk->wakeup, now);
if (rel_wait < 0)
rel_wait = 0; /* running */
if (tsk->state == TASK_IDLE)
state = 'I';
if (tsk->state == TASK_RUN)
state = 'R';
printk("%c\t%lld\t\t%lld\t%lld\t%lld\t%lld\t%lld\t%d\t%s %lld | %lld %lld %lld %lld\n",
state, ktime_to_us(tsk->deadline), ktime_to_us(tsk->wakeup),
ktime_to_us(rel_wait), ktime_to_us(rel_deadline), ktime_to_us(tsk->runtime), ktime_to_us(tsk->total),
tsk->slices, tsk->name, ktime_us_delta(prev, tsk->wakeup), ktime_us_delta(prevd, tsk->deadline),
ktime_to_us(tsk->first_wake),
ktime_to_us(tsk->first_dead),
ktime_to_us(tsk->exec_start));
prev = tsk->wakeup;
prevd = tsk->deadline;
}
}
void sched_print_edf_list(void)
{
printk("avg: %lld\n", ktime_to_us(total/times));
}
/**
* Our EDF task scheduling timeline:
*
*
*
* wakeup/
* activation
* | absolute
* | deadline
* | start |
* | time | next
* | | | wakeup
* | | computation| | |
* | | time | | |
* | |############| | |
* +-----+-------------------+-----------------
* |------ WCET -------|
* ^- latest start time
* |--- relative deadline ---|
* |---------------- period ------------------|
*/
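/*
 * the three parameters of kthread_set_sched_edf() map directly onto the
 * timeline above: period, wcet and relative deadline, all in microseconds.
 * a hypothetical configuration (values invented for illustration, assuming
 * USEC_PER_MSEC as used in kernel_main()) for a task needing at most 10 ms
 * of CPU every 100 ms, due 50 ms into each period:
 */
__attribute__((unused))
static void sched_edf_config_example(struct task_struct *t)
{
	kthread_set_sched_edf(t, 100 * USEC_PER_MSEC,	/* period */
			      10 * USEC_PER_MSEC,	/* wcet */
			      50 * USEC_PER_MSEC);	/* relative deadline */
}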
/**
* @brief check if an EDF task can still execute given its deadline
*
* @note effectively checks
* wcet remaining runtime in slot
* ------ < --------------------------
* period remaining time to deadline
*
* @returns true if can still execute before deadline
*/
static inline bool schedule_edf_can_execute(struct task_struct *tsk, ktime now)
{
int64_t to_deadline;
if (tsk->runtime <= 0)
return false;
if (ktime_before(tsk->deadline, now)) {
printk("%s violated, %lld %lld, %lld %lld\n", tsk->name,
tsk->runtime, ktime_us_delta(tsk->deadline, now),
tsk->deadline, now);
sched_print_edf_list_internal(now);
BUG();
return false;
}
to_deadline = ktime_delta(tsk->deadline, now);
if (to_deadline <= 0)
return false;
if (tsk->wcet * to_deadline < tsk->period * tsk->runtime)
return true;
return false;
}
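/*
 * worked example for the check above (hypothetical numbers): with
 * period = 50 ms, wcet = 10 ms, 4 ms of runtime left and 8 ms to the
 * deadline, 10 * 8 < 50 * 4 (80 < 200) holds, so the task may keep
 * running; with only 1 ms of runtime left, 80 < 50 fails and the task
 * is reinitialised for its next period instead. the cross-multiplication
 * sidesteps a division:
 */
__attribute__((unused))
static bool edf_can_execute_example(void)
{
	const int64_t wcet = 10, period = 50;		/* hypothetical, ms */
	const int64_t runtime = 4, to_deadline = 8;

	return wcet * to_deadline < period * runtime;	/* 80 < 200: true */
}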
static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
{
tsk->state = TASK_IDLE;
tsk->wakeup = ktime_add(tsk->wakeup, tsk->period);
#if 0
if (ktime_after(now, tsk->wakeup))
printk("%s delta %lld\n",tsk->name, ktime_us_delta(tsk->wakeup, now));
BUG_ON(ktime_after(now, tsk->wakeup)); /* deadline missed earlier? */
#endif
tsk->deadline = ktime_add(tsk->wakeup, tsk->deadline_rel);
tsk->runtime = tsk->wcet;
}
#define SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE 10000000LL
/* stupidly sort EDFs */
static int64_t schedule_edf(ktime now)
{
// ktime now;
int64_t delta;
int64_t slot = SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE;
struct task_struct *tsk;
struct task_struct *tmp;
ktime wake;
list_for_each_entry_safe(tsk, tmp, &_kthreads.run, node) {
if (tsk->policy != SCHED_EDF)
continue;
/* time to wake up yet? */
delta = ktime_delta(tsk->wakeup, now);
if (delta >= 0) {
/* nope, just update minimum runtime for this slot */
if (delta < slot)
slot = delta;
continue;
}
/* if it's already running, see if there is time remaining */
if (tsk->state == TASK_RUN) {
if (!schedule_edf_can_execute(tsk, now)) {
schedule_edf_reinit_task(tsk, now);
/* nope, update minimum runtime for this slot */
delta = ktime_delta(tsk->wakeup, now);
if (delta < slot)
slot = delta;
continue;
}
/* move to top */
list_move(&tsk->node, &_kthreads.run);
continue;
}
/* time to wake up */
if (tsk->state == TASK_IDLE) {
tsk->state = TASK_RUN;
/* move to top */
list_move(&tsk->node, &_kthreads.run);
}
}
/* now find the closest relative deadline */
wake = ktime_add(now, slot);
list_for_each_entry_safe(tsk, tmp, &_kthreads.run, node) {
if (tsk->policy != SCHED_EDF)
break;
/* all currently runnable task are at the top of the list */
if (tsk->state != TASK_RUN)
break;
if (ktime_before(wake, tsk->deadline))
continue;
delta = ktime_delta(wake, tsk->deadline);
if (delta < 0) {
delta = ktime_delta(now, tsk->deadline);
printk("\n [%lld] %s deadline violated by %lld us\n", ktime_to_ms(now), tsk->name, ktime_to_us(delta));
}
if (delta < slot) {
if (delta)
slot = delta;
else
delta = tsk->runtime;
wake = ktime_add(now, slot); /* update next wakeup */
/* move to top */
list_move(&tsk->node, &_kthreads.run);
BUG_ON(slot <= 0);
}
}
total = ktime_add(total, ktime_delta(ktime_get(), now));
times++;
//printk("%3.d %3.lld\n", cnt, ktime_to_us(total / times) );
BUG_ON(slot < 0);
return slot;
}
/**
*
* we allow online task admission, so we need to be able to determine
* schedulability on the fly:
*
* EDF schedulability
*
*
* # comp time
* | deadline (== unused slot)
* _ unused slot
* > wakeup (== deadline if D == P)
* o free slots (deadline - wcet)
*
* simplest case: one long period task, one or more short period tasks
*
* W D W
* >oooooooooo##########|_____________________________> (P=50, D=20, R=10) (T1)
* >o#|_> (P= 4, D= 2, R= 1) (T2)
* >o#> (P= 2, D= 2, R= 1) (T3)
* >#> (P= 1, D= 1, R= 1) (T4)
* >ooooooo#####|_______> (P=20, D=12, R= 5) (T5)
* >oooooooooooooooooooooooooo####|__> (P=33, D=30, R= 4) (T6)
* >oooooooooooooooooooooooooooooooooooooooo######|___> (P=50, D=46, R= 6) (T7)
*
* If we map the short period tasks into the long period task's "free" slots,
* we see that tasks with periods shorter than the deadline of the task
* with the longest period can only be scheduled if their utilisation
* or "time density" R / D is smaller than the utilisation of the longest
* period task.
*
*
* easily schedulable:
* ____________________________________________________________________________________________________
* .... . .. ......... . . . . . . ..... . ... ....... . . . . . . .
* >o###oooooo#oo####o##|_____________________________###oooooo##oo##o###o|_____________________________
* >#o|_o#|_o#|_#o|_o#|_#o|_o#|_o#|_#o|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_o#|_
*
*
* R/D R/P
* T1: (P=50, D=20, R=10) 10/20 = 1/2 10/50 = 20/100
* T2: (P= 4, D= 2, R= 1) 1/2 = 1/2 100% 1/4 = 25/100 45% (== number of used slots)
*
*
* correct analysis sum(R_i/P_i)
*
* T1 (D-R) / D = 1/2
* T1 (D-R) / P = 1/5
*
* T2 (D-R) / P = 1/4 T1DRD > T2DRP -> R/P (correct)
*
*
*
* just schedulable:
* ____________________________________________________________________________________________________
* .................... . . . . . . . . . . . . . . ..................... . . . . . . . . . . . . . . .
* >#o#o#o#o#o#o#o#o#o#o|_____________________________#o#o#o#o#o#o#o#o#o#o|_____________________________
* >o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#o#
*
* R/D R/P
* T1: (P=50, D=20, R=10) 10/20 = 1/2 10/50 = 2/10
* T3: (P= 2, D= 2, R= 1) 1/2 = 1/2 100% 1/2 = 5/10 70% (== number of used slots)
*
* -> this must be 100%, impossible to fit more slots into relative deadline of
* long task
*
* correct analysis sum(R_i/D_i)
*
* T1 (D-R) / D = 1/2
* T1 (D-R) / P = 1/5
*
* T3 (D-R) / P = 1/2 T1DRD <= T3DRP -> R/D (correct)
*
* not schedulable:
* ____________________________________________________________________________________________________
* ..........::::::::::........................................::::::::::..............................
* >oooooooooo##########|_____________________________oooooooooo##########|_____________________________
* >####################################################################################################
*
* R/D R/P
* T1: (P=50, D=20, R=10) 10/20 = 1/2 10/50 = 1/5
* T4: (P= 1, D= 1, R= 1) 1/1 = 1/1 150% 1/1 = 1/1 120%
*
* both correct, but R/P "more correct" -> actual slot usage
*
* T1 (D-R) / D = 1/2
* T1 (D-R) / P = 1/5
*
* T4 (D-R) / P = 0 T1DRD > T4DRD -> R/P (correct)
*
*
* schedulable:
*
* ____________________________________________________________________________________________________
* .................................................................................................xxx
* >o###oooooo#oo###o###|_____________________________##o###o#ooooooo###o#|_____________________________
* >#o|_#o|_o#|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_
* >ooooo####oo#|_______o###oooooo##|_______o#ooo###o#oo|_______o###oooooo##|_______o###oooooo##|_______
* >ooooooooooooooooooooooooo###o#|__ooooooooo##ooppoooooooooo##ooo|__ooooooooooooooooooo###o#oooooo|__x
* >ooooooooooooooooooooooooooooooooo###o###oooooo|___ooooooooooooooooooooooo###o###ooooooooooooo###|___
*
* R/D R/P
* T1: (P=50, D=20, R=10) 10/20 50% 10/50 20%
* T2: (P= 4, D= 2, R= 1) 1/2 100% 1/4 45%
* T5: (P=20, D=12, R= 5) 5/12 142% 5/20 70%
* T6: (P=33, D=30, R= 4) 4/30 155% 4/33 82%
* T7: (P=50, D=46, R= 6) 6/46 168% 6/50 94%
*
*
*
* sum(R_i/P_i) correct, sum(R_i/D_i) absolutely incorrect!
*
* thread(p_max):
* T1 (D-R) / D = 1/2
* T1 (D-R) / P = 1/5
* ------------------
* T2 (D-R) / P = 1/4 T1DRD > T2DRP -> R/P (correct)
* T5 (D-R) / P = 7/20 T1DRD > T5DRP -> R/P (correct)
* T6 (D-R) / P = 26/33 T1RD <= T6DRP -> R/D (correct? looks ok)
* T7 (D-R) / P = 40/50 T1RD <= T6DRP -> R/D (correct? looks ok)
*
* usage: 96.4%
*
*
*
*
*
* try 1:
*
* T1: (P=50, D=20, R=10) (20%) T1DRP = 0.2 (0.95)
* TX: (P=10, D= 8, R=6) -> (D-R)/P = 0.2 T1DRD > T2DRP -> R/P (incorrect, should be R/D at least) (0.75)
* ................::..
* >##oooooo####oooo####|_____________________________
* >oo######|_oo######|_
*
* 22/20 slots used = 110%
*
* free T1 slots before deadline: D-R = 10
*
* TX runtime slots cannot be larger than that!
*
* TX runtime slots for T1 deadline periods:
*
*
* (D_x - R_x) / D_x * D_1 = 12.5 > 10 -> not schedulable
*
* sum((D_i - R_i) / D_i) * D_1 < 10 -> schedulable?
*
*
*
* i != D_1 && P_i < D_1
* sum((D_1 / P_i) * R_i) < (D_1 - R_1) ?
*
* i != D1 && P_i >
*
* (T1: 4 < 10 ok)
*
* T2: 5
* T5: 5
* T6 2.42
*
*
*
*
* new test:
*
* if (P_x < D_1) :
* if ((R_x - D_x) > 0) // otherwise we are at 100% slot usage within deadline
* (D_x - R_x) / D_x * (D_1 - R_1) = 12.5 > (D_1 - R_1) -> not schedulable
* else ?
* R_x / P_x * D_1 < (D_1 - R_1)
*
* (schedulable):
* ____________________________________________________________________________________________________
* .................................................................................................xxx
* >o###oooooo#oo###o###|_____________________________##o###o#ooooooo###o#|_____________________________
* >#o|_#o|_o#|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_
* >ooooo####oo#|_______o###oooooo##|_______o#ooo###o#oo|_______o###oooooo##|_______o###oooooo##|_______
* >ooooooooooooooooooooooooo###o#|__ooooooooo##ooppoooooooooo##ooo|__ooooooooooooooooooo###o#oooooo|__x
* >ooooooooooooooooooooooooooooooooo###o###oooooo|___ooooooooooooooooooooooo###o###oooooooooooooooo|___
*
*
* T1: (P=50, D=20, R=10) -> max P -> D_1 = 20, D_1 - R_1 = 10 = F_1
*
* T2: (P= 4, D= 2, R= 1) F2 = 1 F2D = 1/2
* T5: (P=20, D=12, R= 5) F5 = 7 F5D = 7/12
* T6: (P=33, D=30, R= 4) F6 = 26 F6D = 26/30
* T7: (P=50, D=46, R= 6) F7 = 40 F7D = 40/46
*
*
* Utilisation: U1 = D_1 - R_1 = 10; U2 = P_1 - D_1 = 30
*
* check T2:
* f2 > 0 -> f2d * F1 = 5 <= U1 -> schedulable
* f2d * U2 = 10
* -> U1 = 5, U2 = 20
*
* check t5:
* f5 > 0 -> int(f5d * F1) = 5; 5 <= U1 -> schedulable
* f5d * U2 = int(11) ->
* U1 = 0, U2 = 9
*
*
* 1) determine task with longest period
*
* T1: (P=50, D=20, R=10)
*
* 2) calculate unused head and tail (before and after deadline)
*
* UH = D1 - R1 (= 20) (Hyperperiod)
* UT = P1 - D1 (= 60)
*
* 3) loop over other tasks (Period < Deadline of Task 1)
*
* calculate slots usage before deadline of Task 1:
*
* H * Ri * D1 / Pi (T2: 10, T5: 10)
*
* update head slots UH = UH - 20 = 0 -> all used
*
*
* calculate slot usage after deadline of Task 1:
*
* H * Ri * F1 / Pi (T2: 15, T5: 15)
*
* update tail slots: UT = UT - 30 = 30
*
* -> need hyperperiod factor H = 2
*
*
*
* if (DEADLINE >
* ____________________________________________________________________________________________________
* ......................... . ............ .............. ......................... . ... .
* >o###oooooo#oo###o###|_____________________________##o###o#ooooooo###o#|_____________________________
* >#o|_#o|_o#|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_#o|_
* >ooooo####oo#|_______o###oooooo##|_______o#ooo###o#oo|_______o###oooooo##|_______o###oooooo##|_______
* >ooooooooooooooooooooooooooooooooo###o###oooooo|___ooooooooooooooooooooooo###o###oooooooooooooooo|___
*
*
*
*/
static ktime hyperperiod(void)
{
ktime lcm = 0;
ktime a,b;
struct task_struct *t0;
struct task_struct *tsk;
struct task_struct *tmp;
if (list_empty(&_kthreads.new))
return 0;
t0 = list_entry(_kthreads.new.next, struct task_struct, node);
lcm = t0->period;
list_for_each_entry_safe(tsk, tmp, &_kthreads.new, node) {
if (tsk == t0)
continue;
a = lcm;
b = tsk->period;
/* already a multiple? */
if (a % b == 0)
continue;
while (a != b) {
if (a > b)
a -= b;
else
b -= a;
}
lcm = lcm * (tsk->period / a);
}
return lcm;
}
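/*
 * worked example for hyperperiod() (hypothetical periods): for tasks with
 * periods 50, 4 and 20, the subtraction loop reduces (50, 4) to their
 * GCD 2, so lcm = 50 * (4 / 2) = 100; (100, 20) divide evenly and are
 * skipped, leaving a hyperperiod of 100. in sketch form:
 */
__attribute__((unused))
static ktime hyperperiod_example(void)
{
	const ktime periods[] = {50, 4, 20};	/* hypothetical */
	ktime lcm = periods[0];
	ktime a, b;
	int i;

	for (i = 1; i < 3; i++) {
		a = lcm;
		b = periods[i];

		if (a % b == 0)
			continue;	/* already a multiple */

		while (a != b) {	/* subtraction-based GCD */
			if (a > b)
				a -= b;
			else
				b -= a;
		}

		lcm = lcm * (periods[i] / a);
	}

	return lcm;	/* 100 */
}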
#define MIN(A,B) ((A) < (B) ? (A) : (B))
void kthread_set_sched_edf(struct task_struct *task, unsigned long period_us,
unsigned long wcet_us, unsigned long deadline_rel_us)
{
/* XXX schedulability tests */
if (wcet_us >= period_us) {
printk("Cannot schedule EDF task with WCET %lu >= PERIOD %lu !\n", wcet_us, period_us);
return;
}
if (wcet_us >= deadline_rel_us) {
printk("Cannot schedule EDF task with WCET %lu >= DEADLINE %lu !\n", wcet_us, deadline_rel_us);
return;
}
if (deadline_rel_us >= period_us) {
printk("Cannot schedule EDF task with DEADLINE %lu >= PERIOD %lu !\n", deadline_rel_us, period_us);
return;
}
arch_local_irq_disable();
task->period = us_to_ktime(period_us);
task->wcet = us_to_ktime(wcet_us);
//task->runtime = ktime_sub(task->wcet, 7000LL);
task->runtime = task->wcet;
task->deadline_rel = us_to_ktime(deadline_rel_us);
arch_local_irq_enable();
{
ktime p = hyperperiod();
ktime h;
ktime max = 0;
ktime uh, ut, f1;
struct task_struct *t0 = NULL;
struct task_struct *tsk = NULL;
struct task_struct *tmp;
if (p <= 0)
printk("EDF task list appears to be empty\n");
list_for_each_entry_safe(tsk, tmp, &_kthreads.new, node) {
if (tsk->period > max) {
t0 = tsk;
max = tsk->period;
}
}
BUG_ON(!t0);
BUG_ON(p < t0->period);
h = p / t0->period;
// printk("Period factor %lld %lld %lld\n", h, p, t0->period);
uh = h * (t0->deadline_rel - t0->wcet);
ut = h * (t0->period - t0->deadline_rel);
f1 = ut/h;
printk("max UH: %lld, UT: %lld\n", (uh), (ut));
list_for_each_entry_safe(tsk, tmp, &_kthreads.new, node) {
ktime sh, st;
if (tsk == t0)
continue;
if (tsk->deadline_rel < t0->deadline_rel) {
/* slots before deadline of T0 */
sh = h * tsk->wcet * t0->deadline_rel / tsk->period;
if (sh > uh) {
printk("NOT SCHEDULABLE in head: %s\n", tsk->name);
BUG();
}
uh = uh - sh;
}
/* slots after deadline of T0 */
st = h * tsk->wcet * f1 / tsk->period;
printk("%s tail usage: %lld\n", tsk->name, ktime_to_ms(st));
if (st > ut) {
printk("NOT SCHEDULABLE in tail: %s\n", tsk->name);
BUG();
}
ut = ut - st;
printk("UH: %lld, UT: %lld\n", (uh),(ut));
}
}
{
double u = 0.0; /* utilisation */
struct task_struct *tsk;
struct task_struct *tmp;
static int64_t dmin = 0x7fffffffffffffLL;
if (dmin > task->deadline_rel)
dmin = task->deadline_rel;
u += (double) (int32_t) task->wcet / (double) (int32_t) task->period;
list_for_each_entry_safe(tsk, tmp, &_kthreads.new, node) {
if (tsk->policy != SCHED_EDF)
continue;
u += (double) (int32_t) tsk->wcet / (double) (int32_t) tsk->period;
}
if (u > 1.0) {
printk("Task set is NOT schedulable: %g ", u);
kthread_set_sched_policy(task, SCHED_RR);
printk("changed task mode to RR\n");
} else {
printk("Utilisation %g\n", u);
kthread_set_sched_policy(task, SCHED_EDF);
}
}
// arch_local_irq_enable();
printk("\n");
}
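/*
 * the utilisation block above applies the classic EDF admission bound
 * U = sum(WCET_i / P_i) <= 1. a worked example with hypothetical values:
 * tasks with (WCET, P) of (10, 50), (1, 4) and (5, 20) give
 * U = 0.2 + 0.25 + 0.25 = 0.7, so the set is admitted under SCHED_EDF;
 * adding a (10, 20) task would push U to 1.2 and the new task would be
 * demoted to SCHED_RR instead. in sketch form:
 */
__attribute__((unused))
static double edf_utilisation_example(void)
{
	const double wcet[]   = {10.0, 1.0, 5.0};	/* hypothetical */
	const double period[] = {50.0, 4.0, 20.0};
	double u = 0.0;
	int i;

	for (i = 0; i < 3; i++)
		u += wcet[i] / period[i];

	return u;	/* 0.7 <= 1.0: schedulable under EDF */
}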