diff --git a/arch/sparc/kernel/thread.c b/arch/sparc/kernel/thread.c
index 2bff101a6c214be2422535d315b3da035ed728cc..85a526d80844ef591b2fabc401d0fa553d9495a0 100644
--- a/arch/sparc/kernel/thread.c
+++ b/arch/sparc/kernel/thread.c
@@ -52,16 +52,19 @@ static void th_starter(void)
 
 	task->thread_fn(task->data);
 
-	arch_local_irq_disable();
 	ts = get_uptime();
 	stop = (double) ts.tv_sec + (double) ts.tv_nsec / 1e9;
-	printk("thread: %p returned after %gs\n", task->stack, stop-start);
+
+//	printk("thread: %p returned after %gs\n", task->stack, stop-start);
+
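+	/* mark ourselves dead with irqs off so a tick cannot reschedule us
+	 * in the middle of the state update */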
+	arch_local_irq_disable();
 	task->state = TASK_DEAD;
 	arch_local_irq_enable();
 
 	schedule();
 
-	pr_crit(MSG "should never have reached %s:%d", __func__, __LINE__);
+	pr_crit(MSG "should never have reached %s:%d\n", __func__, __LINE__);
 	BUG();
 }
 
diff --git a/include/errno.h b/include/errno.h
index 7c5570b98ff78f36738019d190ec235ee1195f62..1e1c94975852a48a5afadb9db7d5a46a92b1e205 100644
--- a/include/errno.h
+++ b/include/errno.h
@@ -125,4 +125,6 @@
 
 #define EHWPOISON	133	/* Memory page has hardware error */
 
+#define ENOSCHED	134	/* Task cannot be scheduled */
+
 #endif /* _ERRNO_H_ */
diff --git a/include/kernel/kthread.h b/include/kernel/kthread.h
index 5cd2d428b010b515012b1e6e8bd9279cf007320e..863dfc098ec52cdf81a8f566d9cb9a2d88357ae0 100644
--- a/include/kernel/kthread.h
+++ b/include/kernel/kthread.h
@@ -36,6 +36,10 @@ struct remove_this_declaration {
 #define TASK_DEAD	0x0004
 #define TASK_BUSY	0x0005
 
+/* task flags */
+#define TASK_RUN_ONCE	(1 << 0)	/* execute for only one time slice */
+#define TASK_NO_CLEAN	(1 << 30)	/* user takes care of cleanup */
+#define TASK_NO_CHECK	(1 << 31)	/* skip any validation checks */
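+/* note: an EDF task enqueued with attr.period == 0 is implicitly marked
+ * TASK_RUN_ONCE and is reaped after a single activation (see edf_enqueue())
+ */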
 
 
 
@@ -76,9 +80,6 @@ struct task_struct {
 	 */
 	int				unused;
 
-	ktime				last_visit_true;
-	ktime				last_visit_false;
-	ktime				last_adjust;
 	ktime				runtime; /* remaining runtime in this period  */
 	ktime				wakeup; /* start of next period */
 	ktime				deadline; /* deadline of current period */
@@ -87,6 +88,8 @@ struct task_struct {
 	ktime				total;
 	unsigned long			slices;
 
+	unsigned long			flags;
+
 
 	/* Tasks may have a parent and any number of siblings or children.
 	 * If the parent is killed or terminated, so are all siblings and
@@ -99,7 +102,7 @@ struct task_struct {
 
 
 
-};
+} __attribute__ ((aligned (8)));
 
 struct task_struct *kthread_create(int (*thread_fn)(void *data),
 				   void *data, int cpu,
@@ -107,7 +110,8 @@ struct task_struct *kthread_create(int (*thread_fn)(void *data),
 				   ...);
 
 struct task_struct *kthread_init_main(void);
-void kthread_wake_up(struct task_struct *task);
+int kthread_wake_up(struct task_struct *task);
+
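+/* typical use (cf. init/main.c):
+ *
+ *	t = kthread_create(fn, NULL, KTHREAD_CPU_AFFINITY_NONE, "name");
+ *	sched_get_attr(t, &attr);
+ *	attr.policy       = SCHED_EDF;
+ *	attr.period       = ms_to_ktime(100);
+ *	attr.deadline_rel = ms_to_ktime(90);
+ *	attr.wcet         = ms_to_ktime(44);
+ *	sched_set_attr(t, &attr);
+ *	if (kthread_wake_up(t) < 0)
+ *		printk("task not schedulable\n");
+ */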
 /* XXX dummy */
 void switch_to(struct task_struct *next);
 void schedule(void);
diff --git a/include/kernel/sched.h b/include/kernel/sched.h
index 160683791c0eea5e739ee495daa2b24a9659e213..e7ae9672e7390daaaef087a010e2e36b8cb77789 100644
--- a/include/kernel/sched.h
+++ b/include/kernel/sched.h
@@ -25,10 +25,11 @@ struct sched_attr {
 	unsigned long		priority;
 
 	/* period based scheduling for EDF, RMS, ... */
-	ktime			period;		/* wakeup period */
-	ktime			wcet;		/* max runtime per period*/
-	ktime			deadline_rel;	/* time to deadline from begin of wakeup */
-};
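+	/* ktime members are 64 bit wide; SPARC doubleword loads/stores
+	 * (ldd/std) require 8-byte alignment, hence the explicit attributes
+	 */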
+	ktime			period __attribute__ ((aligned (8)));		/* wakeup period */
+	ktime			wcet __attribute__ ((aligned (8)));		/* max runtime per period */
+	ktime			deadline_rel __attribute__ ((aligned (8)));	/* time to deadline from begin of wakeup */
+
+} __attribute__ ((aligned (8)));
 
 
 
@@ -70,7 +71,7 @@ struct scheduler {
 
 	/* XXX: sucks */
 	void (*wake_next_task)  (struct task_queue tq[], int cpu, ktime now);
-	void (*enqueue_task)    (struct task_queue tq[],
+	int  (*enqueue_task)    (struct task_queue tq[],
 			         struct task_struct *task);
 
 	ktime (*timeslice_ns)   (struct task_struct *task);
@@ -119,5 +120,7 @@ int sched_set_policy_default(struct task_struct *task);
 int sched_enqueue(struct task_struct *task);
 int sched_register(struct scheduler *sched);
 
+void sched_enable(void);
+void sched_disable(void);
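+
+/* assumed bring-up order: enable scheduling on a cpu only once its
+ * current_set[] entry is initialised, e.g.
+ *
+ *	kthread_init_main();
+ *	sched_enable();
+ */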
 
 #endif /* _KERNEL_SCHED_H_ */
diff --git a/include/kernel/tick.h b/include/kernel/tick.h
index 7f1b3d16e9a944bde2607c8c785eadc15f347208..dccdb8537676f306fff5cafbb4b11dceb9c1db7c 100644
--- a/include/kernel/tick.h
+++ b/include/kernel/tick.h
@@ -35,6 +35,7 @@ enum tick_mode {
 void tick_check_device(struct clock_event_device *dev);
 int tick_set_mode(enum tick_mode mode);
 int tick_set_next_ns(unsigned long nanoseconds);
+int tick_set_next_ns_for_cpu(unsigned long nanoseconds, int cpu);
 int tick_set_next_ktime(struct timespec expires);
 unsigned long tick_get_period_min_ns(void);
 
diff --git a/init/main.c b/init/main.c
index 4725207b987a915ff2df1de29968e4ac732dfac1..c412f4cea970f85748a798e1deeb34aa8c2111f0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -47,64 +47,107 @@
 volatile int inc;
 volatile unsigned int xa, xb, xc, xd;
 
+int task2(void *data);
+
 int task0(void *data)
 {
+
 	while (1) {
 
 		xd++;
 
-	//	printk("t1 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
-	//	sched_yield();
+		if (xd % 1000000000 == 0)
+			sched_yield();
+
 	}
 }
 
 int task1(void *data)
 {
+	xa = 0;
 	while (1) {
 
 		xa++;
 
-	//	printk("t1 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
-	//	sched_yield();
 	}
 }
-
+#define QUIT_AT		100000
 
 int task2(void *data)
 {
+
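+	/* XXX test hack: write the marker and bail out immediately; the loop
+	 * below is intentionally unreachable, task_restart() re-creates this
+	 * task whenever it reads the 0xdeadbeef marker back
+	 */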
+	iowrite32be(0xdeadbeef, &xb);
+	return 0;
+
 	while (1) {
-		//printk("x %llu\n", ktime_get());
-		//printk("_");
 		xb++;
-	//	printk("t2 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
-	//	sched_yield();
-	//	printk("-");
-	//	sched_yield();
+
+		if (xb > QUIT_AT) {
+			printk("EXITING\n");
+			xb = 0xdeadbeef;
+			return 0;
+		}
 	}
 }
+extern int threadcnt;
+int task_rr(void *data);
 int task3(void *data)
 {
 	while (1) {
-#if 0
-		ktime now;
-		if (cnt < 1024) {
-			now = ktime_get();
-			buf[cnt] = ktime_delta(now, last);
-			last = now;
-			cnt++;
-		}
-	       //	else
-		//	sched_yield();
-
-#endif
-		//printk("y %llu\n", ktime_get());
-	//	printk(".");
-//		printk("t3 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
 		xc++;
+		task_rr(NULL);
+	}
+}
+
+int xf;
+int task4(void *data)
+{
+	while (1) {
+		xf++;
+	}
+}
+
+
+int task_restart(void *data)
+{
+	struct task_struct *t = NULL;
+	struct sched_attr attr;
 
-	//	sched_yield();
+	xb = 0xdeadbeef;
+	while (1) {
+		/* "fake" single shot reset */
+
+
+#if 1
+		if (ioread32be(&xb) == 0xdeadbeef) {
+			xb = 0;
+
+			t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "task7");
+
+		//	printk("now at %p %d\n", t, threadcnt);
+			sched_get_attr(t, &attr);
+			attr.policy = SCHED_EDF;
+
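+			/* period == 0: one-shot EDF task (TASK_RUN_ONCE) */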
+			attr.period       = us_to_ktime(0);
+			attr.deadline_rel = us_to_ktime(100);
+			attr.wcet         = us_to_ktime(30);
+
+			sched_set_attr(t, &attr);
+			barrier();
+			BUG_ON(kthread_wake_up(t) < 0);
+			barrier();
+		}
+
+		sched_yield();
+#endif
 	}
 }
+
+
 #include <kernel/sysctl.h>
 extern ktime sched_last_time;
 	void sched_print_edf_list_internal(struct task_queue *tq, int cpu, ktime now);
@@ -114,14 +157,11 @@ extern uint32_t sched_ev;
 extern struct scheduler sched_edf;
 int task_rr(void *data)
 {
-	int last = 0;
-	int curr = 0;
 	char buf1[64];
+	char buf2[64];
+	char buf3[64];
 
-	uint32_t last_call = 0;
-	int a, b, c, d;
 
-	ktime sched_time;
 	struct sysobj *sys_irq = NULL;
 
 
@@ -134,30 +174,13 @@ int task_rr(void *data)
 
 		if (sys_irq) {
 			sysobj_show_attr(sys_irq, "irl", buf1);
-			printk("IRQ: %s\n", buf1);
+			sysobj_show_attr(sys_irq, "8", buf2);
+			sysobj_show_attr(sys_irq, "9", buf3);
+			printk("IRQs: %s timer1 %s timer2 %s threads created: %d\n", buf1, buf2, buf3, threadcnt);
 		}
 
-#if 0
-		a = xa;
-		b = xb;
-		c = xc;
-		d = xd;
-		sched_time = sched_last_time;
-		curr = atoi(buf1)/2;
-		printk("%u %u %u %u %llu ", a, b, c, d, ktime_get());
-//		printk("sched %llu us ", ktime_to_us(sched_last_time));
-		printk("%llu per call ", sched_last_time /sched_ev);
-//		printk("calls %d ", sched_ev - last_call);
-		printk("cpu %d", leon3_cpuid());
-
-		printk("\n");
-
-		last = curr;
-		last_call = sched_ev;
-#endif
-
-		sched_print_edf_list_internal(&sched_edf.tq[0], 0, ktime_get());
-		sched_print_edf_list_internal(&sched_edf.tq[1], 1, ktime_get());
+	//	sched_print_edf_list_internal(&sched_edf.tq[0], 0, ktime_get());
+	//	sched_print_edf_list_internal(&sched_edf.tq[1], 1, ktime_get());
 
 
 
@@ -167,6 +190,7 @@ int task_rr(void *data)
 }
 
 
+
 /**
  * @brief kernel initialisation routines
  */
@@ -191,9 +215,11 @@ extern int cpu1_ready;
  */
 #define MAX_TASKS 0
 #include <kernel/clockevent.h>
+#include <kernel/tick.h>
 int kernel_main(void)
 {
 	struct task_struct *t;
+	struct sched_attr attr;
 
 #if 0
 	void *addr;
@@ -245,61 +271,24 @@ int kernel_main(void)
 	/* run the demo */
 	xen_demo();
 #endif
-	printk(MSG "Boot complete, spinning idly.\n");
 
 
 
 	/* elevate boot thread */
 	kthread_init_main();
-#if 0
-	/*
-	 *  T1: (P=50, D=20, R=10)
-	 *
-	 *  T2: (P= 4, D= 2, R= 1)
-	 *  T5: (P=20, D=12, R= 5)
-	 *
-	 *  T6: (P=33, D=30, R= 4)
-	 *  T7: (P=50, D=46, R= 6)
-	 */
-
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "T1");
-	kthread_set_sched_edf(t, 50 * MSEC_PER_SEC,  10 * MSEC_PER_SEC, 20 * MSEC_PER_SEC);
-
-
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "T2");
-	kthread_set_sched_edf(t, 4 * MSEC_PER_SEC,  1 * MSEC_PER_SEC, 2 * MSEC_PER_SEC);
-
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "T5");
-	kthread_set_sched_edf(t, 20 * MSEC_PER_SEC, 5 * MSEC_PER_SEC, 12 * MSEC_PER_SEC);
+	tick_set_next_ns(1000000);
 
+	/* boot sync: signal stage 2, spin until the secondary cpu acknowledges
+	 * with 3, then release it by writing 4 */
+	cpu1_ready = 2;
 
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "T6");
-	kthread_set_sched_edf(t, 33 * MSEC_PER_SEC, 4 * MSEC_PER_SEC, 30 * MSEC_PER_SEC);
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "T7");
-	kthread_set_sched_edf(t, 50 * MSEC_PER_SEC, 6 * MSEC_PER_SEC, 46 * MSEC_PER_SEC);
+	while (ioread32be(&cpu1_ready) != 0x3);
+	iowrite32be(0x4, &cpu1_ready);
 
-#endif
+	printk(MSG "Boot complete\n");
 
 
 
-	cpu1_ready = 2;
-#if 1
-{
-	struct sched_attr attr;
-#if 0
-	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "print");
-	sched_get_attr(t, &attr);
-	attr.priority = 4;
-	sched_set_attr(t, &attr);
-	kthread_wake_up(t);
 
-	t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "print1");
-	sched_get_attr(t, &attr);
-	attr.priority = 8;
-	sched_set_attr(t, &attr);
-	kthread_wake_up(t);
-#endif
-#if 1
 #if 0
 	t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "print1");
 	sched_get_attr(t, &attr);
@@ -311,56 +300,62 @@ int kernel_main(void)
 	kthread_wake_up(t);
 #endif
 
-
 #if 1
 
-	t = kthread_create(task0, NULL, KTHREAD_CPU_AFFINITY_NONE, "task0");
+	//t = kthread_create(task0, NULL, KTHREAD_CPU_AFFINITY_NONE, "task0");
+	t = kthread_create(task_restart, NULL, KTHREAD_CPU_AFFINITY_NONE, "task_restart");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period       = ms_to_ktime(100);
-	attr.deadline_rel = ms_to_ktime(90);
-	attr.wcet         = ms_to_ktime(44);
+	attr.period       = ms_to_ktime(10);
+	attr.deadline_rel = ms_to_ktime(9);
+	attr.wcet         = ms_to_ktime(5);
 	sched_set_attr(t, &attr);
-	kthread_wake_up(t);
+	if (kthread_wake_up(t) < 0)
+		printk("---- %s NOT SCHEDUL-ABLE---\n", t->name);
 #endif
 
 
 
-#if 1
+#if 0
 	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "task1");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period       = ms_to_ktime(50);
-	attr.deadline_rel = ms_to_ktime(40);
-	attr.wcet         = ms_to_ktime(33);
+	attr.period       = us_to_ktime(50000);
+	attr.deadline_rel = us_to_ktime(40000);
+	attr.wcet         = us_to_ktime(33000);
 	sched_set_attr(t, &attr);
-	kthread_wake_up(t);
+	if (kthread_wake_up(t) < 0)
+		printk("---- %s NOT SCHEDUL-ABLE---\n", t->name);
 #endif
 
-#if 1
+#if 0
 	t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "task2");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period       = ms_to_ktime(40);
-	attr.deadline_rel = ms_to_ktime(22);
-	attr.wcet         = ms_to_ktime(19);
+	attr.period       = us_to_ktime(200);
+	attr.deadline_rel = us_to_ktime(110);
+	attr.wcet         = us_to_ktime(95);
 	sched_set_attr(t, &attr);
-	kthread_wake_up(t);
+	if (kthread_wake_up(t) < 0) {
+		printk("---- %s NOT SCHEDUL-ABLE---\n", t->name);
+		BUG();
+	}
 #endif
 
 #if 1
 	t = kthread_create(task3, NULL, KTHREAD_CPU_AFFINITY_NONE, "task3");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period       = ms_to_ktime(79);
-	attr.deadline_rel = ms_to_ktime(70);
-	attr.wcet         = ms_to_ktime(22);
+	attr.period       = ms_to_ktime(1000);
+	attr.deadline_rel = ms_to_ktime(999);
+	attr.wcet         = ms_to_ktime(300);
 	sched_set_attr(t, &attr);
-	kthread_wake_up(t);
+	if (kthread_wake_up(t) < 0)
+		printk("---- %s NOT SCHEDUL-ABLE---\n", t->name);
 #endif
 
 
-#if 1
+#if 0
 	t = kthread_create(task_rr, NULL, KTHREAD_CPU_AFFINITY_NONE, "task_rr");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_RR;
@@ -369,66 +364,42 @@ int kernel_main(void)
 	kthread_wake_up(t);
 #endif
 
-#endif
 
+#if 0
+	t = kthread_create(task_restart, NULL, KTHREAD_CPU_AFFINITY_NONE, "task_restart");
+	sched_get_attr(t, &attr);
+	attr.policy = SCHED_RR;
+	attr.priority = 1;
+	sched_set_attr(t, &attr);
+	kthread_wake_up(t);
+#endif
+//	xb = 0xdeadbeef;
+	while (1) {
+		/* "fake" single shot reset */
 
+	//	task_rr(NULL);
+#if 0
+		if (xb == 0xdeadbeef) {
+			xb = 0;
+			t = kthread_create(task2, NULL, KTHREAD_CPU_AFFINITY_NONE, "task2");
+			sched_get_attr(t, &attr);
+			attr.policy = SCHED_EDF;
 
+			attr.period       = us_to_ktime(0);
+			attr.deadline_rel = us_to_ktime(100);
+			attr.wcet         = us_to_ktime(60);
 
-}
-#endif
+			sched_set_attr(t, &attr);
+			BUG_ON(kthread_wake_up(t) < 0);
 
-	while (ioread32be(&cpu1_ready) != 0x3);
-      iowrite32be(0x4, &cpu1_ready);
-	while(1) {
-//		printk("o");
-#if 0
-		int val = inc;
-		static ktime last;
-		ktime now;
-		now = ktime_get();
-		static ktime delta;
-
-		delta	= ktime_delta(last, now);
-		last = now;
-		printk("%d %lld\n", val, ktime_to_us(delta));
-#endif
-#if 0
-	static int i;
-	static ktime last;
-		ktime now;
-		now = ktime_get();
-		if (i == 10) {
-		printk("%lld\n", ktime_to_ms(ktime_delta(now, last)));
-		last = now;
-		i = 0;
 		}
-		i++;
 #endif
-	//	sched_yield();
-#if 0
-		if (cnt > 1023) {
-			int i;
-			for (i = 1; i < 1024; i++)
-				printk("%lld\n", buf[i]);
-		//	cnt = 0;
-			break;
-		}
-#endif
-//		printk("xxx %llu\n", ktime_get());
 
-		//printk("%d\n", cnt);
-
-	//	printk("o");
-	//	printk("\n");
-
-	//	sched_yield();
-//		printk("cpu1\n");
 		cpu_relax();
 	}
 
-		//printk("%lld\n", buf[i]);
-
-	while(1)
+	while (1)
 		cpu_relax();
 	/* never reached */
 	BUG();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c75b6802c73efc38bc481abf5088f965e74011c5..3f3726ae91a420da131f6125983ce9497c7e820b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -5,15 +5,16 @@
 
 #include <kernel/kthread.h>
 #include <kernel/export.h>
+#include <kernel/smp.h>
 #include <kernel/kmem.h>
 #include <kernel/err.h>
 #include <kernel/printk.h>
 
 #include <asm-generic/irqflags.h>
-#include <asm-generic/spinlock.h>
-
 
+#include <asm/io.h>
 #include <asm/switch_to.h>
+#include <asm/spinlock.h>
 
 #include <kernel/string.h>
 
@@ -42,7 +43,6 @@ static struct spinlock kthread_spinlock;
 
 
 
-#include <asm/processor.h>
 struct thread_info *current_set[CONFIG_SMP_CPUS_MAX]; /* XXX */
 
 
@@ -91,7 +91,7 @@ void sched_yield(void)
 {
 	struct task_struct *tsk;
 
-	tsk = current_set[leon3_cpuid()]->task;
+	tsk = current_set[smp_cpu_id()]->task;
 //	if (tsk->attr.policy == SCHED_EDF)
 	tsk->runtime = 0;
 
@@ -112,31 +112,53 @@ __attribute__((unused))
 
 
 
-void kthread_wake_up(struct task_struct *task)
+int threadcnt;
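+
+/**
+ * @brief wake up a task created with kthread_create()
+ *
+ * @returns 0 on success, -EINVAL if the task is not in TASK_NEW,
+ *	    or a negative error code propagated from sched_enqueue()
+ */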
+int kthread_wake_up(struct task_struct *task)
 {
-	arch_local_irq_disable();
-	kthread_lock();
+	int ret = 0;
 
-	BUG_ON(task->state != TASK_NEW);
+	ktime now;
 
-	task->state = TASK_IDLE;
-	/** XXX **/
-	sched_enqueue(task);
-	//list_move_tail(&task->node, &_kthreads.wake);
 
-	kthread_unlock();
+	if (task->state != TASK_NEW)
+		return -EINVAL;
+
+	ret = sched_enqueue(task);
+	if (ret)
+		return ret;
+
+	threadcnt++;
+
+#if 1
+	kthread_lock();
+	arch_local_irq_disable();
+	now = ktime_get();
+#if 1
+	/* XXX need function in sched.c to do that */
+	task->sched->wake_next_task(task->sched->tq, task->on_cpu, now);
+
+	if (task->on_cpu != KTHREAD_CPU_AFFINITY_NONE)
+		smp_send_reschedule(task->on_cpu);
+#endif
+
 	arch_local_irq_enable();
+	kthread_unlock();
+#endif
+
+	return 0;
 }
 
 
-#include <asm/processor.h>
 struct task_struct *kthread_init_main(void)
 {
+	int cpu;
+
 	struct task_struct *task;
 
+
+	cpu = smp_cpu_id();
+
 	task = kmalloc(sizeof(*task));
 
-//	printk("hi there, someone called %d from %lx\n", leon3_cpuid(), __builtin_return_address(0));
 
 	if (!task)
 		return ERR_PTR(-ENOMEM);
@@ -144,7 +166,7 @@ struct task_struct *kthread_init_main(void)
 	/* XXX accessors */
 	task->attr.policy = SCHED_RR; /* default */
 	task->attr.priority = 1;
-	task->on_cpu = leon3_cpuid();
+	task->on_cpu = cpu;
 
 	arch_promote_to_task(task);
 
@@ -154,7 +176,7 @@ struct task_struct *kthread_init_main(void)
 	arch_local_irq_disable();
 	kthread_lock();
 
-	current_set[leon3_cpuid()] = &task->thread_info;
+	current_set[cpu] = &task->thread_info;
 
 
 	task->state = TASK_RUN;
@@ -162,6 +184,7 @@ struct task_struct *kthread_init_main(void)
 	sched_enqueue(task);
 	/*list_add_tail(&task->node, &_kthreads.run);*/
 
+	smp_send_reschedule(cpu);
 
 
 	kthread_unlock();
@@ -180,7 +203,7 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
 {
 	struct task_struct *task;
 
-	task = kmalloc(sizeof(*task));
+	task = kzalloc(sizeof(*task));
 
 
 	if (!task)
@@ -189,7 +212,7 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
 
 	/* XXX: need stack size detection and realloc/migration code */
 
-	task->stack = kmalloc(8192 + STACK_ALIGN); /* XXX */
+	task->stack = kzalloc(8192 + STACK_ALIGN); /* XXX */
 
 	BUG_ON((int) task->stack > (0x40800000 - 4096 + 1));
 
@@ -207,12 +230,14 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
 	/* XXX: need wmemset() */
 	memset(task->stack, 0xab, 8192 + STACK_ALIGN);
 #else
+#if 0
 	{
 		int i;
 		for (i = 0; i < (8192 + STACK_ALIGN) / 4; i++)
 			((int *) task->stack)[i] = 0xdeadbeef;
 
 	}
+#endif
 #endif
 
 	/* dummy */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 13238f5a71d2ecdbbc8321eccc123c8c5634184a..63a80399ad413f64122d0298304af5971bc1fcf0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11,12 +11,13 @@
 #include <kernel/sched.h>
 #include <kernel/init.h>
 #include <kernel/tick.h>
+#include <kernel/smp.h>
 #include <asm-generic/irqflags.h>
 #include <asm-generic/spinlock.h>
 #include <asm/switch_to.h>
+
 #include <string.h>
 
-#include <asm/leon.h>
 
 
 #define MSG "SCHEDULER: "
@@ -24,18 +25,21 @@
 static LIST_HEAD(kernel_schedulers);
 
 
-/* XXX: per-cpu */
+/* XXX: per-cpu... */
+
 extern struct thread_info *current_set[];
 
-ktime sched_last_time;
-uint32_t sched_ev;
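+/* XXX: hardcoded for two cpus, should be sized by CONFIG_SMP_CPUS_MAX */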
+static bool sched_enabled[2] = {false, false};
+
+
 
 void schedule(void)
 {
+	int cpu;
+
 	struct scheduler *sched;
 
 	struct task_struct *next = NULL;
-	struct task_struct *prev = NULL;
 
 	struct task_struct *current;
 	int64_t slot_ns = 1000000LL;
@@ -45,22 +49,22 @@ void schedule(void)
 	ktime now;
 
 
-	static int once[2];
-	if (!once[leon3_cpuid()]) {
+	cpu = smp_cpu_id();
 
-		//	tick_set_mode(TICK_MODE_PERIODIC);
-		tick_set_next_ns(1e9);	/* XXX default to 1s ticks initially */
-		once[leon3_cpuid()] = 1;
+	if (!sched_enabled[cpu])
 		return;
-	}
+#if 1
+	/* booted yet? */
+	if (!current_set[cpu])
+		return;
+#endif
 
 
 	arch_local_irq_disable();
 
 
 	/* get the current task for this CPU */
-	/* XXX leon3_cpuid() should be smp_cpu_id() arch call*/
-	current = current_set[leon3_cpuid()]->task;
+	current = current_set[cpu]->task;
 
 
 
@@ -74,17 +78,13 @@ void schedule(void)
 	current->runtime = ktime_sub(current->runtime, rt);
 	current->total = ktime_add(current->total, rt);
 
-	current->state = TASK_RUN;
-
+	/* XXX: only demote from BUSY, the task may have marked itself IDLE or DEAD */
+	if (current->state == TASK_BUSY)
+		current->state = TASK_RUN;
 
 retry:
 	next = NULL;
 	wake_ns = 1000000000;
-	/* XXX: for now, try to wake up any threads not running
-	 * this is a waste of cycles and instruction space; should be
-	 * done in the scheduler's code (somewhere) */
-	list_for_each_entry(sched, &kernel_schedulers, node)
-		sched->wake_next_task(sched->tq, leon3_cpuid(), now);
 
 
 	/* XXX need sorted list: highest->lowest scheduler priority, e.g.:
@@ -99,7 +99,7 @@ retry:
 		/* if one of the schedulers have a task which needs to run now,
 		 * next is non-NULL
 		 */
-		next = sched->pick_next_task(sched->tq, leon3_cpuid(), now);
+		next = sched->pick_next_task(sched->tq, cpu, now);
 
 		/* check if we need to limit the next tasks timeslice;
 		 * since our scheduler list is sorted by scheduler priority,
@@ -146,7 +146,7 @@ retry:
 	 */
 	//	list_for_each_entry(sched, &kernel_schedulers, node) {
 	sched = list_first_entry(&kernel_schedulers, struct scheduler, node);
-	wake_ns = sched->task_ready_ns(sched->tq, leon3_cpuid(), now);
+	wake_ns = sched->task_ready_ns(sched->tq, cpu, now);
 
 	BUG_ON(wake_ns < 0);
 
@@ -154,23 +154,26 @@ retry:
 		slot_ns  = wake_ns;
 
 	/* ALWAYS get current time here */
-	next->exec_start = ktime_get();
+	next->exec_start = now;
 	next->state = TASK_BUSY;
 
 
-	/* subtract readout overhead */
-	tick_set_next_ns(ktime_sub(slot_ns, 9000LL));
 
 #if 1
-	if (slot_ns < 19000UL) {
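+	/* XXX: if the remaining slice is shorter than the scheduling overhead,
+	 * refresh the timestamp and pick again rather than programming a tick
+	 * we cannot service
+	 */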
+	if (slot_ns < 10000UL) {
 		//	printk("wake %lld slot %lld %s\n", wake_ns, slot_ns, next->name);
 		now = ktime_get();
+	//	BUG();
 		goto retry;
-		BUG();
 	}
-	sched_ev++;
-	sched_last_time = ktime_add(sched_last_time, ktime_delta(ktime_get(), now));
 #endif
+
+	/* subtract readout overhead */
+	tick_set_next_ns(ktime_sub(slot_ns, 9000LL));
+	//tick_set_next_ns(slot_ns);
+
 	prepare_arch_switch(1);
 	switch_to(next);
 
@@ -303,6 +306,25 @@ int sched_register(struct scheduler *sched)
 }
 
 
+/**
+ * @brief enable scheduling on the current cpu
+ */
+
+void sched_enable(void)
+{
+	sched_enabled[smp_cpu_id()] = true;
+}
+
+
+/**
+ * @brief disable scheduling on the current cpu
+ */
+void sched_disable(void)
+{
+	sched_enabled[smp_cpu_id()] = false;
+}
+
+
 /**
  * @brief scheduler initcall
  *
@@ -312,7 +334,6 @@ int sched_register(struct scheduler *sched)
 static int sched_init(void)
 {
 	tick_set_mode(TICK_MODE_ONESHOT);
-	tick_set_next_ns(1e9);	/* XXX default to 1s ticks initially */
 
 	return 0;
 }
diff --git a/kernel/sched/edf.c b/kernel/sched/edf.c
index f8c2558409a7b999b0ad8094d2f77dd51d32813c..d2e685bb33db0a3ac223c12d80295ca9ac1bc630 100644
--- a/kernel/sched/edf.c
+++ b/kernel/sched/edf.c
@@ -11,13 +11,14 @@
 #include <kernel/string.h>
 #include <kernel/tick.h>
 #include <kernel/init.h>
+#include <kernel/smp.h>
 
 #include <generated/autoconf.h> /* XXX need common CPU include */
 
 
 #define MSG "SCHED_EDF: "
 
-#define UTIL_MAX 0.95 /* XXX should be config option, also should be adaptive depending on RT load */
+#define UTIL_MAX 0.98 /* XXX should be config option, also should be adaptive depending on RT load */
 
 
 void sched_print_edf_list_internal(struct task_queue *tq, int cpu, ktime now)
@@ -36,7 +37,7 @@ void sched_print_edf_list_internal(struct task_queue *tq, int cpu, ktime now)
 
 
 	printk("\nktime: %lld CPU %d\n", ktime_to_ms(now), cpu);
-	printk("S\tDeadline\tWakeup\t\tt_rem\ttotal\tslices\tName\t\twcet\tavg\n");
+	printk("S\tDeadline\tWakeup\t\tt_rem\ttotal\tslices\tName\t\twcet\tavg(us)\n");
 	printk("------------------------------------------------------------------\n");
 	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
 
@@ -52,12 +53,12 @@ void sched_print_edf_list_internal(struct task_queue *tq, int cpu, ktime now)
 		if (tsk->slices == 0)
 			tsk->slices = 1;
 
-		dead = ktime_to_ms(tsk->wakeup);
-		wake = ktime_to_ms(tsk->wakeup);
-		rt   = ktime_to_ms(tsk->runtime);
-		tot  = ktime_to_ms(tsk->total);
-		wcet = ktime_to_ms(tsk->attr.wcet);
-		avg  = ktime_to_ms(tsk->total/tsk->slices);
+		dead = ktime_to_us(tsk->deadline);
+		wake = ktime_to_us(tsk->wakeup);
+		rt   = ktime_to_us(tsk->runtime);
+		tot  = ktime_to_us(tsk->total);
+		wcet = ktime_to_us(tsk->attr.wcet);
+		avg  = ktime_to_us(tsk->total/tsk->slices);
 
 		printk("%c\t%lld\t\t%lld\t\t%lld\t%lld\t%d\t%s\t|\t%lld\t%lld\n",
 		       state, wake, dead,
@@ -72,6 +73,33 @@ void sched_print_edf_list_internal(struct task_queue *tq, int cpu, ktime now)
 }
 
 
+#include <asm/spinlock.h>
+static struct spinlock edf_spinlock;
+
+
+/**
+ * @brief lock critical edf section
+ */
+
+void edf_lock(void)
+{
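+	/* XXX: locking stubbed out for now, see also edf_unlock() */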
+	return;
+	spin_lock_raw(&edf_spinlock);
+}
+
+
+/**
+ * @brief unlock critical edf section
+ */
+
+void edf_unlock(void)
+{
+	return;
+	spin_unlock(&edf_spinlock);
+}
+
+
+
 /**
  * Our EDF task scheduling timeline:
  *
@@ -130,11 +158,10 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 	ktime new_wake;
 
 	if (tsk->runtime == tsk->attr.wcet) {
-		printk("RT == WCET!!\n");
+		printk("T == WCET!! %s\n", tsk->name);
 		__asm__ __volatile__ ("ta 0\n\t");
 	}
 
-	tsk->state = TASK_IDLE;
 
 	new_wake = ktime_add(tsk->wakeup, tsk->attr.period);
 #if 1
@@ -142,13 +169,22 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 	 * wakeup/deadline forward */
 
 	if (ktime_after(now, new_wake)){ /* deadline missed earlier? */
-		printk("%s violated, rt: %lld, last_visit %lld false %lld, last_adjust %lld  next wake: %lld (%lld)\n", tsk->name,
-		       tsk->runtime, tsk->last_visit_true, tsk->last_visit_false, tsk->last_adjust,  tsk->wakeup, new_wake);
+		printk("%s violated, rt: %lld, next wake: %lld (%lld)\n", tsk->name,
+		       tsk->runtime, tsk->wakeup, new_wake);
 		sched_print_edf_list_internal(&tsk->sched->tq[tsk->on_cpu], tsk->on_cpu, now);
 		__asm__ __volatile__ ("ta 0\n\t");
+
+		/* XXX raise kernel alarm and attempt to recover wakeup */
 		BUG();
 	}
 #endif
+	if (tsk->flags & TASK_RUN_ONCE) {
+		tsk->state = TASK_DEAD;
+		return;
+	}
+
+
+	tsk->state = TASK_IDLE;
 
 	tsk->wakeup = new_wake;
 
@@ -156,8 +192,6 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 
 	tsk->runtime = tsk->attr.wcet;
 
-	tsk->last_adjust = now;
-
 	tsk->slices++;
 }
 
@@ -306,7 +340,7 @@ static int edf_schedulable(struct task_queue tq[], const struct task_struct *tas
 			}
 
 
-			printk("UTIL %g\n", util);
+		//	printk("UTIL %g\n", util);
 			if (util > UTIL_MAX)
 				continue;
 
@@ -317,12 +351,12 @@ static int edf_schedulable(struct task_queue tq[], const struct task_struct *tas
 
 
 		}
-		if (cpu == -EINVAL) { 
-			printk("---- WILL NOT FIT ----\n");
+		if (cpu == -EINVAL) {
+	//		printk("---- WILL NOT FIT ----\n");
 			return -EINVAL;
 		}
 
-		printk("best fit is %d\n", cpu);
+	//	printk("best fit is %d\n", cpu);
 	} else {
 		cpu = task->on_cpu;
 	}
@@ -546,13 +580,13 @@ if (1)
 	}
 
 	if (u > UTIL_MAX) {
-		printk("I am NOT schedul-ableh: %f ", u);
+//		printk("I am NOT schedul-ableh: %f ", u);
 		BUG();
 		return -EINVAL;
 		printk("changed task mode to RR\n", u);
 	}
 
-	printk("Utilisation: %g CPU %d\n", u, cpu);
+//	printk("Utilisation: %g CPU %d\n", u, cpu);
 
 
 	/* TODO check against projected interrupt rate, we really need a limit
@@ -561,8 +595,6 @@ if (1)
 	return cpu;
 }
 
- void kthread_lock(void);
- void kthread_unlock(void);
 
 static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
 					 ktime now)
@@ -578,11 +610,14 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
 	if (list_empty(&tq[cpu].run))
 		return NULL;
 
+	edf_lock();
+
 
 	/* XXX need to lock run list for wakeup() */
 
 	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
 
+
 		/* time to wake up yet? */
 		delta = ktime_delta(tsk->wakeup, now);
 
@@ -639,11 +674,25 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
 
 			if (ktime_before (tsk->deadline, first->deadline))
 				list_move(&tsk->node, &tq[cpu].run);
+
+			continue;
 		}
+
+		if (tsk->state == TASK_DEAD) { /* XXX need other mechanism */
+			list_del(&tsk->node);
+			kfree(tsk->stack);
+			kfree(tsk->name);
+			kfree(tsk);
+			continue;
+		}
 	}
 
 
 	first = list_first_entry(&tq[cpu].run, struct task_struct, node);
+	edf_unlock();
 	if (first->state == TASK_RUN)
 		return first;
 
@@ -651,6 +700,8 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
 }
 
 
+
+#include <asm-generic/irqflags.h>
 static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 {
 	ktime last;
@@ -670,12 +721,29 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 	if (list_empty(&tq[cpu].wake))
 		return;
 
-	kthread_lock();
+	edf_lock();
 	last = now;
 
+	/* no period, run it asap */
+	task = list_first_entry(&tq[cpu].wake, struct task_struct, node);
+	if (task->flags & TASK_RUN_ONCE)
+		goto insert;
+
 
 	list_for_each_entry_safe(task, tmp, &tq->run, node) {
 
+		/* XXX need other mechanism */
+		if (task->state == TASK_DEAD) {
+			list_del(&task->node);
+			kfree(task->stack);
+			kfree(task->name);
+			kfree(task);
+			continue;
+		}
+
+		if (task->flags & TASK_RUN_ONCE)
+			continue;
+
 		if (max > task->attr.period)
 			continue;
 
@@ -690,14 +758,24 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 
 	task = list_first_entry(&tq[cpu].wake, struct task_struct, node);
 
-
+	/* XXX */
 	BUG_ON(task->on_cpu == KTHREAD_CPU_AFFINITY_NONE);
 
+
 	if (!list_empty(&tq[cpu].run)) {
 
 		/* reorder */
 
 		list_for_each_entry_safe(t, tmp, &tq[cpu].run, node) {
+
+
+			if (t->flags & TASK_RUN_ONCE)
+				continue;
+
+			if (t->state == TASK_DEAD)
+				continue;
+
+
 			first = list_first_entry(&tq[cpu].run, struct task_struct, node);
 			if (ktime_before (t->wakeup, now)) {
 				if (ktime_before (t->deadline - t->runtime, first->deadline)) {
@@ -708,6 +786,9 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 
 		list_for_each_entry_safe(t, tmp, &tq[cpu].run, node) {
 
+			if (t->flags & TASK_RUN_ONCE)
+				continue;
+
 			if (t->state != TASK_IDLE)
 				continue;
 
@@ -729,43 +810,47 @@ static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 		}
 	}
 
-	task->state = TASK_IDLE;
 
+insert:
 	/* initially furthest deadline as wakeup */
-	last  = ktime_add(last, 3000000000ULL); /* XXX */
+	last  = ktime_add(last, 30000ULL); /* XXX minimum wakeup shift for overheads */
 	task->wakeup     = ktime_add(last, task->attr.period);
 	task->deadline   = ktime_add(task->wakeup, task->attr.deadline_rel);
 
+	/* reset runtime to full */
+	task->runtime = task->attr.wcet;
+	task->state = TASK_IDLE;
 
 	list_move_tail(&task->node, &tq[cpu].run);
-	kthread_unlock();
+
+	edf_unlock();
 }
 
 
 
 
-static void edf_enqueue(struct task_queue tq[], struct task_struct *task)
+static int edf_enqueue(struct task_queue tq[], struct task_struct *task)
 {
 	int cpu;
 
 
-	/* reset runtime to full */
-	task->runtime = task->attr.wcet;
-
-
-	if (task->sched->check_sched_attr(&task->attr))
-		return;
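+	/* a task without a period runs exactly once within its relative
+	 * deadline (TASK_RUN_ONCE); its deadline is reused as the period
+	 * for the schedulability test
+	 */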
+	if (!task->attr.period) {
+		task->flags |= TASK_RUN_ONCE;
+		task->attr.period = task->attr.deadline_rel;
+	} else {
+		task->flags &= ~TASK_RUN_ONCE;
+	}
 
 	cpu = edf_schedulable(tq, task);
+	if (cpu < 0)
+		return -ENOSCHED;
 
-	if (cpu < 0) {
-		printk("---- NOT SCHEDUL-ABLE---\n");
-		return;
-	}
 	task->on_cpu = cpu;
 
 	list_add_tail(&task->node, &tq[cpu].wake);
 
+	return 0;
 }
 
 
@@ -776,41 +861,70 @@ static ktime edf_timeslice_ns(struct task_struct *task)
 
 static int edf_check_sched_attr(struct sched_attr *attr)
 {
-	return 0; /* XXX */
+	ktime tick_min;
+
+
 	if (!attr)
 		goto error;
 
+	tick_min = (ktime) tick_get_period_min_ns();
+
 	if (attr->policy != SCHED_EDF) {
 		pr_err(MSG "attribute policy is %d, expected SCHED_EDF (%d)\n",
 			attr->policy, SCHED_EDF);
 		return -EINVAL;
 	}
 
-	/* need only check WCET, all other times are longer */
-	if (attr->wcet < (ktime) tick_get_period_min_ns()) {
+	if (attr->wcet < tick_min) {
 		pr_err(MSG "Cannot schedule EDF task with WCET of %llu ns, "
 		           "minimum tick duration is %lld\n", attr->wcet,
-			   (ktime) tick_get_period_min_ns());
+			   tick_min);
 		goto error;
 	}
 
-	if (attr->wcet >= attr->period) {
-		pr_err(MSG "Cannot schedule EDF task with WCET %u >= "
-		           "PERIOD %u!\n", attr->wcet, attr->period);
+	if (ktime_delta(attr->deadline_rel, attr->wcet) < tick_min) {
+		pr_err(MSG "Cannot schedule EDF task with WCET-deadline delta "
+		           "of %llu ns, minimum tick duration is %lld\n",
+			   ktime_delta(attr->deadline_rel, attr->wcet),
+			   tick_min);
 		goto error;
 	}
 
+
+	if (attr->period > 0) {
+
+		if (attr->wcet >= attr->period) {
+			pr_err(MSG "Cannot schedule EDF task with WCET %u >= "
+			       "PERIOD %u!\n", attr->wcet, attr->period);
+			goto error;
+		}
+		if (attr->deadline_rel >= attr->period) {
+			pr_err(MSG "Cannot schedule EDF task with DEADLINE %llu >= "
+			       "PERIOD %llu !\n", attr->deadline_rel, attr->period);
+			goto error;
+		}
+
+
+		/* this is only relevant for periodic tasks */
+		if (ktime_delta(attr->period, attr->deadline_rel) < tick_min) {
+			pr_err(MSG "Cannot schedule EDF task with deadline-period delta "
+			       "of %llu ns, minimum tick duration is %lld\n",
+			       ktime_delta(attr->period, attr->deadline_rel),
+			       tick_min);
+			goto error;
+		}
+
+
+
+	}
+
+
 	if (attr->wcet >= attr->deadline_rel) {
 		pr_err(MSG "Cannot schedule EDF task with WCET %llu >= "
 		           "DEADLINE %llu !\n", attr->wcet, attr->deadline_rel);
 		goto error;
 	}
 
-	if (attr->deadline_rel >= attr->period) {
-		pr_err(MSG "Cannot schedule EDF task with DEADLINE %llu >= "
-		           "PERIOD %llu !\n", attr->deadline_rel, attr->period);
-		goto error;
-	}
 
 
 	return 0;
diff --git a/kernel/sched/rr.c b/kernel/sched/rr.c
index b3e09f53b702212da55faf02e81d4739721d08e2..d8d622f3b2e74fa7a1a88c415459687a13849b8d 100644
--- a/kernel/sched/rr.c
+++ b/kernel/sched/rr.c
@@ -9,10 +9,36 @@
 #include <kernel/init.h>
 #include <kernel/tick.h>
 #include <kernel/kthread.h>
+#include <asm/spinlock.h>
 
 #define MSG "SCHED_RR: "
 
-#include <asm/processor.h>
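+/* base RR timeslice in ns; a task runs priority * MIN_RR_SLICE_NS per slice */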
+#define MIN_RR_SLICE_NS		1000000
+
+
+static struct spinlock rr_spinlock;
+
+
+/**
+ * @brief lock critical rr section
+ */
+
+static void rr_lock(void)
+{
+	spin_lock_raw(&rr_spinlock);
+}
+
+
+/**
+ * @brief unlock critical rr section
+ */
+
+static void rr_unlock(void)
+{
+	spin_unlock(&rr_spinlock);
+}
+
+
 static struct task_struct *rr_pick_next(struct task_queue tq[], int cpu,
 					ktime now)
 {
@@ -23,6 +49,7 @@ static struct task_struct *rr_pick_next(struct task_queue tq[], int cpu,
 	if (list_empty(&tq[0].run))
 		return NULL;
 
+	rr_lock();
 	list_for_each_entry_safe(next, tmp, &tq[0].run, node) {
 
 
@@ -37,7 +64,7 @@ static struct task_struct *rr_pick_next(struct task_queue tq[], int cpu,
 			list_move_tail(&next->node, &tq[0].run);
 
 			/* reset runtime */
-			next->runtime = (next->attr.priority * tick_get_period_min_ns());
+			next->runtime = (next->attr.priority * MIN_RR_SLICE_NS);
 
 
 
@@ -60,6 +87,7 @@ static struct task_struct *rr_pick_next(struct task_queue tq[], int cpu,
 
 	}
 
+	rr_unlock();
 
 	return next;
 }
@@ -78,21 +106,29 @@ static void rr_wake_next(struct task_queue tq[], int cpu, ktime now)
 	task = list_entry(tq[0].wake.next, struct task_struct, node);
 
 	BUG_ON(task->attr.policy != SCHED_RR);
-	/** XXX NO LOCKS */
+
 	task->state = TASK_RUN;
+
+	rr_lock();
 	list_move(&task->node, &tq[0].run);
+	rr_unlock();
 }
 
 
-static void rr_enqueue(struct task_queue tq[], struct task_struct *task)
+static int rr_enqueue(struct task_queue tq[], struct task_struct *task)
 {
 
-	task->runtime = (task->attr.priority * tick_get_period_min_ns());
-	/** XXX **/
+	task->runtime = (task->attr.priority * MIN_RR_SLICE_NS);
+
+	rr_lock();
 	if (task->state == TASK_RUN)
 		list_add_tail(&task->node, &tq[0].run);
 	else
 		list_add_tail(&task->node, &tq[0].wake);
+
+	rr_unlock();
+
+	return 0;
 }
 
 /**
@@ -109,7 +145,7 @@ static void rr_enqueue(struct task_queue tq[], struct task_struct *task)
 
 static ktime rr_timeslice_ns(struct task_struct *task)
 {
-	return (ktime) (task->attr.priority * tick_get_period_min_ns() * 50);
+	return (ktime) (task->attr.priority * MIN_RR_SLICE_NS);
 }
 
 
diff --git a/kernel/time.c b/kernel/time.c
index 0f1783741258a4af92a2c96f68e0eac61cb55e46..147aec645744c509a37a4518a40b3920c5896d9c 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -146,8 +146,8 @@ static void time_init_overhead_calibrate(void)
 	/* overhead is readout delta / 2 */
 	tk.readout_ns = (typeof(tk.readout_ns)) (0.5 * delta / (double) i);
 
-	printk(MSG "calibrated main uptime clock readout overhead to %d ns\n",
-	           tk.readout_ns);
+	pr_info(MSG "calibrated main uptime clock readout overhead to %d ns\n",
+	            tk.readout_ns);
 }
 
 /**
diff --git a/lib/vsnprintf.c b/lib/vsnprintf.c
index 115e559ffcee336ca831a6786043ccd62b283d5a..2d68c50377403f35aedd7592ea42482e4e418b6c 100644
--- a/lib/vsnprintf.c
+++ b/lib/vsnprintf.c
@@ -19,7 +19,6 @@
  * - happy bug hunting, I bet there are lots
  */
 
-#include <kernel/kmem.h>
 #include <kernel/export.h>
 #include <kernel/types.h>
 #include <kernel/string.h>