diff --git a/init/main.c b/init/main.c
index 1ab9e60dedc259cce0339860281a4277549fe082..c98e734f0fd2c74f146a35a46482c1c0a4c7a228 100644
--- a/init/main.c
+++ b/init/main.c
@@ -44,7 +44,7 @@
 #endif /* __OPTIMIZE__ */
 #endif /* GCC_VERSION */
 
-
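+/* scratch counter read by the (currently disabled) timing test in
+ * kernel_main(); presumably incremented elsewhere while profiling
+ */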
+volatile int inc;
 
 int task1(void *data)
 {
@@ -62,12 +62,27 @@ int task2(void *data)
 		sched_yield();
 	}
 }
-
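+/* crude latency probe: task3 can record up to 1024 inter-sample deltas
+ * in buf[], which kernel_main() dumps once the buffer has filled up
+ */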
+static int cnt;
+static ktime buf[1024];
 int task3(void *data)
 {
 	while (1) {
-		printk("x");
-		sched_yield();
+#if 0
+		/* record inter-iteration deltas until the buffer is full */
+		static ktime last;
+		ktime now;
+
+		if (cnt < 1024) {
+			now = ktime_get();
+			buf[cnt] = ktime_delta(now, last);
+			last = now;
+			cnt++;
+		} else {
+			sched_yield();
+		}
+#endif
+		printk("%lld\n", ktime_get());
+		/* sched_yield(); */
 	}
 }
 
@@ -194,7 +209,7 @@ int kernel_main(void)
 #if 1
 {
 	struct sched_attr attr;
-
+#if 0
 	t = kthread_create(task1, NULL, KTHREAD_CPU_AFFINITY_NONE, "print");
 	sched_get_attr(t, &attr);
 	attr.priority = 4;
@@ -206,26 +221,68 @@ int kernel_main(void)
 	attr.priority = 8;
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
-
-
+#endif
+#if 1
 	t = kthread_create(task3, NULL, KTHREAD_CPU_AFFINITY_NONE, "edfprint");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
-	attr.period       = 9000000;
-	attr.deadline_rel = 1100000;
-	attr.wcet         = 1000000;
+	attr.period       = ms_to_ktime(3000);
+	attr.deadline_rel = ms_to_ktime(2000);
+	attr.wcet         = ms_to_ktime(1000);
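+	/* sanity check: these attributes demand wcet/period = 1000/3000 ms,
+	 * i.e. a utilisation of ~0.33; edf_schedulable() should admit this
+	 * easily, as EDF on a single CPU admits totals up to 1.0
+	 */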
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
+#endif
+
 }
 #endif
+	while (1) {
+#if 0
+		/* debug: print inc and the per-loop-pass delta in us */
+		static ktime last;
+		ktime now;
+		ktime delta;
+		int val = inc;
+
+		now = ktime_get();
+		delta = ktime_delta(now, last);
+		last = now;
+		printk("%d %lld\n", val, ktime_to_us(delta));
+#endif
+#if 0
+		/* debug: print the duration of ten loop passes in ms */
+		static int i;
+		static ktime last;
+		ktime now;
+
+		now = ktime_get();
+		if (i == 10) {
+			printk("%lld\n", ktime_to_ms(ktime_delta(now, last)));
+			last = now;
+			i = 0;
+		}
+		i++;
+#endif
-	while(1) {
-		//printk("|");
-		cpu_relax();
+		/* once task3 has filled the buffer (this requires enabling
+		 * the probe in task3 above), dump the recorded deltas;
+		 * index 0 is skipped, as it holds the bogus first delta
+		 * against last == 0
+		 */
+		if (cnt > 1023) {
+			int i;
+
+			for (i = 1; i < 1024; i++)
+				printk("%lld\n", buf[i]);
+			/* cnt = 0; */
+			break;
+		}
+
+		printk("xxx %lld\n", ktime_get());
+		/* printk("%d\n", cnt); */
+		/* sched_yield(); */
+		/* cpu_relax(); */
 	}
 
+	while (1)
+		cpu_relax();
 	/* never reached */
 	BUG();
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index eefbb11e1a75c3d35be76f808f9576a8f23d753f..ceb31f8267b7a7de8be7e1049edca06fc61d2b00 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -99,38 +99,6 @@ void sched_yield(void)
 }
 
 
-void sched_wake(struct task_struct *next, ktime now, int64_t slot_ns)
-{
-
-	struct task_struct *task;
-
-	if (list_empty(&_kthreads.wake))
-		return;
-
-	task = list_entry(_kthreads.wake.next, struct task_struct, node);
-
-	if (task->attr.policy == SCHED_EDF) {
-		if (next->attr.policy == SCHED_EDF)
-			return;
-		/* initially set current time as wakeup */
-		task->wakeup = ktime_add(now, slot_ns);
-		task->deadline = ktime_add(task->wakeup, task->attr.deadline_rel);
-		task->first_wake = task->wakeup;
-		task->first_dead = task->deadline;
-
-		list_move(&task->node, &_kthreads.run);
-	}
-
-	if (task->attr.policy == SCHED_RR) {
-		task->state = TASK_RUN;
-		list_move(&task->node, &_kthreads.run);
-	}
-
-}
-
-
-
-
 __attribute__((unused))
 /* static */ void kthread_set_sched_policy(struct task_struct *task,
 				     enum sched_policy policy)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8edbc890a0169386a84a3b2ca84d35527be92f94..bd4b56f883450006d922e69dcc9cc0bb563f9d81 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -34,16 +34,26 @@ void schedule(void)
 
 	struct task_struct *next = NULL;
 
-	struct task_struct *current;
+	struct task_struct *current = current_set[0]->task;
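+	/* note: current_set[0] suggests scheduling is single-CPU for now */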
 	int64_t slot_ns;
 	int64_t wake_ns = 0;
 
 
+	static int once;
+
+	if (!once) {
+		/* tick_set_mode(TICK_MODE_PERIODIC); */
+		tick_set_next_ns(1e9);	/* XXX default to 1s ticks initially */
+		once = 1;
+		return;
+	}
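+	/* note: the very first invocation only arms the timer; actual
+	 * rescheduling begins once the first tick fires (assumed intent of
+	 * the once-guard above)
+	 */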
 
 
 	arch_local_irq_disable();
 	/* kthread_lock(); */
 
+	/* charge the time consumed in the elapsed slot against the task's
+	 * remaining runtime budget
+	 */
+	current->runtime = ktime_sub(current->runtime,
+				     ktime_sub(ktime_get(), current->exec_start));
+
 
 	/* XXX: for now, try to wake up any threads not running
 	 * this is a waste of cycles and instruction space; should be
@@ -59,7 +69,7 @@ void schedule(void)
 	 * TODO: scheduler priority value
 	 */
 	list_for_each_entry(sched, &kernel_schedulers, node) {
-
 		next = sched->pick_next_task(&sched->tq);
 
 		/* check if we need to limit the next tasks timeslice;
@@ -68,9 +78,27 @@ void schedule(void)
 		 * we assume that the timeslice is reasonable; if not fix it in
 		 * the corresponding scheduler
 		 */
 		if (!wake_ns)
 			wake_ns = sched->task_ready_ns(&sched->tq);
+#if 0
+		if (next)
+			printk("next %s\n", next->name);
+		printk("wake_ns %lld\n", wake_ns);
+#endif
+#if 0
+		/* incomplete experiment: prefer the latest wakeup hint and
+		 * fall back to a previously saved candidate; note that tmpn
+		 * is never assigned anywhere yet
+		 */
+		struct task_struct *tmpn;
+		int64_t tmp;
+
+		else {
+			tmp = sched->task_ready_ns(&sched->tq);
+			if (tmp > wake_ns) {
+				wake_ns = tmp;
+				next = tmpn;
+			}
+		}
+
+		if (!next)
+			next = tmpn;
+#endif
 		if (next)
 			break;
 	}
@@ -78,8 +106,9 @@ void schedule(void)
 
 	if (!next) {
 		/* nothing to do, check again later */
 		if (wake_ns)
 			tick_set_next_ns(wake_ns);
 		else
 			tick_set_next_ns(1e9);	/* XXX pause for a second */
 
@@ -87,17 +116,22 @@ void schedule(void)
 	}
 
 	/* see if we can use a full slice or if we have to wake earlier */
-	if (wake_ns)
-		slot_ns = wake_ns;
-	else
-		slot_ns = next->sched->timeslice_ns(next);
+	slot_ns = next->sched->timeslice_ns(next);
+	if (wake_ns && wake_ns < slot_ns)
+		slot_ns = wake_ns;
 
 	/* statistics */
 	next->exec_start = ktime_get();
 
+#if 0
+	if (next)
+		printk("real next %s %lld %lld\n", next->name,
+		       next->exec_start, slot_ns);
+#endif
 	/* kthread_unlock(); */
 
-	tick_set_next_ns(slot_ns);
+	/* subtract a rough estimate of the timer readout overhead, but
+	 * never program less than the calibrated minimum tick period
+	 */
+	slot_ns = ktime_sub(slot_ns, 1000LL);
+	if (slot_ns < (int64_t) tick_get_period_min_ns())
+		slot_ns = (int64_t) tick_get_period_min_ns();
+	tick_set_next_ns(slot_ns);
 
 	prepare_arch_switch(1);
 	switch_to(next);
diff --git a/kernel/sched/edf.c b/kernel/sched/edf.c
index ee5de0d1ce1aba4836829ecabf50c8d9423493f9..e3f6406d19820e2ab648c823f325412f80ad3d1a 100644
--- a/kernel/sched/edf.c
+++ b/kernel/sched/edf.c
@@ -116,11 +116,10 @@ static inline bool schedule_edf_can_execute(struct task_struct *tsk, ktime now)
 
 	if (tsk->runtime <= 0)
 		return false;
-
 	if (ktime_before(tsk->deadline, now))  {
-		printk("%s violated, %lld %lld, %lld %lld\n", tsk->name,
+		printk("%s violated, %lld %lld, dead %lld wake %lld now %lld\n", tsk->name,
 		       tsk->runtime, ktime_us_delta(tsk->deadline, now),
-		       tsk->deadline, now);
+		       tsk->deadline, tsk->wakeup, now);
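+		/* a deadline in the past means the task overran its window;
+		 * for now this is treated as fatal (see BUG() below)
+		 */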
 	//	sched_print_edf_list_internal(now);
 		BUG();
 		return false;
@@ -673,7 +672,7 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 			printk("changed task mode to RR, utilisation %g\n", u);
 			return -EINVAL;
 		} else {
-			printk("Utilisation %g\n", u);
+			printk("Utilisation: %g\n", u);
 			return 0;
 		}
 	}
@@ -681,7 +680,7 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 
 	u = (double) task->attr.wcet / (double) task->attr.period;
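+	/* e.g. wcet = 1000 ms with period = 3000 ms gives u = ~0.33 */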
 
-	printk("was the first task %g\n", u);
+	printk("was the first task, utilisation: %g\n", u);
 
 	return 0;
 }
@@ -694,7 +693,7 @@ static int64_t slot;
 
 static struct task_struct *edf_pick_next(struct task_queue *tq)
 {
-#define SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE 10000000LL
+#define SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE 100000LL
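+/* upper bound on the slot when tasks are pending: if no wakeup is due
+ * sooner, the run queue is re-evaluated after 100 us at the latest
+ */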
 	int64_t delta;
 
 
@@ -705,26 +704,46 @@ static struct task_struct *edf_pick_next(struct task_queue *tq)
 slot = SOME_DEFAULT_TICK_PERIOD_FOR_SCHED_MODE;
 
 
+//	printk("-\n");
 	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
 
 		/* time to wake up yet? */
 		delta = ktime_delta(tsk->wakeup, now);
 		if (delta >= 0) {
 			/* nope, just update minimum runtime for this slot */
 			if (delta < slot)
 				slot = delta;
+#if 0
+			printk("d %lld now: %lld\n", ktime_to_us(delta), now);
+			printk("delta %lld %lld\n", delta, tsk->wakeup);
+#endif
 
-			continue;
+//			continue;
+		}
+
+		/* debug: report tasks whose wakeup has already passed */
+		if (delta < 0) {
+#if 0
+			printk("%lld %d\n", ktime_to_us(delta), tsk->state);
+			printk("%s: %lld (%lld) deadline: %lld now: %lld state %d\n",
+			       tsk->name, ktime_to_ms(delta), tsk->wakeup,
+			       tsk->deadline, now, tsk->state);
+#endif
 		}
 
 		/* if it's already running, see if there is time remaining */
 		if (tsk->state == TASK_RUN) {
 			if (!schedule_edf_can_execute(tsk, now)) {
 				schedule_edf_reinit_task(tsk, now);
 				/* nope, update minimum runtime for this slot */
 				delta = ktime_delta(tsk->wakeup, now);
+				/* the task was reinitialised, but its new
+				 * wakeup still lies in the past; fall back
+				 * to charging one full WCET for this slot
+				 */
+				if (delta < 0)
+					delta = tsk->attr.wcet;
+
 				if (delta < slot)
 					slot = delta;
+
+				BUG_ON(delta < 0);
+
 				continue;
 			}
 
@@ -822,9 +841,9 @@ static int edf_check_sched_attr(struct sched_attr *attr)
 
 	/* need only check WCET, all other times are longer */
 	if (attr->wcet < (ktime) tick_get_period_min_ns()) {
-		pr_err(MSG "Cannot schedule EDF task with WCET of %lld ns, "
-		           "minimum tick duration is %ld\n", attr->wcet,
-			   tick_get_period_min_ns());
+		pr_err(MSG "Cannot schedule EDF task with WCET of %lld ns, "
+		           "minimum tick duration is %lld\n", attr->wcet,
+			   (ktime) tick_get_period_min_ns());
 		goto error;
 	}
 
@@ -835,14 +854,14 @@ static int edf_check_sched_attr(struct sched_attr *attr)
 	}
 
 	if (attr->wcet >= attr->deadline_rel) {
-		pr_err(MSG "Cannot schedule EDF task with WCET %u >= "
-		           "DEADLINE %u !\n", attr->wcet, attr->deadline_rel);
+		pr_err(MSG "Cannot schedule EDF task with WCET %lld >= "
+		           "DEADLINE %lld!\n", attr->wcet, attr->deadline_rel);
 		goto error;
 	}
 
 	if (attr->deadline_rel >= attr->period) {
-		pr_err(MSG "Cannot schedule EDF task with DEADLINE %u >= "
-		           "PERIOD %u !\n", attr->deadline_rel, attr->period);
+		pr_err(MSG "Cannot schedule EDF task with DEADLINE %lld >= "
+		           "PERIOD %lld!\n", attr->deadline_rel, attr->period);
 		goto error;
 	}
 
@@ -895,7 +914,10 @@ ktime edf_task_ready_ns(struct task_queue *tq)
 		}
 	}
 
-	slot = 1000000;
+	/* XXX possibly subtract the call overhead here, e.g.
+	 * slot = ktime_sub(slot, 2000LL);
+	 */
+
 	BUG_ON(slot < 0);
 
 	return slot;
diff --git a/kernel/sched/rr.c b/kernel/sched/rr.c
index 232e19c5afe5faf6c1491d482bc3697efe01308e..f67bcc5807e1c05085fd3d04e4f8eeb74ae285b8 100644
--- a/kernel/sched/rr.c
+++ b/kernel/sched/rr.c
@@ -89,7 +89,7 @@ static void rr_enqueue(struct task_queue *tq, struct task_struct *task)
 
 static ktime rr_timeslice_ns(struct task_struct *task)
 {
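+	/* e.g. with a minimum tick period of 100 us, a priority 4 RR task
+	 * now gets a 400 ms timeslice; the factor of 1000 looks like a
+	 * temporary scaling for testing
+	 */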
-	return (ktime) task->attr.priority * tick_get_period_min_ns();
+	return (ktime) task->attr.priority * tick_get_period_min_ns() * 1000LL;
 }
 
 
diff --git a/kernel/tick.c b/kernel/tick.c
index 5269e26892b0261606eeb0f71b71a31756367667..4a6e97b961c17e9a50ca9d3bd6488515b704db45 100644
--- a/kernel/tick.c
+++ b/kernel/tick.c
@@ -125,7 +125,7 @@ static void tick_calibrate_min(struct clock_event_device *dev)
 {
 #define RANDOM_TICK_RATE_NS	18000UL
 	tick_period_min_ns = RANDOM_TICK_RATE_NS;
-#define MIN_SLICE		1000000UL
+#define MIN_SLICE		100000UL
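+	/* note: this assignment overrides RANDOM_TICK_RATE_NS above, so
+	 * the effective minimum tick period is 100 us
+	 */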
 	tick_period_min_ns = MIN_SLICE;
 }