diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index d1f045ee30ad6397c9d0055fe8e581f4662654d2..128d22c05933abb646cbc34924a659b432621603 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -132,8 +132,8 @@ void smp_cpu_entry(void)
       iowrite32be(0x3, &cpu1_ready);
 	while (ioread32be(&cpu1_ready) != 0x4);
       while(1) {
-	     //printk(".");
-//	      cpu_relax();
+//	     printk(".\n");
+	      cpu_relax();
       }
 //	      printk("1\n");
 }
diff --git a/include/kernel/sched.h b/include/kernel/sched.h
index ed0d2874753aaee2b57a5520447249f96e1f90da..160683791c0eea5e739ee495daa2b24a9659e213 100644
--- a/include/kernel/sched.h
+++ b/include/kernel/sched.h
@@ -47,7 +47,7 @@ struct rq {
 
 struct task_queue {
 	struct list_head new;
-	struct list_head run[CONFIG_SMP_CPUS_MAX];
+	struct list_head run;
 	struct list_head wake;
 	struct list_head dead;
 };
@@ -61,18 +61,20 @@ struct task_queue {
 #if 1
 struct scheduler {
 
-	struct task_queue	tq;
+	struct task_queue	tq[CONFIG_SMP_CPUS_MAX]; /* XXX */
 
 	const enum sched_policy policy;
 
-	struct task_struct *(*pick_next_task)(struct task_queue *tq, ktime now);
+	struct task_struct *(*pick_next_task)(struct task_queue tq[], int cpu,
+					      ktime now);
 
 	/* XXX: sucks */
-	void (*wake_next_task)(struct task_queue *tq, ktime now);
-	void (*enqueue_task)  (struct task_queue *tq, struct task_struct *task);
+	void (*wake_next_task)  (struct task_queue tq[], int cpu, ktime now);
+	void (*enqueue_task)    (struct task_queue tq[],
+			         struct task_struct *task);
 
-	ktime (*timeslice_ns) (struct task_struct *task);
-	ktime (*task_ready_ns) (struct task_queue *tq, ktime now);
+	ktime (*timeslice_ns)   (struct task_struct *task);
+	ktime (*task_ready_ns)  (struct task_queue tq[], int cpu, ktime now);
 
 	int (*check_sched_attr) (struct sched_attr *attr);
 
diff --git a/init/main.c b/init/main.c
index a0025a97b8178479e5022651e7a18054ad06cfb5..c78a8d891ca2b21b34ea7afdfa78546e9af56e50 100644
--- a/init/main.c
+++ b/init/main.c
@@ -52,7 +52,7 @@ int task1(void *data)
 
 		xa++;
 
-	//	printk("# %d #\n", leon3_cpuid());
+	//	printk("t1 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
 	//	sched_yield();
 	}
 }
@@ -64,6 +64,7 @@ int task2(void *data)
 		//printk("x %llu\n", ktime_get());
 		//printk("_");
 		xb++;
+	//	printk("t2 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
 	//	sched_yield();
 	//	printk("-");
 	//	sched_yield();
@@ -90,6 +91,7 @@ int task3(void *data)
 #endif
 		//printk("y %llu\n", ktime_get());
 	//	printk(".");
+//		printk("t3 %d %llu\n", leon3_cpuid(), ktime_to_us(ktime_get()));
 		xc++;
 
 	//	sched_yield();
@@ -279,6 +281,8 @@ int kernel_main(void)
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
+
+
 #if 1
 
 	t = kthread_create(task0, NULL, 0, "res");
@@ -286,12 +290,15 @@ int kernel_main(void)
 	attr.policy = SCHED_EDF;
 	attr.period       = ms_to_ktime(1000);
 	attr.deadline_rel = ms_to_ktime(900);
-	attr.wcet         = ms_to_ktime(800);
+	attr.wcet         = ms_to_ktime(200);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
+
+
+
 #if 1
-	t = kthread_create(task1, NULL, 0, "task1");
+	t = kthread_create(task1, NULL, 1, "task1");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
 	attr.period       = ms_to_ktime(500);
@@ -313,19 +320,18 @@ int kernel_main(void)
 #endif
 
 #if 1
-	t = kthread_create(task3, NULL, 1, "task3");
+	t = kthread_create(task3, NULL, 0, "task3");
 	sched_get_attr(t, &attr);
 	attr.policy = SCHED_EDF;
 	attr.period       = ms_to_ktime(800);
 	attr.deadline_rel = ms_to_ktime(700);
-	attr.wcet         = ms_to_ktime(200);
+	attr.wcet         = ms_to_ktime(100);
 	sched_set_attr(t, &attr);
 	kthread_wake_up(t);
 #endif
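+
+	/* the two EDF threads created on cpu 0 above ("res", "task3") add up
+	 * to 200/1000 + 100/800 ~= 0.33 utilisation, well below the 0.9
+	 * admission limit; "task1" is created on cpu 1
+	 */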
 
 
 
-
 #endif
 
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 65ab90cb7f509bc86d2f71c5acf92f5476ab38bd..18b16db08600fc18d6ac8a863bf523bfbc15022b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -91,10 +91,8 @@ retry:
 	/* XXX: for now, try to wake up any threads not running
 	 * this is a waste of cycles and instruction space; should be
 	 * done in the scheduler's code (somewhere) */
-	kthread_lock();
 	list_for_each_entry(sched, &kernel_schedulers, node)
-		sched->wake_next_task(&sched->tq, now);
-	kthread_unlock();
+		sched->wake_next_task(sched->tq, leon3_cpuid(), now);
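+	/* note: the EDF scheduler now takes the kthread lock internally */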
 
 
 	/* XXX need sorted list: highest->lowest scheduler priority, e.g.:
@@ -109,7 +107,7 @@ retry:
 		/* if one of the schedulers have a task which needs to run now,
 		 * next is non-NULL
 		 */
-		next = sched->pick_next_task(&sched->tq, now);
+		next = sched->pick_next_task(sched->tq, leon3_cpuid(), now);
 
 #if 0
 		if (next)
@@ -185,7 +183,7 @@ retry:
 	 * schedulers, e.g. EDF
 	 */
 
-	wake_ns = sched->task_ready_ns(&sched->tq, now);
+	wake_ns = sched->task_ready_ns(sched->tq, leon3_cpuid(), now);
 
 	if (wake_ns > 0)
 		if (wake_ns < slot_ns)
@@ -233,7 +231,7 @@ int sched_enqueue(struct task_struct *task)
 	if (task->sched->check_sched_attr(&task->attr))
 		return -EINVAL;
 
-	task->sched->enqueue_task(&task->sched->tq, task);
+	task->sched->enqueue_task(task->sched->tq, task);
 
 	return 0;
 }
diff --git a/kernel/sched/edf.c b/kernel/sched/edf.c
index 74c035008d6d2652ccbe50a17a824fb6322f5948..1471fcd00f52fdf0907c52e6a3c97e5468403a9d 100644
--- a/kernel/sched/edf.c
+++ b/kernel/sched/edf.c
@@ -33,7 +33,7 @@ void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 	printk("\nktime: %lld\n", ktime_to_us(now));
 	printk("S\tDeadline\tWakeup\t\tdelta W\tdelta P\tt_rem\ttotal\tslices\tName\twcet\tavg\n");
 	printk("---------------------------------------------------------------------------------------------------------------------------------\n");
-	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
+	list_for_each_entry_safe(tsk, tmp, &tq->run, node) {
 
 		if (tsk->attr.policy == SCHED_RR)
 			continue;
@@ -102,6 +102,7 @@ void sched_print_edf_list_internal(struct task_queue *tq, ktime now)
 
 static inline bool schedule_edf_can_execute(struct task_struct *tsk, ktime now)
 {
+	int cpu = leon3_cpuid();
 	int64_t to_deadline;
 
 
@@ -111,12 +112,14 @@ static inline bool schedule_edf_can_execute(struct task_struct *tsk, ktime now)
 		return false;
 
 	if (ktime_before(tsk->deadline, now))  {
-		sched_print_edf_list_internal(&tsk->sched->tq, now);
+#if 1
+		sched_print_edf_list_internal(&tsk->sched->tq[cpu], now);
 		printk("%s violated, %lld %lld, dead %lld wake %lld now %lld start %lld\n", tsk->name,
 		       tsk->runtime, ktime_us_delta(tsk->deadline, now),
 		       tsk->deadline, tsk->wakeup, now, tsk->exec_start);
 	//	sched_print_edf_list_internal(now);
 		BUG();
+#endif
 		return false;
 	}
 
@@ -165,10 +168,81 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
 #define MSG "SCHED_EDF: "
 
 
+
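+/* the hyperperiod is the least common multiple of all relevant task periods,
+ * accumulated pairwise via the gcd below; e.g. periods of 500, 800 and
+ * 1000 ms give lcm(500, 800, 1000) = 4000 ms
+ */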
+static ktime edf_hyperperiod(struct task_queue tq[], int cpu,
+			     const struct task_struct *task)
+{
+	ktime a, b;
+	ktime lcm;
+
+	struct task_struct *tsk;
+	struct task_struct *tmp;
+
+
+	lcm = task->attr.period;
+
+	/* argh, need to consider everything */
+	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
+
+		a = lcm;
+		b = tsk->attr.period;
+
+		/* already a multiple? */
+		if (a % b == 0)
+			continue;
+
+		/* gcd by successive subtraction */
+		while (a != b) {
+			if (a > b)
+				a -= b;
+			else
+				b -= a;
+		}
+
+		/* lcm(lcm, period) = lcm * period / gcd */
+		lcm = lcm * (tsk->attr.period / a);
+	}
+
+
+	/* meh ... same procedure for the wakeup queue */
+	list_for_each_entry_safe(tsk, tmp, &tq[cpu].wake, node) {
+
+		a = lcm;
+		b = tsk->attr.period;
+
+		/* already a multiple? */
+		if (a % b == 0)
+			continue;
+
+		while (a != b) {
+			if (a > b)
+				a -= b;
+			else
+				b -= a;
+		}
+
+		lcm = lcm * (tsk->attr.period / a);
+	}
+
+
+	return lcm;
+}
+
+
 /**
  * @brief EDF schedulability test
  *
- * @returns 0 if schedulable, <0 otherwise
+ * @returns the cpu to run on if schedulable, -EINVAL otherwise
  *
  *
  * * 1) determine task with longest period
@@ -178,7 +252,7 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
  * 2) calculate unused head and tail (before and after deadline)
  *
  *	UH = D1 - R1				(= 20) (Hyperperiod)
- *	UT = P1 - D1				(= 60) 
+ *	UT = P1 - D1				(= 60)
  *
  * 3) loop over other tasks (Period < Deadline of Task 1)
  *
@@ -211,7 +285,7 @@ static inline void schedule_edf_reinit_task(struct task_struct *tsk, ktime now)
  *	 XXX function needs adaptation
  */
 
-static int edf_schedulable(struct task_queue *tq, const struct task_struct *task)
+static int edf_schedulable(struct task_queue tq[], const struct task_struct *task)
 {
 	struct task_struct *tsk = NULL;
 	struct task_struct *tmp;
@@ -222,73 +296,262 @@ static int edf_schedulable(struct task_queue *tq, const struct task_struct *task
 
 
 
-	/* add all in new */
-	if (!list_empty(&tq->new)) {
-		list_for_each_entry_safe(tsk, tmp, &tq->new, node) {
 
-			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
 
+	if (task->on_cpu == KTHREAD_CPU_AFFINITY_NONE) {
+		int i;
+		double util_max = 0.0;
+		double util;
+
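+		/* "best fit": place the task on the most loaded cpu that can
+		 * still take it, so the other cpu keeps the largest block of
+		 * spare capacity
+		 */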
+		for (i = 0; i < 2; i++) {
+			/* XXX look for best fit */
+			/* add new task */
+			util = (double) (int32_t) task->attr.wcet / (double) (int32_t) task->attr.period;
+
+
+			/* add tasks queued in wakeup */
+			if (!list_empty(&tq[i].wake)) {
+				list_for_each_entry_safe(tsk, tmp, &tq[i].wake, node) {
+					util += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+				}
+
+			}
+
+			/* add all running */
+			if (!list_empty(&tq[i].run)) {
+				list_for_each_entry_safe(tsk, tmp, &tq[i].run, node)
+					util += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+			}
+
+
+			if (util > 0.9)
+				continue;
+
+			if (util > util_max) {
+				util_max = util;
+				cpu = i;
+			}
+
+		}
+		printk("best fit is %d\n", cpu);
+	} else {
+		cpu = task->on_cpu;
+	}
+
+
+	/******/
+if (0)
+{
+	ktime p;
+	ktime h;
+	ktime max = 0;
+
+	ktime uh, ut, f1;
+	ktime sh = 0, st = 0;
+	ktime stmin = 0x7fffffffffffffULL;
+	ktime shmin = 0x7fffffffffffffULL;
+
+	struct task_struct *t0;
+
+	printk("\n\n\n");
+
+	printk("hyper? %s %lld\n", task->name, ktime_to_ms(task->attr.period));
+	p = edf_hyperperiod(tq, cpu, task);
+	printk("hyper %llu\n", ktime_to_ms(p));
+
+
+	/* new */
+	t0 = (struct task_struct *) task;
+	max = task->attr.period;
+
+	/* add tasks queued in wakeup */
+	if (!list_empty(&tq[cpu].wake)) {
+		list_for_each_entry_safe(tsk, tmp, &tq[cpu].wake, node) {
+			 if (tsk->attr.period > max) {
+				 t0 = tsk;
+				 max = tsk->attr.period;
+			 }
 		}
 	}
 
+	/* add tasks queued in run */
+	if (!list_empty(&tq[cpu].run)) {
+		list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
+			 if (tsk->attr.period > max) {
+				 t0 = tsk;
+				 max = tsk->attr.period;
+			 }
+		}
+	}
 
+	h = p / t0->attr.period;
 
-	/* add all in wakeup */
-	if (!list_empty(&tq->wake)) {
-		list_for_each_entry_safe(tsk, tmp, &tq->wake, node) {
+	printk("Period factor %lld, duration %lld actual period: %lld\n", h, ktime_to_ms(p), ktime_to_ms(t0->attr.period));
 
 
-			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
-		}
+	uh = h * (t0->attr.deadline_rel - t0->attr.wcet);
+	ut = h * (t0->attr.period - t0->attr.deadline_rel);
+	f1 = ut/h;
+
+	printk("max UH: %lld, UT: %lld\n", ktime_to_ms(uh), ktime_to_ms(ut));
+
+
+
+	/* subtract longest period thread from head, its slices must always
+	 * be used before the deadline
+	 */
+	sh = h * t0->attr.wcet * t0->attr.deadline_rel / t0->attr.period;
 
+	if (sh < shmin)
+		shmin = sh;
+
+	uh = uh - sh;
+	printk("%s UH: %lld, UT: %lld\n", t0->name, ktime_to_ms(uh), ktime_to_ms(ut));
+	printk("%s SH: %lld, ST: %lld\n", t0->name, ktime_to_ms(sh), ktime_to_ms(st));
+
+
+
+	/* tasks queued in wakeup */
+	if (!list_empty(&tq[cpu].wake)) {
+		list_for_each_entry_safe(tsk, tmp, &tq[cpu].wake, node) {
+
+			if (tsk == t0)
+				continue;
+
+			if (tsk->attr.deadline_rel <= t0->attr.deadline_rel) {
+
+				/* slots before deadline of  T0 */
+				sh = h * tsk->attr.wcet * t0->attr.deadline_rel / tsk->attr.period;
+				if (sh < shmin)
+					shmin = sh;
+				if (sh > uh) {
+					printk("WARN: NOT SCHEDULABLE in head: %s\n", tsk->name);
+				}
+				uh = uh - sh;
+			}
+
+			/* slots after deadline of T0 */
+			st = h * tsk->attr.wcet * f1 / tsk->attr.period;
+			if (st < stmin)
+				stmin = st;
+
+			if (st > ut) {
+				printk("WARN: NOT SCHEDULABLE in tail: %s\n", tsk->name);
+			}
+
+			ut = ut - st;
+
+			printk("w %s UH: %lld, UT: %lld\n", tsk->name, ktime_to_ms(uh), ktime_to_ms(ut));
+
+			printk("w %s SH: %lld, ST: %lld\n", tsk->name, ktime_to_ms(sh), ktime_to_ms(st));
+
+		}
 	}
 
-	/* add all running */
 
-	for (cpu = 0; cpu < 2; cpu++) {
-		if (!list_empty(&tq->run[cpu])) {
-			list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node)
-				u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+	/* tasks queued in run */
+	if (!list_empty(&tq[cpu].run)) {
+		list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
+
+			if (tsk == t0)
+				continue;
+
+			if (tsk->attr.deadline_rel <= t0->attr.deadline_rel) {
+
+				/* slots before deadline of  T0 */
+				sh = h * tsk->attr.wcet * t0->attr.deadline_rel / tsk->attr.period;
+				if (sh < shmin)
+					shmin = sh;
+				if (sh > uh) {
+					printk("WARN: NOT SCHEDULABLE in head: %s\n", tsk->name);
+				}
+				uh = uh - sh;
+			}
+
+			/* slots after deadline of T0 */
+			st = h * tsk->attr.wcet * f1 / tsk->attr.period;
+			if (st < stmin)
+				stmin = st;
+
+			if (st > ut) {
+				printk("WARN: NOT SCHEDULABLE in tail: %s\n", tsk->name);
+			}
+
+			ut = ut - st;
+
+			printk("r %s UH: %lld, UT: %lld\n", tsk->name, ktime_to_ms(uh), ktime_to_ms(ut));
+
+			printk("r %s SH: %lld, ST: %lld\n", tsk->name, ktime_to_ms(sh), ktime_to_ms(st));
+
 		}
 	}
 
 
 
 
-	if (u >= 1.9) {
+
+
+	printk("\n\n\n");
+}
+	/*******/
+
+
+
+
+	/* add new task */
+	u += (double) (int32_t) task->attr.wcet / (double) (int32_t) task->attr.period;
+
+
+
+	/* add tasks queued in wakeup */
+	if (!list_empty(&tq[cpu].wake)) {
+		list_for_each_entry_safe(tsk, tmp, &tq[cpu].wake, node) {
+			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+		}
+
+	}
+
+	/* add all running */
+	if (!list_empty(&tq[cpu].run)) {
+		list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node)
+			u += (double) (int32_t) tsk->attr.wcet / (double) (int32_t) tsk->attr.period;
+	}
+
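+	/* EDF admission: the summed utilisation (wcet / period over all tasks
+	 * on this cpu) must stay below 1.0; cap at 0.9 to leave some margin
+	 */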
+	if (u > 0.9) {
 		printk("I am NOT schedul-ableh: %f ", u);
 		BUG();
 		return -EINVAL;
 		printk("changed task mode to RR\n", u);
-	} else {
-		printk("Utilisation: %g\n", u);
-		return 0;
 	}
 
+	printk("Utilisation: %g\n", u);
+
 
 	/* TODO check against projected interrupt rate, we really need a limit
 	 * there */
 
-	return 0;
+	return cpu;
 }
 
+void kthread_lock(void);
+void kthread_unlock(void);
 #include <asm/processor.h>
-static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
+static struct task_struct *edf_pick_next(struct task_queue *tq, int cpu,
+					 ktime now)
 {
 	int64_t delta;
 
-	int cpu = leon3_cpuid();
-
 	struct task_struct *tsk;
 	struct task_struct *tmp;
 	struct task_struct *first;
 
 	static int cnt;
 
-	if (list_empty(&tq->run[cpu]))
+	if (list_empty(&tq[cpu].run))
 		return NULL;
 
-	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
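+	/* the per-cpu queues may be touched from the other core (e.g. on
+	 * enqueue), so hold the kthread lock while reordering the run list
+	 */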
+	kthread_lock();
+	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
 
 		/* time to wake up yet? */
 		delta = ktime_delta(tsk->wakeup, now);
@@ -318,7 +581,7 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 				schedule_edf_reinit_task(tsk, now);
 
 				/* always queue it up at the tail */
-				list_move_tail(&tsk->node, &tq->run[cpu]);
+				list_move_tail(&tsk->node, &tq[cpu].run);
 			}
 
 
@@ -326,12 +589,12 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 			 * head of the list, we become the new head
 			 */
 
-			first = list_first_entry(&tq->run[cpu], struct task_struct, node);
+			first = list_first_entry(&tq[cpu].run, struct task_struct, node);
 
 			if (ktime_before (tsk->wakeup, now)) {
 				if (ktime_before (tsk->deadline - tsk->runtime, first->deadline)) {
 					tsk->state = TASK_RUN;
-					list_move(&tsk->node, &tq->run[cpu]);
+					list_move(&tsk->node, &tq[cpu].run);
 				}
 			}
 
@@ -347,12 +610,12 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 			/* if our deadline is earlier than the deadline at the
 			 * head of the list, move us to top */
 
-			first = list_first_entry(&tq->run[cpu], struct task_struct, node);
+			first = list_first_entry(&tq[cpu].run, struct task_struct, node);
 
-			list_move(&tsk->node, &tq->run[cpu]);
+			list_move(&tsk->node, &tq[cpu].run);
 
 			if (ktime_before (tsk->deadline - tsk->runtime, first->deadline))
-				list_move(&tsk->node, &tq->run[cpu]);
+				list_move(&tsk->node, &tq[cpu].run);
 
 			continue;
 		}
@@ -360,11 +623,12 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 	}
 
 	/* XXX argh, need cpu affinity run lists */
-	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
+	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
 
 		if (tsk->state == TASK_RUN) {
 
 			tsk->state = TASK_BUSY;
+			kthread_unlock();
 			return tsk;
 		}
 	}
@@ -381,15 +645,14 @@ static struct task_struct *edf_pick_next(struct task_queue *tq, ktime now)
 	       return first;
        }
 #endif
+	kthread_unlock();
 
 	return NULL;
 }
 
 
-static void edf_wake_next(struct task_queue *tq, ktime now)
+static void edf_wake_next(struct task_queue *tq, int cpu, ktime now)
 {
-	int cpu = leon3_cpuid();
-
 	ktime last;
 
 	struct task_struct *tmp;
@@ -402,9 +665,10 @@ static void edf_wake_next(struct task_queue *tq, ktime now)
 
 
 
-	if (list_empty(&tq->wake))
+	if (list_empty(&tq[cpu].wake))
 		return;
 
+	kthread_lock();
 	last = now;
 #if 0 /* OK */
 	list_for_each_entry_safe(task, tmp, &tq->run, node) {
@@ -414,24 +678,23 @@ static void edf_wake_next(struct task_queue *tq, ktime now)
 #endif
 
 #if 1 /* better */
-	task = list_entry(tq->wake.next, struct task_struct, node);
-
-
-	BUG_ON(task->on_cpu == KTHREAD_CPU_AFFINITY_NONE); 
+	task = list_first_entry(&tq[cpu].wake, struct task_struct, node);
 
-	cpu = task->on_cpu;
 
-	list_for_each_entry_safe(t, tmp, &tq->run[cpu], node) {
+	BUG_ON(task->on_cpu == KTHREAD_CPU_AFFINITY_NONE);
 
-		first = list_first_entry(&tq->run[cpu], struct task_struct, node);
+	if (!list_empty(&tq[cpu].run)) {
+	list_for_each_entry_safe(t, tmp, &tq[cpu].run, node) {
+		first = list_first_entry(&tq[cpu].run, struct task_struct, node);
 		if (ktime_before (t->wakeup, now)) {
 			if (ktime_before (t->deadline - t->runtime, first->deadline)) {
-				list_move(&t->node, &tq->run[cpu]);
+				list_move(&t->node, &tq[cpu].run);
 			}
 		}
 	}
+	}
 
-	list_for_each_entry_safe(t, tmp, &tq->run[cpu], node) {
+	list_for_each_entry_safe(t, tmp, &tq[cpu].run, node) {
 
 		/* if the relative deadline of task-to-wake can fit in between the unused
 		 * timeslice of this running task, insert after the next wakeup
@@ -464,28 +727,35 @@ static void edf_wake_next(struct task_queue *tq, ktime now)
 	task->first_dead = task->deadline;
 
 
-	list_move_tail(&task->node, &tq->run[cpu]);
+	list_move_tail(&task->node, &tq[cpu].run);
+	kthread_unlock();
 }
 
 
 
 
-static void edf_enqueue(struct task_queue *tq, struct task_struct *task)
+static void edf_enqueue(struct task_queue tq[], struct task_struct *task)
 {
+	int cpu;
+
+
 	/* reset runtime to full */
 	task->runtime = task->attr.wcet;
 
 	/* XXX */
-	list_add_tail(&task->node, &tq->new);
+	list_add_tail(&task->node, &tq[leon3_cpuid()].new);
 
 
 	if (task->sched->check_sched_attr(&task->attr))
 		return;
 
-	if (edf_schedulable(tq, task)) {
+	cpu = edf_schedulable(tq, task);
+
+	if (cpu < 0) {
 		printk("---- NOT SCHEDUL-ABLE---\n");
 		return;
 	}
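+	/* pin the task to the cpu selected by the admission test */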
+	task->on_cpu = cpu;
 #if 0
 	/** XXX **/
 	if (task->state == TASK_RUN)
@@ -494,7 +764,7 @@ static void edf_enqueue(struct task_queue *tq, struct task_struct *task)
 
 #endif
 #if 1
-	list_move_tail(&task->node, &tq->wake);
+	list_move_tail(&task->node, &tq[cpu].wake);
 #endif
 
 }
@@ -553,12 +823,10 @@ error:
 /* called after pick_next() */
 
 
-ktime edf_task_ready_ns(struct task_queue *tq, ktime now)
+ktime edf_task_ready_ns(struct task_queue *tq, int cpu, ktime now)
 {
 	int64_t delta;
 
-	int cpu = leon3_cpuid();
-
 	struct task_struct *first;
 	struct task_struct *tsk;
 	struct task_struct *tmp;
@@ -567,7 +835,7 @@ ktime edf_task_ready_ns(struct task_queue *tq, ktime now)
 
 
 
-	list_for_each_entry_safe(first, tmp, &tq->run[cpu], node) {
+	list_for_each_entry_safe(first, tmp, &tq[cpu].run, node) {
 		if (first->state != TASK_RUN)
 			continue;
 
@@ -575,7 +843,7 @@ ktime edf_task_ready_ns(struct task_queue *tq, ktime now)
 		break;
 	}
 
-	list_for_each_entry_safe(tsk, tmp, &tq->run[cpu], node) {
+	list_for_each_entry_safe(tsk, tmp, &tq[cpu].run, node) {
 #if 0
 		if (tsk->state == TASK_BUSY)
 			continue; /*meh */
@@ -615,12 +883,13 @@ static int sched_edf_init(void)
 {
 	int i;
 
-	INIT_LIST_HEAD(&sched_edf.tq.new);
-	INIT_LIST_HEAD(&sched_edf.tq.wake);
-	INIT_LIST_HEAD(&sched_edf.tq.dead);
 
-	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++)
-		INIT_LIST_HEAD(&sched_edf.tq.run[i]);
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++) {
+		INIT_LIST_HEAD(&sched_edf.tq[i].new);
+		INIT_LIST_HEAD(&sched_edf.tq[i].wake);
+		INIT_LIST_HEAD(&sched_edf.tq[i].run);
+		INIT_LIST_HEAD(&sched_edf.tq[i].dead);
+	}
 
 	sched_register(&sched_edf);
 
diff --git a/kernel/sched/rr.c b/kernel/sched/rr.c
index 4268be1323251c49086a9b5947d6b19d43cad54c..b3e09f53b702212da55faf02e81d4739721d08e2 100644
--- a/kernel/sched/rr.c
+++ b/kernel/sched/rr.c
@@ -13,27 +13,28 @@
 #define MSG "SCHED_RR: "
 
 #include <asm/processor.h>
-static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
+static struct task_struct *rr_pick_next(struct task_queue tq[], int cpu,
+					ktime now)
 {
 	struct task_struct *next = NULL;
 	struct task_struct *tmp;
 
 
-	if (list_empty(&tq->run[leon3_cpuid()]))
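+	/* all RR tasks currently share the queue set of cpu 0; per-task cpu
+	 * affinity is checked further down
+	 */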
+	if (list_empty(&tq[0].run))
 		return NULL;
 
-	list_for_each_entry_safe(next, tmp, &tq->run[leon3_cpuid()], node) {
+	list_for_each_entry_safe(next, tmp, &tq[0].run, node) {
 
 
 		if (next->on_cpu == KTHREAD_CPU_AFFINITY_NONE
-		    || next->on_cpu == leon3_cpuid()) {
+		    || next->on_cpu == cpu) {
 
 		if (next->state == TASK_RUN) {
 			/* XXX: must pick head first, then move tail on put()
 			 * following a scheduling event. for now, just force
 			 * round robin
 			 */
-			list_move_tail(&next->node, &tq->run[leon3_cpuid()]);
+			list_move_tail(&next->node, &tq[0].run);
 
 			/* reset runtime */
 			next->runtime = (next->attr.priority * tick_get_period_min_ns());
@@ -43,10 +44,10 @@ static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
 		}
 
 		if (next->state == TASK_IDLE)
-			list_move_tail(&next->node, &tq->run[leon3_cpuid()]);
+			list_move_tail(&next->node, &tq[0].run);
 
 		if (next->state == TASK_DEAD)
-			list_move_tail(&next->node, &tq->dead);
+			list_move_tail(&next->node, &tq[0].dead);
 
 		break;
 
@@ -57,8 +58,6 @@ static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
 		}
 
 
-
-
 	}
 
 
@@ -67,33 +66,33 @@ static struct task_struct *rr_pick_next(struct task_queue *tq, ktime now)
 
 
 /* this sucks, wrong place. keep for now */
-static void rr_wake_next(struct task_queue *tq, ktime now)
+static void rr_wake_next(struct task_queue tq[], int cpu, ktime now)
 {
 
 	struct task_struct *task;
 
-	if (list_empty(&tq->wake))
+	if (list_empty(&tq[0].wake))
 		return;
 
 
-	task = list_entry(tq->wake.next, struct task_struct, node);
+	task = list_entry(tq[0].wake.next, struct task_struct, node);
 
 	BUG_ON(task->attr.policy != SCHED_RR);
 	/** XXX NO LOCKS */
 	task->state = TASK_RUN;
-	list_move(&task->node, &tq->run[leon3_cpuid()]);
+	list_move(&task->node, &tq[0].run);
 }
 
 
-static void rr_enqueue(struct task_queue *tq, struct task_struct *task)
+static void rr_enqueue(struct task_queue tq[], struct task_struct *task)
 {
 
 	task->runtime = (task->attr.priority * tick_get_period_min_ns());
 	/** XXX **/
 	if (task->state == TASK_RUN)
-		list_add_tail(&task->node, &tq->run[leon3_cpuid()]);
+		list_add_tail(&task->node, &tq[0].run);
 	else
-		list_add_tail(&task->node, &tq->wake);
+		list_add_tail(&task->node, &tq[0].wake);
 }
 
 /**
@@ -145,7 +144,7 @@ static int rr_check_sched_attr(struct sched_attr *attr)
  *	 so this function always returns 0
  */
 
-ktime rr_task_ready_ns(struct task_queue *tq, ktime now)
+ktime rr_task_ready_ns(struct task_queue tq[], int cpu, ktime now)
 {
 	return (ktime) 0ULL;
 }
@@ -171,12 +170,13 @@ static int sched_rr_init(void)
 	int i;
 
 	/* XXX */
-	INIT_LIST_HEAD(&sched_rr.tq.new);
-	INIT_LIST_HEAD(&sched_rr.tq.wake);
-	INIT_LIST_HEAD(&sched_rr.tq.dead);
 
-	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++)
-		INIT_LIST_HEAD(&sched_rr.tq.run[i]);
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++) {
+		INIT_LIST_HEAD(&sched_rr.tq[i].new);
+		INIT_LIST_HEAD(&sched_rr.tq[i].wake);
+		INIT_LIST_HEAD(&sched_rr.tq[i].run);
+		INIT_LIST_HEAD(&sched_rr.tq[i].dead);
+	}
 
 
 	sched_register(&sched_rr);