Commit 70f779fb authored by Armin Luntzer

save last working state

parent e33bcc77
@@ -120,11 +120,11 @@ static void boot_cpus(void)
 	for (i = 1; i < CONFIG_SMP_CPUS_MAX; i++) {
-		printk("booting cpu %d\n", i);
+		pr_info("booting cpu %d\n", i);
 		cpu_wake(i);
 		while (!ioread32be(&cpu_ready[i]));
-		printk("cpu %d booted\n", i);
+		pr_info("cpu %d booted\n", i);
 	}
 }
@@ -141,7 +141,7 @@ void smp_cpu_entry(void)
 	arch_local_irq_enable();
-	printk("hi i'm cpu %d\n", leon3_cpuid());
+	pr_info("hi i'm cpu %d\n", leon3_cpuid());
 	BUG_ON(!leon3_cpuid());
 	/* signal ready */
...
@@ -79,7 +79,11 @@ struct task_struct {
 	ktime wakeup;		/* start of next period */
 	ktime deadline;		/* deadline of current period */
+	ktime create;		/* time of task creation */
+	ktime wakeup_first;	/* time of first wakeup */
 	ktime exec_start;
+	ktime exec_stop;	/* end of the most recent execution slice */
 	ktime total;
 	unsigned long slices;
...
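The ktime fields added above are timestamps that the rest of this commit fills in (create in kthread_create_internal(), wakeup_first in kthread_wake_up(), exec_stop in sched_update_runtime()) and that oneshotedf_start() in demo.c later prints as offsets from task creation. A minimal sketch of that read-out, using only fields visible in this diff:

	/* scheduling latencies of a task, expressed relative to its creation time */
	ktime wake_latency  = ktime_delta(task->wakeup_first, task->create);
	ktime start_latency = ktime_delta(task->exec_start,   task->create);
	ktime stop_offset   = ktime_delta(task->exec_stop,    task->create);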
 obj-y += main.o
+obj-y += demo.o
+obj-y += demo_net.o
 obj-$(CONFIG_XENTIUM_PROC_DEMO) += xentium_demo.o
 obj-$(CONFIG_EMBED_MODULES_IMAGE) += modules-image.o
-/**
- * This creates a number of processing nodes in a processing network.
- * Two special trackers are used for input and output.
- */
 #include <kernel/kernel.h>
 #include <kernel/kmem.h>
 #include <kernel/kthread.h>
+#include <kernel/err.h>
+#include <kernel/smp.h>
+#include <asm/io.h>
-
-#include <data_proc_task.h>
-#include <data_proc_tracker.h>
-#include <data_proc_net.h>
-
-#define CRIT_LEVEL 10
-
-#define OP_ADD 0x1234
-#define OP_SUB 0x1235
-#define OP_MUL 0x1236
-
-#define STEPS 3
-
-int op_output(unsigned long op_code, struct proc_task *t)
-{
-	ssize_t i;
-	ssize_t n;
-
-	unsigned int *p = NULL;
-
-	n = pt_get_nmemb(t);
-	printk("OUT: op code %d, %d items\n", op_code, n);
-
-	if (!n)
-		goto exit;
-
-	p = (unsigned int *) pt_get_data(t);
-	if (!p)
-		goto exit;
-
-	for (i = 0; i < n; i++) {
-		printk("\t%d\n", p[i]);
-	}
-
-exit:
-	kfree(p);	/* clean up our data buffer */
-
-	pt_destroy(t);
-
-	return PN_TASK_SUCCESS;
-}
-
-int op_add(unsigned long op_code, struct proc_task *t)
-{
-	ssize_t i;
-	ssize_t n;
-
-	unsigned int *p;
-
-	n = pt_get_nmemb(t);
-	if (!n)
-		return PN_TASK_SUCCESS;
-
-	p = (unsigned int *) pt_get_data(t);
-	if (!p)	/* we have elements but data is NULL, error */
-		return PN_TASK_DESTROY;
-
-	printk("ADD: op code %d, %d items\n", op_code, n);
-
-	for (i = 0; i < n; i++) {
-		p[i] += 10;
-	}
-
-	return PN_TASK_SUCCESS;
-}
-
-int op_sub(unsigned long op_code, struct proc_task *t)
-{
-	ssize_t i;
-	ssize_t n;
-
-	unsigned int *p;
-
-	n = pt_get_nmemb(t);
-	if (!n)
-		return PN_TASK_SUCCESS;
-
-	p = (unsigned int *) pt_get_data(t);
-	if (!p)	/* we have elements but data is NULL, error */
-		return PN_TASK_DESTROY;
-
-	printk("SUB: op code %d, %d items\n", op_code, n);
-
-	for (i = 0; i < n; i++) {
-		p[i] -= 2;
-	}
-
-	return PN_TASK_SUCCESS;
-}
-
-int op_mul(unsigned long op_code, struct proc_task *t)
-{
-	ssize_t i;
-	ssize_t n;
-
-	unsigned int *p;
-
-	n = pt_get_nmemb(t);
-	if (!n)
-		return PN_TASK_SUCCESS;
-
-	p = (unsigned int *) pt_get_data(t);
-	if (!p)	/* we have elements but data is NULL, error */
-		return PN_TASK_DESTROY;
-
-	printk("MUL: op code %d, %d items\n", op_code, n);
-
-	for (i = 0; i < n; i++) {
-		p[i] *= 3;
-	}
-
-	return PN_TASK_SUCCESS;
-}
-
-int pn_prepare_nodes(struct proc_net *pn)
-{
-	struct proc_tracker *pt;
-
-	/* create and add processing node trackers for each operation */
-	pt = pt_track_create(op_add, OP_ADD, CRIT_LEVEL);
-	BUG_ON(!pt);
-	BUG_ON(pn_add_node(pn, pt));
-
-	pt = pt_track_create(op_sub, OP_SUB, CRIT_LEVEL);
-	BUG_ON(!pt);
-	BUG_ON(pn_add_node(pn, pt));
-
-	pt = pt_track_create(op_mul, OP_MUL, CRIT_LEVEL);
-	BUG_ON(!pt);
-	BUG_ON(pn_add_node(pn, pt));
-
-	BUG_ON(pn_create_output_node(pn, op_output));
-
-	return 0;
-}
-
-void pn_new_input_task(struct proc_net *pn, size_t n)
-{
-	struct proc_task *t;
-	static int seq;
-	int i;
-	unsigned int *data;
-
-	t = pt_create(NULL, 0, STEPS, 0, seq++);
-	BUG_ON(!t);
-
-	BUG_ON(pt_add_step(t, OP_ADD, NULL));
-	BUG_ON(pt_add_step(t, OP_SUB, NULL));
-	BUG_ON(pt_add_step(t, OP_MUL, NULL));
-
-	data = kzalloc(sizeof(unsigned int) * n);
-	for (i = 0; i < n; i++)
-		data[i] = i;
-
-	pt_set_data(t, data, n * sizeof(unsigned int));
-	pt_set_nmemb(t, n);
-
-	pn_input_task(pn, t);
-}
-
-int demo(void *p __attribute__((unused)))
-{
-	struct proc_net *pn;
-
-	printk("DEMO STARTING\n");
-
-	pn = pn_create();
-	BUG_ON(!pn);
-
-	pn_prepare_nodes(pn);
-
-	pn_new_input_task(pn, 5);
-	pn_new_input_task(pn, 0);
-	pn_new_input_task(pn, 3);
-
-	pn_process_inputs(pn);
-
-	while (pn_process_next(pn));
-
-	pn_process_outputs(pn);
-
-	printk("DEMO COMPLETE\n");
-	return 0;
-}
-
-void demo_start(void)
-{
-	struct task_struct *t;
-
-	t = kthread_create(demo, NULL, KTHREAD_CPU_AFFINITY_NONE, "DEMO");
-	/* allocate 98% of the cpu */
-	kthread_set_sched_edf(t, 100*1000, 99*1000, 98*1000);
-	if (kthread_wake_up(t) < 0)
-		printk("---- IASW NOT SCHEDULABLE---\n");
-}
+
+static volatile double per_loop_avg[CONFIG_SMP_CPUS_MAX];
+
+static int copytask(void *data)
+{
+#define BUFLEN 1024*1024
+	int i;
+	int cpu;
+	int *go;
+
+	ktime cnt = 0;
+	ktime start, stop;
+	ktime total = 0;
+
+//	static uint32_t *common[CONFIG_SMP_CPUS_MAX];
+	static uint32_t *cpu_buf[CONFIG_SMP_CPUS_MAX];
+
+	go = (int *) data;
+
+	cpu = smp_cpu_id();
+
+	cpu_buf[smp_cpu_id()] = kmalloc(BUFLEN * sizeof(uint32_t));
+	if (!cpu_buf[cpu])
+		return 0;
+
+	(*go) = 1;	/* signal ready */
+
+	/* wait for trigger */
+	while (ioread32be(go) != CONFIG_SMP_CPUS_MAX);
+
+	while (ioread32be(go)) {
+		start = ktime_get();
+		for (i = 0 ; i < BUFLEN; i++) {
+			cpu_buf[cpu][i] = cpu_buf[CONFIG_SMP_CPUS_MAX - cpu - 1][i];
+		}
+		stop = ktime_get();
+
+		total += stop - start;
+		cnt++;
+
+		per_loop_avg[cpu] = ( ((double) total / (double) cnt) / (double) (BUFLEN));
+	}
+
+	return 0;
+}
+
+int copy_resprint(void *data)
+{
+	int i;
+	int *go;
+	ktime start;
+	double res[CONFIG_SMP_CPUS_MAX];
+
+	go = (int *) data;
+
+	/* wait for trigger */
+	while (ioread32be(go) != CONFIG_SMP_CPUS_MAX);
+
+	start = ktime_get();
+
+	/* run for about 360 seconds */
+	while (ktime_delta(ktime_get(), start) < ms_to_ktime(360 * 1000)) {
+		for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++)
+			res[i] = per_loop_avg[i];
+
+		printk("%g ", 0.001 * (double) ktime_to_ms(ktime_get()));
+
+		for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++)
+			printk("%g ", res[i]);
+
+		printk("\n");
+	}
+
+	(*go) = 0;	/* signal stop */
+
+	return 0;
+}
+
+int copybench_start(void)
+{
+	int i;
+	int go;
+	struct task_struct *t;
+
+	printk("COPYBENCH STARTING\n");
+	printk("Creating tasks, please stand by\n");
+
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++) {
+//	for (i = CONFIG_SMP_CPUS_MAX - 1; i >= 0; i--) {
+		go = 0;
+		t = kthread_create(copytask, &go, i, "COPYTASK");
+
+		if (!IS_ERR(t)) {
+			/* allocate 95% of the cpu, period = 1s */
+			kthread_set_sched_edf(t, 1000 * 1000, 980 * 1000, 950 * 1000);
+			if (kthread_wake_up(t) < 0) {
+				printk("---- %s NOT SCHEDUL-ABLE---\n", t->name);
+				BUG();
+			}
+			while (!ioread32be(&go)); /* wait for task to become ready */
+		} else {
+			printk("Got an error in kthread_create!");
+			break;
+		}
+		printk("Copy task ready on cpu %d\n", i);
+	}
+
+	printk("Creating RR cpu-hopping printout task\n");
+
+	t = kthread_create(copy_resprint, &go, KTHREAD_CPU_AFFINITY_NONE, "PRINTTASK");
+	if (kthread_wake_up(t) < 0) {
+		printk("---- %s NOT SCHEDUL-ABLE---\n", t->name);
+		BUG();
+	}
+
+	printk("Triggering...\n");
+	go = CONFIG_SMP_CPUS_MAX; /* set trigger */
+	sched_yield();
+
+	while (ioread32be(&go)); /* wait for completion */
+
+	printk("Average time to cross-copy buffers:\n");
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++) {
+		printk("\tCPU %d: %g ns per sample\n", i, per_loop_avg[i]);
+	}
+
+	printk("COPYBENCH DONE\n");
+
+	return 0;
+}
+
+int edftask(void *data)
+{
+	int i;
+	int loops = (* (int *) data);
+
+	for (i = 0; i < loops; i++);
+
+	return i;
+}
+
+int oneshotedf_start(void)
+{
+	int i;
+	int loops = 1700000000;
+	struct task_struct *t[CONFIG_SMP_CPUS_MAX];
+
+//	printk("EDF CREATE STARTING\n");
+//	printk("Creating tasks, please stand by\n");
+
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++) {
+		t[i] = kthread_create(edftask, &loops, i, "EDFTASK");
+		if (!IS_ERR(t[i])) {
+			/* create and launch edf thread */
+			kthread_set_sched_edf(t[i], 0, 100, 50);
+			if (kthread_wake_up(t[i]) < 0) {
+				printk("---- %s NOT SCHEDUL-ABLE---\n", t[i]->name);
+				BUG();
+			}
+		} else {
+			printk("Got an error in kthread_create!");
+			break;
+		}
+//		printk("Copy task ready on cpu %d\n", i);
+	}
+
+	sched_yield();
+
+	printk("%lld\n", ktime_to_ms(ktime_get()));
+
+	printk("wakeup_first, wakeup, exec_start, exec_stop, deadline (relative to creation):\n");
+	for (i = 0; i < CONFIG_SMP_CPUS_MAX; i++) {
+		printk("\tCPU %d: %lld %lld %lld %lld %lld\n",
+		       i,
+		       ktime_to_us(ktime_delta(t[i]->wakeup_first, t[i]->create)),
+		       ktime_to_us(ktime_delta(t[i]->wakeup, t[i]->create)),
+		       ktime_to_us(ktime_delta(t[i]->exec_start, t[i]->create)),
+		       ktime_to_us(ktime_delta(t[i]->exec_stop, t[i]->create)),
+		       ktime_to_us(ktime_delta(t[i]->deadline, t[i]->create)));
+	}
+
+	printk("COPYBENCH DONE\n");
+
+	return 0;
+}
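In the new demo.c above, per_loop_avg[cpu] ends up holding the average time per copied 32-bit element (the accumulated loop time divided by the iteration count and by BUFLEN), which copybench_start() reports as nanoseconds per sample. A hedged sketch of turning that figure into a throughput number; the helper name and the assumption that ktime deltas are nanoseconds are mine, not part of this commit:

	/* hypothetical: convert ns per 4-byte element into MiB/s
	 * (assumes ktime differences are nanoseconds, as the
	 * "ns per sample" printout suggests)
	 */
	static double copy_rate_mib_s(double ns_per_elem)
	{
		double bytes_per_sec = ((double) sizeof(uint32_t) * 1e9) / ns_per_elem;

		return bytes_per_sec / (1024.0 * 1024.0);
	}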
@@ -49,6 +49,11 @@
 /** XXX dummy **/
 extern int cpu_ready[CONFIG_SMP_CPUS_MAX];

+void demo_start(void);
+int copybench_start(void);
+int oneshotedf_start(void);
+
 /**
  * @brief kernel initialisation routines
  */
@@ -76,6 +81,8 @@ int kernel_main(void)
 	struct elf_module m __attribute__((unused));

+#ifdef CONFIG_EMBED_MODULES_IMAGE
 	printk(MSG "Loading module image\n");

 	/* load the embedded AR image */
@@ -106,6 +113,7 @@ int kernel_main(void)
 	modules_list_loaded();
 #endif
+#endif

 #ifdef CONFIG_MPPB
@@ -139,7 +147,7 @@ int kernel_main(void)
 	}

-	printk(MSG "Boot complete\n");
+	// printk(MSG "Boot complete\n");

 #ifdef CONFIG_EMBED_APPLICATION
 	/* dummy demonstrator */
@@ -155,6 +163,13 @@ int kernel_main(void)
 #endif
 #endif

+#if 0
+	copybench_start();
+#else
+	//demo_start();
+	oneshotedf_start();
+#endif
+
 	while(1)
 		cpu_relax();
...
@@ -51,14 +51,14 @@ static void kthread_unlock(void)

 void kthread_set_sched_edf(struct task_struct *task, unsigned long period_us,
-			   unsigned long wcet_us, unsigned long deadline_rel_us)
+			   unsigned long deadline_rel_us, unsigned long wcet_us)
 {
 	struct sched_attr attr;

 	sched_get_attr(task, &attr);

 	attr.policy = SCHED_EDF;
 	attr.period = us_to_ktime(period_us);
-	attr.deadline_rel = us_to_ktime(wcet_us);
-	attr.wcet = us_to_ktime(deadline_rel_us);
+	attr.deadline_rel = us_to_ktime(deadline_rel_us);
+	attr.wcet = us_to_ktime(wcet_us);

 	sched_set_attr(task, &attr);
 }
@@ -70,6 +70,7 @@ void kthread_set_sched_edf(struct task_struct *task, unsigned long period_us,
 void kthread_free(struct task_struct *task)
 {
+	return;
 	if (task->flags & TASK_NO_CLEAN) /* delete from list as well */
 		return;
@@ -107,7 +108,9 @@ int kthread_wake_up(struct task_struct *task)
 	kthread_lock();

 	now = ktime_get();

-	sched_wake(task, ktime_get());
+	sched_wake(task, now);
+	task->wakeup_first = now;

 	/* this may be a critical task, send reschedule */
 	if (task->on_cpu != KTHREAD_CPU_AFFINITY_NONE)
@@ -213,6 +216,7 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
 		return NULL;
 	}

+	task->create = ktime_get();
 	task->total = 0;
 	task->slices = 0;
 	task->on_cpu = cpu;
...
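The kthread_set_sched_edf() hunk above is the functional core of this commit: previously the deadline_rel attribute was filled from wcet_us and the wcet attribute from deadline_rel_us. With the corrected signature, callers pass period, relative deadline, then WCET. A minimal sketch of a call under the new argument order, using the same values copybench_start() uses in demo.c (1 s period, 980 ms deadline, 950 ms WCET, i.e. roughly a 95% CPU reservation):

	/* EDF reservation: period = 1 s, relative deadline = 980 ms, WCET = 950 ms */
	kthread_set_sched_edf(task, 1000 * 1000, 980 * 1000, 950 * 1000);

	if (kthread_wake_up(task) < 0)
		printk("task not schedulable\n");	/* admission failed */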
@@ -41,6 +41,7 @@ static void sched_update_runtime(struct task_struct *task, ktime now)
 	rt = ktime_sub(now, task->exec_start);

+	task->exec_stop = now;
 	task->runtime = ktime_sub(task->runtime, rt);
 	task->total = ktime_add(task->total, rt);
@@ -348,7 +349,7 @@ int sched_get_attr(struct task_struct *task, struct sched_attr *attr)
 int sched_set_policy_default(struct task_struct *task)
 {
 	struct sched_attr attr = {.policy = SCHED_RR,
-				  .priority = 1};
+				  .priority = 100};

 	return sched_set_attr(task, &attr);
...