diff --git a/arch/sparc/Kbuild b/arch/sparc/Kbuild
index 082d329d32457042a5519ea71ba36ef3b1b579b6..133d36d17fca861eba256bfcbdedcc206ffd9cb9 100644
--- a/arch/sparc/Kbuild
+++ b/arch/sparc/Kbuild
@@ -1,2 +1,3 @@
 obj-y += kernel/
 obj-y += mm/
+obj-y += drv/
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index edbf3af85f57339a31238d9fb5be7ed104084b57..17585ee0d9f72f14b6bf7b53d3a4b5bca528af8c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -40,6 +40,13 @@ config PAGE_OFFSET
 	  addresses. This can not work unless the kernel is bootstrapped. If
 	  unsure, say N.
 
+config CPU_CLOCK_FREQ
+	int "Set the CPU clock frequency (Hz)"
+	default 80000000
+	help
+	  LEON CPUs (at least) provide no mechanism to detect the system
+	  clock frequency at run time, so it must be configured here. If
+	  this value does not match the actual clock, all timing will be off.
+
 config EXTRA_SPARC_PHYS_BANKS
 	int "Number of extra physical memory banks"
 	default 0
diff --git a/arch/sparc/drv/Makefile b/arch/sparc/drv/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..ef73f273abd175153959cb6f0828f93cfd65a220
--- /dev/null
+++ b/arch/sparc/drv/Makefile
@@ -0,0 +1,5 @@
+
+CHECKFLAGS += -D__sparc__
+
+obj-y += grtimer.o
+obj-y += grtimer_longcount.o
diff --git a/arch/sparc/drv/grtimer.c b/arch/sparc/drv/grtimer.c
new file mode 100644
index 0000000000000000000000000000000000000000..84fbfeffe882f7a8d381cdc5ea315a2dce0a5eb8
--- /dev/null
+++ b/arch/sparc/drv/grtimer.c
@@ -0,0 +1,387 @@
+/**
+ * @file    arch/sparc/drv/grtimer.c
+ * @ingroup time
+ * @author  Armin Luntzer (armin.luntzer@univie.ac.at),
+ * @date    July, 2016
+ *
+ * @copyright GPLv2
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * @brief Implements access to the LEON3 General Purpose Timer Unit with
+ *	  Time Latch Capability
+ *
+ * @see GR712RC user manual chapter 12
+ */
+
+
+#include <asm/io.h>
+#include <grtimer.h>
+
+
+/**
+ * @brief set the scaler reload value of the timer block
+ * @param rtu a struct grtimer_unit
+ * @param value the scaler reload value
+ */
+
+void grtimer_set_scaler_reload(struct grtimer_unit *rtu, uint32_t value)
+{
+	iowrite32be(value, &rtu->scaler_reload);
+}
+
+
+/**
+ * @brief get the scaler reload value of the timer block
+ * @param rtu a struct grtimer_unit
+ */
+
+uint32_t grtimer_get_scaler_reload(struct grtimer_unit *rtu)
+{
+	return ioread32be(&rtu->scaler_reload);
+}
+
+
+/**
+ * @brief sets the interrupt enabled flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_set_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t flags;
+
+	flags = ioread32be(&rtu->timer[timer].ctrl);
+	flags |= LEON3_TIMER_IE;
+
+	iowrite32be(flags, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief clears the interrupt enabled flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_clear_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t flags;
+
+	flags = ioread32be(&rtu->timer[timer].ctrl);
+	flags &= ~LEON3_TIMER_IE;
+
+	iowrite32be(flags, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief sets the load flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_set_load(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t flags;
+
+	flags = ioread32be(&rtu->timer[timer].ctrl);
+	flags |= LEON3_TIMER_LD;
+
+	iowrite32be(flags, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief clears the load flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_clear_load(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t flags;
+
+	flags = ioread32be(&rtu->timer[timer].ctrl);
+	flags &= ~LEON3_TIMER_LD;
+
+	iowrite32be(flags, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief set the enable flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_set_enabled(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl |= LEON3_TIMER_EN;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief clear the enable flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_clear_enabled(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl &= ~LEON3_TIMER_EN;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief set the restart flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_set_restart(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl |= LEON3_TIMER_RS;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief clear the restart flag of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_clear_restart(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl &= ~LEON3_TIMER_RS;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief set a timer to chain to the preceding timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_set_chained(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl |= LEON3_TIMER_CH;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief clear the chaining of a timer to the preceding timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_clear_chained(struct grtimer_unit *rtu, uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl &= ~LEON3_TIMER_CH;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief get the interrupt pending status of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+uint32_t grtimer_get_interrupt_pending_status(struct grtimer_unit *rtu,
+					      uint32_t timer)
+{
+	return ioread32be(&rtu->timer[timer].ctrl) & LEON3_TIMER_IP;
+}
+
+
+/**
+ * @brief clear the interrupt pending status of a timer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+void grtimer_clear_interrupt_pending_status(struct grtimer_unit *rtu,
+					    uint32_t timer)
+{
+	uint32_t ctrl;
+
+	ctrl = ioread32be(&rtu->timer[timer].ctrl);
+	ctrl &= ~LEON3_TIMER_IP;
+
+	iowrite32be(ctrl, &rtu->timer[timer].ctrl);
+}
+
+
+/**
+ * @brief get the number of implemented general purpose timers
+ * @param rtu a struct grtimer_unit
+ */
+
+uint32_t grtimer_get_num_implemented(struct grtimer_unit *rtu)
+{
+	return ioread32be(&rtu->config) & LEON3_CFG_TIMERS_MASK;
+}
+
+
+/**
+ * @brief get the interrupt ID of the first implemented timer
+ * @param rtu a struct grtimer_unit
+ */
+
+uint32_t grtimer_get_first_timer_irq_id(struct grtimer_unit *rtu)
+{
+	return (ioread32be(&rtu->config) & LEON3_CFG_IRQNUM_MASK) >>
+		LEON3_CFG_IRQNUM_SHIFT;
+}
+
+
+/**
+ * @brief set the value of a grtimer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ * @param value the timer counter value to set
+ */
+
+void grtimer_set_value(struct grtimer_unit *rtu, uint32_t timer, uint32_t value)
+{
+	iowrite32be(value, &rtu->timer[timer].value);
+}
+
+/**
+ * @brief get the value of a grtimer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+uint32_t grtimer_get_value(struct grtimer_unit *rtu, uint32_t timer)
+{
+	return ioread32be(&rtu->timer[timer].value);
+}
+
+
+/**
+ * @brief set the reload of a grtimer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ * @param reload the timer counter reload to set
+ */
+
+void grtimer_set_reload(struct grtimer_unit *rtu,
+			uint32_t timer,
+			uint32_t reload)
+{
+	iowrite32be(reload, &rtu->timer[timer].reload);
+}
+
+/**
+ * @brief get the reload of a grtimer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+uint32_t grtimer_get_reload(struct grtimer_unit *rtu, uint32_t timer)
+{
+	return ioread32be(&rtu->timer[timer].reload);
+}
+
+/**
+ * @brief set an irq to trigger a latch
+ * @param rtu a struct grtimer_unit
+ * @param irq the irq number to latch on
+ */
+
+void grtimer_set_latch_irq(struct grtimer_unit *rtu, uint32_t irq)
+{
+	uint32_t irq_select;
+
+	irq_select = ioread32be(&rtu->irq_select);
+	irq_select |= (1 << irq);
+
+	iowrite32be(irq_select, &rtu->irq_select);
+}
+
+
+/**
+ * @brief clear an irq triggering a latch
+ * @param rtu a struct grtimer_unit
+ * @param irq the irq number to disable latching for
+ */
+
+void grtimer_clear_latch_irq(struct grtimer_unit *rtu, uint32_t irq)
+{
+	uint32_t irq_select;
+
+	irq_select = ioread32be(&rtu->irq_select);
+	irq_select &= ~(1 << irq);
+
+	iowrite32be(irq_select, &rtu->irq_select);
+}
+
+
+/**
+ * @brief set the timer's latch bit
+ * @param rtu a struct grtimer_unit
+ */
+
+void grtimer_enable_latch(struct grtimer_unit *rtu)
+{
+	uint32_t config;
+
+	config = ioread32be(&rtu->config);
+	config |= LEON3_GRTIMER_CFG_LATCH;
+
+	iowrite32be(config, &rtu->config);
+}
+
+/**
+ * @brief get the latch value of a grtimer
+ * @param rtu a struct grtimer_unit
+ * @param timer the selected timer
+ */
+
+uint32_t grtimer_get_latch_value(struct grtimer_unit *rtu, uint32_t timer)
+{
+	return ioread32be(&rtu->timer[timer].latch_value);
+}
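+
+
+/* Usage sketch (illustrative only, not part of the driver): configure
+ * timer 0 as a free-running 1 ms tick with interrupt. This assumes the
+ * unit is mapped at LEON3_BASE_ADDRESS_GRTIMER and that
+ * GRTIMER_TICKS_PER_MSEC (see asm/time.h) matches the configured scaler:
+ *
+ *	struct grtimer_unit *rtu =
+ *		(struct grtimer_unit *) LEON3_BASE_ADDRESS_GRTIMER;
+ *
+ *	grtimer_set_reload(rtu, 0, GRTIMER_TICKS_PER_MSEC);
+ *	grtimer_set_load(rtu, 0);
+ *	grtimer_set_restart(rtu, 0);
+ *	grtimer_set_interrupt_enabled(rtu, 0);
+ *	grtimer_set_enabled(rtu, 0);
+ */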
diff --git a/arch/sparc/drv/grtimer_longcount.c b/arch/sparc/drv/grtimer_longcount.c
new file mode 100644
index 0000000000000000000000000000000000000000..fc01c73f60bbec7b831da32712657b318cb9356c
--- /dev/null
+++ b/arch/sparc/drv/grtimer_longcount.c
@@ -0,0 +1,203 @@
+/**
+ * @file    arch/sparc/drv/grtimer_longcount.c
+ * @ingroup timing
+ * @author  Armin Luntzer (armin.luntzer@univie.ac.at),
+ * @date    July, 2016
+ *
+ * @copyright GPLv2
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * @brief implements a long-counting (uptime) clock using the LEON3 GRTIMER
+ *
+ */
+
+
+#include <asm/io.h>
+#include <time.h>
+#include <grtimer.h>
+#include <grtimer_longcount.h>
+
+
+
+
+/**
+ * @brief enable the long count timer
+ * @param rtu a struct grtimer_unit
+ * @param scaler_reload the scaler reload value
+ * @param fine_ticks_per_sec the reload of the fine timer, in ticks per second
+ * @param coarse_ticks_max the maximum number of seconds the coarse timer
+ *	  counts down from
+ *
+ * If properly configured, grtimer[0] will hold fractions of a second and
+ * grtimer[1] will be in seconds, counting down from coarse_ticks_max
+ *
+ * @return -1 if fine_ticks_per_sec is not an integer multiple of
+ *	   scaler_reload, 0 otherwise
+ *
+ * @note the return value only warns about a configuration error (the clock
+ *	 will drift); the settings are applied regardless
+ */
+
+
+int32_t grtimer_longcount_start(struct grtimer_unit *rtu,
+				uint32_t scaler_reload,
+				uint32_t fine_ticks_per_sec,
+				uint32_t coarse_ticks_max)
+{
+	grtimer_set_scaler_reload(rtu, scaler_reload);
+	grtimer_set_reload(rtu, 0, fine_ticks_per_sec);
+	grtimer_set_reload(rtu, 1, coarse_ticks_max);
+
+	grtimer_set_load(rtu, 0);
+	grtimer_set_load(rtu, 1);
+
+	grtimer_set_restart(rtu, 0);
+	grtimer_set_restart(rtu, 1);
+
+	grtimer_set_chained(rtu, 1);
+
+	grtimer_set_enabled(rtu, 0);
+	grtimer_set_enabled(rtu, 1);
+
+	grtimer_enable_latch(rtu);
+
+	/* not an integer multiple, clock will drift */
+	if (fine_ticks_per_sec % scaler_reload)
+		return -1;
+
+	return 0;
+}
+
+
+/**
+ * @brief get the time since the long counting grtimer was started
+ * @param rtu a struct grtimer_unit
+ * @param up a struct grtimer_uptime
+ * @note if configured properly, fine will be in cpu cycles and coarse will
+ *	 be in seconds
+ *
+ * XXX: sucks, need latching
+ */
+
+void grtimer_longcount_get_uptime(struct grtimer_unit *rtu,
+				  struct grtimer_uptime *up)
+{
+	uint32_t sc;
+	uint32_t t0, t0a, t0b, t0c;
+	uint32_t t1, t1a, t1b, t1c;
+	uint32_t r0;
+	uint32_t r1;
+
+
+	sc = ioread32be(&rtu->scaler_reload);
+
+	/* sample the down-counters multiple times, so we can detect whether
+	 * the fine counter wrapped between the reads of a pair
+	 */
+	t0a = ioread32be(&rtu->timer[0].value);
+	t1a = ioread32be(&rtu->timer[1].value);
+
+	t0b = ioread32be(&rtu->timer[0].value);
+	t1b = ioread32be(&rtu->timer[1].value);
+
+	t0c = ioread32be(&rtu->timer[0].value);
+	t1c = ioread32be(&rtu->timer[1].value);
+
+	/* no wrap between the first two samples: use the first pair */
+	if ((t0a >= t0b) && (t1a >= t1b)) {
+		t0 = t0a;
+		t1 = t1a;
+	} else {
+		t0 = t0c;
+		t1 = t1c;
+	}
+
+	r0 = ioread32be(&rtu->timer[0].reload);
+	r1 = ioread32be(&rtu->timer[1].reload);
+
+	up->fine   = (r0 - t0) * (sc + 1);
+	up->coarse = (r1 - t1);
+}
+
+
+/**
+ * @brief
+ *	get the number of seconds elapsed between timestamps taken from the
+ *	longcount timer
+ *
+ * @param rtu a struct grtimer_unit
+ * @param time1 a struct grtimer_uptime
+ * @param time0 a struct grtimer_uptime
+ *
+ * @return time difference in seconds represented as double
+ */
+
+double grtimer_longcount_difftime(struct grtimer_unit *rtu,
+				  struct grtimer_uptime time1,
+				  struct grtimer_uptime time0)
+{
+	uint32_t sc;
+	uint32_t rl;
+	double cpu_freq;
+
+	double t0;
+	double t1;
+
+
+	sc = grtimer_get_scaler_reload(rtu);
+	rl = grtimer_get_reload(rtu, 0);
+
+	/* "fine" is in cpu cycles: (scaler + 1) * fine ticks per second */
+	cpu_freq = (double) (sc + 1) * rl;
+
+	t0 = (double) time0.coarse + (double) time0.fine / cpu_freq;
+	t1 = (double) time1.coarse + (double) time1.fine / cpu_freq;
+
+	return t1 - t0;
+}
+
+
+
+
+/**
+ * @brief get the time since the last latch occurred in cpu cycles
+ * @param rtu a struct grtimer_unit
+ * @note does not compensate for function overhead
+ */
+
+uint32_t grtimer_longcount_get_latch_time_diff(struct grtimer_unit *rtu)
+{
+	uint32_t t0;
+	uint32_t t1;
+
+	uint32_t t0_latch;
+	uint32_t t1_latch;
+
+	uint32_t t0_reload;
+	uint32_t t0_scaler;
+
+	uint32_t diff;
+
+
+
+	t0_latch  = grtimer_get_latch_value(rtu, 0);
+	t1_latch  = grtimer_get_latch_value(rtu, 1);
+	t0_reload = grtimer_get_reload(rtu, 0);
+	t0_scaler = grtimer_get_scaler_reload(rtu);
+
+	t0 = grtimer_get_value(rtu, 0);
+	t1 = grtimer_get_value(rtu, 1);
+
+	diff = (t1_latch - t1) * t0_reload * (t0_scaler + 1);
+
+	if (t0 < t0_latch)
+		diff += (t0_latch - t0);
+	else
+		diff += (t0_reload - t0);
+
+
+	return diff;
+}
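+
+
+/* Usage sketch (illustrative only; mirrors what arch/sparc/kernel/time.c
+ * does in this patch): start the long counter and measure the duration of
+ * a code section. LEON3_BASE_ADDRESS_GRTIMER is assumed to be the mapped
+ * base address of the GRTIMER unit:
+ *
+ *	struct grtimer_unit *rtu =
+ *		(struct grtimer_unit *) LEON3_BASE_ADDRESS_GRTIMER;
+ *	struct grtimer_uptime t0, t1;
+ *
+ *	grtimer_longcount_start(rtu, GRTIMER_RELOAD,
+ *				GRTIMER_TICKS_PER_SEC, 0xFFFFFFFFUL);
+ *
+ *	grtimer_longcount_get_uptime(rtu, &t0);
+ *	(code section to be measured)
+ *	grtimer_longcount_get_uptime(rtu, &t1);
+ *
+ *	printk("elapsed: %g s\n",
+ *	       grtimer_longcount_difftime(rtu, t1, t0));
+ */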
diff --git a/arch/sparc/include/asm/irqflags.h b/arch/sparc/include/asm/irqflags.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a2ae78a177e9feae29b592f81958090447b7492
--- /dev/null
+++ b/arch/sparc/include/asm/irqflags.h
@@ -0,0 +1,31 @@
+/**
+ * @file sparc/include/asm/irqflags.h
+ *
+ * @copyright GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ARCH_SPARC_ASM_IRQFLAGS_H_
+#define _ARCH_SPARC_ASM_IRQFLAGS_H_
+
+void arch_local_irq_enable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+
+
+static inline void arch_local_irq_disable(void)
+{
+	arch_local_irq_save();
+}
+
+
+#endif /* _ARCH_SPARC_ASM_IRQFLAGS_H_ */
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index f127144941f8a955d1444463c5bb888d074338f6..66d95c741917de4ec824fad9cc0554500f33ae69 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -201,6 +201,64 @@ static inline void leon_reg_win_flush(void)
 	__asm__ __volatile__("ta 3");
 }
 
+__attribute__((unused))
+static inline unsigned int get_wim(void)
+{
+	unsigned int wim;
+	__asm__ __volatile__(
+		"rd %%wim, %0\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		: "=r" (wim)
+		: /* no inputs */
+		: "memory");
+
+	return wim;
+}
+
+__attribute__((unused))
+static inline void put_wim(unsigned int new_wim)
+{
+	__asm__ __volatile__(
+		"wr %0, 0x0, %%wim\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		: /* no outputs */
+		: "r" (new_wim)
+		: "memory", "cc");
+}
+
+__attribute__((unused))
+static inline unsigned int get_psr(void)
+{
+	unsigned int psr;
+	__asm__ __volatile__(
+		"rd %%psr, %0\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		: "=r" (psr)
+		: /* no inputs */
+		: "memory");
+
+	return psr;
+}
+
+__attribute__((unused))
+static inline void put_psr(unsigned int new_psr)
+{
+	__asm__ __volatile__(
+		"wr %0, 0x0, %%psr\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		: /* no outputs */
+		: "r" (new_psr)
+		: "memory", "cc");
+}
+
diff --git a/arch/sparc/include/asm/switch_to.h b/arch/sparc/include/asm/switch_to.h
new file mode 100644
index 0000000000000000000000000000000000000000..8058fac6e4e377d719ef3f716c1ec845e2723fca
--- /dev/null
+++ b/arch/sparc/include/asm/switch_to.h
@@ -0,0 +1,205 @@
+/**
+ * @file sparc/include/asm/switch_to.h
+ *
+ * @copyright GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * When implementing the actual task switching segment, I came up with
+ * essentially the same thing that David S. Miller did in the SPARC port's
+ * switch_to() macro in the Linux kernel, just less tuned, so I adapted his
+ * code.
+ * Hence, I decided to follow a scheme similar to Linux for the thread
+ * switching, which may make it easier to port to a new architecture in the
+ * future, as the corresponding macros can be adapted from the Linux source
+ * tree. No need to reinvent the wheel.
+ *
+ *
+ * TODO: FPU (lazy) switching
+ * TODO: CPU id (for SMP)
+ */
+
+
+#ifndef _ARCH_SPARC_ASM_SWITCH_TO_H_
+#define _ARCH_SPARC_ASM_SWITCH_TO_H_
+
+#include <kernel/kthread.h>
+#include <asm/ttable.h>
+
+
+
+#define prepare_arch_switch(next) do { \
+__asm__ __volatile__( \
+"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
+"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
+"save %sp, -0x40, %sp\n\t" \
+"restore; restore; restore; restore; restore; restore; restore"); \
+} while(0);
+
+
+
+/* NOTE: we don't actually require the PSR_ET toggle, but if we have
+ * unaligned accesses (or access traps), it is a really good idea, or
+ * we'll die...
+ */
+
+/* NOTE: this assumes we have a mixed kernel/user mapping in the MMU (if
+ * we are using it), otherwise we might not be able to load the
+ * thread's data. Oh, and we'll have to do a switch user->kernel->new
+ * user OR we'll run into the same issue with different user contexts */
+
+/* curptr is %g6! */
+
+
+/* so, here's what's happening
+ *
+ *
+ * 1+2: store a new program counter (~ return address) just after the actual
+ *	switch block, so the old thread will hop over the actual switching
+ *	section when it is re-scheduled.
+ *
+ * NOTE: in the SPARC ABI the return address to the caller in %i7 is actually
+ *	 (return address - 0x8), and the %o[] regs become the %i[] regs on a
+ *	 save instruction, so we actually have to store the reference address
+ *	 to the jump accordingly
+ *
+ * 3: store the current thread's %psr to %g4
+ *
+ * 4: double-store the stack pointer (%o6) and the "skip" PC in %o7;
+ *    note that this requires double-word alignment of the struct thread_info
+ *    members KSP and KPC
+ *
+ * 5: store the current thread's %wim to %g5
+ *
+ * 6-7: toggle the enable traps bit in the %psr (should be off at this point!)
+ *	and wait 3 cycles for the bits to settle
+ *
+ * 8: double-store the PSR in %g4 and the WIM in %g5;
+ *    note that this requires double-word alignment of the struct thread_info
+ *    members KPSR and KWIM
+ *
+ * 9: double-load KPSR + KWIM into %g4, %g5 from the new thread info
+ *
+ * NOTE: A double load takes 2 cycles, +1 extra if the subsequent instruction
+ *	 depends on the result of the load, that's why we don't set %g6 first
+ *	 and use it to load steps 10+11 from there
+ *
+ * 10: set the new thread info to "curptr" (%g6) of this CPU
+ *
+ * 11: set the new thread info to the global "current" set of this CPU
+ *
+ * 12: set the new thread's PSR and toggle the ET bit (should be off)
+ *
+ * 13: wait for the bits to settle, so the CPU puts us into the proper window
+ *     before we can continue
+ *
+ * 14: double-load KSP and KPC to %sp (%o6) and the "skip" PC in %o7
+ *
+ * 15: set the new thread's WIM
+ *
+ * 16: restore %l0 and %l1 from the memory stack, rtrap.S expects these to be
+ *     l0 == t_psr, l1 == t_pc
+ *
+ * 17: restore the frame pointer %fp (%i6) and the return address in %i7
+ *
+ * 18: restore the new thread's PSR
+ *
+ * NOTE: we don't have to wait there, as long as we don't return immediately
+ *	 following the macro
+ *
+ * 19: jump to the actual address of the label
+ *
+ *
+ *
+ * The corresponding (approximate) C code:
+ *
+ *	register struct sparc_stackf *sp asm("sp");
+ *	register unsigned long calladdr asm("o7");
+ *	register struct thread_info *th asm("g6");
+ *	register unsigned long t_psr asm("l0");
+ *	register unsigned long t_pc asm("l1");
+ *	register unsigned long fp asm("fp");
+ *	register unsigned long ret asm("i7");
+ *
+ *
+ *	th->kpc = (unsigned long) &&here - 0x8;
+ *	th->kpsr = get_psr();
+ *	th->ksp = (unsigned long) sp;
+ *	th->kwim = get_wim();
+ *
+ *	put_psr(th->kpsr^0x20);
+ *
+ *	th = &next->thread_info;
+ *	current_set[0] = th;
+ *
+ *	put_psr(th->kpsr^0x20);
+ *	put_wim(th->kwim);
+ *
+ *	calladdr = th->kpc;
+ *	sp = (struct sparc_stackf *) th->ksp;
+ *
+ *	t_psr = sp->locals[0];
+ *	t_pc  = sp->locals[1];
+ *
+ *	fp  = (unsigned long) sp->fp;
+ *	ret = sp->callers_pc;
+ *
+ *	put_psr(th->kpsr);
+ *
+ *	__asm__ __volatile__(
+ *		"jmpl %%o7 + 0x8, %%g0\n\t"
+ *		"nop\n\t"
+ *		::: "%o7", "memory");
+ * here:
+ *	(void) 0;
+ */
+
+
+#define switch_to(next) do { \
+	__asm__ __volatile__( \
+	"sethi	%%hi(here - 0x8), %%o7\n\t" \
+	"or	%%o7, %%lo(here - 0x8), %%o7\n\t" \
+	"rd	%%psr, %%g4\n\t" \
+	"std	%%sp, [%%g6 + %2]\n\t" \
+	"rd	%%wim, %%g5\n\t" \
+	"wr	%%g4, 0x20, %%psr\n\t" \
+	"nop; nop; nop\n\t" \
+	"std	%%g4, [%%g6 + %4]\n\t" \
+	"ldd	[%1 + %4], %%g4\n\t" \
+	"mov	%1, %%g6\n\t" \
+	"st	%1, [%0]\n\t" \
+	"wr	%%g4, 0x20, %%psr\n\t" \
+	"nop; nop; nop\n\t" \
+	"ldd	[%%g6 + %2], %%sp\n\t" \
+	"wr	%%g5, 0x0, %%wim\n\t" \
+	"ldd	[%%sp + 0x00], %%l0\n\t" \
+	"ldd	[%%sp + 0x38], %%i6\n\t" \
+	"wr	%%g4, 0x0, %%psr\n\t" \
+	"jmpl	%%o7 + 0x8, %%g0\n\t" \
+	" nop\n\t" \
+	"here:\n\t" \
+	: \
+	: "r" (&(current_set[0])), \
+	  "r" (&(next->thread_info)), \
+	  "i" (TI_KSP), \
+	  "i" (TI_KPC), \
+	  "i" (TI_KPSR) \
+	: "g1", "g2", "g3", "g4", "g5", "g7", \
+	  "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
+	  "i0", "i1", "i2", "i3", "i4", "i5", \
+	  "o0", "o1", "o2", "o3", "o7"); \
+} while(0);
+
+
+
+
+#endif /* _ARCH_SPARC_ASM_SWITCH_TO_H_ */
+
diff --git a/arch/sparc/include/asm/time.h b/arch/sparc/include/asm/time.h
new file mode 100644
index 0000000000000000000000000000000000000000..316aaa172149f1c5fedfc277395b0d4b9859cda7
--- /dev/null
+++ b/arch/sparc/include/asm/time.h
@@ -0,0 +1,66 @@
+/**
+ * @file arch/sparc/include/asm/time.h
+ */
+
+#ifndef _SPARC_TIME_H_
+#define _SPARC_TIME_H_
+
+
+#include <kernel/kernel.h>
+
+
+#define SPARC_CPU_CPS	0
+
+#ifdef CONFIG_CPU_CLOCK_FREQ
+#undef SPARC_CPU_CPS
+#define SPARC_CPU_CPS CONFIG_CPU_CLOCK_FREQ
+#endif /* CONFIG_CPU_CLOCK_FREQ */
+
+#if !SPARC_CPU_CPS
+#error CPU clock frequency not configured and no detection method available.
+#endif
+
+
+#define GPTIMER_RELOAD		4
+#define GRTIMER_RELOAD		4	/* a reload of 4 divides by 5 (rather
+					 * than the 3 cycle minimum) for a
+					 * round number of clock ticks
+					 */
+
+#define GPTIMER_TICKS_PER_SEC	((SPARC_CPU_CPS / (GPTIMER_RELOAD + 1)))
+#define GPTIMER_TICKS_PER_MSEC	(GPTIMER_TICKS_PER_SEC / 1000)
+#define GPTIMER_TICKS_PER_USEC	(GPTIMER_TICKS_PER_SEC / 1000000)
+#define GPTIMER_USEC_PER_TICK	(1000000.0 / GPTIMER_TICKS_PER_SEC)
+
+#define GRTIMER_TICKS_PER_SEC	((SPARC_CPU_CPS / (GRTIMER_RELOAD + 1)))
+#define GRTIMER_TICKS_PER_MSEC	(GRTIMER_TICKS_PER_SEC / 1000)
+#define GRTIMER_TICKS_PER_USEC	(GRTIMER_TICKS_PER_SEC / 1000000)
+#define GRTIMER_USEC_PER_TICK	(1000000.0 / GRTIMER_TICKS_PER_SEC)
+
+#define GPTIMER_CYCLES_PER_SEC	SPARC_CPU_CPS
+#define GPTIMER_CYCLES_PER_MSEC	(GPTIMER_CYCLES_PER_SEC / 1000)
+#define GPTIMER_CYCLES_PER_USEC	(GPTIMER_CYCLES_PER_SEC / 1000000)
+#define GPTIMER_USEC_PER_CYCLE	(1000000.0 / GPTIMER_CYCLES_PER_SEC)
+
+#define GRTIMER_CYCLES_PER_SEC	SPARC_CPU_CPS
+#define GRTIMER_CYCLES_PER_MSEC	(GRTIMER_CYCLES_PER_SEC / 1000)
+#define GRTIMER_CYCLES_PER_USEC	(GRTIMER_CYCLES_PER_SEC / 1000000)
+#define GRTIMER_CYCLES_PER_NSEC	(GRTIMER_CYCLES_PER_SEC / 1000000000)
+#define GRTIMER_SEC_PER_CYCLE	(      1.0 / GRTIMER_CYCLES_PER_SEC)
+#define GRTIMER_MSEC_PER_CYCLE	(   1000.0 / GRTIMER_CYCLES_PER_SEC)
+#define GRTIMER_USEC_PER_CYCLE	(1000000.0 / GRTIMER_CYCLES_PER_SEC)
+
+
+/* this will definitely break if we run at GHz clock speeds;
+ * note that the order is important, otherwise we may encounter integer
+ * overflow on multiplication
+ */
+#define CPU_CYCLES_TO_NS(x) (((x) / (SPARC_CPU_CPS / 1000000UL)) * 1000UL)
+compile_time_assert((SPARC_CPU_CPS <= 1000000000UL),
+		    CPU_CYCLES_TO_NS_NEEDS_FIXUP);
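+
+/* Worked example (illustrative): with SPARC_CPU_CPS = 80000000 (80 MHz),
+ * CPU_CYCLES_TO_NS(x) evaluates to (x / 80) * 1000, so a full second of
+ * cycles maps to 1000000000 ns exactly, while anything below 80 cycles
+ * (1 us) truncates to 0 due to the integer division. The effective
+ * resolution of this macro is therefore 1 us, expressed in ns.
+ */
+
+
+
+
+void leon_uptime_init(void);
+
+#endif /* _SPARC_TIME_H_ */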
diff --git a/arch/sparc/include/grtimer.h b/arch/sparc/include/grtimer.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ee6fdef0da748b2f99ae9d5786ec585981b5316
--- /dev/null
+++ b/arch/sparc/include/grtimer.h
@@ -0,0 +1,84 @@
+/**
+ * @file    arch/sparc/include/grtimer.h
+ * @ingroup time
+ * @author  Armin Luntzer (armin.luntzer@univie.ac.at),
+ * @date    July, 2016
+ *
+ * @copyright GPLv2
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _SPARC_GRTIMER_H_
+#define _SPARC_GRTIMER_H_
+
+#include <asm/leon_reg.h>
+
+
+#define LEON3_GRTIMER_CFG_LATCH	0x800
+
+#define LEON3_TIMER_EN	0x00000001	/* enable counting */
+#define LEON3_TIMER_RS	0x00000002	/* restart from timer reload value */
+#define LEON3_TIMER_LD	0x00000004	/* load counter */
+#define LEON3_TIMER_IE	0x00000008	/* irq enable */
+#define LEON3_TIMER_IP	0x00000010	/* irq pending (cleared by writing 0) */
+#define LEON3_TIMER_CH	0x00000020	/* chain with preceding timer */
+
+#define LEON3_CFG_TIMERS_MASK	0x00000007
+#define LEON3_CFG_IRQNUM_MASK	0x000000f8
+#define LEON3_CFG_IRQNUM_SHIFT	0x3
+
+
+
+void grtimer_set_scaler_reload(struct grtimer_unit *rtu, uint32_t value);
+uint32_t grtimer_get_scaler_reload(struct grtimer_unit *rtu);
+
+void grtimer_set_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer);
+void grtimer_clear_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer);
+
+void grtimer_set_load(struct grtimer_unit *rtu, uint32_t timer);
+void grtimer_clear_load(struct grtimer_unit *rtu, uint32_t timer);
+
+void grtimer_set_enabled(struct grtimer_unit *rtu, uint32_t timer);
+void grtimer_clear_enabled(struct grtimer_unit *rtu, uint32_t timer);
+
+void grtimer_set_restart(struct grtimer_unit *rtu, uint32_t timer);
+void grtimer_clear_restart(struct grtimer_unit *rtu, uint32_t timer);
+
+void grtimer_set_chained(struct grtimer_unit *rtu, uint32_t timer);
+void grtimer_clear_chained(struct grtimer_unit *rtu, uint32_t timer);
+
+uint32_t grtimer_get_interrupt_pending_status(struct grtimer_unit *rtu,
+					      uint32_t timer);
+void grtimer_clear_interrupt_pending_status(struct grtimer_unit *rtu,
+					    uint32_t timer);
+
+uint32_t grtimer_get_num_implemented(struct grtimer_unit *rtu);
+
+uint32_t grtimer_get_first_timer_irq_id(struct grtimer_unit *rtu);
+
+void grtimer_set_value(struct grtimer_unit *rtu,
+		       uint32_t timer,
+		       uint32_t value);
+uint32_t grtimer_get_value(struct grtimer_unit *rtu, uint32_t timer);
+
+
+void grtimer_set_reload(struct grtimer_unit *rtu,
+			uint32_t timer,
+			uint32_t reload);
+uint32_t grtimer_get_reload(struct grtimer_unit *rtu, uint32_t timer);
+
+void grtimer_set_latch_irq(struct grtimer_unit *rtu, uint32_t irq);
+void grtimer_clear_latch_irq(struct grtimer_unit *rtu, uint32_t irq);
+void grtimer_enable_latch(struct grtimer_unit *rtu);
+
+uint32_t grtimer_get_latch_value(struct grtimer_unit *rtu, uint32_t timer);
+
+#endif /* _SPARC_GRTIMER_H_ */
diff --git a/arch/sparc/include/grtimer_longcount.h b/arch/sparc/include/grtimer_longcount.h
new file mode 100644
index 0000000000000000000000000000000000000000..180f5396a253062c89e1411e3c29fa639d8f8468
--- /dev/null
+++ b/arch/sparc/include/grtimer_longcount.h
@@ -0,0 +1,50 @@
+/**
+ * @file    arch/sparc/include/grtimer_longcount.h
+ * @ingroup timing
+ * @author  Armin Luntzer (armin.luntzer@univie.ac.at),
+ * @date    July, 2016
+ *
+ * @copyright GPLv2
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _SPARC_GRTIMER_LONGCOUNT_H_
+#define _SPARC_GRTIMER_LONGCOUNT_H_
+
+#include <grtimer.h>
+
+/**
+ * "coarse" holds the count of the secondary (chained) timer in multiples
+ * of seconds; it is chained to the "fine" timer, which hence underflows
+ * in a 1-second cycle
+ */
+
+struct grtimer_uptime {
+	uint32_t coarse;
+	uint32_t fine;
+};
+
+
+int32_t grtimer_longcount_start(struct grtimer_unit *rtu,
+				uint32_t scaler_reload,
+				uint32_t fine_ticks_per_sec,
+				uint32_t coarse_ticks_max);
+
+void grtimer_longcount_get_uptime(struct grtimer_unit *rtu,
+				  struct grtimer_uptime *up);
+
+double grtimer_longcount_difftime(struct grtimer_unit *rtu,
+				  struct grtimer_uptime time1,
+				  struct grtimer_uptime time0);
+
+uint32_t grtimer_longcount_get_latch_time_diff(struct grtimer_unit *rtu);
+
+#endif /* _SPARC_GRTIMER_LONGCOUNT_H_ */
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 54ce8bb4ad74e4523d65fb6ecfed183834c85287..623e0c294f018672e7870c2c8437b4468107832c 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += flush_windows.o
 obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += etrap.o
 obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += rtrap.o
 obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += irqtrap.o
+obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += thread.o
+
 
 obj-y += setup.o
 obj-y += init.o
@@ -25,6 +27,7 @@ obj-y += stack.o
 obj-y += traps/data_access_exception_trap.o
 obj-y += traps/data_access_exception.o
 obj-y += irq.o
+obj-y += time.o
 
 #libs-y += lib/
diff --git a/arch/sparc/kernel/etrap.S b/arch/sparc/kernel/etrap.S
index d1d445ce1110e8a27f9440d28395bc53ab9da235..3a02ac9412d6ffad8178ae48dbe1776074625953 100644
--- a/arch/sparc/kernel/etrap.S
+++ b/arch/sparc/kernel/etrap.S
@@ -29,6 +29,8 @@
 #define t_systable	l7 /* Never touch this, could be the syscall table ptr.
*/ #define curptr g6 /* Set after pt_regs frame is built */ +/* at this time, trap_setup should take 43(+33 worst case) cycles */ + .text .align 4 diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c index 93e6f0ffd77e4bc919b50048a7ef05dfde875a79..cbef5526a1a276306a657fadd2205563315c780a 100644 --- a/arch/sparc/kernel/irq.c +++ b/arch/sparc/kernel/irq.c @@ -46,6 +46,7 @@ #include <kernel/kmem.h> #include <kernel/printk.h> #include <kernel/kernel.h> +#include <kernel/export.h> #include <errno.h> #include <list.h> @@ -55,6 +56,7 @@ #include <asm/leon_reg.h> #include <asm/spinlock.h> #include <asm/irq.h> +#include <asm/irqflags.h> struct irl_vector_elem { @@ -293,6 +295,69 @@ static void leon_irq_enable(void) : "memory"); } +/** + * @brief get interrupt status and disable interrupts + */ + +static inline unsigned long leon_irq_save(void) +{ + unsigned long retval; + unsigned long tmp; + + __asm__ __volatile__( + "rd %%psr, %0\n\t" + "or %0, %2, %1\n\t" + "wr %1, 0, %%psr\n\t" + "nop; nop; nop\n" + : "=&r" (retval), "=r" (tmp) + : "i" (PSR_PIL) + : "memory"); + + return retval; +} + +/** + * @brief restore interrupts + */ + +static inline void leon_irq_restore(unsigned long old_psr) +{ + unsigned long tmp; + + __asm__ __volatile__( + "rd %%psr, %0\n\t" + "and %2, %1, %2\n\t" + "andn %0, %1, %0\n\t" + "wr %0, %2, %%psr\n\t" + "nop; nop; nop\n" + : "=&r" (tmp) + : "i" (PSR_PIL), "r" (old_psr) + : "memory"); +} + + + +void arch_local_irq_enable(void) +{ + leon_irq_enable(); +} +EXPORT_SYMBOL(arch_local_irq_enable); + + +unsigned long arch_local_irq_save(void) +{ + return leon_irq_save(); +} +EXPORT_SYMBOL(arch_local_irq_save); + + +void arch_local_irq_restore(unsigned long flags) +{ + leon_irq_restore(flags); +} +EXPORT_SYMBOL(arch_local_irq_restore); + + /** * @brief clear (acknowledge) a pending IRQ diff --git a/arch/sparc/kernel/irqtrap.S b/arch/sparc/kernel/irqtrap.S index b1bb5d8d6bdb32f75be791a437dfc25681103bce..e052a300463d08be24b64f33c6f73902cde30f30 100644 --- a/arch/sparc/kernel/irqtrap.S +++ b/arch/sparc/kernel/irqtrap.S @@ -13,6 +13,27 @@ #define t_irqlvl l7 /* set by trap entry */ +/* at this time with nested IRQs, it takes trap + jump + save_all up to call: + * 4 + 3 + 43(+33 worst case) + 7 = 57 (90 worst) cycles + * the call is pc-relative, hence 2 cycles total + * considering only the primary IRL and no statistics, leon_irq_dispatch will + * take 24 cycles to enter the assigned ISR, i.e. 83 (116) cycles until the + * IRQ can be serviced. 
+ * + * only considering the time it takes to return from the ISR and assuming only + * a single ISR is assigned to the IRL, leon_irq_dispatch will return + * after 12 cycles and arrive at the call to schedule after a total of 13 cycles + * + * Assuming there is no scheduling event and not considering the internals of + * the function, the overhead for call to and return from schedule() will take + * a total of 4 cycles + * + * the psr is updated in 1 cycle, the call to restore_all will take + * 39 (61 worst case) cycles + * + * The total time to service an IRQ is hence 150 cycles (195 worst case), + * not considering an actual scheduling event or the run time of the ISR + */ .align 4 @@ -51,7 +72,7 @@ __interrupt_entry: #else /* CONFIG_SPARC_NESTED_IRQ */ or %t_psr, PSR_PIL, %t_tmp -#ifndef CONFIG_TASK_PREEMPTION_DISABLE +#ifndef CONFIG_DISABLE_TASK_PREEMPTION call schedule nop #endif diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c index eebd07c1e60570d7497255fe1b8c9712c73de562..1a3edfce59882cfdd8a26819d62d8f8fa4be44ba 100644 --- a/arch/sparc/kernel/setup.c +++ b/arch/sparc/kernel/setup.c @@ -12,6 +12,7 @@ #include <init.h> #include <mm.h> #include <asm/irq.h> +#include <asm/time.h> #include <compiler.h> #include <page.h> @@ -101,4 +102,6 @@ void setup_arch(void) BUG_ON(stack_migrate(NULL, _kernel_stack_top)); leon_irq_init(); + + leon_uptime_init(); } diff --git a/arch/sparc/kernel/thread.c b/arch/sparc/kernel/thread.c new file mode 100644 index 0000000000000000000000000000000000000000..8b26071e19946ab0af134fecc10ca9158cebd892 --- /dev/null +++ b/arch/sparc/kernel/thread.c @@ -0,0 +1,114 @@ +/** + * @file arch/sparc/kernel/thread.c + * + * @ingroup sparc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * + * @brief implements the architecture specific thread component + * + */ + + +#include <asm/thread.h> + +#include <asm/irqflags.h> +#include <asm/leon.h> + +#include <kernel/export.h> +#include <kernel/kthread.h> + +#define MSG "SPARC THREAD: " + + +extern struct thread_info *current_set[]; + + + + +/** + * @brief this is a wrapper that actually executes the thread function + */ + +static void th_starter(void) +{ + struct task_struct *task = current_set[0]->task; + + task->thread_fn(task->data); + + printk("thread: %p returned\n", task->thread_fn); + task->state = TASK_DEAD; + + schedule(); + + pr_crit(MSG "should never have reached %s:%d", __func__, __LINE__); + BUG(); +} + + + + +/** + * @brief initialise a task structure + */ + +void arch_init_task(struct task_struct *task, + int (*thread_fn)(void *data), + void *data) +{ + +#define STACKFRAME_SZ 96 +#define PTREG_SZ 80 +#define PSR_CWP 0x0000001f + task->thread_info.ksp = (unsigned long) task->stack_top - (STACKFRAME_SZ + PTREG_SZ); + task->thread_info.kpc = (unsigned long) th_starter - 8; + task->thread_info.kpsr = get_psr(); + task->thread_info.kwim = 1 << (((get_psr() & PSR_CWP) + 1) % 8); + task->thread_info.task = task; + + task->thread_fn = thread_fn; + task->data = data; +} +EXPORT_SYMBOL(arch_init_task); + + + +/** + * @brief promote the currently executed path to a task + * @note we use this to move our main thread to the task list + */ + +void arch_promote_to_task(struct task_struct *task) +{ +#define PSR_CWP 0x0000001f + + task->thread_info.ksp = (unsigned long) leon_get_fp(); + task->thread_info.kpc = (unsigned long) __builtin_return_address(1) - 8; + task->thread_info.kpsr = get_psr(); + task->thread_info.kwim = 1 << (((get_psr() & PSR_CWP) + 1) % 8); + task->thread_info.task = task; + + task->thread_fn = NULL; + task->data = NULL; + + + printk(MSG "kernel stack %x\n", leon_get_fp()); + + printk(MSG "is next at %p stack %p\n", &task->thread_info, task->stack); + + + /* and set the new thread as current */ + __asm__ __volatile__("mov %0, %%g6\n\t" + :: "r"(&(task->thread_info)) : "memory"); + + +} +EXPORT_SYMBOL(arch_promote_to_task); diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c new file mode 100644 index 0000000000000000000000000000000000000000..5aaf9b61e9c94e095aaaac49f939543b4bc5667b --- /dev/null +++ b/arch/sparc/kernel/time.c @@ -0,0 +1,84 @@ +/** + * @file arch/sparc/kernel/time.c + */ + +#include <asm/time.h> +#include <kernel/time.h> +#include <kernel/printk.h> +#include <kernel/kernel.h> + + + +/* XXX: needs proper config option (for now; AMBA PNP autodetect later...) 
*/ + +#ifdef CONFIG_LEON3 +#include <grtimer_longcount.h> +static struct grtimer_unit *grtimer_longcount = + (struct grtimer_unit *) LEON3_BASE_ADDRESS_GRTIMER; + + + +static void leon_grtimer_longcount_init(void) +{ + grtimer_longcount_start(grtimer_longcount, GRTIMER_RELOAD, + GRTIMER_TICKS_PER_SEC, 0xFFFFFFFFUL); +} + +#endif + + + + + +static void leon_get_uptime(uint32_t *seconds, uint32_t *nanoseconds) +{ +#ifdef CONFIG_LEON3 + struct grtimer_uptime up; + + grtimer_longcount_get_uptime(grtimer_longcount, &up); + (*seconds) = up.coarse; + (*nanoseconds) = CPU_CYCLES_TO_NS(up.fine); + +#else + printk("%s:%s not implemented\n", __FILE__, __func__); + BUG(); +#endif /* CONFIG_LEON3 */ +} + + + +static int leon_timer_enable(void) +{ + printk("%s:%s not implemented\n", __FILE__, __func__); + BUG(); + return 0; +} + + +static void leon_timer_disable(void) +{ + printk("%s:%s not implemented\n", __FILE__, __func__); + BUG(); +} + + + +/** + * the configuration for the high-level timing facility + */ + +static struct clocksource uptime_clock = { + .read = leon_get_uptime, + .enable = leon_timer_enable, + .disable = leon_timer_disable, + +}; + + +void leon_uptime_init(void) +{ +#ifdef CONFIG_LEON3 + leon_grtimer_longcount_init(); +#endif + time_init(&uptime_clock); +} diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h new file mode 100644 index 0000000000000000000000000000000000000000..7ae219b8b735af92685379d1a749eca2fdeadd96 --- /dev/null +++ b/include/asm-generic/irqflags.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/** + * @file include/asm-generic/irqflags.h + * + * @copyright GPLv2 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */
+
+#ifndef _ASM_GENERIC_IRQFLAGS_H_
+#define _ASM_GENERIC_IRQFLAGS_H_
+
+
+void arch_local_irq_enable(void);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_restore(unsigned long flags);
+
+#endif /* _ASM_GENERIC_IRQFLAGS_H_ */
diff --git a/include/asm-generic/thread.h b/include/asm-generic/thread.h
index ed6e97a307114682a604eb8f65e9296996eaf785..02b3ac8592f0c9516b73e65c905f63c8fe2a689e 100644
--- a/include/asm-generic/thread.h
+++ b/include/asm-generic/thread.h
@@ -19,5 +19,11 @@
 
 #include <asm/thread.h>
 
+void arch_init_task(struct task_struct *task,
+		    int (*thread_fn)(void *data),
+		    void *data);
+
+void arch_promote_to_task(struct task_struct *task);
+
 #endif /* _ASM_GENERIC_THREAD_H_ */
diff --git a/include/kernel/clocksource.h b/include/kernel/clocksource.h
new file mode 100644
index 0000000000000000000000000000000000000000..8aae39427ad9eb1fe932ba6afabd481efddaf604
--- /dev/null
+++ b/include/kernel/clocksource.h
@@ -0,0 +1,23 @@
+/**
+ * @file include/kernel/clocksource.h
+ */
+
+#ifndef _KERNEL_CLOCKSOURCE_H_
+#define _KERNEL_CLOCKSOURCE_H_
+
+
+#include <kernel/types.h>
+
+
+struct clocksource {
+
+	void (*read)(uint32_t *seconds, uint32_t *nanoseconds);
+
+	int  (*enable)(void);
+	void (*disable)(void);
+};
+
+
+
+#endif /* _KERNEL_CLOCKSOURCE_H_ */
+
diff --git a/include/kernel/kthread.h b/include/kernel/kthread.h
index 4f4048a963729251777b2217d0ecc332e6826bb2..77e36bf01eeec16874e3bf4c8df92fd985369138 100644
--- a/include/kernel/kthread.h
+++ b/include/kernel/kthread.h
@@ -16,6 +16,13 @@
 
 
 
+/* task states */
+
+#define TASK_RUNNING	0xcafe
+#define TASK_PARKED	0x0001
+#define TASK_NEW	0x0002
+#define TASK_DEAD	0x0004
+
 
 struct task_struct {
 
@@ -26,6 +33,8 @@ struct task_struct {
 	volatile long state;
 
 	void *stack;
+	void *stack_top;
+	void *stack_bottom;
 
 	int on_cpu;
 	int (*thread_fn)(void *data);
@@ -45,14 +54,14 @@ struct task_struct {
 	 * children.
 	 */
 	struct task_struct *parent;
-	struct list_head sibling;
+	struct list_head node;
+	struct list_head siblings;
 	struct list_head children;
 
 };
 
-
 struct task_struct *kthread_create(int (*thread_fn)(void *data), void *data,
				   int cpu,
				   const char *namefmt,
@@ -64,4 +73,6 @@ void kthread_wake_up(struct task_struct *task);
 void switch_to(struct task_struct *next);
 void schedule(void);
 
+
+
 #endif /* _KERNEL_KTHREAD_H_ */
diff --git a/include/kernel/time.h b/include/kernel/time.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec981ae6504dadfb7292a216dbd506500ca11e98
--- /dev/null
+++ b/include/kernel/time.h
@@ -0,0 +1,53 @@
+/**
+ * @file include/kernel/time.h
+ * @author Armin Luntzer (armin.luntzer@univie.ac.at)
+ *
+ * @ingroup time
+ *
+ * @copyright GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ * + */ + +#ifndef _KERNEL_TIME_H_ +#define _KERNEL_TIME_H_ + +#include <kernel/types.h> +#include <kernel/kernel.h> +#include <kernel/clocksource.h> + + +#if 0 +struct timespec { + uint32_t tv_sec; /* seconds */ + uint32_t tv_nsec; /* nanoseconds */ +}; +#endif + +/* we use the compiler-defined struct timespec at this time, but we can + * at least verify the size of the types to see if we are compatible + */ +compile_time_assert((member_size(struct timespec, tv_sec) == sizeof(uint32_t)), + TIMESPEC_SEC_SIZE_MISMATCH); +compile_time_assert((member_size(struct timespec, tv_nsec) == sizeof(uint32_t)), + TIMESPEC_NSEC_SIZE_MISMATCH); + + +struct timekeeper { + struct clocksource *clock; +}; + + + +void time_get_uptime(struct timespec *ts); +void time_init(struct clocksource *clock); + +#endif /* _KERNEL_TIME_H_ */ diff --git a/init/main.c b/init/main.c index ba32a761e8c635afcc915ba53e04a50aae5ba1db..346056782f5ad39a42cdd34cf5d9e7f8f8db681e 100644 --- a/init/main.c +++ b/init/main.c @@ -43,7 +43,7 @@ irqreturn_t dummy(unsigned int irq, void *userdata) { - //printk("IRQ!\n"); + // printk("IRQ!\n"); //schedule(); return 0; } @@ -88,25 +88,72 @@ extern struct task_struct *kernel; struct task_struct *tsk1; struct task_struct *tsk2; +int threadx(void *data) +{ + + char c = (char) (* (char *)data); + int b = 0; + + while(1) { + //printk("."); + int i; + for (i = 0; i < (int) c; i++) { + putchar(c); + b++; + } + putchar('\n'); + + if (b > 3 * (int)c) + break; + // schedule(); + //twiddle(); + //cpu_relax(); + } + return 0; +} + int thread1(void *data) { + int b = 0; while(1) { //printk("."); - putchar('.'); + int i; + for (i = 0; i < 20; i++) + putchar('.'); + putchar('\n'); + + if (b++ > 20) + break; + + schedule(); //twiddle(); cpu_relax(); } + return 0; } int thread2(void *data) { + int b = 0; + + while (1) { + int i; + for (i = 0; i < 20; i++) { + putchar('o'); + b++; + } + + putchar('\n'); + if (b > 200) + break; + schedule(); - while(1) { - //printk("o"); - putchar('o'); - cpu_relax(); } + //schedule(); + //cpu_relax(); + printk("Actually, I left...\n"); + return 0xdeadbeef; } /** @@ -203,24 +250,38 @@ int kernel_main(void) irq_request(8, ISR_PRIORITY_NOW, dummy, NULL); mtu->scaler_reload = 5; - - mtu->timer[0].reload = 800 / (mtu->scaler_reload + 1); + /* abs min: 270 / (5+1) (sched printing) */ + /* abs min: 800 / (5+1) (threads printing) */ + mtu->timer[0].reload = 2000 / (mtu->scaler_reload + 1); mtu->timer[0].value = mtu->timer[0].reload; mtu->timer[0].ctrl = LEON3_TIMER_LD | LEON3_TIMER_EN | LEON3_TIMER_RL | LEON3_TIMER_IE; } + kernel = kthread_init_main(); tsk1 = kthread_create(thread1, NULL, KTHREAD_CPU_AFFINITY_NONE, "Thread1"); tsk2 = kthread_create(thread2, NULL, KTHREAD_CPU_AFFINITY_NONE, "Thread2"); //kthread_wake_up(tsk2); // kthread_wake_up(tsk2); + // + { + static char zzz[] = {':', '/', '\\', '~', '|'}; + int i; + + for (i = 0; i < ARRAY_SIZE(zzz); i++) + kthread_create(threadx, &zzz[i], KTHREAD_CPU_AFFINITY_NONE, "Thread2"); + } - kernel = kthread_init_main(); while(1) { //printk("-"); - putchar('-'); +#if 0 + int i; + for (i = 0; i < 20; i++) + putchar('-'); + putchar('\n'); +#endif cpu_relax(); } /* never reached */ diff --git a/kernel/Makefile b/kernel/Makefile index dfe58af469d4713843ff5dc6938c70c0334a7564..3334a1dc0916e180c8ec44ea6f47120c0466b418 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_NOC_DMA) += noc_dma.o obj-y += irq.o obj-$(CONFIG_XENTIUM) += xentium.o obj-y += kthread.o +obj-y += time.o diff --git a/kernel/kthread.c 
b/kernel/kthread.c index d41192885daf53cc9ccccee68ecd0d7c88594dd8..e4d39d2b44fd6c38b5f392f6d2e554c82d4c2df4 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -7,257 +7,138 @@ #include <kernel/export.h> #include <kernel/kmem.h> #include <kernel/err.h> +#include <kernel/printk.h> +#include <asm/spinlock.h> +#include <asm/switch_to.h> +#include <asm/irqflags.h> -static inline unsigned int get_psr(void) -{ - unsigned int psr; - __asm__ __volatile__( - "rd %%psr, %0\n\t" - "nop\n\t" - "nop\n\t" - "nop\n\t" - : "=r" (psr) - : /* no inputs */ - : "memory"); - - return psr; -} +#include <string.h> -static inline void put_psr(unsigned int new_psr) -{ - __asm__ __volatile__( - "wr %0, 0x0, %%psr\n\t" - "nop\n\t" - "nop\n\t" - "nop\n\t" - : /* no outputs */ - : "r" (new_psr) - : "memory", "cc"); -} +#define MSG "KTHREAD: " +static struct { + struct list_head new; + struct list_head run; + struct list_head dead; +} _kthreads = { + .new = LIST_HEAD_INIT(_kthreads.new), + .run = LIST_HEAD_INIT(_kthreads.run), + .dead = LIST_HEAD_INIT(_kthreads.dead) +}; + +static struct spinlock kthread_spinlock; + + +/** XXX dummy **/ struct task_struct *kernel; -struct { - struct task_struct *current; - struct task_struct *second; - struct task_struct *third; -} tasks; +struct thread_info *current_set[1]; -struct thread_info *current_set[1]; // = {kernel->thread_info}; +/** + * @brief lock critical kthread section + */ +static inline void kthread_lock(void) +{ + spin_lock(&kthread_spinlock); +} -#define prepare_arch_switch(next) do { \ - __asm__ __volatile__( \ - "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ - "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \ - "save %sp, -0x40, %sp\n\t" \ - "restore; restore; restore; restore; restore; restore; restore"); \ -} while(0) +/** + * @brief unlock critical kthread section + */ +static inline void kthread_unlock(void) +{ + spin_unlock(&kthread_spinlock); +} -void schedule(void) + +/* this should be a thread with a semaphore + * that is unlocked by schedule() if dead tasks + * were added + * (need to irq_disable/kthread_lock) + */ + +void kthread_cleanup_dead(void) { - struct task_struct *tmp; - - if (tasks.current && tasks.second && tasks.third) - (void) 0; - else - return; - - - tmp = tasks.current; - if (tasks.second) { - tasks.current = tasks.second; - if (tasks.third) { - tasks.second = tasks.third; - tasks.third = tmp; - } else { - tasks.second = tmp; - } - } else { - return; + struct task_struct *p_elem; + struct task_struct *p_tmp; + + list_for_each_entry_safe(p_elem, p_tmp, &_kthreads.dead, node) { + list_del(&p_elem->node); + kfree(p_elem->stack); + kfree(p_elem); } +} -// printk("new task: %p\n", tasks.current); - prepare_arch_switch(1); -#if 0 - __asm__ __volatile__("/*#define curptr g6*/" - "sethi %%hi(here - 0x8), %%o7\n\t" /* save the program counter just at the jump below as calĺed return address*/ - "or %%o7, %%lo(here - 0x8), %%o7\n\t" /* so the old thread will hop over this section when it returns */ - "rd %%psr, %%g4\n\t" - "std %%sp, [%%g6 + %2]\n\t" //store %sp and skip %pc to current thread's KSP - "rd %%wim, %%g5\n\t" // read wim - "wr %%g4, 0x00000020, %%psr\n\t" // toggle ET bit (should be off at this point! - "nop\n\t" - "nop\n\t" - "nop\n\t" - //? 
pause - "std %%g4, [%%g6 + %4]\n\t" // store %psr to KPSR and %wim to KWIM - - "ldd [%1 + %4], %%g4\n\t" // load KPSR + KWIM into %g4, %g5 from new thread - "mov %1, %%g6\n\t" // and set the new thread as current - "st %1, [%0]\n\t" // and to current_set[] - "wr %%g4, 0x20, %%psr\n\t" // set new PSR and toggle ET (should be off) - "nop; nop; nop\n\t" // wait for bits to settle, so we are in the proper window - "ldd [%%g6 + %2], %%sp\n\t" // and and load KSP to %sp (%o6) and KPC to %o7 (all of these MUST be aligned to dbl) - "wr %%g5, 0x0, %%wim\n\t" //set the new KWIM (from double load above) - - "ldd [%%sp + 0x00], %%l0\n\t" //load %l0 (%t_psr and %pc - "ldd [%%sp + 0x38], %%i6\n\t" // load %fp and %i7 (return address) - "wr %%g4, 0x0, %%psr\n\t" // set the original PSR (without traps) - "jmpl %%o7 + 0x8, %%g0\n\t" // as the thread is switched in, it will jump to the "here" marker and continue - "nop\n" - "here:\n" - : - : "r" (&(current_set[0])), - "r" (&(tasks.next->thread_info)), - "i" (TI_KSP), - "i" (TI_KPC), - "i" (TI_KPSR) - : "g1", "g2", "g3", "g4", "g5", "g7", - "l0", "l1", "l3", "l4", "l5", "l6", "l7", - "i0", "i1", "i2", "i3", "i4", "i5", - "o0", "o1", "o2", "o3", "o7"); -#else - __asm__ __volatile__("/*#define curptr g6*/" - "sethi %%hi(here - 0x8), %%o7\n\t" /* save the program counter just at the jump below as calĺed return address*/ - "or %%o7, %%lo(here - 0x8), %%o7\n\t" /* so the old thread will hop over this section when it returns */ - "rd %%psr, %%g4\n\t" - "std %%sp, [%%g6 + %2]\n\t" //store %sp and skip %pc to current thread's KSP - "rd %%wim, %%g5\n\t" // read wim - "wr %%g4, 0x00000020, %%psr\n\t" // toggle ET bit (should be off at this point! - "nop\n\t" - "nop\n\t" - "nop\n\t" - "std %%g4, [%%g6 + %4]\n\t" // store %psr to KPSR and %wim to KWIM - "ldd [%1 + %4], %%g4\n\t" // load KPSR + KWIM into %g4, %g5 from new thread - "mov %1, %%g6\n\t" // and set the new thread as current - "st %1, [%0]\n\t" // and to current_set[] - "wr %%g4, 0x20, %%psr\n\t" // set new PSR and toggle ET (should be off) - "nop; nop; nop\n\t" // wait for bits to settle, so we are in the proper window - "ldd [%%g6 + %2], %%sp\n\t" // and and load KSP to %sp (%o6) and KPC to %o7 (all of these MUST be aligned to dbl) - "wr %%g5, 0x0, %%wim\n\t" //set the new KWIM (from double load above) - - "ldd [%%sp + 0x00], %%l0\n\t" //load %l0 (%t_psr and %pc - "ldd [%%sp + 0x38], %%i6\n\t" // load %fp and %i7 (return address) - "wr %%g4, 0x0, %%psr\n\t" // set the original PSR (without traps) - "jmpl %%o7 + 0x8, %%g0\n\t" // as the thread is switched in, it will jump to the "here" marker and continue - "nop\n" - "here:\n" - : - : "r" (&(current_set[0])), - "r" (&(tasks.current->thread_info)), - "i" (TI_KSP), - "i" (TI_KPC), - "i" (TI_KPSR) - : "g1", "g2", "g3", "g4", "g5", "g7", - "l0", "l1", "l3", "l4", "l5", "l6", "l7", - "i0", "i1", "i2", "i3", "i4", "i5", - "o0", "o1", "o2", "o3", "o7"); -#endif +void schedule(void) +{ + struct task_struct *next; -} + if (list_empty(&_kthreads.run)) + return; -#define curptr g6 -/* this is executed from an interrupt exit */ -void __attribute__((always_inline)) switch_to(struct task_struct *next) -{ - //struct task_struct *task; - //struct thread_info *ti; - printk("Switch!\n"); - prepare_arch_switch(1); + arch_local_irq_disable(); + kthread_lock(); + kthread_cleanup_dead(); - /* NOTE: we don't actually require the PSR_ET toggle, but if we have - * unaligned accesses (or access traps), it is a really good idea, or we'll die */ - /* NOTE: this assumes we have a mixed 
kernel/user mapping in the MMU (if - * we are using it), otherwise we might would not be able to load the - * thread's data. Oh, and we'll have to do a switch user->kernel->new - * user OR we'll run into the same issue with different user contexts */ - /* first, store the current thread */ -#if 0 - __asm__ __volatile__("/*#define curptr g6*/" - "sethi %%hi(here - 0x8), %%o7\n\t" /* save the program counter just at the jump below as calĺed return address*/ - "or %%o7, %%lo(here - 0x8), %%o7\n\t" /* so the old thread will hop over this section when it returns */ - "rd %%psr, %%g4\n\t" - "std %%sp, [%%g6 + %2]\n\t" //store %sp and skip %pc to current thread's KSP - "rd %%wim, %%g5\n\t" // read wim - "wr %%g4, 0x00000020, %%psr\n\t" // toggle ET bit (should be off at this point! - "nop\n\t" - //? pause - "std %%g4, [%%g6 + %4]\n\t" // store %psr to KPSR and %wim to KWIM - - "ldd [%1 + %4], %%g4\n\t" // load KPSR + KWIM into %g4, %g5 from new thread - "mov %1, %%g6\n\t" // and set the new thread as current - "st %1, [%0]\n\t" // and to current_set[] - "wr %%g4, 0x20, %%psr\n\t" // set new PSR and toggle ET (should be off) - "nop; nop; nop\n\t" // wait for bits to settle, so we are in the proper window - "ldd [%%g6 + %2], %%sp\n\t" // and and load KSP to %sp (%o6) and KPC to %o7 (all of these MUST be aligned to dbl) - "wr %%g5, 0x0, %%wim\n\t" //set the new KWIM (from double load above) - - "ldd [%%sp + 0x00], %%l0\n\t" //load %l0 (%t_psr and %pc - "ldd [%%sp + 0x38], %%i6\n\t" // load %fp and %i7 (return address) - "wr %%g4, 0x0, %%psr\n\t" // set the original PSR (without traps) - "jmpl %%o7 + 0x8, %%g0\n\t" // as the thread is switched in, it will jump to the "here" marker and continue - "nop\n" - "here:\n" - : - : "r" (&(current_set[0])), - "r" (&(next->thread_info)), - "i" (TI_KSP), - "i" (TI_KPC), - "i" (TI_KPSR) - : "g1", "g2", "g3", "g4", "g5", "g7", - "l0", "l1", "l3", "l4", "l5", "l6", "l7", - "i0", "i1", "i2", "i3", "i4", "i5", - "o0", "o1", "o2", "o3", "o7"); + /* round robin */ + do { + next = list_entry(_kthreads.run.next, struct task_struct, node); + if (!next) + BUG(); -#endif + if (next->state == TASK_RUNNING) { + list_move_tail(&next->node, &_kthreads.run); + break; + } -} + list_move_tail(&next->node, &_kthreads.dead); -#if 0 + } while (!list_empty(&_kthreads.run)); - __asm__ __volatile__( - "mov %0, %%fp \n\t" - "sub %%fp, 96, %%sp\n\t" - : - : "r" (task->stack) - : "memory"); + kthread_unlock(); - thread_fn(data); -#endif -#include <asm/leon.h> -void kthread_wake_up(struct task_struct *task) -{ - printk("running thread function\n"); - task->thread_fn(task->data); + prepare_arch_switch(1); + switch_to(next); + + arch_local_irq_enable(); } -__attribute__((unused)) -static void kthread_exit(void) + + +void kthread_wake_up(struct task_struct *task) { - printk("thread leaving\n"); + printk("wake thread %p\n", task->stack_top); + arch_local_irq_disable(); + kthread_lock(); + task->state = TASK_RUNNING; + list_move_tail(&task->node, &_kthreads.run); + kthread_unlock(); + arch_local_irq_enable(); } + + struct task_struct *kthread_init_main(void) { struct task_struct *task; @@ -267,34 +148,30 @@ struct task_struct *kthread_init_main(void) if (!task) return ERR_PTR(-ENOMEM); - /*** XXX dummy **/ - current_set[0] = &kernel->thread_info; + arch_promote_to_task(task); -#define PSR_CWP 0x0000001f + arch_local_irq_disable(); + kthread_lock(); - task->thread_info.ksp = (unsigned long) leon_get_fp(); - task->thread_info.kpc = (unsigned long) __builtin_return_address(1) - 8; - 
 struct task_struct *kthread_init_main(void)
 {
         struct task_struct *task;
@@ -267,34 +148,30 @@ struct task_struct *kthread_init_main(void)
         if (!task)
                 return ERR_PTR(-ENOMEM);
-        /*** XXX dummy **/
-        current_set[0] = &kernel->thread_info;
+        arch_promote_to_task(task);
-#define PSR_CWP 0x0000001f
+        arch_local_irq_disable();
+        kthread_lock();
-        task->thread_info.ksp  = (unsigned long) leon_get_fp();
-        task->thread_info.kpc  = (unsigned long) __builtin_return_address(1) - 8;
-        task->thread_info.kpsr = get_psr();
-        task->thread_info.kwim = 1 << (((get_psr() & PSR_CWP) + 1) % 8);
-        task->thread_info.task = task;
+        kernel = task;
+        /*** XXX dummy **/
+        current_set[0] = &kernel->thread_info;
-        task->thread_fn = NULL;
-        task->data = NULL;
+        task->state = TASK_RUNNING;
+        list_add_tail(&task->node, &_kthreads.run);
-        printk("kernel stack %x\n", leon_get_fp());
-        /* dummy */
-        tasks.current = task;
-        __asm__ __volatile__("mov %0, %%g6\n\t"
-                             :: "r"(&(tasks.current->thread_info)) : "memory"); // and set the new thread as current
+        kthread_unlock();
+        arch_local_irq_enable();
         return task;
 }
+
 static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
                                                    void *data, int cpu,
                                                    const char *namefmt,
@@ -302,16 +179,16 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
 {
         struct task_struct *task;
 
         task = kmalloc(sizeof(*task));
+
         if (!task)
                 return ERR_PTR(-ENOMEM);
 
         /* XXX: need stack size detection and realloc/migration code */
-        task->stack = kmalloc(8192) + 8192; /* XXX */
+        task->stack = kmalloc(8192); /* XXX */
 
         if (!task->stack) {
                 kfree(task);
@@ -319,77 +196,28 @@ static struct task_struct *kthread_create_internal(int (*thread_fn)(void *data),
         }
-#define STACKFRAME_SZ 96
-#define PTREG_SZ      80
-#define PSR_CWP       0x0000001f
-        task->thread_info.ksp  = (unsigned long) task->stack - (STACKFRAME_SZ + PTREG_SZ);
-        task->thread_info.kpc  = (unsigned long) thread_fn - 8;
-        task->thread_info.kpsr = get_psr();
-        task->thread_info.kwim = 1 << (((get_psr() & PSR_CWP) + 1) % 8);
-        task->thread_info.task = task;
+        task->stack_bottom = task->stack;          /* XXX */
+        task->stack_top    = task->stack + 8192/4; /* XXX need align */
-        task->thread_fn = thread_fn;
-        task->data = data;
+        memset(task->stack, 0xdeadbeef, 8192);
+        arch_init_task(task, thread_fn, data);
-        printk("%s is next at %p stack %p\n", namefmt, &task->thread_info, task->stack);
-        if (!tasks.second)
-                tasks.second = task;
-        else
-                tasks.third = task;
+        task->state = TASK_NEW;
-        /* wake up */
+        arch_local_irq_disable();
+        kthread_lock();
+        list_add_tail(&task->node, &_kthreads.new);
+        kthread_unlock();
+        arch_local_irq_enable();
+
+
+        //printk("%s is next at %p stack %p\n", namefmt, &task->thread_info, task->stack);
+        printk("%s\n", namefmt);
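task->stack_top carries an "XXX need align" note for a reason: the SPARC context-switch code removed in this patch saves and restores register pairs with ldd/std, which trap on anything not aligned to a doubleword. One way the alignment could be resolved (the helper name, STACK_ALIGN, and the assumption that task->stack points to 4-byte cells are illustrative, not part of the patch):

        #include <stdint.h>

        #define STACK_ALIGN 8UL  /* ldd/std require doubleword alignment */

        static inline uintptr_t stack_align_down(uintptr_t addr)
        {
                /* clear the low bits to round down to the boundary */
                return addr & ~(STACK_ALIGN - 1UL);
        }

        /* e.g.:
         * task->stack_top = (int *)
         *         stack_align_down((uintptr_t) (task->stack + 8192 / 4));
         */

Rounding down keeps the stack top inside the allocation; the few bytes sacrificed are cheaper than an alignment trap on the first window overflow.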
-#if 0
-        struct kthread_create_info *create = kmalloc(sizeof(*create),
-                                                     GFP_KERNEL);
-        if (!create)
-                return ERR_PTR(-ENOMEM);
-        create->threadfn = threadfn;
-        create->data = data;
-        create->node = node;
-        create->done = &done;
-        spin_lock(&kthread_create_lock);
-        list_add_tail(&create->list, &kthread_create_list);
-        spin_unlock(&kthread_create_lock);
-
-        wake_up_process(kthreadd_task);
-        /*
-         * Wait for completion in killable state, for I might be chosen by
-         * the OOM killer while kthreadd is trying to allocate memory for
-         * new kernel thread.
-         */
-        if (unlikely(wait_for_completion_killable(&done))) {
-                /*
-                 * If I was SIGKILLed before kthreadd (or new kernel thread)
-                 * calls complete(), leave the cleanup of this structure to
-                 * that thread.
-                 */
-                if (xchg(&create->done, NULL))
-                        return ERR_PTR(-EINTR);
-                /*
-                 * kthreadd (or new kernel thread) will call complete()
-                 * shortly.
-                 */
-                wait_for_completion(&done);
-        }
-        task = create->result;
-        if (!IS_ERR(task)) {
-                static const struct sched_param param = { .sched_priority = 0 };
-
-                vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
-                /*
-                 * root may have changed our (kthreadd's) priority or CPU mask.
-                 * The kernel thread should not inherit these properties.
-                 */
-                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
-                set_cpus_allowed_ptr(task, cpu_all_mask);
-        }
-        kfree(create);
-#endif
         return task;
 }
@@ -538,16 +366,10 @@ out:
  * changing the task state if and only if any tasks are woken up.
  */
 /* Used in tsk->state: */
-#define TASK_RUNNING            0x0000
-#define TASK_INTERRUPTIBLE      0x0001
-#define TASK_UNINTERRUPTIBLE    0x0002
-#define __TASK_STOPPED          0x0004
-#define __TASK_TRACED           0x0008
-#define TASK_NORMAL             (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
 
 int wake_up_thread(struct task_struct *p)
 {
-        return wake_up_thread_internal(p, TASK_NORMAL, 0);
+        return wake_up_thread_internal(p, 0xdead, 0);
 }
 EXPORT_SYMBOL(wake_up_thread);
diff --git a/kernel/time.c b/kernel/time.c
new file mode 100644
index 0000000000000000000000000000000000000000..344cc7e5fdd154cc5ca54aafbedf9a3dd1562b66
--- /dev/null
+++ b/kernel/time.c
@@ -0,0 +1,64 @@
+/**
+ * @file kernel/time.c
+ * @author Armin Luntzer (armin.luntzer@univie.ac.at)
+ *
+ *
+ * @ingroup time
+ * @defgroup time time interface
+ *
+ */
+
+
+#include <errno.h>
+#include <kernel/time.h>
+#include <kernel/export.h>
+
+static struct timekeeper tk;
+
+
+
+
+/**
+ * @brief get the time elapsed since boot
+ *
+ * @param[out] ts a struct timespec
+ *
+ * @note if no uptime clock was configured, the result
+ *       will be undefined
+ */
+
+void time_get_uptime(struct timespec *ts)
+{
+        uint32_t sec;
+        uint32_t nsec;
+
+
+        if (!tk.clock)
+                return;
+
+
+        tk.clock->read(&sec, &nsec);
+
+        /* We'll get away with this as long as we exist in 32-bit space, since
+         * the members of struct timespec are usually of long int type.
+         * (see also kernel/time.h)
+         */
+
+        ts->tv_sec  = (typeof(ts->tv_sec))  sec;
+        ts->tv_nsec = (typeof(ts->tv_nsec)) nsec;
+
+
+
+}
+EXPORT_SYMBOL(time_get_uptime);
+
+
+
+/**
+ * @brief initialise the timing system
+ */
+
+void time_init(struct clocksource *clock)
+{
+        tk.clock = clock;
+}
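A usage sketch for the new time interface: this assumes that struct clocksource, declared in kernel/time.h (not part of this diff), exposes a read(sec, nsec) hook exactly as time_get_uptime() dereferences it. The struct layout and the dummy driver below are illustrative only; a real system would register a hardware-timer-backed clock instead.

        #include <stdint.h>
        #include <time.h>  /* struct timespec; the kernel build uses its own headers */

        /* assumed shape, inferred from the call tk.clock->read(&sec, &nsec) */
        struct clocksource {
                void (*read)(uint32_t *sec, uint32_t *nsec);
        };

        void time_init(struct clocksource *clock);
        void time_get_uptime(struct timespec *ts);

        /* a fake uptime clock that always reports 2.5 seconds */
        static void dummy_read(uint32_t *sec, uint32_t *nsec)
        {
                *sec  = 2;
                *nsec = 500000000;
        }

        static struct clocksource dummy_clock = {
                .read = dummy_read,
        };

        void example(void)
        {
                struct timespec ts;

                time_init(&dummy_clock);  /* register the uptime clock */
                time_get_uptime(&ts);     /* ts = { 2, 500000000 } */
        }

Registering the clock once at boot is all the setup time_get_uptime() needs; until then it returns early and leaves the caller's timespec untouched.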