Skip to content
Snippets Groups Projects
Commit 72599f38 authored by Armin Luntzer's avatar Armin Luntzer
Browse files

saving changes:

* scheduler
* timer
* stuff
parent e1560be6
No related branches found
No related tags found
No related merge requests found
Showing
with 1421 additions and 1 deletion
obj-y += kernel/ obj-y += kernel/
obj-y += mm/ obj-y += mm/
obj-y += drv/
...@@ -40,6 +40,13 @@ config PAGE_OFFSET ...@@ -40,6 +40,13 @@ config PAGE_OFFSET
addresses. This can not work unless the kernel is bootstrapped. addresses. This can not work unless the kernel is bootstrapped.
If unsure, say N. If unsure, say N.
config CPU_CLOCK_FREQ
int "Set the CPU clock frequency (Hz)"
default 80000000
help
At least LEON CPUs lack a feature to detect the system clock
frequency. If you set this incorrectly, your timing will be off.
config EXTRA_SPARC_PHYS_BANKS config EXTRA_SPARC_PHYS_BANKS
int "Number of extra physical memory banks" int "Number of extra physical memory banks"
default 0 default 0
......
CHECKFLAGS += -D__sparc__
obj-y += grtimer.o
obj-y += grtimer_longcount.o
/**
* @file arch/sparc/drv/grtimer.c
* @ingroup time
* @author Armin Luntzer (armin.luntzer@univie.ac.at),
* @date July, 2016
*
* @copyright GPLv2
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*
* @brief Implements access to the LEON3 General Purpose Timer Unit with
* Time Latch Capability
*
* @see GR712RC user manual chapter 12
*/
#include <asm/io.h>
#include <grtimer.h>
/**
 * @brief set scaler reload value of the timer block
 * @param rtu a struct grtimer_unit
 * @param value the scaler reload value to write
 *
 * @note the scaler reload register is shared by all timers of the unit
 */
void grtimer_set_scaler_reload(struct grtimer_unit *rtu, uint32_t value)
{
iowrite32be(value, &rtu->scaler_reload);
}
/**
 * @brief get scaler reload value of the timer block
 * @param rtu a struct grtimer_unit
 *
 * @return the current scaler reload register value
 */
uint32_t grtimer_get_scaler_reload(struct grtimer_unit *rtu)
{
return ioread32be(&rtu->scaler_reload);
}
/**
 * @brief sets the interrupt enabled flag of a timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_set_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t ctrl = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(ctrl | LEON3_TIMER_IE, &rtu->timer[timer].ctrl);
}
/**
 * @brief clears the interrupt enabled flag of a timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_clear_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t ctrl = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(ctrl & ~LEON3_TIMER_IE, &rtu->timer[timer].ctrl);
}
/**
 * @brief sets the load flag of a timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_set_load(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t ctrl = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(ctrl | LEON3_TIMER_LD, &rtu->timer[timer].ctrl);
}
/**
 * @brief clears the load flag of a timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_clear_load(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t ctrl = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(ctrl & ~LEON3_TIMER_LD, &rtu->timer[timer].ctrl);
}
/**
 * @brief set enable flag in timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_set_enabled(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags | LEON3_TIMER_EN, &rtu->timer[timer].ctrl);
}
/**
 * @brief clear enable flag in timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_clear_enabled(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags & ~LEON3_TIMER_EN, &rtu->timer[timer].ctrl);
}
/**
 * @brief set restart flag in timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_set_restart(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags | LEON3_TIMER_RS, &rtu->timer[timer].ctrl);
}
/**
 * @brief clear restart flag in timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_clear_restart(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags & ~LEON3_TIMER_RS, &rtu->timer[timer].ctrl);
}
/**
 * @brief set timer to chain to the preceding timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_set_chained(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags | LEON3_TIMER_CH, &rtu->timer[timer].ctrl);
}
/**
 * @brief clear timer chaining to the preceding timer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 */
void grtimer_clear_chained(struct grtimer_unit *rtu, uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags & ~LEON3_TIMER_CH, &rtu->timer[timer].ctrl);
}
/**
 * @brief get status of interrupt pending status
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 *
 * @return non-zero (the IP bit) if an interrupt is pending, 0 otherwise
 */
uint32_t grtimer_get_interrupt_pending_status(struct grtimer_unit *rtu,
uint32_t timer)
{
return ioread32be(&rtu->timer[timer].ctrl) & LEON3_TIMER_IP;
}
/**
 * @brief clear status of interrupt pending status
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 *
 * @note the IP bit is cleared by writing it back as 0
 */
void grtimer_clear_interrupt_pending_status(struct grtimer_unit *rtu,
					    uint32_t timer)
{
	uint32_t flags = ioread32be(&rtu->timer[timer].ctrl);

	iowrite32be(flags & ~LEON3_TIMER_IP, &rtu->timer[timer].ctrl);
}
/**
 * @brief get number of implemented general purpose timers
 * @param rtu a struct grtimer_unit
 *
 * @return the TIMERS field of the configuration register
 */
uint32_t grtimer_get_num_implemented(struct grtimer_unit *rtu)
{
return ioread32be(&rtu->config) & LEON3_CFG_TIMERS_MASK;
}
/**
 * @brief get interrupt ID of first implemented timer
 * @param rtu a struct grtimer_unit
 *
 * @return the IRQ number field extracted from the configuration register
 */
uint32_t grtimer_get_first_timer_irq_id(struct grtimer_unit *rtu)
{
return (ioread32be(&rtu->config) & LEON3_CFG_IRQNUM_MASK) >>
LEON3_CFG_IRQNUM_SHIFT;
}
/**
 * @brief set the value of a grtimer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 * @param value the timer counter value to set
 */
void grtimer_set_value(struct grtimer_unit *rtu, uint32_t timer, uint32_t value)
{
iowrite32be(value, &rtu->timer[timer].value);
}
/**
 * @brief get the value of a grtimer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 *
 * @return the current counter value of the selected timer
 */
uint32_t grtimer_get_value(struct grtimer_unit *rtu, uint32_t timer)
{
return ioread32be(&rtu->timer[timer].value);
}
/**
 * @brief set the reload of a grtimer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 * @param reload the timer counter reload to set
 */
void grtimer_set_reload(struct grtimer_unit *rtu,
uint32_t timer,
uint32_t reload)
{
iowrite32be(reload, &rtu->timer[timer].reload);
}
/**
 * @brief get the reload of a grtimer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 *
 * @return the reload register value of the selected timer
 */
uint32_t grtimer_get_reload(struct grtimer_unit *rtu, uint32_t timer)
{
return ioread32be(&rtu->timer[timer].reload);
}
/**
 * @brief set an irq to trigger a latch
 * @param rtu a struct grtimer_unit
 * @param irq the irq number to latch on
 *
 * @note the shift uses an unsigned constant: (1 << 31) on a signed int
 *	 is undefined behaviour in C, and irq may legitimately be 31
 */
void grtimer_set_latch_irq(struct grtimer_unit *rtu, uint32_t irq)
{
	uint32_t irq_select;

	irq_select = ioread32be(&rtu->irq_select);
	irq_select |= (1U << irq);

	iowrite32be(irq_select, &rtu->irq_select);
}
/**
 * @brief clear an irq triggering a latch
 * @param rtu a struct grtimer_unit
 * @param irq the irq number to disable latching for
 *
 * @note the shift uses an unsigned constant: (1 << 31) on a signed int
 *	 is undefined behaviour in C, and irq may legitimately be 31
 */
void grtimer_clear_latch_irq(struct grtimer_unit *rtu, uint32_t irq)
{
	uint32_t irq_select;

	irq_select = ioread32be(&rtu->irq_select);
	irq_select &= ~(1U << irq);

	iowrite32be(irq_select, &rtu->irq_select);
}
/**
 * @brief set the timer's latch bit
 * @param rtu a struct grtimer_unit
 */
void grtimer_enable_latch(struct grtimer_unit *rtu)
{
	uint32_t cfg = ioread32be(&rtu->config);

	iowrite32be(cfg | LEON3_GRTIMER_CFG_LATCH, &rtu->config);
}
/**
 * @brief get the latch value of a grtimer
 * @param rtu a struct grtimer_unit
 * @param timer the selected timer
 *
 * @return the counter value captured at the last latch event
 */
uint32_t grtimer_get_latch_value(struct grtimer_unit *rtu, uint32_t timer)
{
return ioread32be(&rtu->timer[timer].latch_value);
}
/**
* @file leon3_grtimer_longcount.c
* @ingroup timing
* @author Armin Luntzer (armin.luntzer@univie.ac.at),
* @date July, 2016
*
* @copyright GPLv2
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* @brief implements a long-counting (uptime) clock using the LEON3 GRTIMER
*
*/
#include <asm/io.h>
#include <time.h>
#include <grtimer.h>
#include <grtimer_longcount.h>
/**
 * @brief enable long count timer
 * @param rtu a struct grtimer_unit
 * @param scaler_reload a scaler reload value
 * @param fine_ticks_per_sec a timer reload value in ticks per second
 * @param coarse_ticks_max a timer reload value in seconds
 *
 * If properly configured, grtimer[0] will hold fractions of a second and
 * grtimer[1] will be in seconds, counting down from coarse_ticks_max
 *
 * @return -1 if scaler_reload is 0 or fine_ticks_per_sec is not an integer
 *	   multiple of scaler_reload, 0 otherwise
 *
 * @note the return value warns about a configuration error, but the
 *	 hardware will still have been configured with the given input
 */
int32_t grtimer_longcount_start(struct grtimer_unit *rtu,
				uint32_t scaler_reload,
				uint32_t fine_ticks_per_sec,
				uint32_t coarse_ticks_max)
{
	grtimer_set_scaler_reload(rtu, scaler_reload);

	grtimer_set_reload(rtu, 0, fine_ticks_per_sec);
	grtimer_set_reload(rtu, 1, coarse_ticks_max);

	grtimer_set_load(rtu, 0);
	grtimer_set_load(rtu, 1);

	grtimer_set_restart(rtu, 0);
	grtimer_set_restart(rtu, 1);

	grtimer_set_chained(rtu, 1);

	grtimer_set_enabled(rtu, 0);
	grtimer_set_enabled(rtu, 1);

	grtimer_enable_latch(rtu);

	/* guard the divisibility check below: a scaler reload of 0 would
	 * be a division by zero (undefined behaviour) and is a
	 * configuration error in any case */
	if (!scaler_reload)
		return -1;

	/* not an integer multiple, clock will drift */
	if (fine_ticks_per_sec % scaler_reload)
		return -1;

	return 0;
}
/**
 * @brief get the time since the long counting grtimer was started
 * @param rtu a struct grtimer_unit
 * @param up a struct grtimer_uptime
 * @note if configured properly, fine will be in cpu cycles and coarse will
 * be in seconds
 *
 * The two counters cannot be read atomically, so each is sampled three
 * times; the samples are used to detect whether the fine counter wrapped
 * (and the coarse counter changed) between reads.
 *
 * XXX: sucks, need latching
 */
void grtimer_longcount_get_uptime(struct grtimer_unit *rtu,
struct grtimer_uptime *up)
{
uint32_t sc;
uint32_t t0, t0a, t0b, t0c;
uint32_t t1, t1a, t1b, t1c;
uint32_t r0;
uint32_t r1;
/* scaler reload, needed to convert fine ticks to cycles */
sc = ioread32be(&rtu->scaler_reload);
/* sample both down-counters three times, in the same order */
t0a = ioread32be(&rtu->timer[0].value);
t1a = ioread32be(&rtu->timer[1].value);
t0b = ioread32be(&rtu->timer[0].value);
t1b = ioread32be(&rtu->timer[1].value);
t0c = ioread32be(&rtu->timer[0].value);
t1c = ioread32be(&rtu->timer[1].value);
/* the timers count down, so consecutive reads normally satisfy
 * a >= b; if not, a wrap occurred between the first two sample
 * pairs and the third (post-wrap) sample pair is used instead */
if ((t0a >= t0b) && (t1a >= t1b))
{
t0 = t0a;
t1 = t1a;
}
else
{
t0 = t0c;
t1 = t1c;
}
r0 = ioread32be(&rtu->timer[0].reload);
r1 = ioread32be(&rtu->timer[1].reload);
/* elapsed = reload - current; fine ticks are (sc + 1) cycles each */
up->fine = (r0 - t0) * (sc + 1);
up->coarse = (r1 - t1);
}
/**
 * @brief get the number of seconds elapsed between timestamps taken from
 *	  the longcount timer
 *
 * @param rtu a struct grtimer_unit
 * @param time1 a struct grtimer_uptime (the later timestamp)
 * @param time0 a struct grtimer_uptime (the earlier timestamp)
 *
 * @return time difference in seconds represented as double
 */
double grtimer_longcount_difftime(struct grtimer_unit *rtu,
struct grtimer_uptime time1,
struct grtimer_uptime time0)
{
uint32_t sc;
uint32_t rl;
double cpu_freq;
double t0;
double t1;
/* (scaler reload + 1) * fine reload = cycles per coarse tick, i.e.
 * the divisor that normalizes the fine field to fractional seconds */
sc = grtimer_get_scaler_reload(rtu);
rl = grtimer_get_reload(rtu, 0);
cpu_freq = (double) (sc + 1) * rl;
t0 = (double) time0.coarse + (double) time0.fine / cpu_freq;
t1 = (double) time1.coarse + (double) time1.fine / cpu_freq;
return t1 - t0;
}
/**
 * @brief get the time since last latch occured in cpu cycles
 * @param rtu a struct grtimer_unit
 * @note does not compensate for function overhead
 *
 * NOTE(review): the coarse difference is scaled to cycles via
 * t0_reload * (t0_scaler + 1), but the fine-tick differences added below
 * are NOT multiplied by (t0_scaler + 1) — verify the intended unit; with
 * a scaler reload of 0 (fine tick == cycle) both agree.
 */
uint32_t grtimer_longcount_get_latch_time_diff(struct grtimer_unit *rtu)
{
uint32_t t0;
uint32_t t1;
uint32_t t0_latch;
uint32_t t1_latch;
uint32_t t0_reload;
uint32_t t0_scaler;
uint32_t diff;
/* counter values captured at the last latch event */
t0_latch = grtimer_get_latch_value(rtu, 0);
t1_latch = grtimer_get_latch_value(rtu, 1);
t0_reload = grtimer_get_reload(rtu, 0);
t0_scaler = grtimer_get_scaler_reload(rtu);
/* current counter values */
t0 = grtimer_get_value(rtu, 0);
t1 = grtimer_get_value(rtu, 1);
/* full coarse periods elapsed since the latch, in cycles */
diff = (t1_latch - t1) * t0_reload * (t0_scaler + 1);
/* fine counter counts down; if it moved past the latched value it
 * wrapped through the reload value once */
if (t0 < t0_latch)
diff += (t0_latch - t0);
else
diff += (t0_reload - t0);
return diff;
}
/**
* @file sparc/include/asm/irqflags.h
*
* @copyright GPLv2
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _ARCH_SPARC_ASM_IRQFLAGS_H_
#define _ARCH_SPARC_ASM_IRQFLAGS_H_
void arch_local_irq_enable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
/**
 * @brief disable local interrupts
 *
 * Wrapper around arch_local_irq_save() for callers that do not intend to
 * restore the previous interrupt state; the returned flags are
 * deliberately discarded (made explicit with the void cast).
 */
static inline void arch_local_irq_disable(void)
{
	(void) arch_local_irq_save();
}
#endif /* _ARCH_SPARC_ASM_IRQFLAGS_H_ */
...@@ -201,6 +201,64 @@ static inline void leon_reg_win_flush(void) ...@@ -201,6 +201,64 @@ static inline void leon_reg_win_flush(void)
__asm__ __volatile__("ta 3"); __asm__ __volatile__("ta 3");
} }
/* read the window invalid mask register (%wim); the three nops are delay
 * cycles after the state register access — presumably required for the
 * read to settle on this core, TODO confirm against the CPU manual */
__attribute__((unused))
static inline unsigned int get_wim(void)
{
unsigned int wim;
__asm__ __volatile__(
"rd %%wim, %0\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
: "=r" (wim)
: /* no inputs */
: "memory");
return wim;
}
/* write the window invalid mask register (%wim); the wr is followed by
 * three nop delay cycles before the new value is guaranteed visible */
__attribute__((unused))
static inline void put_wim(unsigned int new_wim)
{
__asm__ __volatile__(
"wr %0, 0x0, %%wim\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
: /* no outputs */
: "r" (new_wim)
: "memory", "cc");
}
/* read the processor state register (%psr); three nop delay cycles follow
 * the state register access */
__attribute__((unused))
static inline unsigned int get_psr(void)
{
unsigned int psr;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
: "=r" (psr)
: /* no inputs */
: "memory");
return psr;
}
/* write the processor state register (%psr); the three nops cover the
 * cycles until the written state (PIL, ET, CWP, ...) takes effect */
__attribute__((unused))
static inline void put_psr(unsigned int new_psr)
{
__asm__ __volatile__(
"wr %0, 0x0, %%psr\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
: /* no outputs */
: "r" (new_psr)
: "memory", "cc");
}
......
/**
* @file sparc/include/asm/switch_to.h
*
* @copyright GPLv2
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*
* When implementing the actual task switching segment, I came up with
* essentially the same thing that David S. Miller did in the SPARC port
* switch_to() macro of the Linux kernel, just less tuned, so I adapted his
* code.
* Hence, I decided to just follow a similar scheme to Linux for the thread
* switching, which may make it easier to port to a new architecure in the
* future, as it'll be possible to adapt the according macros from the Linux
* source tree. No need to reinvent the wheel..
*
*
* TODO: FPU (lazy) switching
* TODO: CPU id (for SMP)
*/
#include <kernel/kthread.h>
#include <asm/ttable.h>
#ifndef _ARCH_SPARC_ASM_SWITCH_TO_H_
#define _ARCH_SPARC_ASM_SWITCH_TO_H_
/* Flush the current task's register windows to its stack: walk down seven
 * windows with save (each allocating a minimal 0x40-byte dummy frame) and
 * walk back up with seven restores, forcing the CPU to spill every other
 * window to memory before the switch.
 *
 * Fixed: no trailing semicolon after while(0) — with one, the macro
 * expands to two statements and breaks when used in an un-braced
 * if/else body.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while (0)
/* NOTE: we don't actually require the PSR_ET toggle, but if we have
* unaligned accesses (or access traps), it is a really good idea, or
* we'll die...
*/
/* NOTE: this assumes we have a mixed kernel/user mapping in the MMU (if
* we are using it), otherwise we might would not be able to load the
* thread's data. Oh, and we'll have to do a switch user->kernel->new
* user OR we'll run into the same issue with different user contexts */
/* curptr is %g6! */
/* so, here's what's happening
*
*
* 1+2: store a new program counter (~ return address) just after the actual
* switch block, so the old thread will hop over the actual switching
* section when it is re-scheduled.
*
* NOTE: in the SPARC ABI the return address to the caller in %i7 is actually
* (return address - 0x8), and the %o[] regs become the %i[] regs on a
* save instruction, so we actually have to store the reference address
* to the jump accordingly
*
* 3: store the current thread's %psr to %g4
*
* 4: double-store the stack pointer (%o6) and the "skip" PC in %o7
* note that this requires double-word alignment of struct thread_info
* members KSP an KPC
*
* 5: store the current thread's %wim to %g5
*
* 6-7: toggle the enable traps bit in the %psr (should be off at this point!)
* and wait 3 cycles for the bits to settle
*
* 8: double-store store the PSR in %g4 and the WIM in %g5
* note that this requires double-word alignment of struct thread_infio
* members KPSR an KWIM
*
* 9: double-load KPSR + KWIM into %g4, %g5 from new thread info
*
* NOTE: A double load takes 2 cycles, +1 extra if the subsequent instruction
* depends on the result of the load, that's why we don't set %g6 first
* and use it to load steps 10+11 form there
*
* 10: set the new thread info to "curptr" (%g6) of this CPU
*
* 11: set the new thread info to the global "current" set of this CPU
*
* 12: set the new thread's PSR and toggle the ET bit (should be off)
*
* 13: wait for the bits to settle, so the CPU puts us into the proper window
* before we can continue
*
* 14: double-load KSP and KPC to %sp (%o6) and the "skip" PC in %o7
*
* 15: set the new thread's WIM
*
* 16: restore %l0 and %l1 from the memory stack, rtrap.S expects these to be
* l0 == t_psr, l1 == t_pc
*
* 17: restore the frame pointer %fp (%i6) and the return address in %i7
*
* 18: restore the new thread's PSR
*
* NOTE: we don't have to wait there, as long as we don't return immediately
* following the macro
*
* 19: jump to the actual address of the label
*
*
*
* The corresponding (approximate) c code:
*
* register struct sparc_stackf *sp asm("sp");
* register unsigned long calladdr asm("o7");
* register struct thread_info *th asm("g6");
* register unsigned long t_psr asm("l0");
* register unsigned long t_pc asm("l1");
* register unsigned long fp asm("fp");
* register unsigned long ret asm("i7");
*
*
* th->kpc = (unsigned long) &&here - 0x8;
* th->kpsr = get_psr();
* th->ksp = (unsigned long) sp;
* th->kwim = get_wim();
*
* put_psr(th->kpsr^0x20);
*
* th = &next->thread_info;
* current_set[0] = th;
*
* put_psr(th->kpsr^0x20);
* put_wim(th->kwim);
*
* calladdr = th->kpc;
* sp = (struct sparc_stackf *) th->ksp;
*
* t_psr = sp->locals[0];
* t_pc = sp->locals[1];
*
* fp = (unsigned long) sp->fp;
* ret = sp->callers_pc;
*
* put_psr(th->kpsr);
*
* __asm__ __volatile__(
* "jmpl %%o7 + 0x8, %%g0\n\t"
* "nop\n\t"
* ::: "%o7", "memory");
* here:
* (void) 0;
*/
/* Perform the actual context switch to `next` (see the large step-by-step
 * comment above for the per-instruction walkthrough).
 *
 * NOTE(review): the trailing semicolon after while(0) defeats the
 * do/while(0) idiom (un-braced if/else callers would break) — consider
 * removing it once all call sites are verified.
 *
 * NOTE(review): %g6 (curptr) is written by "mov %1, %%g6" but is not in
 * the clobber list — presumably intentional since it is the reserved
 * per-CPU current pointer; confirm the compiler never allocates %g6.
 *
 * Operand %3 (TI_KPC) is not referenced directly: the std of the %sp/%o7
 * pair at [%g6 + TI_KSP] stores KSP and KPC together, which relies on the
 * double-word alignment and adjacency of those thread_info members.
 */
#define switch_to(next) do { \
__asm__ __volatile__( \
"sethi %%hi(here - 0x8), %%o7\n\t" \
"or %%o7, %%lo(here - 0x8), %%o7\n\t" \
"rd %%psr, %%g4\n\t" \
"std %%sp, [%%g6 + %2]\n\t" \
"rd %%wim, %%g5\n\t" \
"wr %%g4, 0x20, %%psr\n\t" \
"nop; nop; nop\n\t" \
"std %%g4, [%%g6 + %4]\n\t" \
"ldd [%1 + %4], %%g4\n\t" \
"mov %1, %%g6\n\t" \
"st %1, [%0]\n\t" \
"wr %%g4, 0x20, %%psr\n\t" \
"nop; nop; nop\n\t" \
"ldd [%%g6 + %2], %%sp\n\t" \
"wr %%g5, 0x0, %%wim\n\t" \
"ldd [%%sp + 0x00], %%l0\n\t" \
"ldd [%%sp + 0x38], %%i6\n\t" \
"wr %%g4, 0x0, %%psr\n\t" \
"jmpl %%o7 + 0x8, %%g0\n\t" \
" nop\n\t" \
"here:\n\t" \
: \
: "r" (&(current_set[0])), \
"r" (&(next->thread_info)), \
"i" (TI_KSP), \
"i" (TI_KPC), \
"i" (TI_KPSR) \
: "g1", "g2", "g3", "g4", "g5", "g7", \
"l0", "l1", "l3", "l4", "l5", "l6", "l7", \
"i0", "i1", "i2", "i3", "i4", "i5", \
"o0", "o1", "o2", "o3", "o7"); \
} while(0);
#endif /* _ARCH_SPARC_ASM_SWITCH_TO_H_ */
/**
* @file arch/sparc/include/time.h
*/
#ifndef _SPARC_TIME_H_
#define _SPARC_TIME_H_
#include <kernel/kernel.h>
#define SPARC_CPU_CPS 0
#ifdef CONFIG_CPU_CLOCK_FREQ
#undef SPARC_CPU_CPS
#define SPARC_CPU_CPS CONFIG_CPU_CLOCK_FREQ
#endif /* CONFIG_CPU_CLOCK_FREQ */
#if !SPARC_CPU_CPS
#error CPU clock frequency not configured and no detection method available.
#endif
/* scaler reload values: a reload of N divides the CPU clock by (N + 1),
 * so 4 gives a round 5-cycle tick (the hardware minimum would be 3) */
#define GPTIMER_RELOAD 4
#define GRTIMER_RELOAD 4

#define GPTIMER_TICKS_PER_SEC ((SPARC_CPU_CPS / (GPTIMER_RELOAD + 1)))
#define GPTIMER_TICKS_PER_MSEC (GPTIMER_TICKS_PER_SEC / 1000)
#define GPTIMER_TICKS_PER_USEC (GPTIMER_TICKS_PER_SEC / 1000000)
#define GPTIMER_USEC_PER_TICK (1000000.0 / GPTIMER_TICKS_PER_SEC)

#define GRTIMER_TICKS_PER_SEC ((SPARC_CPU_CPS / (GRTIMER_RELOAD + 1)))
#define GRTIMER_TICKS_PER_MSEC (GRTIMER_TICKS_PER_SEC / 1000)
#define GRTIMER_TICKS_PER_USEC (GRTIMER_TICKS_PER_SEC / 1000000)
#define GRTIMER_USEC_PER_TICK (1000000.0 / GRTIMER_TICKS_PER_SEC)

#define GPTIMER_CYCLES_PER_SEC SPARC_CPU_CPS
#define GPTIMER_CYCLES_PER_MSEC (GPTIMER_CYCLES_PER_SEC / 1000)
/* fixed: referenced undefined GPTIMER_CYCLESS_PER_SEC (typo) */
#define GPTIMER_CYCLES_PER_USEC (GPTIMER_CYCLES_PER_SEC / 1000000)
#define GPTIMER_USEC_PER_CYCLE (1000000.0 / GPTIMER_CYCLES_PER_SEC)

#define GRTIMER_CYCLES_PER_SEC SPARC_CPU_CPS
#define GRTIMER_CYCLES_PER_MSEC (GRTIMER_CYCLES_PER_SEC / 1000)
/* fixed: referenced undefined GRTIMER_CYCLESS_PER_SEC (typos) */
#define GRTIMER_CYCLES_PER_USEC (GRTIMER_CYCLES_PER_SEC / 1000000)
/* NOTE: integer division — this is 0 for any sub-GHz clock */
#define GRTIMER_CYCLES_PER_NSEC (GRTIMER_CYCLES_PER_SEC / 1000000000)
#define GRTIMER_SEC_PER_CYCLE ( 1.0 / GRTIMER_CYCLES_PER_SEC)
#define GRTIMER_MSEC_PER_CYCLE ( 1000.0 / GRTIMER_CYCLES_PER_SEC)
#define GRTIMER_USEC_PER_CYCLE (1000000.0 / GRTIMER_CYCLES_PER_SEC)

/* this will definitely break if we run at GHz clock speeds
 * note that the order is important, otherwise we may encounter integer
 * overflow on multiplication
 */
#define CPU_CYCLES_TO_NS(x) (((x) / (SPARC_CPU_CPS / 1000000UL)) * 1000UL)

compile_time_assert((SPARC_CPU_CPS <= 1000000000UL),
		    CPU_CYCLES_TO_NS_NEEDS_FIXUP);
void leon_uptime_init(void);
#endif /* _SPARC_TIME_H_ */
/**
* @file arch/sparc/grtimer.h
* @ingroup time
* @author Armin Luntzer (armin.luntzer@univie.ac.at),
* @date July, 2016
*
* @copyright GPLv2
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _SPARC_GRTIMER_H_
#define _SPARC_GRTIMER_H_
#include <asm/leon_reg.h>
#define LEON3_GRTIMER_CFG_LATCH 0x800
#define LEON3_TIMER_EN 0x00000001 /* enable counting */
#define LEON3_TIMER_RS 0x00000002 /* restart from timer reload value */
#define LEON3_TIMER_LD 0x00000004 /* load counter */
#define LEON3_TIMER_IE 0x00000008 /* irq enable */
#define LEON3_TIMER_IP 0x00000010 /* irq pending (clear by writing 0 */
#define LEON3_TIMER_CH 0x00000020 /* chain with preceeding timer */
#define LEON3_CFG_TIMERS_MASK 0x00000007
#define LEON3_CFG_IRQNUM_MASK 0x000000f8
#define LEON3_CFG_IRQNUM_SHIFT 0x3
void grtimer_set_scaler_reload(struct grtimer_unit *rtu, uint32_t value);
uint32_t grtimer_get_scaler_reload(struct grtimer_unit *rtu);
void grtimer_set_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_clear_interrupt_enabled(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_set_load(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_clear_load(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_set_enabled(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_clear_enabled(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_set_restart(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_clear_restart(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_set_chained(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_clear_chained(struct grtimer_unit *rtu, uint32_t timer);
uint32_t grtimer_get_interrupt_pending_status(struct grtimer_unit *rtu,
uint32_t timer);
void grtimer_clear_interrupt_pending_status(struct grtimer_unit *rtu,
uint32_t timer);
uint32_t grtimer_get_num_implemented(struct grtimer_unit *rtu);
uint32_t grtimer_get_first_timer_irq_id(struct grtimer_unit *rtu);
void grtimer_set_value(struct grtimer_unit *rtu,
uint32_t timer,
uint32_t value);
uint32_t grtimer_get_value(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_set_reload(struct grtimer_unit *rtu,
uint32_t timer,
uint32_t reload);
uint32_t grtimer_get_reload(struct grtimer_unit *rtu, uint32_t timer);
void grtimer_set_latch_irq(struct grtimer_unit *rtu, uint32_t irq);
void grtimer_clear_latch_irq(struct grtimer_unit *rtu, uint32_t irq);
void grtimer_enable_latch(struct grtimer_unit *rtu);
uint32_t grtimer_get_latch_value(struct grtimer_unit *rtu, uint32_t timer);
#endif /* _SPARC_GRTIMER_H */
/**
* @file arch/sparc/include/grtimer_longcount.h
* @ingroup timing
* @author Armin Luntzer (armin.luntzer@univie.ac.at),
* @date July, 2016
*
* @copyright GPLv2
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef _SPARC__GRTIMER_LONGCOUNT_H_
#define _SPARC__GRTIMER_LONGCOUNT_H_
#include <grtimer.h>
/**
* "coarse" contains the counter of the secondary (chained) timer in
* multiples of seconds and is chained
* to the "fine" timer, which should hence underflow in a 1-second cycle
*/
struct grtimer_uptime {
uint32_t coarse;
uint32_t fine;
};
int32_t grtimer_longcount_start(struct grtimer_unit *rtu,
uint32_t scaler_reload,
uint32_t fine_ticks_per_sec,
uint32_t coarse_ticks_max);
void grtimer_longcount_get_uptime(struct grtimer_unit *rtu,
struct grtimer_uptime *up);
double grtimer_longcount_difftime(struct grtimer_unit *rtu,
struct grtimer_uptime time1,
struct grtimer_uptime time0);
uint32_t grtimer_longcount_get_latch_time_diff(struct grtimer_unit *rtu);
#endif /* _SPARC_GRTIMER_LONGCOUNT_H_ */
...@@ -10,6 +10,8 @@ obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += flush_windows.o ...@@ -10,6 +10,8 @@ obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += flush_windows.o
obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += etrap.o obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += etrap.o
obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += rtrap.o obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += rtrap.o
obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += irqtrap.o obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += irqtrap.o
obj-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += thread.o
obj-y += setup.o obj-y += setup.o
obj-y += init.o obj-y += init.o
...@@ -25,6 +27,7 @@ obj-y += stack.o ...@@ -25,6 +27,7 @@ obj-y += stack.o
obj-y += traps/data_access_exception_trap.o obj-y += traps/data_access_exception_trap.o
obj-y += traps/data_access_exception.o obj-y += traps/data_access_exception.o
obj-y += irq.o obj-y += irq.o
obj-y += time.o
#libs-y += lib/ #libs-y += lib/
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#define t_systable l7 /* Never touch this, could be the syscall table ptr. */ #define t_systable l7 /* Never touch this, could be the syscall table ptr. */
#define curptr g6 /* Set after pt_regs frame is built */ #define curptr g6 /* Set after pt_regs frame is built */
/* at this time, trap_setup should take 43(+33 worst case) cycles */
.text .text
.align 4 .align 4
......
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include <kernel/kmem.h> #include <kernel/kmem.h>
#include <kernel/printk.h> #include <kernel/printk.h>
#include <kernel/kernel.h> #include <kernel/kernel.h>
#include <kernel/export.h>
#include <errno.h> #include <errno.h>
#include <list.h> #include <list.h>
...@@ -55,6 +56,7 @@ ...@@ -55,6 +56,7 @@
#include <asm/leon_reg.h> #include <asm/leon_reg.h>
#include <asm/spinlock.h> #include <asm/spinlock.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/irqflags.h>
struct irl_vector_elem { struct irl_vector_elem {
...@@ -293,6 +295,69 @@ static void leon_irq_enable(void) ...@@ -293,6 +295,69 @@ static void leon_irq_enable(void)
: "memory"); : "memory");
} }
/**
 * @brief get interrupt status and disable interrupts
 *
 * Reads %psr, sets all PSR_PIL bits (raising the processor interrupt
 * level to maximum, masking all maskable interrupts) and writes it back;
 * the three nops are the delay cycles after the %psr write.
 *
 * @return the original %psr value, to be passed to leon_irq_restore()
 */
static inline unsigned long leon_irq_save(void)
{
unsigned long retval;
unsigned long tmp;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"or %0, %2, %1\n\t"
"wr %1, 0, %%psr\n\t"
"nop; nop; nop\n"
: "=&r" (retval), "=r" (tmp)
: "i" (PSR_PIL)
: "memory");
return retval;
}
/**
 * @brief restore interrupts
 * @param old_psr a %psr value previously returned by leon_irq_save()
 *
 * Merges the PIL field of old_psr into the current %psr: tmp gets the
 * current %psr with PIL cleared, %2 is reduced to old_psr's PIL bits,
 * and the wr combines them (SPARC wr writes rs1 XOR rs2 to the state
 * register; with disjoint bit masks this is equivalent to OR).
 *
 * NOTE(review): the asm modifies input operand %2 ("r" (old_psr)) in
 * place — same pattern as the Linux sparc32 irqflags code, but strictly
 * an in-out operand; verify if the compiler ever reuses that register.
 */
static inline void leon_irq_restore(unsigned long old_psr)
{
unsigned long tmp;
__asm__ __volatile__(
"rd %%psr, %0\n\t"
"and %2, %1, %2\n\t"
"andn %0, %1, %0\n\t"
"wr %0, %2, %%psr\n\t"
"nop; nop; nop\n"
: "=&r" (tmp)
: "i" (PSR_PIL), "r" (old_psr)
: "memory");
}
/* architecture hook: enable local interrupts (exported wrapper around
 * leon_irq_enable()) */
void arch_local_irq_enable(void)
{
leon_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_enable);
/* architecture hook: disable local interrupts and return the previous
 * state for arch_local_irq_restore() */
unsigned long arch_local_irq_save(void)
{
return leon_irq_save();
}
EXPORT_SYMBOL(arch_local_irq_save);
/* architecture hook: restore the interrupt state previously returned by
 * arch_local_irq_save() */
void arch_local_irq_restore(unsigned long flags)
{
leon_irq_restore(flags);
}
EXPORT_SYMBOL(arch_local_irq_restore);
/** /**
* @brief clear (acknowledge) a pending IRQ * @brief clear (acknowledge) a pending IRQ
......
...@@ -13,6 +13,27 @@ ...@@ -13,6 +13,27 @@
#define t_irqlvl l7 /* set by trap entry */ #define t_irqlvl l7 /* set by trap entry */
/* at this time with nested IRQs, it takes trap + jump + save_all up to call:
* 4 + 3 + 43(+33 worst case) + 7 = 57 (90 worst) cycles
* the call is pc-relative, hence 2 cycles total
* considering only the primary IRL and no statistics, leon_irq_dispatch will
* take 24 cycles to enter the assigned ISR, i.e. 83 (116) cycles until the
* IRQ can be serviced.
*
* only considering the time it takes to return from the ISR and assuming only
* a single ISR is assigned to the IRL, leon_irq_dispatch will return
* after 12 cycles and arrive at the call to schedule after a total of 13 cycles
*
* Assuming there is no scheduling event and not considering the internals of
* the function, the overhead for call to and return from schedule() will take
* a total of 4 cycles
*
* the psr is updated in 1 cycle, the call to restore_all will take
* 39 (61 worst case) cycles
*
* The total time to service an IRQ is hence 150 cycles (195 worst case),
* not considering an actual scheduling event or the run time of the ISR
*/
.align 4 .align 4
...@@ -51,7 +72,7 @@ __interrupt_entry: ...@@ -51,7 +72,7 @@ __interrupt_entry:
#else /* CONFIG_SPARC_NESTED_IRQ */ #else /* CONFIG_SPARC_NESTED_IRQ */
or %t_psr, PSR_PIL, %t_tmp or %t_psr, PSR_PIL, %t_tmp
#ifndef CONFIG_TASK_PREEMPTION_DISABLE #ifndef CONFIG_DISABLE_TASK_PREEMPTION
call schedule call schedule
nop nop
#endif #endif
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <init.h> #include <init.h>
#include <mm.h> #include <mm.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/time.h>
#include <compiler.h> #include <compiler.h>
#include <page.h> #include <page.h>
...@@ -101,4 +102,6 @@ void setup_arch(void) ...@@ -101,4 +102,6 @@ void setup_arch(void)
BUG_ON(stack_migrate(NULL, _kernel_stack_top)); BUG_ON(stack_migrate(NULL, _kernel_stack_top));
leon_irq_init(); leon_irq_init();
leon_uptime_init();
} }
/**
* @file arch/sparc/kernel/thread.c
*
* @ingroup sparc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*
* @brief implements the architecture specific thread component
*
*/
#include <asm/thread.h>
#include <asm/irqflags.h>
#include <asm/leon.h>
#include <kernel/export.h>
#include <kernel/kthread.h>
#define MSG "SPARC THREAD: "
extern struct thread_info *current_set[];
/**
* @brief this is a wrapper that actually executes the thread function
*/
/**
 * @brief this is a wrapper that actually executes the thread function
 *
 * Runs the thread function configured via arch_init_task(), then marks the
 * task dead and yields to the scheduler. Once the state is TASK_DEAD, the
 * scheduler must never select this context again; reaching the tail of this
 * function is a fatal kernel error.
 */
static void th_starter(void)
{
	struct task_struct *task = current_set[0]->task;

	task->thread_fn(task->data);

	/* cast to (void *): passing a function pointer through printf-style
	 * varargs for %p without conversion to an object pointer type is
	 * undefined behavior in C
	 */
	printk("thread: %p returned\n", (void *) task->thread_fn);

	task->state = TASK_DEAD;

	schedule();

	pr_crit(MSG "should never have reached %s:%d", __func__, __LINE__);
	BUG();
}
/**
* @brief initialise a task structure
*/
/**
 * @brief initialise a task structure
 *
 * @param task	    the task structure to initialise
 * @param thread_fn the thread function to execute
 * @param data	    user argument passed to thread_fn
 *
 * Prepares the saved kernel context of the task so that the first context
 * switch to it "returns" into th_starter(), which then invokes thread_fn.
 */
void arch_init_task(struct task_struct *task,
		    int (*thread_fn)(void *data),
		    void *data)
{
#define STACKFRAME_SZ	96		/* minimal SPARC stack frame */
#define PTREG_SZ	80		/* saved register (pt_regs) area */
#define PSR_CWP		0x0000001f	/* current window pointer field in PSR */

	/* initial kernel stack pointer: leave room below the stack top for
	 * one stack frame plus the saved register area
	 */
	task->thread_info.ksp = (unsigned long) task->stack_top - (STACKFRAME_SZ + PTREG_SZ);
	/* the context restore returns to kpc + 8, so bias the entry by -8 */
	task->thread_info.kpc = (unsigned long) th_starter - 8;
	task->thread_info.kpsr = get_psr();
	/* mark the window following the current one as invalid;
	 * NOTE(review): register window count is hard-coded to 8 here —
	 * confirm this matches the target CPU configuration
	 */
	task->thread_info.kwim = 1 << (((get_psr() & PSR_CWP) + 1) % 8);

	task->thread_info.task = task;

	task->thread_fn = thread_fn;
	task->data = data;
}
EXPORT_SYMBOL(arch_init_task);
/**
* @brief promote the currently executed path to a task
* @note we use this to move our main thread to the task list
*/
/**
 * @brief promote the currently executed path to a task
 * @note we use this to move our main thread to the task list
 *
 * @param task a task structure that will hold the context of the
 *	       currently running thread of execution
 *
 * Captures the running context (frame pointer, a return address, PSR and
 * window invalid mask) into the task, then installs the task's thread_info
 * as the current thread.
 */
void arch_promote_to_task(struct task_struct *task)
{
#define PSR_CWP 0x0000001f	/* current window pointer field in PSR */

	/* the current frame pointer becomes the saved stack pointer */
	task->thread_info.ksp = (unsigned long) leon_get_fp();
	/* return address two call levels up, biased by -8 (see arch_init_task);
	 * NOTE(review): GCC documents __builtin_return_address() with a
	 * level > 0 as unreliable — confirm this is safe on this call path
	 */
	task->thread_info.kpc = (unsigned long) __builtin_return_address(1) - 8;
	task->thread_info.kpsr = get_psr();
	/* mark the window following the current one as invalid */
	task->thread_info.kwim = 1 << (((get_psr() & PSR_CWP) + 1) % 8);

	task->thread_info.task = task;

	/* there is no thread function; this context is already running */
	task->thread_fn = NULL;
	task->data = NULL;

	/* NOTE(review): %x with leon_get_fp() — if the return type is a
	 * pointer, %p (with a void * cast) would be the correct conversion
	 */
	printk(MSG "kernel stack %x\n", leon_get_fp());
	printk(MSG "is next at %p stack %p\n", &task->thread_info, task->stack);

	/* and set the new thread as current: %g6 holds the current
	 * thread_info pointer
	 */
	__asm__ __volatile__("mov %0, %%g6\n\t"
			     :: "r"(&(task->thread_info)) : "memory");
}
EXPORT_SYMBOL(arch_promote_to_task);
/**
* @file arch/sparc/kernel/time.c
*/
#include <asm/time.h>
#include <kernel/time.h>
#include <kernel/printk.h>
#include <kernel/kernel.h>
/* XXX: needs proper config option (for now; AMBA PNP autodetect later...) */
#ifdef CONFIG_LEON3
#include <grtimer_longcount.h>
static struct grtimer_unit *grtimer_longcount =
(struct grtimer_unit *) LEON3_BASE_ADDRESS_GRTIMER;
/**
 * @brief start the GRTIMER in long-count mode to act as the uptime clock
 * @note thin wrapper around grtimer_longcount_start()
 */
static void leon_grtimer_longcount_init(void)
{
	grtimer_longcount_start(grtimer_longcount, GRTIMER_RELOAD,
				GRTIMER_TICKS_PER_SEC, 0xFFFFFFFFUL);
}
#endif
/**
 * @brief read the current system uptime
 *
 * @param[out] seconds	   whole seconds of uptime
 * @param[out] nanoseconds sub-second remainder, converted to nanoseconds
 *
 * On LEON3 this samples the GRTIMER long-count registers; on other targets
 * no implementation exists and the kernel traps via BUG().
 */
static void leon_get_uptime(uint32_t *seconds, uint32_t *nanoseconds)
{
#ifdef CONFIG_LEON3
	struct grtimer_uptime now;

	grtimer_longcount_get_uptime(grtimer_longcount, &now);

	*seconds     = now.coarse;
	*nanoseconds = CPU_CYCLES_TO_NS(now.fine);
#else
	printk("%s:%s not implemented\n", __FILE__, __func__);
	BUG();
#endif /* CONFIG_LEON3 */
}
/**
 * @brief enable the uptime clock source
 * @return 0 on success (currently unreachable)
 * @note not implemented yet; calling this traps via BUG()
 */
static int leon_timer_enable(void)
{
	printk("%s:%s not implemented\n", __FILE__, __func__);
	BUG();

	return 0;
}
/**
 * @brief disable the uptime clock source
 * @note not implemented yet; calling this traps via BUG()
 */
static void leon_timer_disable(void)
{
	printk("%s:%s not implemented\n", __FILE__, __func__);
	BUG();
}
/**
 * the configuration for the high-level timing facility:
 * the uptime clock source registered with time_init()
 */
static struct clocksource uptime_clock = {
	.read    = leon_get_uptime,	/* seconds + nanoseconds of uptime */
	.enable  = leon_timer_enable,	/* not implemented yet (BUG) */
	.disable = leon_timer_disable,	/* not implemented yet (BUG) */
};
/**
 * @brief initialise the uptime clock source and register it with the
 *	  high-level timing facility
 * @note on LEON3, the GRTIMER long-count mode is started first so the
 *	 clock source can deliver readings
 */
void leon_uptime_init(void)
{
#ifdef CONFIG_LEON3
	leon_grtimer_longcount_init();
#endif
	time_init(&uptime_clock);
}
/* SPDX-License-Identifier: GPL-2.0 */
/**
* @file include/asm-generic/irqflags.h
*
* @copyright GPLv2
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _ASM_GENERIC_IRQFLAGS_H_
#define _ASM_GENERIC_IRQFLAGS_H_

/* enable interrupts on the local CPU */
void arch_local_irq_enable(void);

/* disable local interrupts; returns a flags word for arch_local_irq_restore() */
unsigned long arch_local_irq_save(void);

/* restore the interrupt state captured by arch_local_irq_save() */
void arch_local_irq_restore(unsigned long flags);

#endif /* _ASM_GENERIC_IRQFLAGS_H_ */
...@@ -19,5 +19,11 @@ ...@@ -19,5 +19,11 @@
#include <asm/thread.h> #include <asm/thread.h>
void arch_init_task(struct task_struct *task,
int (*thread_fn)(void *data),
void *data);
void arch_promote_to_task(struct task_struct *task);
#endif /* _ASM_GENERIC_THREAD_H_ */ #endif /* _ASM_GENERIC_THREAD_H_ */
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment