Skip to content
Snippets Groups Projects
Commit 0776d5f1 authored by Armin Luntzer's avatar Armin Luntzer
Browse files

* add clock event devices

* add ticks
* adjust sparc timer defaults
* gptimer: always disable timer before reprogramming
* add get_ktime() alias for get_uptime() (formerly time_get_uptime)
parent d6b61f9b
No related branches found
No related tags found
No related merge requests found
/**
 * @file arch/sparc/include/asm/clockevent.h
 *
 * Architecture hook to register the SPARC clock event devices with the
 * kernel's clockevents subsystem.
 */

#ifndef _SPARC_CLOCKEVENT_H_
#define _SPARC_CLOCKEVENT_H_

#include <kernel/kernel.h>

/* probe and register all available clock event devices for this platform */
void sparc_clockevent_init(void);

#endif /* _SPARC_CLOCKEVENT_H_ */
...@@ -21,9 +21,8 @@ ...@@ -21,9 +21,8 @@
#endif #endif
#define GPTIMER_RELOAD 4 #define GPTIMER_RELOAD 7
#define GRTIMER_RELOAD 4 /* use 5 instead of 3 cycle minimum for #define GRTIMER_RELOAD 4
round number of clock ticks */
#define GPTIMER_TICKS_PER_SEC ((SPARC_CPU_CPS / (GPTIMER_RELOAD + 1))) #define GPTIMER_TICKS_PER_SEC ((SPARC_CPU_CPS / (GPTIMER_RELOAD + 1)))
#define GPTIMER_TICKS_PER_MSEC (GPTIMER_TICKS_PER_SEC / 1000) #define GPTIMER_TICKS_PER_MSEC (GPTIMER_TICKS_PER_SEC / 1000)
...@@ -37,13 +36,12 @@ ...@@ -37,13 +36,12 @@
#define GPTIMER_CYCLES_PER_SEC SPARC_CPU_CPS #define GPTIMER_CYCLES_PER_SEC SPARC_CPU_CPS
#define GPTIMER_CYCLES_PER_MSEC (GPTIMER_CYCLES_PER_SEC / 1000) #define GPTIMER_CYCLES_PER_MSEC (GPTIMER_CYCLES_PER_SEC / 1000)
#define GPTIMER_CYCLES_PER_USEC (GPTIMER_CYCLESS_PER_SEC / 1000000) #define GPTIMER_CYCLES_PER_USEC (GPTIMER_CYCLES_PER_SEC / 1000000)
#define GPTIMER_USEC_PER_CYCLE (1000000.0 / GPTIMER_CYCLES_PER_SEC) #define GPTIMER_USEC_PER_CYCLE (1000000.0 / GPTIMER_CYCLES_PER_SEC)
#define GRTIMER_CYCLES_PER_SEC SPARC_CPU_CPS #define GRTIMER_CYCLES_PER_SEC SPARC_CPU_CPS
#define GRTIMER_CYCLES_PER_MSEC (GRTIMER_CYCLES_PER_SEC / 1000) #define GRTIMER_CYCLES_PER_MSEC (GRTIMER_CYCLES_PER_SEC / 1000)
#define GRTIMER_CYCLES_PER_USEC (GRTIMER_CYCLESS_PER_SEC / 1000000) #define GRTIMER_CYCLES_PER_USEC (GRTIMER_CYCLES_PER_SEC / 1000000)
#define GRTIMER_CYCLES_PER_NSEC (GRTIMER_CYCLESS_PER_SEC / 1000000000)
#define GRTIMER_SEC_PER_CYCLE ( 1.0 / GRTIMER_CYCLES_PER_SEC) #define GRTIMER_SEC_PER_CYCLE ( 1.0 / GRTIMER_CYCLES_PER_SEC)
#define GRTIMER_MSEC_PER_CYCLE ( 1000.0 / GRTIMER_CYCLES_PER_SEC) #define GRTIMER_MSEC_PER_CYCLE ( 1000.0 / GRTIMER_CYCLES_PER_SEC)
#define GRTIMER_USEC_PER_CYCLE (1000000.0 / GRTIMER_CYCLES_PER_SEC) #define GRTIMER_USEC_PER_CYCLE (1000000.0 / GRTIMER_CYCLES_PER_SEC)
...@@ -53,7 +51,10 @@ ...@@ -53,7 +51,10 @@
* note that the order is important, otherwise we may encounter integer * note that the order is important, otherwise we may encounter integer
* overflow on multiplication * overflow on multiplication
*/ */
#define CPU_CYCLES_TO_NS(x) (((x) / (SPARC_CPU_CPS / 1000000UL)) * 1000UL) #define CPU_CYCLES_TO_NS(x) (((x) >= 1000UL) \
? (((x) / (SPARC_CPU_CPS / 1000000UL)) * 1000UL) \
: (((x) * 1000UL) / (SPARC_CPU_CPS / 1000000UL)))
compile_time_assert((SPARC_CPU_CPS <= 1000000000UL), compile_time_assert((SPARC_CPU_CPS <= 1000000000UL),
CPU_CYCLES_TO_NS_NEEDS_FIXUP); CPU_CYCLES_TO_NS_NEEDS_FIXUP);
......
...@@ -28,6 +28,6 @@ obj-y += traps/data_access_exception_trap.o ...@@ -28,6 +28,6 @@ obj-y += traps/data_access_exception_trap.o
obj-y += traps/data_access_exception.o obj-y += traps/data_access_exception.o
obj-y += irq.o obj-y += irq.o
obj-y += time.o obj-y += time.o
obj-y += clockevent.o
#libs-y += lib/
/**
* @file arch/sparc/kernel/clockevent.c
*/
#include <kernel/kernel.h>
#include <kernel/clockevent.h>
#include <kernel/kmem.h>
#include <kernel/string.h>
#include <kernel/irq.h>
#include <errno.h>
#ifdef CONFIG_LEON3

#include <gptimer.h>
#include <asm/time.h>

/* XXX: want AMBA PNP autodetect later...) */
#define LEON3_GPTIMERS	4	/* number of general purpose timers in the unit */
#define GPTIMER_0_IRQ	8	/* IRL of timer 0; timer n uses GPTIMER_0_IRQ + n */

/* bundles the GPTIMER MMIO block with one clock event device per timer */
static struct gpclkdevs {
	struct gptimer_unit *gptu;
	struct clock_event_device dev[LEON3_GPTIMERS];
} _gp_clk_ev = {
	.gptu = (struct gptimer_unit *) LEON3_BASE_ADDRESS_GPTIMER
};
/**
 * @brief the clock device event handler
 *
 * @param irq the interrupt line that fired (unused, the device is
 *	  identified via userdata)
 * @param userdata the clock_event_device registered for this IRL
 *
 * @returns 0 always
 */
static irqreturn_t gp_clk_dev_irq_handler(unsigned int irq, void *userdata)
{
	struct clock_event_device *ce;

	ce = (struct clock_event_device *) userdata;

	/* forward the tick to whoever installed an event handler */
	if (ce && ce->event_handler)
		ce->event_handler(ce);

	return 0;
}
/**
 * @brief clock device suspend call
 *
 * Stops counting on the timer that backs this clock event device.
 */
static void gp_clk_dev_suspend(struct clock_event_device *ce)
{
	int timer = ce->irq - GPTIMER_0_IRQ;	/* timer index from its IRL */

	gptimer_clear_enabled(_gp_clk_ev.gptu, timer);
}
/**
 * @brief clock device resume call
 *
 * Re-enables counting on the timer that backs this clock event device.
 */
static void gp_clk_dev_resume(struct clock_event_device *ce)
{
	int timer = ce->irq - GPTIMER_0_IRQ;	/* timer index from its IRL */

	gptimer_set_enabled(_gp_clk_ev.gptu, timer);
}
/**
 * @brief clock device set_state call
 *
 * @param state the new operating state
 * @param ce the clock event device to reconfigure
 *
 * PERIODIC/ONESHOT select the hardware restart mode and (re)enable the
 * timer; SHUTDOWN and UNUSED both stop it. Unknown states are ignored.
 */
static void gp_clk_dev_set_state(enum clock_event_state state,
				 struct clock_event_device *ce)
{
	int timer = ce->irq - GPTIMER_0_IRQ;	/* timer index from its IRL */

	if (state == CLOCK_EVT_STATE_PERIODIC) {
		gptimer_set_restart(_gp_clk_ev.gptu, timer);
		gp_clk_dev_resume(ce);
	} else if (state == CLOCK_EVT_STATE_ONESHOT) {
		gptimer_clear_restart(_gp_clk_ev.gptu, timer);
		gp_clk_dev_resume(ce);
	} else if (state == CLOCK_EVT_STATE_SHUTDOWN ||
		   state == CLOCK_EVT_STATE_UNUSED) {
		gp_clk_dev_suspend(ce);
	}
}
/**
 * @brief program the gptimer to underflow at a given event timeout
 * @param evt the number of clock source ticks until the event
 *
 * @returns 0 always
 *
 * @note the expiration time will be forcibly clamped to the valid range of
 *	 clock device
 * @note in states other than PERIODIC/ONESHOT the call is a no-op
 */
static int gp_clk_dev_set_next_event(unsigned long evt,
				     struct clock_event_device *ce)
{
	int timer = ce->irq - GPTIMER_0_IRQ;	/* timer index from its IRL */

	if (ce->state == CLOCK_EVT_STATE_PERIODIC)
		gptimer_start_cyclical(_gp_clk_ev.gptu, timer, evt);
	else if (ce->state == CLOCK_EVT_STATE_ONESHOT)
		gptimer_start(_gp_clk_ev.gptu, timer, evt);

	return 0;
}
/**
 * @brief program the gptimer to underflow at a given absolute kernel time
 *
 * @param expires the kernel time at which the timer will underflow
 * @param ce the clock event device to program
 *
 * @returns 0 on success, -ETIME if time is in the past
 *
 * @note the expiration time will be forcibly clamped to the valid range of
 *	 clock device
 */
static int gp_clk_dev_set_next_ktime(struct timespec expires,
				     struct clock_event_device *ce)
{
	uint32_t evt;
	double delta;

	delta = difftime_ns(expires, get_ktime());

	if (delta < 0)
		return -ETIME;

	/* Clamp to the valid range while still in the double domain:
	 * converting a double that exceeds the range of the target integer
	 * type is undefined behaviour (C11 6.3.1.4), so the cast must come
	 * after the clamp, not before.
	 */
	if (delta > (double) ce->max_delta_ns)
		delta = (double) ce->max_delta_ns;

	if (delta < (double) ce->min_delta_ns)
		delta = (double) ce->min_delta_ns;

	evt = (uint32_t) delta;

	/* adjust ns delta to actual clock tick value (mult is ns per tick) */
	evt = evt / ce->mult;

	return gp_clk_dev_set_next_event((unsigned long) evt, ce);
}
/**
 * @brief register the 4 general purpose timers (of the GR712)
 *
 * Configures each GPTIMER as a clock event device and hands it over to the
 * clockevents core. The prescaler is shared by all timers of the unit, so
 * it is programmed once up front.
 */
static void leon_setup_clockdevs(void)
{
	int i;
	char *buf;

	struct clock_event_device *ce;

	/* the prescaler is the same for all timers */
	gptimer_set_scaler_reload(_gp_clk_ev.gptu, GPTIMER_RELOAD);

	for (i = 0; i < LEON3_GPTIMERS; i++) {

		ce = &_gp_clk_ev.dev[i];

		/* nanoseconds per device tick; one tick corresponds to
		 * (GPTIMER_RELOAD + 1) cpu cycles
		 */
		ce->mult = CPU_CYCLES_TO_NS(GPTIMER_RELOAD + 1);

		/* we can only fit so many nanoseconds into a 32 bit number;
		 * note that we set the timer value in "ticks", where each tick
		 * corresponds to GPTIMER_RELOAD + 1 cpu cycles, so our actual
		 * possible timeout in nanoseconds is typically much higher
		 * than what we can fit in here
		 */
		if (sizeof(typeof(ce->max_delta_ns)) > 4)
			ce->max_delta_ns = 0xFFFFFFFFUL * ce->mult;
		else
			ce->max_delta_ns = 0xFFFFFFFFUL;

		/* We cannot be better than the system clock overhead,
		 * as long as we support CLOCK_EVT_FEAT_KTIME.
		 * To be safe, we should at least use twice that value.
		 */
		ce->min_delta_ns = 2 * ktime_get_readout_overhead();

		/* a zero multiplier would divide by zero in set_next_ktime */
		BUG_ON(!ce->mult);

		ce->set_next_event = &gp_clk_dev_set_next_event;
		ce->set_next_ktime = &gp_clk_dev_set_next_ktime;
		ce->set_state = &gp_clk_dev_set_state;
		ce->suspend = &gp_clk_dev_suspend;
		ce->resume = &gp_clk_dev_resume;

		buf = kmalloc(16);
		BUG_ON(!buf);
		snprintf(buf, 16, "GPTIMER%1d", i);
		ce->name = buf;

		/* we rate our timers by nanosecond resolution, so we'll just
		 * use the multiplier as our rating (lower is better)
		 */
		ce->rating = ce->mult;

		ce->state = CLOCK_EVT_STATE_UNUSED;

		ce->features = (CLOCK_EVT_FEAT_PERIODIC |
				CLOCK_EVT_FEAT_ONESHOT |
				CLOCK_EVT_FEAT_KTIME);

		/* timer n raises IRL GPTIMER_0_IRQ + n */
		ce->irq = GPTIMER_0_IRQ + i;

		/* make sure the timer is stopped before attaching the ISR */
		gptimer_clear_enabled(_gp_clk_ev.gptu, i);

		BUG_ON(irq_request(ce->irq, ISR_PRIORITY_NOW,
				   &gp_clk_dev_irq_handler,
				   &_gp_clk_ev.dev[i]));

		clockevents_register_device(ce);
	}
}
#endif /* CONFIG_LEON3 */
/* register all platform clock event devices with the clockevents core */
void sparc_clockevent_init(void)
{
#ifdef CONFIG_LEON3
	leon_setup_clockdevs();
#endif /* CONFIG_LEON3 */
}
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <mm.h> #include <mm.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/time.h> #include <asm/time.h>
#include <asm/clockevent.h>
#include <compiler.h> #include <compiler.h>
#include <page.h> #include <page.h>
...@@ -104,4 +105,6 @@ void setup_arch(void) ...@@ -104,4 +105,6 @@ void setup_arch(void)
leon_irq_init(); leon_irq_init();
sparc_uptime_init(); sparc_uptime_init();
sparc_clockevent_init();
} }
CHECKFLAGS += -D__sparc__ CHECKFLAGS += -D__sparc__
obj-y += grtimer.o lib-y += gptimer.o
obj-y += grtimer_longcount.o lib-y += grtimer.o
lib-y += grtimer_longcount.o
...@@ -333,6 +333,9 @@ uint32_t gptimer_get_reload(struct gptimer_unit *ptu, uint32_t timer) ...@@ -333,6 +333,9 @@ uint32_t gptimer_get_reload(struct gptimer_unit *ptu, uint32_t timer)
void gptimer_start(struct gptimer_unit *ptu, uint32_t timer, uint32_t value) void gptimer_start(struct gptimer_unit *ptu, uint32_t timer, uint32_t value)
{ {
gptimer_clear_enabled(ptu, timer);
gptimer_clear_restart(ptu, timer);
gptimer_set_value(ptu, timer, value); gptimer_set_value(ptu, timer, value);
gptimer_set_reload(ptu, timer, value); gptimer_set_reload(ptu, timer, value);
...@@ -352,10 +355,12 @@ void gptimer_start(struct gptimer_unit *ptu, uint32_t timer, uint32_t value) ...@@ -352,10 +355,12 @@ void gptimer_start(struct gptimer_unit *ptu, uint32_t timer, uint32_t value)
void gptimer_start_cyclical(struct gptimer_unit *ptu, void gptimer_start_cyclical(struct gptimer_unit *ptu,
uint32_t timer, uint32_t value) uint32_t timer, uint32_t value)
{ {
gptimer_clear_enabled(ptu, timer);
gptimer_clear_restart(ptu, timer);
gptimer_set_value(ptu, timer, value); gptimer_set_value(ptu, timer, value);
gptimer_set_reload(ptu, timer, value); gptimer_set_reload(ptu, timer, value);
gptimer_set_interrupt_enabled(ptu, timer); gptimer_set_interrupt_enabled(ptu, timer);
gptimer_set_load(ptu, timer); gptimer_set_load(ptu, timer);
gptimer_set_restart(ptu, timer); gptimer_set_restart(ptu, timer);
......
/**
 * @file include/kernel/clockevent.h
 * @author Armin Luntzer (armin.luntzer@univie.ac.at)
 *
 * @ingroup time
 *
 * @copyright GPLv2
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef _KERNEL_CLOCKEVENT_H_
#define _KERNEL_CLOCKEVENT_H_

#include <list.h>
#include <kernel/types.h>
#include <kernel/time.h>

/* clock event states */
enum clock_event_state {
	CLOCK_EVT_STATE_UNUSED,		/* device is available for use */
	CLOCK_EVT_STATE_SHUTDOWN,	/* device is claimed but stopped */
	CLOCK_EVT_STATE_PERIODIC,	/* device fires at a fixed period */
	CLOCK_EVT_STATE_ONESHOT		/* device fires once, must be re-armed */
};

/* feature set of a particular clock device */
#define CLOCK_EVT_FEAT_PERIODIC	0x000001	/* supports periodic mode */
#define CLOCK_EVT_FEAT_ONESHOT	0x000002	/* supports oneshot mode */
#define CLOCK_EVT_FEAT_KTIME	0x000004	/* can be armed by absolute ktime */

/**
 * event_handler:	callback function executed as the event occurs
 *
 * set_next_event:	set next event function using a clock source delta
 * set_next_ktime:	set next event function using a direct ktime value
 *
 * max_delta_ns:	maximum programmable delta value in nanoseconds
 * min_delta_ns:	minimum programmable delta value in nanoseconds
 * mult:		device ticks to nanoseconds multiplier
 * state:		timer operating state
 * features:		timer event features
 * set_state:		set state function
 * rating:		quality rating of the device, less is better (more
 *			resolution, e.g nanosecond-resolution)
 * name:		clock event name
 * irq:			IRQ number (-1 if device without IRL)
 * node:		list head used by the clockevents core; do not touch
 */
struct clock_event_device {
	void (*event_handler)(struct clock_event_device *);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *);
	int (*set_next_ktime)(struct timespec expires,
			      struct clock_event_device *);
	uint32_t max_delta_ns;
	uint32_t min_delta_ns;
	uint32_t mult;

	enum clock_event_state state;
	unsigned int features;

	void (*set_state)(enum clock_event_state state,
			  struct clock_event_device *);
	void (*suspend)(struct clock_event_device *);
	void (*resume)(struct clock_event_device *);

	unsigned int rating;
	const char *name;
	int irq;

	struct list_head node;
};

/* NOTE: "timout" (sic) is the established spelling of this symbol */
bool clockevents_timout_in_range(struct clock_event_device *dev,
				 unsigned long nanoseconds);
bool clockevents_feature_periodic(struct clock_event_device *dev);
bool clockevents_feature_oneshot(struct clock_event_device *dev);

void clockevents_set_state(struct clock_event_device *dev,
			   enum clock_event_state state);
void clockevents_set_handler(struct clock_event_device *dev,
			     void (*event_handler)(struct clock_event_device *));

void clockevents_register_device(struct clock_event_device *dev);
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new);

int clockevents_program_event(struct clock_event_device *dev,
			      struct timespec expires);
int clockevents_program_timeout_ns(struct clock_event_device *dev,
				   unsigned long nanoseconds);

#endif /* _KERNEL_CLOCKEVENT_H_ */
...@@ -53,4 +53,32 @@ __extension__ ...@@ -53,4 +53,32 @@ __extension__
/* min()/max()/clamp() macros with strict type checking
* (ripped off from linux/kernel.h)
*/
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
typeof(y) _min2 = (y); \
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2; })
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
typeof(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
#define clamp(val, min, max) ({ \
typeof(val) __val = (val); \
typeof(min) __min = (min); \
typeof(max) __max = (max); \
(void) (&__val == &__min); \
(void) (&__val == &__max); \
__val = __val < __min ? __min: __val; \
__val > __max ? __max: __val; })
#endif /* _KERNEL_H_ */ #endif /* _KERNEL_H_ */
/**
 * @file include/kernel/tick.h
 * @author Armin Luntzer (armin.luntzer@univie.ac.at)
 *
 * @ingroup time
 *
 * @copyright GPLv2
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#ifndef _KERNEL_TICK_H_
#define _KERNEL_TICK_H_

#include <kernel/types.h>
#include <kernel/kernel.h>
#include <kernel/clockevent.h>

/* tick modes */
enum tick_mode {
	TICK_MODE_PERIODIC,	/* tick fires at a fixed period */
	TICK_MODE_ONESHOT	/* tick fires once per arming */
};

/* offer a clock event device to the ticker (called on registration) */
void tick_check_device(struct clock_event_device *dev);

int tick_set_mode(enum tick_mode mode);
int tick_set_next_ns(unsigned long nanoseconds);
int tick_set_next_ktime(struct timespec expires);

#endif /* _KERNEL_TICK_H_ */
/** /**
* @file include/kernel/time.h * @file include/kernel/ktime.h
* @author Armin Luntzer (armin.luntzer@univie.ac.at) * @author Armin Luntzer (armin.luntzer@univie.ac.at)
* *
* @ingroup time * @ingroup time
...@@ -17,8 +17,8 @@ ...@@ -17,8 +17,8 @@
* *
*/ */
#ifndef _KERNEL_TIME_H_ #ifndef _KERNEL_KTIME_H_
#define _KERNEL_TIME_H_ #define _KERNEL_KTIME_H_
#include <kernel/types.h> #include <kernel/types.h>
#include <kernel/kernel.h> #include <kernel/kernel.h>
...@@ -43,11 +43,20 @@ compile_time_assert((member_size(struct timespec, tv_nsec) == sizeof(uint32_t)), ...@@ -43,11 +43,20 @@ compile_time_assert((member_size(struct timespec, tv_nsec) == sizeof(uint32_t)),
struct timekeeper { struct timekeeper {
struct clocksource *clock; struct clocksource *clock;
uint32_t readout_ns; /* readout time overhead in ns */
}; };
void time_get_uptime(struct timespec *ts); struct timespec get_uptime(void);
struct timespec get_ktime(void);
uint32_t ktime_get_readout_overhead(void);
double difftime(const struct timespec time1, const struct timespec time0);
double difftime_ns(const struct timespec time1, const struct timespec time0);
void time_init(struct clocksource *clock); void time_init(struct clocksource *clock);
#endif /* _KERNEL_TIME_H_ */ #endif /* _KERNEL_KTIME_H_ */
...@@ -18,12 +18,18 @@ ...@@ -18,12 +18,18 @@
#include <kernel/printk.h> #include <kernel/printk.h>
#include <kernel/kernel.h> #include <kernel/kernel.h>
#include <kernel/kthread.h> #include <kernel/kthread.h>
#include <kernel/time.h>
#include <modules-image.h> #include <modules-image.h>
#include <kernel/string.h>
#include <kernel/kmem.h>
#include <asm/processor.h> #include <asm/processor.h>
/* for our demo */ /* for our demo */
#include "xentium_demo.h" #include "xentium_demo.h"
/* arch irq disable/enable */
#include <asm/irqflags.h>
#define MSG "MAIN: " #define MSG "MAIN: "
...@@ -67,7 +73,11 @@ static void twiddle(void) ...@@ -67,7 +73,11 @@ static void twiddle(void)
#define TREADY 4 #define TREADY 4
#if 1
static volatile int *console = (int *)0x80100100;
#else
static volatile int *console = (int *)0x80000100; static volatile int *console = (int *)0x80000100;
#endif
static int putchar(int c) static int putchar(int c)
{ {
...@@ -85,8 +95,6 @@ static int putchar(int c) ...@@ -85,8 +95,6 @@ static int putchar(int c)
extern struct task_struct *kernel; extern struct task_struct *kernel;
struct task_struct *tsk1;
struct task_struct *tsk2;
int threadx(void *data) int threadx(void *data)
{ {
...@@ -102,9 +110,10 @@ int threadx(void *data) ...@@ -102,9 +110,10 @@ int threadx(void *data)
b++; b++;
} }
putchar('\n'); putchar('\n');
#if 1
if (b > 3 * (int)c) if (b > (int) c * (int)c)
break; break;
#endif
// schedule(); // schedule();
//twiddle(); //twiddle();
//cpu_relax(); //cpu_relax();
...@@ -112,50 +121,6 @@ int threadx(void *data) ...@@ -112,50 +121,6 @@ int threadx(void *data)
return 0; return 0;
} }
int thread1(void *data)
{
int b = 0;
while(1) {
//printk(".");
int i;
for (i = 0; i < 20; i++)
putchar('.');
putchar('\n');
if (b++ > 20)
break;
schedule();
//twiddle();
cpu_relax();
}
return 0;
}
int thread2(void *data)
{
int b = 0;
while (1) {
int i;
for (i = 0; i < 20; i++) {
putchar('o');
b++;
}
putchar('\n');
if (b > 200)
break;
schedule();
}
//schedule();
//cpu_relax();
printk("Actually, I left...\n");
return 0xdeadbeef;
}
/** /**
* @brief kernel initialisation routines * @brief kernel initialisation routines
*/ */
...@@ -176,13 +141,32 @@ static int kernel_init(void) ...@@ -176,13 +141,32 @@ static int kernel_init(void)
arch_initcall(kernel_init); arch_initcall(kernel_init);
#include <kernel/clockevent.h>
/**
 * @brief demo clock event handler: re-arms itself one second in the future
 *
 * @param ce the clock event device that fired
 *
 * @note the original body did not compile: get_ktime() returns a
 *	 struct timespec by value (it takes no arguments), and
 *	 clockevents_program_event() takes exactly (dev, expires)
 */
void clk_event_handler(struct clock_event_device *ce)
{
	struct timespec expires;

	/* fire again one second from now */
	expires = get_ktime();
	expires.tv_sec += 1;

	clockevents_program_event(ce, expires);
}
/** /**
* @brief kernel main function * @brief kernel main function
*/ */
#define MAX_TASKS 800
int kernel_main(void) int kernel_main(void)
{ {
struct task_struct *tasks[MAX_TASKS];
int tcnt = 0;
#if 0 #if 0
void *addr; void *addr;
struct elf_module m; struct elf_module m;
...@@ -236,36 +220,25 @@ int kernel_main(void) ...@@ -236,36 +220,25 @@ int kernel_main(void)
printk(MSG "Boot complete, spinning idly.\n"); printk(MSG "Boot complete, spinning idly.\n");
#define GR712_IRL1_GPTIMER_2 10
#define LEON3_TIMER_EN 0x00000001 /* enable counting */
#define LEON3_TIMER_RL 0x00000002 /* reload at 0 */
#define LEON3_TIMER_LD 0x00000004 /* load counter */
#define LEON3_TIMER_IE 0x00000008 /* irq enable */
{ {
struct gptimer_unit *mtu = (struct gptimer_unit *) 0x80000300; struct timespec expires;
struct timespec now;
get_ktime(&now);
printk("%s() entered\n", __func__); expires = now;
expires.tv_nsec += 18000;
irq_request(8, ISR_PRIORITY_NOW, dummy, NULL); BUG_ON(clockevents_program_event(NULL, expires, now, CLOCK_EVT_MODE_PERIODIC));
mtu->scaler_reload = 5;
/* abs min: 270 / (5+1) (sched printing) */
/* abs min: 800 / (5+1) (threads printing) */
mtu->timer[0].reload = 2000 / (mtu->scaler_reload + 1);
mtu->timer[0].value = mtu->timer[0].reload;
mtu->timer[0].ctrl = LEON3_TIMER_LD | LEON3_TIMER_EN
| LEON3_TIMER_RL | LEON3_TIMER_IE;
} }
kernel = kthread_init_main(); kernel = kthread_init_main();
tsk1 = kthread_create(thread1, NULL, KTHREAD_CPU_AFFINITY_NONE, "Thread1");
tsk2 = kthread_create(thread2, NULL, KTHREAD_CPU_AFFINITY_NONE, "Thread2");
//kthread_wake_up(tsk2);
// kthread_wake_up(tsk2);
//
{ {
static char zzz[] = {':', '/', '\\', '~', '|'}; static char zzz[] = {':', '/', '\\', '~', '|'};
int i; int i;
...@@ -274,16 +247,54 @@ int kernel_main(void) ...@@ -274,16 +247,54 @@ int kernel_main(void)
kthread_create(threadx, &zzz[i], KTHREAD_CPU_AFFINITY_NONE, "Thread2"); kthread_create(threadx, &zzz[i], KTHREAD_CPU_AFFINITY_NONE, "Thread2");
} }
while(1) { {
//printk("-"); static char zzz[] = {':', '/', '\\', '~', '|'};
#if 0 static int z;
char *buf = NULL;
int i; int i;
for (i = 0; i < 20; i++) struct timespec ts;
putchar('-'); get_uptime(&ts);
putchar('\n'); printk("creating tasks at %d s %d ns (%g)\n", ts.tv_sec, ts.tv_nsec, (double) ts.tv_sec + (double) ts.tv_nsec / 1e9);
#endif
for (i = 0; i < MAX_TASKS; i++) {
// buf = kmalloc(30);
// BUG_ON(!buf);
// sprintf(buf, "Thread %d", z);
z++;
tasks[tcnt++] = kthread_create(threadx, &zzz[i], KTHREAD_CPU_AFFINITY_NONE, buf);
// kfree(buf);
}
}
{
int i;
struct timespec ts;
get_uptime(&ts);
printk("total %d after %d s %d ns (%g)\n", tcnt, ts.tv_sec, ts.tv_nsec, (double) ts.tv_sec + (double) ts.tv_nsec / 1e9);
BUG_ON(tcnt > MAX_TASKS);
for (i = 0; i < tcnt; i++)
kthread_wake_up(tasks[i]);
arch_local_irq_disable();
get_uptime(&ts);
printk("all awake after %d s %d ns (%g)\n", ts.tv_sec, ts.tv_nsec, (double) ts.tv_sec + (double) ts.tv_nsec / 1e9);
arch_local_irq_enable();
}
while(1) {
twiddle();
cpu_relax(); cpu_relax();
} }
/* never reached */ /* never reached */
BUG(); BUG();
......
...@@ -8,3 +8,5 @@ obj-y += irq.o ...@@ -8,3 +8,5 @@ obj-y += irq.o
obj-$(CONFIG_XENTIUM) += xentium.o obj-$(CONFIG_XENTIUM) += xentium.o
obj-y += kthread.o obj-y += kthread.o
obj-y += time.o obj-y += time.o
obj-y += clockevent.o
obj-y += tick.o
/**
* @file kernel/clockevent.c
* @author Armin Luntzer (armin.luntzer@univie.ac.at)
*
*
* @ingroup time
*
* @note This roughly follows the concept found in linux clockevents
* All glory to the Hypnotoad!
*/
#include <asm-generic/spinlock.h>
#include <asm-generic/irqflags.h>
#include <kernel/clockevent.h>
#include <kernel/export.h>
#include <kernel/tick.h>
#include <errno.h>
/* all registered clock event devices, in registration order */
static LIST_HEAD(clockevent_devices);
/* serializes modifications of the device list */
static struct spinlock clockevents_spinlock;
/**
* @brief convert nanoseconds delta to device ticks
*
* @note this implicitly clamps the delta to the valid range of the device
*/
static
unsigned long clockevents_delta2ticks(unsigned long delta,
struct clock_event_device *dev)
{
delta = (unsigned long) clamp((typeof(dev->max_delta_ns)) delta,
dev->min_delta_ns, dev->max_delta_ns);
return delta / dev->mult;
}
/**
 * @brief check if a timeout is in the legal range for the device
 *
 * @param dev the clock event device to check against
 * @param nanoseconds the requested timeout
 *
 * @returns true if in range, false otherwise
 */
bool clockevents_timout_in_range(struct clock_event_device *dev,
				 unsigned long nanoseconds)
{
	unsigned long clamped;

	clamped = (unsigned long) clamp((typeof(dev->max_delta_ns)) nanoseconds,
					dev->min_delta_ns, dev->max_delta_ns);

	/* in range exactly when clamping was a no-op */
	return clamped == nanoseconds;
}
/**
 * @brief check if a device supports periodic ticks
 *
 * @param dev the clock event device to query
 *
 * @returns true if the feature is supported
 */
bool clockevents_feature_periodic(struct clock_event_device *dev)
{
	return (dev->features & CLOCK_EVT_FEAT_PERIODIC) != 0;
}
/**
 * @brief check if a device supports oneshot ticks
 *
 * @param dev the clock event device to query
 *
 * @returns true if the feature is supported
 */
bool clockevents_feature_oneshot(struct clock_event_device *dev)
{
	return (dev->features & CLOCK_EVT_FEAT_ONESHOT) != 0;
}
/**
 * @brief check if a device supports a given state
 *
 * @param dev the clock event device to query
 * @param state the state to check for
 *
 * @returns true if a feature is supported
 *
 * @note only operative modes (periodic, oneshot are considered)
 */
bool clockevents_state_supported(struct clock_event_device *dev,
				 enum clock_event_state state)
{
	if (state == CLOCK_EVT_STATE_PERIODIC)
		return clockevents_feature_periodic(dev);

	if (state == CLOCK_EVT_STATE_ONESHOT)
		return clockevents_feature_oneshot(dev);

	/* non-operative states are always permitted */
	return true;
}
/**
 * @brief set the operating state of a clock event device
 * @param dev the device to modify
 * @param state the new state
 *
 * @note if a state is not supported according to the device features, calling
 *	 this function will have no effect
 */
void clockevents_set_state(struct clock_event_device *dev,
			   enum clock_event_state state)
{
	if (!dev) {
		pr_warn("CLOCKEVENT: NULL pointer argument to %s in call from "
			"%p\n", __func__, __caller(0));
		return;
	}

	if (!clockevents_state_supported(dev, state)) {
		pr_warn("CLOCKEVENT: selected state %d not supported by device "
			"%s\n", state, dev->name);
		return;
	}

	/* nothing to do if already in the requested state */
	if (dev->state == state)
		return;

	dev->set_state(state, dev);
	dev->state = state;
}
/**
 * @brief set the event handler for a clock event device
 *
 * @param dev the clock event device
 * @param event_handler the callback executed when the device fires
 *
 * @note dev is not checked for NULL; the caller must pass a valid device
 */
void clockevents_set_handler(struct clock_event_device *dev,
			     void (*event_handler)(struct clock_event_device *))
{
	dev->event_handler = event_handler;
}
/**
* @brief suspend all clock devices
*/
void clockevents_suspend(void)
{
struct clock_event_device *dev;
list_for_each_entry_rev(dev, &clockevent_devices, node)
if (dev->suspend)
dev->suspend(dev);
}
/**
* @brief resume all clock devices
*/
void clockevents_resume(void)
{
struct clock_event_device *dev;
list_for_each_entry(dev, &clockevent_devices, node)
if (dev->resume)
dev->resume(dev);
}
/**
 * @brief release a clock event device in exchange for another one
 *
 * @param old the device to be released (may be NULL)
 * @param new the device to be acquired (may be NULL)
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	if (old)
		clockevents_set_state(old, CLOCK_EVT_STATE_UNUSED);

	if (!new)
		return;

	/* only devices nobody claimed yet may be acquired */
	BUG_ON(new->state != CLOCK_EVT_STATE_UNUSED);
	clockevents_set_state(new, CLOCK_EVT_STATE_SHUTDOWN);
}
/**
 * @brief register a clock event device
 *
 * @param dev the device to register
 *
 * Rejects devices that lack mandatory callbacks (set_next_event, set_state,
 * and set_next_ktime when CLOCK_EVT_FEAT_KTIME is advertised), then adds the
 * device to the global list and offers it to the ticker.
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	BUG_ON(!dev);

	if (!dev->set_next_event) {
		pr_crit("set_next_event() not set for clock %p\n", dev);
		return;
	}

	if ((dev->features & CLOCK_EVT_FEAT_KTIME) && !dev->set_next_ktime) {
		pr_crit("set_next_ktime() not set for clock %p\n", dev);
		return;
	}

	if (!dev->set_state) {
		pr_crit("set_state() not set for clock %p\n", dev);
		return;
	}

	/* not fatal, but worth complaining about */
	if (!dev->suspend)
		pr_err("suspend() not set for clock %p\n", dev);

	spin_lock(&clockevents_spinlock);

	list_add_tail(&dev->node, &clockevent_devices);

	/* the ticker may reprogram the device; keep interrupts off meanwhile */
	arch_local_irq_disable();
	tick_check_device(dev);
	arch_local_irq_enable();

	spin_unlock(&clockevents_spinlock);
}
EXPORT_SYMBOL(clockevents_register_device);
/**
 * @brief program a clock event
 *
 * @param dev the clock event device to program
 * @param expires the absolute expiration time
 *
 * returns 0 on success, -ETIME if expiration time is in the past
 *
 * @warn if the timeout exceeds the bounds of the programmable range
 *	 for the device, it is forcibly clamped without warning
 *
 * @note if the clock event device is in periodic mode, the delta between
 *	 expiration time and current time will be the new period
 */
int clockevents_program_event(struct clock_event_device *dev,
			      struct timespec expires)
{
	double delta_ns;
	unsigned long ticks;

	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
		return 0;

	/* prefer the device's native ktime interface when available */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	/* otherwise convert the expiration time to a relative delta */
	delta_ns = difftime_ns(expires, get_ktime());

	if (delta_ns < 0)
		return -ETIME;

	/* clamp, adjust to clock tick period and set event */
	ticks = clockevents_delta2ticks((unsigned long) delta_ns, dev);
	dev->set_next_event(ticks, dev);

	return 0;
}
/**
 * @brief program a clockevent timeout in nanoseconds
 *
 * @param dev the clock event device to program
 * @param nanoseconds the timeout in nanoseconds
 *
 * returns 0 on success, 1 if range was clamped
 *
 * @warn if the timeout exceeds the bounds of the programmable range
 *	 for the device, it is forcibly clamped
 */
int clockevents_program_timeout_ns(struct clock_event_device *dev,
				   unsigned long nanoseconds)
{
	unsigned long evt;

	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
		return 0;

	/* clamp and adjust to clock tick period */
	evt = clockevents_delta2ticks(nanoseconds, dev);

	dev->set_next_event(evt, dev);

	/* evt is in device ticks, not nanoseconds; comparing it to the
	 * requested ns value (as the original code did) reported "clamped"
	 * for virtually every call. Check the request against the device
	 * range instead.
	 */
	return !clockevents_timout_in_range(dev, nanoseconds);
}
/**
* @file kernel/tick.c
* @author Armin Luntzer (armin.luntzer@univie.ac.at)
*
*
* @ingroup time
*
* @note this roughly follows the concept found in linux ticks
*/
#include <errno.h>
#include <kernel/tick.h>
#include <kernel/time.h>
#include <kernel/export.h>
#include <kernel/clockevent.h>
#include <kernel/kthread.h>
#define MSG "TICK: "

/* the clock event device driving the system tick (single device for now) */
static struct clock_event_device *tick_device;

/* default handler attached to the tick device */
static void tick_event_handler(struct clock_event_device *dev)
{
	/* does nothing, schedule later */
}
/* return the current tick device; cpu is ignored until per-cpu support lands */
struct clock_event_device *tick_get_device(__attribute__((unused)) int cpu)
{
	return tick_device;
}

/* install dev as the tick device; cpu is ignored until per-cpu support lands */
void tick_set_device(struct clock_event_device *dev,
		     __attribute__((unused)) int cpu)
{
	tick_device = dev;
}
/**
* @brief tick device selection check
*
* @note placeholder, does not do much right now
*/
static bool tick_check_preferred(struct clock_event_device *cur,
struct clock_event_device *new)
{
/* XXX: need that until we have internal mode tracking for the
* ticker, after wich we can reprogram the the oneshot
* timer after each event to emulate periodicity
*/
if (!clockevents_feature_periodic(new))
return false;
/* If we have nothing, we'll take what we can get */
if (!cur)
return true;
return false;
}
/**
 * @brief configure for periodic mode if available
 *
 * @param dev the clock event device to configure
 *
 * @returns -EINVAL if mode is not supported by underlying clock event device
 */
static int tick_set_mode_periodic(struct clock_event_device *dev)
{
	if (clockevents_feature_periodic(dev)) {
		clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
		return 0;
	}

	return -EINVAL;
}
/**
 * @brief configure for oneshot mode if available
 *
 * @param dev the clock event device to configure
 *
 * @returns -EINVAL if mode is not supported by underlying clock event device
 */
static int tick_set_mode_oneshot(struct clock_event_device *dev)
{
	if (clockevents_feature_oneshot(dev)) {
		clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
		return 0;
	}

	return -EINVAL;
}
/**
 * @brief configure the tick device
 *
 * Attaches the default tick handler, selects periodic mode and arms the
 * first timeout. The handler must be installed before the device can fire.
 */
static void tick_setup_device(struct clock_event_device *dev)
{
#define RANDOM_TICK_RATE_NS	18000	/* arbitrary initial tick period */

	clockevents_set_handler(dev, tick_event_handler);

	/* FIXME: assume blindly for the moment */
	tick_set_mode_periodic(dev);

	clockevents_program_timeout_ns(dev, RANDOM_TICK_RATE_NS);
}
/**
 * @brief offer a new clock event device to the ticker
 *
 * @param dev the candidate clock event device (NULL is ignored)
 *
 * If the candidate is preferable to the current tick device, the devices
 * are exchanged and the candidate is configured as the new tick source.
 */
void tick_check_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur;

	if (!dev)
		return;

	/* XXX need per-cpu selection later */
	cur = tick_get_device(0);

	if (!tick_check_preferred(cur, dev))
		return;

	clockevents_exchange_device(cur, dev);

	/* XXX as above */
	tick_set_device(dev, 0);

	tick_setup_device(dev);
}
/**
 * @brief configure the mode of the ticker
 *
 * @param mode the tick mode to select
 *
 * @returns 0 on success, -EINVAL if mode not available
 */
int tick_set_mode(enum tick_mode mode)
{
	struct clock_event_device *dev;

	/* XXX need per-cpu selection later */
	dev = tick_get_device(0);

	if (mode == TICK_MODE_PERIODIC)
		return tick_set_mode_periodic(dev);

	if (mode == TICK_MODE_ONESHOT)
		return tick_set_mode_oneshot(dev);

	return -EINVAL;
}
/**
 * @brief configure next tick period in nanoseconds
 *
 * @param nanoseconds the next tick period
 *
 * returns 0 on success, 1 if nanoseconds range was clamped to clock range
 *
 * @note assumes a tick device has been registered; no NULL check is
 *	 performed here — TODO confirm this is guaranteed by boot order
 */
int tick_set_next_ns(unsigned long nanoseconds)
{
	struct clock_event_device *dev;

	/* XXX need per-cpu selection later */
	dev = tick_get_device(0);

	return clockevents_program_timeout_ns(dev, nanoseconds);
}

/**
 * @brief configure next tick period in ktime
 *
 * @param expires the absolute expiration time of the next tick
 *
 * returns 0 on success, -ETIME if expiration time is in the past
 *
 * @warn if the timeout exceeds the bounds of the programmable range
 *	 for the device, it is forcibly clamped without warning
 *
 * @note if the clock event device is in periodic mode, the delta between
 *	 expiration time and current time will be the new period
 */
int tick_set_next_ktime(struct timespec expires)
{
	struct clock_event_device *dev;

	/* XXX need per-cpu selection later */
	dev = tick_get_device(0);

	return clockevents_program_event(dev, expires);
}
/** /**
* @file kernel/time.c * @file kernel/ktime.c
* @author Armin Luntzer (armin.luntzer@univie.ac.at) * @author Armin Luntzer (armin.luntzer@univie.ac.at)
* *
* *
...@@ -13,28 +13,43 @@ ...@@ -13,28 +13,43 @@
#include <kernel/time.h> #include <kernel/time.h>
#include <kernel/export.h> #include <kernel/export.h>
#define MSG "KTIME: "
static struct timekeeper tk; static struct timekeeper tk;
/**
 * @brief returns the readout overhead of the uptime/ktime clock
 *	  in nanoseconds
 *
 * @returns the calibrated readout overhead in nanoseconds
 *
 * @note this is a self-calibrated value (see time_init_overhead_calibrate)
 */
uint32_t ktime_get_readout_overhead(void)
{
	return tk.readout_ns;
}
EXPORT_SYMBOL(ktime_get_readout_overhead);
/** /**
* @brief get the time elapsed since boot * @brief get the time elapsed since boot
* *
* @param[out] ts a struct timespec * @return struct timespec
* *
* @note if no uptime clock was configured, the result * @note if no uptime clock was configured, the result will be 0
* will be undefined
*/ */
void time_get_uptime(struct timespec *ts) struct timespec get_uptime(void)
{ {
uint32_t sec; uint32_t sec;
uint32_t nsec; uint32_t nsec;
struct timespec ts = {0};
if (!tk.clock) if (!tk.clock)
return; return ts;
tk.clock->read(&sec, &nsec); tk.clock->read(&sec, &nsec);
...@@ -44,14 +59,84 @@ void time_get_uptime(struct timespec *ts) ...@@ -44,14 +59,84 @@ void time_get_uptime(struct timespec *ts)
* (see also kernel/time.h) * (see also kernel/time.h)
*/ */
ts->tv_sec = (typeof(ts->tv_sec)) sec; ts.tv_sec = (typeof(ts.tv_sec)) sec;
ts->tv_nsec = (typeof(ts->tv_sec)) nsec; ts.tv_nsec = (typeof(ts.tv_sec)) nsec;
return ts;
}
EXPORT_SYMBOL(get_uptime);
/**
* @brief get the current kernel time
* @note for now, this is just an alias of get_uptime
*/
struct timespec get_ktime(void) __attribute__((alias("get_uptime")));
EXPORT_SYMBOL(get_ktime);
/**
 * @brief returns the number of seconds elapsed between time1 and time0
 *
 * @param time1 the later struct timespec
 * @param time0 the earlier struct timespec
 *
 * @returns the time delta in seconds, represented as double
 *
 * @note the result is negative if time1 is earlier than time0
 */
double difftime(const struct timespec time1, const struct timespec time0)
{
	double t0, t1;

	/* fold seconds and nanoseconds into a single floating-point value */
	t0 = (double) time0.tv_sec + (double) time0.tv_nsec * 1e-9;
	t1 = (double) time1.tv_sec + (double) time1.tv_nsec * 1e-9;

	return t1 - t0;
}
EXPORT_SYMBOL(difftime);
/**
 * @brief returns the number of nanoseconds elapsed between time1 and time0
 *
 * @param time1 the later struct timespec
 * @param time0 the earlier struct timespec
 *
 * @returns the time delta in nanoseconds, represented as double
 *
 * @note the result is negative if time1 is earlier than time0
 */
double difftime_ns(const struct timespec time1, const struct timespec time0)
{
	/* scale the seconds-valued delta to nanoseconds */
	return difftime(time1, time0) * 1e9;
}
EXPORT_SYMBOL(difftime_ns);
static void time_init_overhead_calibrate(void)
{
#define CALIBRATE_LOOPS 100
int i;
double delta = 0.0;
struct timespec t0;
for (i = 0; i < CALIBRATE_LOOPS; i++) {
t0 = get_ktime();
delta += difftime_ns(get_ktime(), t0);
} }
EXPORT_SYMBOL(time_get_uptime);
/* overhead is readout delta / 2 */
tk.readout_ns = (typeof(tk.readout_ns)) (0.5 * delta / (double) i);
printk(MSG "calibrated main uptime clock readout overhead to %d ns\n",
tk.readout_ns);
}
/** /**
...@@ -61,4 +146,5 @@ EXPORT_SYMBOL(time_get_uptime); ...@@ -61,4 +146,5 @@ EXPORT_SYMBOL(time_get_uptime);
void time_init(struct clocksource *clock) void time_init(struct clocksource *clock)
{ {
tk.clock = clock; tk.clock = clock;
time_init_overhead_calibrate();
} }
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment