Commit 76062c29 authored by Armin Luntzer

ktime: change to 64 bits

parent 0776d5f1
@@ -4,3 +4,6 @@ CHECKFLAGS += -D__sparc__
lib-y += gptimer.o
lib-y += grtimer.o
lib-y += grtimer_longcount.o
lib-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += clz_tab.o
lib-$(CONFIG_ARCH_CUSTOM_BOOT_CODE) += divdi3.o
// SPDX-License-Identifier: GPL-2.0
const unsigned char __clz_tab[] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
};
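For each byte value b, __clz_tab[b] holds the index of the highest set bit of b plus one (0 for b == 0); the __divdi3 routine below indexes it to normalise its divisor. As an illustration only (not part of this commit, and the helper name is made up), a 32-bit count-leading-zeros built on such a table can look like this:

#include <stdint.h>

extern const unsigned char __clz_tab[];

/* illustrative sketch: leading zeros of a 32-bit value (yields 32 for x == 0) */
static int clz32(uint32_t x)
{
	int shift;

	/* locate the highest non-zero byte */
	if (x >= (1UL << 16))
		shift = (x >= (1UL << 24)) ? 24 : 16;
	else
		shift = (x >= (1UL << 8)) ? 8 : 0;

	/* __clz_tab gives the number of significant bits in that byte */
	return 32 - (__clz_tab[x >> shift] + shift);
}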
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
.text
.align 4
.globl __divdi3
__divdi3:
save %sp,-104,%sp
cmp %i0,0
bge .LL40
mov 0,%l4
mov -1,%l4
sub %g0,%i1,%o0
mov %o0,%o5
subcc %g0,%o0,%g0
sub %g0,%i0,%o0
subx %o0,0,%o4
mov %o4,%i0
mov %o5,%i1
.LL40:
cmp %i2,0
bge .LL84
mov %i3,%o4
xnor %g0,%l4,%l4
sub %g0,%i3,%o0
mov %o0,%o3
subcc %g0,%o0,%g0
sub %g0,%i2,%o0
subx %o0,0,%o2
mov %o2,%i2
mov %o3,%i3
mov %i3,%o4
.LL84:
cmp %i2,0
bne .LL45
mov %i1,%i3
cmp %o4,%i0
bleu .LL46
mov %i3,%o1
mov 32,%g1
subcc %i0,%o4,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %i0,%o4,%i0 ! this kills msb of n
addx %i0,%i0,%i0 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %i0,%o4,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %i0,%o4,%i0 ! this kills msb of n
4: sub %i0,%o4,%i0
5: addxcc %i0,%i0,%i0
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %i0,%o4,%i0
3: xnor %o1,0,%o1
b .LL50
mov 0,%o2
.LL46:
cmp %o4,0
bne .LL85
mov %i0,%o2
mov 1,%o0
mov 0,%o1
wr %g0, 0, %y
udiv %o0, %o1, %o0
mov %o0,%o4
mov %i0,%o2
.LL85:
mov 0,%g3
mov 32,%g1
subcc %g3,%o4,%g0
1: bcs 5f
addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
sub %g3,%o4,%g3 ! this kills msb of n
addx %g3,%g3,%g3 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %g3,%o4,%g0
bcs 3f
addxcc %o2,%o2,%o2 ! shift n1n0 and a q-bit in lsb
b 3f
sub %g3,%o4,%g3 ! this kills msb of n
4: sub %g3,%o4,%g3
5: addxcc %g3,%g3,%g3
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o2,%o2,%o2 ! shift n1n0 and a 0-bit in lsb
sub %g3,%o4,%g3
3: xnor %o2,0,%o2
mov %g3,%i0
mov %i3,%o1
mov 32,%g1
subcc %i0,%o4,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %i0,%o4,%i0 ! this kills msb of n
addx %i0,%i0,%i0 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %i0,%o4,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %i0,%o4,%i0 ! this kills msb of n
4: sub %i0,%o4,%i0
5: addxcc %i0,%i0,%i0
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %i0,%o4,%i0
3: xnor %o1,0,%o1
b .LL86
mov %o1,%l1
.LL45:
cmp %i2,%i0
bleu .LL51
sethi %hi(65535),%o0
b .LL78
mov 0,%o1
.LL51:
or %o0,%lo(65535),%o0
cmp %i2,%o0
bgu .LL58
mov %i2,%o1
cmp %i2,256
addx %g0,-1,%o0
b .LL64
and %o0,8,%o2
.LL58:
sethi %hi(16777215),%o0
or %o0,%lo(16777215),%o0
cmp %i2,%o0
bgu .LL64
mov 24,%o2
mov 16,%o2
.LL64:
srl %o1,%o2,%o0
sethi %hi(__clz_tab),%o1
or %o1,%lo(__clz_tab),%o1
ldub [%o0+%o1],%o0
add %o0,%o2,%o0
mov 32,%o1
subcc %o1,%o0,%o3
bne,a .LL72
sub %o1,%o3,%o1
cmp %i0,%i2
bgu .LL74
cmp %i3,%o4
blu .LL78
mov 0,%o1
.LL74:
b .LL78
mov 1,%o1
.LL72:
sll %i2,%o3,%o2
srl %o4,%o1,%o0
or %o2,%o0,%i2
sll %o4,%o3,%o4
srl %i0,%o1,%o2
sll %i0,%o3,%o0
srl %i3,%o1,%o1
or %o0,%o1,%i0
sll %i3,%o3,%i3
mov %i0,%o1
mov 32,%g1
subcc %o2,%i2,%g0
1: bcs 5f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
sub %o2,%i2,%o2 ! this kills msb of n
addx %o2,%o2,%o2 ! so this cannot give carry
subcc %g1,1,%g1
2: bne 1b
subcc %o2,%i2,%g0
bcs 3f
addxcc %o1,%o1,%o1 ! shift n1n0 and a q-bit in lsb
b 3f
sub %o2,%i2,%o2 ! this kills msb of n
4: sub %o2,%i2,%o2
5: addxcc %o2,%o2,%o2
bcc 2b
subcc %g1,1,%g1
! Got carry from n. Subtract next step to cancel this carry.
bne 4b
addcc %o1,%o1,%o1 ! shift n1n0 and a 0-bit in lsb
sub %o2,%i2,%o2
3: xnor %o1,0,%o1
mov %o2,%i0
wr %g0,%o1,%y ! SPARC has 0-3 delay insn after a wr
sra %o4,31,%g2 ! Do not move this insn
and %o1,%g2,%g2 ! Do not move this insn
andcc %g0,0,%g1 ! Do not move this insn
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,%o4,%g1
mulscc %g1,0,%g1
add %g1,%g2,%o0
rd %y,%o2
cmp %o0,%i0
bgu,a .LL78
add %o1,-1,%o1
bne,a .LL50
mov 0,%o2
cmp %o2,%i3
bleu .LL50
mov 0,%o2
add %o1,-1,%o1
.LL78:
mov 0,%o2
.LL50:
mov %o1,%l1
.LL86:
mov %o2,%l0
mov %l0,%i0
mov %l1,%i1
cmp %l4,0
be .LL81
sub %g0,%i1,%o0
mov %o0,%l3
subcc %g0,%o0,%g0
sub %g0,%i0,%o0
subx %o0,0,%l2
mov %l2,%i0
mov %l3,%i1
.LL81:
ret
restore
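The routine above is the libgcc-style __divdi3 for SPARC: a truncating signed 64-bit divide built from 32-bit restoring (shift-and-subtract) division loops, presumably pulled in because 64-bit ktime arithmetic makes the compiler emit calls to __divdi3 and CONFIG_ARCH_CUSTOM_BOOT_CODE builds do not link the usual support library. As a readability aid only (a sketch of the semantics, not the source the assembly was generated from):

#include <stdint.h>

/* reference behaviour of __divdi3: truncating signed 64-bit division;
 * INT64_MIN operands and division by zero are not handled in this sketch */
static int64_t divdi3_ref(int64_t n, int64_t d)
{
	uint64_t un, ud, q = 0, r = 0;
	int neg = 0;
	int i;

	if (n < 0) {
		un = (uint64_t) -n;
		neg ^= 1;
	} else {
		un = (uint64_t) n;
	}

	if (d < 0) {
		ud = (uint64_t) -d;
		neg ^= 1;
	} else {
		ud = (uint64_t) d;
	}

	/* restoring division: one quotient bit per iteration, MSB first */
	for (i = 63; i >= 0; i--) {
		r = (r << 1) | ((un >> i) & 1);
		if (r >= ud) {
			r -= ud;
			q |= 1ULL << i;
		}
	}

	return neg ? -(int64_t) q : (int64_t) q;
}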
@@ -25,22 +25,30 @@
#include <kernel/clocksource.h>
#if 0
struct timespec {
uint32_t tv_sec; /* seconds */
uint32_t tv_nsec; /* nanoseconds */
};
#endif
/* we use the compiler-defined struct timespec at this time, but we can
* at least verify the size of the types to see if we are compatible
*/
-compile_time_assert((member_size(struct timespec, tv_sec) == sizeof(uint32_t)),
+compile_time_assert((member_size(struct timespec, tv_sec) == sizeof(int32_t)),
TIMESPEC_SEC_SIZE_MISMATCH);
-compile_time_assert((member_size(struct timespec, tv_nsec) == sizeof(uint32_t)),
+compile_time_assert((member_size(struct timespec, tv_nsec) == sizeof(int32_t)),
TIMESPEC_NSEC_SIZE_MISMATCH);
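member_size() and compile_time_assert() are defined elsewhere in this tree; their exact form is not shown in this diff, but macros of this kind are commonly written along these lines (an assumption for orientation, not code from this commit):

/* assumed shapes of the helpers used above, not part of this change */
#define member_size(type, member)	sizeof(((type *)0)->member)
#define compile_time_assert(cond, name)	typedef char assert_##name[(cond) ? 1 : -1]

so a libc struct timespec with field widths other than 32 bits breaks the build instead of silently corrupting timestamps.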
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define KTIME_MAX (~(1LL << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
/* ktime is nanoseconds since boot */
typedef int64_t ktime;
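With ktime now a signed 64-bit nanosecond count, the limits work out as follows (worked out here for orientation, not text from the commit):

KTIME_MAX     = 2^63 - 1                 = 9223372036854775807 ns
KTIME_SEC_MAX = KTIME_MAX / NSEC_PER_SEC = 9223372036 s, roughly 292 years of uptime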
struct timekeeper {
struct clocksource *clock;
uint32_t readout_ns; /* readout time overhead in ns */
@@ -48,6 +56,34 @@ struct timekeeper {
ktime ktime_get(void);
ktime ktime_set(const unsigned long sec, const unsigned long nsec);
ktime timespec_to_ktime(struct timespec ts);
ktime ktime_add(const ktime t1, const ktime t2);
ktime ktime_sub(const ktime later, const ktime earlier);
ktime ktime_add_ns(const ktime t, const uint64_t nsec);
ktime ktime_sub_ns(const ktime t, const uint64_t nsec);
ktime ktime_add_us(const ktime t, const uint64_t usec);
ktime ktime_add_ms(const ktime t, const uint64_t msec);
ktime ktime_sub_us(const ktime t, const uint64_t usec);
ktime ktime_sub_ms(const ktime t, const uint64_t msec);
int ktime_compare(const ktime t1, const ktime t2);
bool ktime_after(const ktime t1, const ktime t2);
bool ktime_before(const ktime t1, const ktime t2);
int64_t ktime_delta(const ktime later, const ktime earlier);
int64_t ktime_us_delta(const ktime later, const ktime earlier);
int64_t ktime_ms_delta(const ktime later, const ktime earlier);
int64_t ktime_to_us(const ktime t);
int64_t ktime_to_ms(const ktime t);
ktime us_to_ktime(const int64_t usec);
ktime ms_to_ktime(const int64_t msec);
struct timespec timespec_add(struct timespec t1, struct timespec t2);
struct timespec ns_to_timespec(const int64_t nsec);
struct timespec timespec_add_ns(struct timespec ts, int64_t nsec);
struct timespec get_uptime(void);
struct timespec get_ktime(void);
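Taken together, the new prototypes form a monotonic uptime API. A typical caller (a sketch only; the header path is an assumption and do_some_work() is a placeholder) would time a section of code like this:

#include <kernel/time.h>	/* assumed location of the prototypes above */

extern void do_some_work(void);	/* hypothetical workload */

static void measure_example(void)
{
	ktime start;
	ktime end;
	int64_t us;

	start = ktime_get();	/* nanoseconds since boot, 0 if no clock is set up */

	do_some_work();

	end = ktime_get();

	if (ktime_after(end, start)) {
		us = ktime_us_delta(end, start);
		(void) us;	/* report it via whatever logging facility exists */
	}
}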
@@ -77,6 +77,15 @@ EXPORT_SYMBOL(get_ktime);
/**
* @brief returns the number of seconds elapsed between time1 and time0
*
@@ -105,6 +114,9 @@ EXPORT_SYMBOL(difftime);
* @param ts2 a struct timespec
*
* @returns the time delta in nanoseconds, represented as double
*
* XXX we really need a 64 bit ktime_t or overflows are very likely here
* TODO need software implementation of 64 bit add/sub/mult
*/
double difftime_ns(const struct timespec time1, const struct timespec time0)
@@ -138,6 +150,278 @@ static void time_init_overhead_calibrate(void)
tk.readout_ns);
}
/**
* @brief normalise a struct timespec so 0 <= tv_nsec < NSEC_PER_SEC
*/
struct timespec timespec_normalise(struct timespec t)
{
while (t.tv_nsec >= NSEC_PER_SEC) {
t.tv_sec++;
t.tv_nsec -= NSEC_PER_SEC;
}
while (t.tv_nsec < 0) {
t.tv_sec--;
t.tv_nsec += NSEC_PER_SEC;
}
return t;
}
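For illustration (not part of this commit), both an excess nanosecond count and a negative one are folded into the seconds field:

/* hypothetical self-check, not part of this change */
static void timespec_normalise_example(void)
{
	struct timespec t = { 1, 1500000000 };	/* 1 s plus 1.5 s worth of ns */

	t = timespec_normalise(t);	/* -> tv_sec == 2, tv_nsec == 500000000 */

	t.tv_nsec = -200000000;
	t = timespec_normalise(t);	/* -> tv_sec == 1, tv_nsec == 800000000 */
}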
/**
* @brief add two struct timespec
*/
struct timespec timespec_add(struct timespec t1, struct timespec t2)
{
t1.tv_sec += t2.tv_sec;
t1.tv_nsec += t2.tv_nsec;
return timespec_normalise(t1);
}
EXPORT_SYMBOL(timespec_add);
/**
* @brief convert nanoseconds to a struct timespec
*
* @warn this is guaranteed to produce incorrect results for
* nsec >= 2^31 * 10^9 if sizeof(tv_sec) == 4
*/
struct timespec ns_to_timespec(const int64_t nsec)
{
struct timespec ts = {0};
if (!nsec)
return ts;
/* tv_sec is 32 bits wide, reject nanosecond counts past its signed range */
BUG_ON(nsec > ((1LL << 31) - 1) * NSEC_PER_SEC);
ts.tv_sec = (typeof(ts.tv_sec)) (nsec / (int64_t) NSEC_PER_SEC);
ts.tv_nsec = (typeof(ts.tv_nsec)) (nsec - (int64_t) (ts.tv_sec
* NSEC_PER_SEC));
return timespec_normalise(ts);
}
EXPORT_SYMBOL(ns_to_timespec);
/**
* @brief add nanoseconds to a timespec
*
* @warn this is guaranteed to produce incorrect results for
* nsec >= 2^31 * 10^9 if sizeof(tv_sec) == 4
*/
struct timespec timespec_add_ns(struct timespec ts, int64_t nsec)
{
if (!nsec)
return ts;
return timespec_add(ts, ns_to_timespec(nsec));
}
EXPORT_SYMBOL(timespec_add_ns);
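A quick illustration of the two helpers together (not part of this commit):

/* hypothetical self-check, not part of this change */
static void ns_to_timespec_example(void)
{
	struct timespec a;
	struct timespec b;

	a = ns_to_timespec(1500000000LL);	/* -> tv_sec == 1, tv_nsec == 500000000 */
	b = timespec_add_ns(a, 600000000LL);	/* -> tv_sec == 2, tv_nsec == 100000000 */
	(void) b;
}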
/**
* @brief set a ktime from seconds and nanoseconds
* @param sec seconds
* @param nsec nanoseconds
*
* @returns the ktime representation of the input values
*
* @note this allows ktime to be set to at most 10^9 * 2^32 + 2^32 =
* 4294967300294967296 ns, i.e. about 136 years, which is probably enough :)
*/
inline ktime ktime_set(const unsigned long sec, const unsigned long nsec)
{
return (ktime) sec * NSEC_PER_SEC + (ktime) nsec;
}
inline struct timespec ktime_to_timespec(ktime t)
{
return ns_to_timespec(t);
}
/**
* @brief converts a struct timespec to ktime
*/
inline ktime timespec_to_ktime(struct timespec ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
/**
* @brief compare ktimes
*
* @returns < 0 if t1 < t2
* 0 if t1 == t2
* > 0 if t1 > t2
*/
int ktime_compare(const ktime t1, const ktime t2)
{
if (t1 < t2)
return -1;
if (t1 > t2)
return 1;
return 0;
}
/**
* @brief check whether ktime t1 is after t2
* @returns true if t1 was after t2, false otherwise
*/
inline bool ktime_after(const ktime t1, const ktime t2)
{
return ktime_compare(t1, t2) > 0;
}
/**
* @brief check whether ktime t1 is before t2
* @returns true if t1 was before t2, false otherwise
*/
inline bool ktime_before(const ktime t1, const ktime t2)
{
return ktime_compare(t1, t2) < 0;
}
inline ktime ktime_add(const ktime t1, const ktime t2)
{
return t1 + t2;
}
inline ktime ktime_sub(const ktime later, const ktime earlier)
{
return later - earlier;
}
inline ktime ktime_add_ns(const ktime t, const uint64_t nsec)
{
return t + nsec;
}
inline ktime ktime_sub_ns(const ktime t, const uint64_t nsec)
{
return t - nsec;
}
inline ktime ktime_add_us(const ktime t, const uint64_t usec)
{
return ktime_add_ns(t, usec * NSEC_PER_USEC);
}
inline ktime ktime_add_ms(const ktime t, const uint64_t msec)
{
return ktime_add_ns(t, msec * NSEC_PER_MSEC);
}
inline ktime ktime_sub_us(const ktime t, const uint64_t usec)
{
return ktime_sub_ns(t, usec * NSEC_PER_USEC);
}
inline ktime ktime_sub_ms(const ktime t, const uint64_t msec)
{
return ktime_sub_ns(t, msec * NSEC_PER_MSEC);
}
inline int64_t ktime_delta(const ktime later, const ktime earlier)
{
return ktime_sub(later, earlier);
}
inline int64_t ktime_us_delta(const ktime later, const ktime earlier)
{
return ktime_to_us(ktime_sub(later, earlier));
}
inline int64_t ktime_ms_delta(const ktime later, const ktime earlier)
{
return ktime_to_ms(ktime_sub(later, earlier));
}
inline int64_t ktime_to_us(const ktime t)
{
return t / (int64_t) NSEC_PER_USEC;
}
inline int64_t ktime_to_ms(const ktime t)
{
return t / (int64_t) NSEC_PER_MSEC;
}
inline ktime us_to_ktime(const int64_t usec)
{
return (ktime) (usec * (int64_t) NSEC_PER_USEC);
}
inline ktime ms_to_ktime(const int64_t msec)
{
return (ktime) (msec * (int64_t) NSEC_PER_MSEC);
}
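Note that these conversions truncate toward zero rather than round; for instance (illustrative only, not part of this commit):

/* illustration: truncation toward zero */
static void ktime_conv_example(void)
{
	ktime t = us_to_ktime(1500);	/* 1500000 ns */

	(void) ktime_to_us(t);		/* == 1500 */
	(void) ktime_to_ms(t);		/* == 1, the remaining 500 us are dropped */
	(void) ktime_to_ms(-t);		/* == -1, not -2: truncation is toward zero */
}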
/**
* @brief get current kernel time (== uptime)
*
* @note if no uptime clock was configured, the result will be 0
*/
ktime ktime_get(void)
{
ktime ns;
uint32_t sec;
uint32_t nsec;
if (!tk.clock)
return 0;
tk.clock->read(&sec, &nsec);
ns = (ktime) sec * NSEC_PER_SEC + (ktime) nsec;
return ns;
}
EXPORT_SYMBOL(ktime_get);
/**
* @brief initialise the timing system