This patchset cleans up all the direct accesses to xtime
and wall_to_monotonic, allowing them to be made static
so we can do further cleanup and rework of the timekeeping
core.
This is 2.6.36 (or later) material, but I wanted to get
the ball rolling.
Any feedback/testing would be greatly appreciated!
thanks
-john
John Stultz (6):
powerpc: Simplify update_vsyscall
powerpc: Cleanup xtime usage
Fix update_vsyscall to provide wall_to_monotonic offset
Convert um to use read_persistent_clock
Cleanup hrtimer.c's direct access to wall_to_monotonic
Make xtime and wall_to_monotonic static
CC: Martin Schwidefsky <[email protected]>
CC: Anton Blanchard <[email protected]>
CC: Paul Mackerras <[email protected]>
CC: Tony Luck <[email protected]>
CC: Thomas Gleixner <[email protected]>
CC: Jeff Dike <[email protected]>
Documentation/feature-removal-schedule.txt | 10 -----
arch/ia64/kernel/time.c | 7 ++-
arch/powerpc/kernel/time.c | 61 +++++++++++++---------------
arch/s390/kernel/time.c | 8 ++--
arch/um/kernel/time.c | 13 +++---
arch/x86/kernel/vsyscall_64.c | 6 +-
include/linux/clocksource.h | 6 ++-
include/linux/time.h | 5 +-
kernel/hrtimer.c | 9 ++--
kernel/time/timekeeping.c | 18 ++++++--
10 files changed, 69 insertions(+), 74 deletions(-)
This patch makes xtime and wall_to_monotonic static, as planned in
Documentation/feature-removal-schedule.txt. This will allow for
further cleanups to the timekeeping core.
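Once they are static, code outside kernel/time/timekeeping.c is expected
to go through the existing accessors. A minimal sketch, for illustration
only (hypothetical driver-style helper, not part of this patch; needs
<linux/kernel.h> and <linux/time.h>):

static void example_show_times(void)
{
	struct timespec wall, coarse;

	getnstimeofday(&wall);			/* clocksource-granular wall time */
	coarse = current_kernel_time();		/* tick-granular copy of xtime */

	pr_info("wall %ld.%09ld coarse %ld.%09ld\n",
		wall.tv_sec, wall.tv_nsec, coarse.tv_sec, coarse.tv_nsec);
}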
Signed-off-by: John Stultz <[email protected]>
CC: Thomas Gleixner <[email protected]>
---
Documentation/feature-removal-schedule.txt | 10 ----------
include/linux/time.h | 2 --
kernel/time/timekeeping.c | 4 ++--
3 files changed, 2 insertions(+), 14 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index c268783..0d91c6b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -549,16 +549,6 @@ Who: Avi Kivity <[email protected]>
----------------------------
-What: xtime, wall_to_monotonic
-When: 2.6.36+
-Files: kernel/time/timekeeping.c include/linux/time.h
-Why: Cleaning up timekeeping internal values. Please use
- existing timekeeping accessor functions to access
- the equivalent functionality.
-Who: John Stultz <[email protected]>
-
-----------------------------
-
What: KVM kernel-allocated memory slots
When: July 2010
Why: Since 2.6.25, kvm supports user-allocated memory slots, which are
diff --git a/include/linux/time.h b/include/linux/time.h
index d18edd8..3d03d79 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -97,8 +97,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
#define timespec_valid(ts) \
(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
-extern struct timespec xtime;
-extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
extern void read_persistent_clock(struct timespec *ts);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 8600218..a68297a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -153,8 +153,8 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
* - wall_to_monotonic is no longer the boot time, getboottime must be
* used instead.
*/
-struct timespec xtime __attribute__ ((aligned (16)));
-struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+static struct timespec xtime __attribute__ ((aligned (16)));
+static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;
/*
--
1.6.0.4
Provides an accessor function to replace hrtimer.c's
direct access of wall_to_monotonic.
This will allow wall_to_monotonic to be made static, as
planned in Documentation/feature-removal-schedule.txt.
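For illustration, a caller outside the timekeeping core pairs the new
accessor with the xtime_lock seqlock, just as the hrtimer code below
does. A minimal sketch (hypothetical helper, not part of this patch;
needs <linux/time.h> and <linux/ktime.h>):

static ktime_t example_get_coarse_monotonic(void)
{
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = __current_kernel_time();
		tom = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));

	/* wall time + wall_to_monotonic gives a monotonic timestamp */
	return ktime_add(timespec_to_ktime(xts), timespec_to_ktime(tom));
}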
Signed-off-by: John Stultz <[email protected]>
CC: Thomas Gleixner <[email protected]>
---
include/linux/time.h | 3 ++-
kernel/hrtimer.c | 9 ++++-----
kernel/time/timekeeping.c | 5 +++++
3 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/include/linux/time.h b/include/linux/time.h
index ea3559f..d18edd8 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -110,7 +110,8 @@ extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
-struct timespec __current_kernel_time(void); /* does not hold xtime_lock */
+struct timespec __current_kernel_time(void); /* does not take xtime_lock */
+struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */
struct timespec get_monotonic_coarse(void);
#define CURRENT_TIME (current_kernel_time())
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 5c69e99..809f48c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -90,7 +90,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
do {
seq = read_seqbegin(&xtime_lock);
xts = __current_kernel_time();
- tom = wall_to_monotonic;
+ tom = __get_wall_to_monotonic();
} while (read_seqretry(&xtime_lock, seq));
xtim = timespec_to_ktime(xts);
@@ -612,7 +612,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base;
- struct timespec realtime_offset;
+ struct timespec realtime_offset, wtm;
unsigned long seq;
if (!hrtimer_hres_active())
@@ -620,10 +620,9 @@ static void retrigger_next_event(void *arg)
do {
seq = read_seqbegin(&xtime_lock);
- set_normalized_timespec(&realtime_offset,
- -wall_to_monotonic.tv_sec,
- -wall_to_monotonic.tv_nsec);
+ wtm = __get_wall_to_monotonic();
} while (read_seqretry(&xtime_lock, seq));
+ set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
base = &__get_cpu_var(hrtimer_bases);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fdfdeb6..8600218 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -905,6 +905,11 @@ struct timespec __current_kernel_time(void)
return xtime;
}
+struct timespec __get_wall_to_monotonic(void)
+{
+ return wall_to_monotonic;
+}
+
struct timespec current_kernel_time(void)
{
struct timespec now;
--
1.6.0.4
Currently powerpc's update_vsyscall calls an inline update_gtod.
However, both are straightforward, and there are no other users,
so this patch merges update_gtod into update_vsyscall.
Compiles, but otherwise untested.
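For reference on the constant that survives the merge below: as far as
I can read it, tb_to_xs holds xsec (1/2^20 s) per timebase tick as a
0.64 fixed-point value, while mult is nanoseconds per tick scaled by
2^22 (clock->shift), so the conversion factor works out to
2^20 * 2^64 / (2^22 * 10^9) = 2^62 / 10^9 ~= 4611686018, which is what
the "2^(20+64-22) / 1e9" comment says.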
Cc: Anton Blanchard <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: John Stultz <[email protected]>
---
arch/powerpc/kernel/time.c | 55 ++++++++++++++++++++------------------------
1 files changed, 25 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0441bbd..6fcd648 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -423,30 +423,6 @@ void udelay(unsigned long usecs)
}
EXPORT_SYMBOL(udelay);
-static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
- u64 new_tb_to_xs)
-{
- /*
- * tb_update_count is used to allow the userspace gettimeofday code
- * to assure itself that it sees a consistent view of the tb_to_xs and
- * stamp_xsec variables. It reads the tb_update_count, then reads
- * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
- * the two values of tb_update_count match and are even then the
- * tb_to_xs and stamp_xsec values are consistent. If not, then it
- * loops back and reads them again until this criteria is met.
- * We expect the caller to have done the first increment of
- * vdso_data->tb_update_count already.
- */
- vdso_data->tb_orig_stamp = new_tb_stamp;
- vdso_data->stamp_xsec = new_stamp_xsec;
- vdso_data->tb_to_xs = new_tb_to_xs;
- vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
- vdso_data->stamp_xtime = xtime;
- smp_wmb();
- ++(vdso_data->tb_update_count);
-}
-
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -876,7 +852,7 @@ static cycle_t timebase_read(struct clocksource *cs)
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
u32 mult)
{
- u64 t2x, stamp_xsec;
+ u64 new_tb_to_xs, new_stamp_xsec;
if (clock != &clocksource_timebase)
return;
@@ -887,11 +863,30 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
- t2x = (u64) mult * 4611686018ULL;
- stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
- do_div(stamp_xsec, 1000000000);
- stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
- update_gtod(clock->cycle_last, stamp_xsec, t2x);
+ new_tb_to_xs = (u64) mult * 4611686018ULL;
+ new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+ do_div(new_stamp_xsec, 1000000000);
+ new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+
+ /*
+ * tb_update_count is used to allow the userspace gettimeofday code
+ * to assure itself that it sees a consistent view of the tb_to_xs and
+ * stamp_xsec variables. It reads the tb_update_count, then reads
+ * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
+ * the two values of tb_update_count match and are even then the
+ * tb_to_xs and stamp_xsec values are consistent. If not, then it
+ * loops back and reads them again until this criteria is met.
+ * We expect the caller to have done the first increment of
+ * vdso_data->tb_update_count already.
+ */
+ vdso_data->tb_orig_stamp = clock->cycle_last;
+ vdso_data->stamp_xsec = new_stamp_xsec;
+ vdso_data->tb_to_xs = new_tb_to_xs;
+ vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
+ vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->stamp_xtime = xtime;
+ smp_wmb();
+ ++(vdso_data->tb_update_count);
}
void update_vsyscall_tz(void)
--
1.6.0.4
This removes powerpc's direct xtime usage, allowing for further
generic timekeeping cleanups.
Compiled but otherwise untested.
Cc: Paul Mackerras <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: John Stultz <[email protected]>
---
arch/powerpc/kernel/time.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6fcd648..0711d60 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -864,9 +864,9 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
new_tb_to_xs = (u64) mult * 4611686018ULL;
- new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+ new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
do_div(new_stamp_xsec, 1000000000);
- new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+ new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
/*
* tb_update_count is used to allow the userspace gettimeofday code
@@ -884,7 +884,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
- vdso_data->stamp_xtime = xtime;
+ vdso_data->stamp_xtime = *wall_time;
smp_wmb();
++(vdso_data->tb_update_count);
}
@@ -1093,7 +1093,7 @@ void __init time_init(void)
vdso_data->tb_orig_stamp = tb_last_jiffy;
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
- vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
+ vdso_data->stamp_xsec = (u64) get_seconds() * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
write_sequnlock_irqrestore(&xtime_lock, flags);
--
1.6.0.4
update_vsyscall() did not provide the wall_to_monotonic offset,
so arch-specific implementations tend to reference wall_to_monotonic
directly. This limits future cleanups in the timekeeping core, so
this patch fixes the update_vsyscall interface to provide
wall_to_monotonic, allowing wall_to_monotonic to be made static
as planned in Documentation/feature-removal-schedule.txt.
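With this change the arch hook gets the offset passed in. A skeleton
implementation matching the new prototype in clocksource.h would look
roughly like this (sketch only, not part of this patch; needs
<linux/clocksource.h> and <linux/time.h>):

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
		     struct clocksource *clock, u32 mult)
{
	/*
	 * Copy wall_time, *wtm, clock->cycle_last and mult into the
	 * arch's vDSO data page here, instead of reading the soon to
	 * be static xtime/wall_to_monotonic globals.
	 */
}

void update_vsyscall_tz(void)
{
}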
Signed-off-by: John Stultz <[email protected]>
CC: Martin Schwidefsky <[email protected]>
CC: Anton Blanchard <[email protected]>
CC: Paul Mackerras <[email protected]>
CC: Tony Luck <[email protected]>
CC: Thomas Gleixner <[email protected]>
---
arch/ia64/kernel/time.c | 7 ++++---
arch/powerpc/kernel/time.c | 8 ++++----
arch/s390/kernel/time.c | 8 ++++----
arch/x86/kernel/vsyscall_64.c | 6 +++---
include/linux/clocksource.h | 6 ++++--
kernel/time/timekeeping.c | 9 ++++++---
6 files changed, 25 insertions(+), 19 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 653b3c4..ed6f22e 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -471,7 +471,8 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
+void update_vsyscall(struct timespec *wall, struct timespec *wtm,
+ struct clocksource *c, u32 mult)
{
unsigned long flags;
@@ -487,9 +488,9 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
/* copy kernel time structures */
fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
- fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+ fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
+ wall->tv_sec;
- fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+ fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
+ wall->tv_nsec;
/* normalize */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0711d60..e215f76 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -849,8 +849,8 @@ static cycle_t timebase_read(struct clocksource *cs)
return (cycle_t)get_tb();
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
- u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+ struct clocksource *clock, u32 mult)
{
u64 new_tb_to_xs, new_stamp_xsec;
@@ -882,8 +882,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vdso_data->tb_orig_stamp = clock->cycle_last;
vdso_data->stamp_xsec = new_stamp_xsec;
vdso_data->tb_to_xs = new_tb_to_xs;
- vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->wtom_clock_sec = wtm->tv_sec;
+ vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->stamp_xtime = *wall_time;
smp_wmb();
++(vdso_data->tb_update_count);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a2163c9..aeb30c6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -207,8 +207,8 @@ struct clocksource * __init clocksource_default_clock(void)
return &clocksource_tod;
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
- u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+ struct clocksource *clock, u32 mult)
{
if (clock != &clocksource_tod)
return;
@@ -219,8 +219,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vdso_data->xtime_tod_stamp = clock->cycle_last;
vdso_data->xtime_clock_sec = wall_time->tv_sec;
vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
- vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->wtom_clock_sec = wtm->tv_sec;
+ vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->ntp_mult = mult;
smp_wmb();
++vdso_data->tb_update_count;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 1c0c6ab..3499be3 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -73,8 +73,8 @@ void update_vsyscall_tz(void)
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
- u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+ struct clocksource *clock, u32 mult)
{
unsigned long flags;
@@ -87,7 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vsyscall_gtod_data.clock.shift = clock->shift;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
- vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
+ vsyscall_gtod_data.wall_to_monotonic = *wtm;
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 5ea3c60..21677d9 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -313,11 +313,13 @@ clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
-update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
+update_vsyscall(struct timespec *ts, struct timespec *wtm,
+ struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
-update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
+update_vsyscall(struct timespec *ts, struct timespec *wtm,
+ struct clocksource *c, u32 mult)
{
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index caf8d4d..fdfdeb6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -170,7 +170,8 @@ void timekeeping_leap_insert(int leapsecond)
{
xtime.tv_sec += leapsecond;
wall_to_monotonic.tv_sec -= leapsecond;
- update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
}
#ifdef CONFIG_GENERIC_TIME
@@ -328,7 +329,8 @@ int do_settimeofday(struct timespec *tv)
timekeeper.ntp_error = 0;
ntp_clear();
- update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -856,7 +858,8 @@ void update_wall_time(void)
}
/* check to see if there is a new clocksource to use */
- update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
}
/**
--
1.6.0.4
This patch converts the um arch to use read_persistent_clock().
This allows it to avoid accessing xtime and wall_to_monotonic
directly.
This patch is untested, so any help from testers or maintainers would
be greatly appreciated!
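For context, the generic timekeeping code picks this value up at boot,
so implementing the hook is all that is needed here. Roughly (a
simplified sketch of timekeeping_init() in kernel/time/timekeeping.c,
with the ntp/clocksource setup elided):

void __init timekeeping_init(void)
{
	struct timespec now;
	unsigned long flags;

	read_persistent_clock(&now);

	write_seqlock_irqsave(&xtime_lock, flags);
	/* ... ntp/clocksource initialization elided ... */
	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);
	write_sequnlock_irqrestore(&xtime_lock, flags);
}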
Signed-off-by: John Stultz <[email protected]>
CC: Jeff Dike <[email protected]>
CC: Thomas Gleixner <[email protected]>
---
arch/um/kernel/time.c | 13 +++++++------
1 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index c8b9c46..2b8b262 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -102,16 +102,17 @@ static void __init setup_itimer(void)
clockevents_register_device(&itimer_clockevent);
}
+void read_persistent_clock(struct timespec *ts)
+{
+ long long nsecs = os_nsecs();
+ set_normalized_timespec(ts, nsecs / NSEC_PER_SEC,
+ nsecs % NSEC_PER_SEC);
+}
+
void __init time_init(void)
{
long long nsecs;
timer_init();
-
- nsecs = os_nsecs();
- set_normalized_timespec(&wall_to_monotonic, -nsecs / NSEC_PER_SEC,
- -nsecs % NSEC_PER_SEC);
- set_normalized_timespec(&xtime, nsecs / NSEC_PER_SEC,
- nsecs % NSEC_PER_SEC);
late_time_init = setup_itimer;
}
--
1.6.0.4
On Saturday 05 June 2010, John Stultz wrote:
> do {
> seq = read_seqbegin(&xtime_lock);
> xts = __current_kernel_time();
> - tom = wall_to_monotonic;
> + tom = __get_wall_to_monotonic();
> } while (read_seqretry(&xtime_lock, seq));
>
Would it make sense to also limit the use of xtime_lock to the
timekeeping code? I suppose you could merge the various accessors
(current_kernel_time, get_monotonic_coarse, __current_kernel_time,
__get_wall_to_monotonic) with a single function doing
struct timespec current_kernel_time(struct timespec *tomono)
{
struct timespec now;
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
if (tomono)
*tomono = wall_to_monotonic;
now = xtime;
} while (read_seqretry(&xtime_lock, seq));
return now;
}
Arnd
On Sat, 2010-06-05 at 12:23 +0200, Arnd Bergmann wrote:
> On Saturday 05 June 2010, John Stultz wrote:
> > do {
> > seq = read_seqbegin(&xtime_lock);
> > xts = __current_kernel_time();
> > - tom = wall_to_monotonic;
> > + tom = __get_wall_to_monotonic();
> > } while (read_seqretry(&xtime_lock, seq));
> >
>
> Would it make sense to also limit the use of xtime_lock to the
> timekeeping code? I suppose you could merge the various accessors
> (current_kernel_time, get_monotonic_coarse, __current_kernel_time,
> __get_wall_to_monotonic) with a single function doing
Yep. I hope to keep chipping at it to get there.
However, xtime_lock protects a bit more than just the timekeeping
internals, and it's currently not limiting the rework I'm hoping to do
with the timekeeping internal structures. So little bits at a time. :)
thanks
-john