2006-02-21 06:21:07

by john stultz

[permalink] [raw]
Subject: [-mm PATCHSET 0/11] Sync -mm w/ B19 timekeeping patches

Andrew,
The following patch set syncs my current B19 timekeeping work up with
the patches currently in -mm.

I'm normally a git person, but I finally sat down and started to learn quilt
for this (I know, it's about time. I do like how fast quilt is!), so let me know if I went overboard with the many-tiny-patches thing.

It's fine by me if all of these small changes are folded into their parent
patch, but I'll leave it up to you to decide on that.

I've only sent the patches that were modified or added, but the full changelog can be seen below.

The full quilt patch tarball against 2.6.16-rc4 can be found:
http://sr71.net/~jstultz/tod/for-mm/

thanks
-john

Key:
= No changes
~ Trivial merge
! Modified
+ Added
- Dropped

=time-reduced-ntp-rework-part-1.patch
-time-reduced-ntp-rework-part-1-update.patch
+time-reduced-ntp-rework-part-1-fix-adjtimeadj.patch
Syncs up the time-reduced-ntp-rework-part1.patch with the
adjtime_adjustment() changes.


!time-reduced-ntp-rework-part-2.patch
+time-reduced-ntp-rework-part-2-fix-adjtimeadj.patch
Syncs up the time-reduced-ntp-rework-part2.patch with the
adjtime_adjustment() changes.

=time-clocksource-infrastructure.patch
+time-clocksource-infrastructure-remove-nsect.patch
Removes nsec_t

~time-generic-timekeeping-infrastructure.patch
+time-generic-timekeeping-infrastructure-remove-nsect.patch
Removes nsec_t
+time-generic-timekeeping-infrastructure-fix-ntp-synced.patch
Fixes potential bug where hwclock is not synced
+time-generic-timekeeping-infrastructure-set-wall-offset-helper.patch
Use helper function to cleanup code

~time-i386-conversion-part-1-move-timer_pitc-to-i8253c.patch
~time-i386-conversion-part-2-rework-tsc-support.patch
=time-i386-conversion-part-2-rework-tsc-support-section-fix.patch

=time-i386-conversion-part-3-enable-generic-timekeeping.patch
+time-i386-conversion-part-3-enable-generic-timekeeping-remove-nsect.patch
Removes nsec_t
+time-i386-conversion-part-3-enable-generic-timekeeping-backout-pmtmr-changes.patch
Backs out pmtmr changes that collided with x86-64

=time-i386-conversion-part-4-remove-old-timer_opts-code.patch
+time-i386-conversion-part-4-remove-old-timer_opts-code-del-timer-tsc.patch
Removes the timer_tsc.c file (somehow this got dropped?)

-time-i386-conversion-part-5-acpi-pm-variable-renaming-and-config-change.patch
-time-i386-conversion-part-5-acpi-pm-variable-renaming-and-config-change-x86_64-fix.patch
Unnecessary now that pmtmr changes are backed out


=time-i386-clocksource-drivers.patch
+time-i386-clocksource-drivers-backout-pmtmr-changes.patch
Backs out pmtmr changes that collided with x86-64

=time-fix-cpu-frequency-detection.patch
=time-delay-clocksource-selection-until-later-in-boot.patch
=x86-blacklist-tsc-from-systems-where-it-is-known-to-be-bad.patch
=i386-dont-disable-the-tsc-on-single-node-numaqs.patch


2006-02-21 06:21:13

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 1/11] Time: reduced ntp rework part 1 - fix adjtimeadj

Syncs up the time-reduced-ntp-rework-part1.patch with the adjtime_adjustment() changes.

Signed-off-by: John Stultz <[email protected]>

kernel/timer.c | 28 +++++-----------------------
1 files changed, 5 insertions(+), 23 deletions(-)

Index: mm-merge/kernel/timer.c
===================================================================
--- mm-merge.orig/kernel/timer.c
+++ mm-merge/kernel/timer.c
@@ -590,7 +590,6 @@ static long time_adj; /* tick adjust (
long time_reftime; /* time at last adjustment (s) */
long time_adjust;
long time_next_adjust;
-long time_adjust_step; /* per tick time_adjust step */

/*
* this routine handles the overflow of the microsecond field
@@ -757,25 +756,7 @@ static void ntp_advance(unsigned long in

/* calculate the per tick singleshot adjtime adjustment step: */
while (interval_ns >= tick_nsec) {
- time_adjust_step = time_adjust;
- if (time_adjust_step) {
- /*
- * We are doing an adjtime thing.
- *
- * Prepare time_adjust_step to be within bounds.
- * Note that a positive time_adjust means we want
- * the clock to run faster.
- *
- * Limit the amount of the step to be in the range
- * -tickadj .. +tickadj:
- */
- time_adjust_step = min(time_adjust_step, (long)tickadj);
- time_adjust_step = max(time_adjust_step,
- (long)-tickadj);
-
- /* Reduce by this step the amount of time left: */
- time_adjust -= time_adjust_step;
- }
+ time_adjust -= adjtime_adjustment();
interval_ns -= tick_nsec;
}

@@ -851,11 +833,11 @@ static void update_wall_time(unsigned lo
{
do {
/*
- * Calculate the nsec delta using the precomputed NTP
+ * Calculate the nsec delta using the NTP
* adjustments:
- * tick_nsec, time_adjust_step, time_adj
+ * tick_nsec, adjtime_adjustment(), phase_advance()
*/
- long delta_nsec = tick_nsec + time_adjust_step * 1000;
+ long delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
delta_nsec += phase_advance();

xtime_advance(delta_nsec);

2006-02-21 06:21:29

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 3/11] Time: reduced ntp rework part 2 - fix adjtimeadj

Syncs up the time-reduced-ntp-rework-part2.patch with the adjtime_adjustment() changes.


kernel/timer.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

Index: mm-merge/kernel/timer.c
===================================================================
--- mm-merge.orig/kernel/timer.c
+++ mm-merge/kernel/timer.c
@@ -589,7 +589,6 @@ static long time_adj; /* tick adjust (
long time_reftime; /* time at last adjustment (s) */
long time_adjust;
long time_next_adjust;
-static long time_adjust_step; /* per tick time_adjust step */

static long total_sppm; /* shifted ppm sum of all adjustments */
static long offset_adj_ppm;
@@ -846,7 +845,7 @@ long ntp_get_ppm_adjustment(void)
void ntp_advance(unsigned long interval_ns)
{
static unsigned long interval_sum;
-
+ long time_adjust_step;
unsigned long flags;

write_seqlock_irqsave(&ntp_lock, flags);
@@ -854,9 +853,10 @@ void ntp_advance(unsigned long interval_
/* increment the interval sum: */
interval_sum += interval_ns;

+ time_adjust_step = adjtime_adjustment();
/* calculate the per tick singleshot adjtime adjustment step: */
while (interval_ns >= tick_nsec) {
- time_adjust -= adjtime_adjustment();
+ time_adjust -= time_adjust_step;
interval_ns -= tick_nsec;
}
/* usec/tick => ppm: */

2006-02-21 06:21:45

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 6/11] Time: generic timekeeping infrastructure - fix ntp_synced

Call ntp_synced() frequently enough. This fixes a potential bug where,
when ntp_synced is called, it isn't close enough to the second boundary
(which is tested inside ntp_synced()). Without this it is possible that, since
we only called it once a second, it would never sync the persistent clock.

Signed-off-by: John Stultz <[email protected]>

kernel/time/timeofday.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)

Index: mm-merge/kernel/time/timeofday.c
===================================================================
--- mm-merge.orig/kernel/time/timeofday.c
+++ mm-merge/kernel/time/timeofday.c
@@ -534,7 +534,7 @@ static void timeofday_periodic_hook(unsi
/* advance the ntp state machine by ns interval: */
ntp_advance(delta_nsec);

- /* only call ntp_leapsecond and ntp_sync once a sec: */
+ /* only call ntp_leapsecond once a sec: */
second_check += delta_nsec;
if (second_check >= NSEC_PER_SEC) {
/* do ntp leap second processing: */
@@ -545,11 +545,11 @@ static void timeofday_periodic_hook(unsi
wall_time_ts.tv_sec += leapsecond;
monotonic_time_offset_ts.tv_sec += leapsecond;
}
- /* sync the persistent clock: */
- if (ntp_synced())
- sync_persistent_clock(wall_time_ts);
second_check -= NSEC_PER_SEC;
}
+ /* sync the persistent clock: */
+ if (ntp_synced())
+ sync_persistent_clock(wall_time_ts);

/* if necessary, switch clocksources: */
next = get_next_clocksource();

2006-02-21 06:22:56

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 11/11] Time: i386 clocksource drivers - backout pmtmr changes

Sync up with changes to avoid pmtmr x86-64 collision.

In order to avoid bisection troubles, this patch must be folded into
its parent (time-i386-clocksource-drivers.patch) as the parent will
fail to build.

Signed-off-by: John Stultz <[email protected]>

drivers/clocksource/Makefile | 2 +-
drivers/clocksource/acpi_pm.c | 13 ++++---------
2 files changed, 5 insertions(+), 10 deletions(-)

Index: mm-merge/drivers/clocksource/acpi_pm.c
===================================================================
--- mm-merge.orig/drivers/clocksource/acpi_pm.c
+++ mm-merge/drivers/clocksource/acpi_pm.c
@@ -24,25 +24,20 @@
/* Number of PMTMR ticks expected during calibration run */
#define PMTMR_TICKS_PER_SEC 3579545

-#if (defined(CONFIG_X86) && (!defined(CONFIG_X86_64)))
-# include "mach_timer.h"
-# define PMTMR_EXPECTED_RATE ((PMTMR_TICKS_PER_SEC*CALIBRATE_TIME_MSEC)/1000)
-#endif
-
/*
* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
* in arch/i386/acpi/boot.c
*/
-extern u32 acpi_pmtmr_ioport;
-extern int acpi_pmtmr_buggy;
+u32 pmtmr_ioport;
+int acpi_pmtmr_buggy;

#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */

static inline u32 read_pmtmr(void)
{
/* mask the output to 24 bits */
- return inl(acpi_pmtmr_ioport) & ACPI_PM_MASK;
+ return inl(pmtmr_ioport) & ACPI_PM_MASK;
}

static cycle_t acpi_pm_read_verified(void)
@@ -85,7 +80,7 @@ static int __init init_acpi_pm_clocksour
u32 value1, value2;
unsigned int i;

- if (!acpi_pmtmr_ioport)
+ if (!pmtmr_ioport)
return -ENODEV;

clocksource_acpi_pm.mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC,
Index: mm-merge/drivers/clocksource/Makefile
===================================================================
--- mm-merge.orig/drivers/clocksource/Makefile
+++ mm-merge/drivers/clocksource/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
-obj-$(CONFIG_ACPI) += acpi_pm.o
+obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o

2006-02-21 06:22:59

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 10/11] Time: i386 conversion part 4 - del timer_tsc.c

Somehow the deletion of timer_tsc.c got dropped. This removes the file.

Signed-off-by: John Stultz <[email protected]>
arch/i386/kernel/timers/timer_tsc.c | 439 ------------------------------------
1 files changed, 439 deletions(-)

Index: mm-merge/arch/i386/kernel/timers/timer_tsc.c
===================================================================
--- mm-merge.orig/arch/i386/kernel/timers/timer_tsc.c
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * This code largely moved from arch/i386/kernel/time.c.
- * See comments there for proper credits.
- *
- * 2004-06-25 Jesper Juhl
- * moved mark_offset_tsc below cpufreq_delayed_get to avoid gcc 3.4
- * failing to inline.
- */
-
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-#include <linux/errno.h>
-#include <linux/cpufreq.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-
-#include <asm/timer.h>
-#include <asm/io.h>
-/* processor.h for distable_tsc flag */
-#include <asm/processor.h>
-
-#include "io_ports.h"
-#include "mach_timer.h"
-
-#include <asm/hpet.h>
-#include <asm/i8253.h>
-
-#ifdef CONFIG_HPET_TIMER
-static unsigned long hpet_usec_quotient;
-static unsigned long hpet_last;
-static struct timer_opts timer_tsc;
-#endif
-
-static int use_tsc;
-/* Number of usecs that the last interrupt was delayed */
-static int delay_at_last_interrupt;
-
-static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
-static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
-static unsigned long long monotonic_base;
-static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
-
-/* Avoid compensating for lost ticks before TSCs are synched */
-static int detect_lost_ticks;
-static int __init start_lost_tick_compensation(void)
-{
- detect_lost_ticks = 1;
- return 0;
-}
-late_initcall(start_lost_tick_compensation);
-
-/* convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by [email protected]) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use khz divisor instead of mhz to keep a better percision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * ([email protected])
- *
- * [email protected] "math is hard, lets go shopping!"
- */
-static unsigned long cyc2ns_scale;
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline void set_cyc2ns_scale(unsigned long cpu_khz)
-{
- cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
-
-static int count2; /* counter for mark_offset_tsc() */
-
-/* Cached *multiplier* to convert TSC counts to microseconds.
- * (see the equation below).
- * Equal to 2^32 * (1 / (clocks per usec) ).
- * Initialized in time_init.
- */
-static unsigned long fast_gettimeoffset_quotient;
-
-static unsigned long get_offset_tsc(void)
-{
- register unsigned long eax, edx;
-
- /* Read the Time Stamp Counter */
-
- rdtsc(eax,edx);
-
- /* .. relative to previous jiffy (32 bits is enough) */
- eax -= last_tsc_low; /* tsc_low delta */
-
- /*
- * Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
- * = (tsc_low delta) * (usecs_per_clock)
- * = (tsc_low delta) * (usecs_per_jiffy / clocks_per_jiffy)
- *
- * Using a mull instead of a divl saves up to 31 clock cycles
- * in the critical path.
- */
-
- __asm__("mull %2"
- :"=a" (eax), "=d" (edx)
- :"rm" (fast_gettimeoffset_quotient),
- "0" (eax));
-
- /* our adjusted time offset in microseconds */
- return delay_at_last_interrupt + edx;
-}
-
-static unsigned long long monotonic_clock_tsc(void)
-{
- unsigned long long last_offset, this_offset, base;
- unsigned seq;
-
- /* atomically read monotonic base & last_offset */
- do {
- seq = read_seqbegin(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- base = monotonic_base;
- } while (read_seqretry(&monotonic_lock, seq));
-
- /* Read the Time Stamp Counter */
- rdtscll(this_offset);
-
- /* return the value in ns */
- return base + cycles_2_ns(this_offset - last_offset);
-}
-
-static void delay_tsc(unsigned long loops)
-{
- unsigned long bclock, now;
-
- rdtscl(bclock);
- do
- {
- rep_nop();
- rdtscl(now);
- } while ((now-bclock) < loops);
-}
-
-#ifdef CONFIG_HPET_TIMER
-static void mark_offset_tsc_hpet(void)
-{
- unsigned long long this_offset, last_offset;
- unsigned long offset, temp, hpet_current;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- /*
- * It is important that these two operations happen almost at
- * the same time. We do the RDTSC stuff first, since it's
- * faster. To avoid any inconsistencies, we need interrupts
- * disabled locally.
- */
- /*
- * Interrupts are just disabled locally since the timer irq
- * has the SA_INTERRUPT flag set. -arca
- */
- /* read Pentium cycle counter */
-
- hpet_current = hpet_readl(HPET_COUNTER);
- rdtsc(last_tsc_low, last_tsc_high);
-
- /* lost tick compensation */
- offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
- if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))
- && detect_lost_ticks) {
- int lost_ticks = (offset - hpet_last) / hpet_tick;
- jiffies_64 += lost_ticks;
- }
- hpet_last = hpet_current;
-
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- monotonic_base += cycles_2_ns(this_offset - last_offset);
- write_sequnlock(&monotonic_lock);
-
- /* calculate delay_at_last_interrupt */
- /*
- * Time offset = (hpet delta) * ( usecs per HPET clock )
- * = (hpet delta) * ( usecs per tick / HPET clocks per tick)
- * = (hpet delta) * ( hpet_usec_quotient ) / (2^32)
- * Where,
- * hpet_usec_quotient = (2^32 * usecs per tick)/HPET clocks per tick
- */
- delay_at_last_interrupt = hpet_current - offset;
- ASM_MUL64_REG(temp, delay_at_last_interrupt,
- hpet_usec_quotient, delay_at_last_interrupt);
-}
-#endif
-
-static void mark_offset_tsc(void)
-{
- unsigned long lost,delay;
- unsigned long delta = last_tsc_low;
- int count;
- int countmp;
- static int count1 = 0;
- unsigned long long this_offset, last_offset;
- static int lost_count = 0;
-
- write_seqlock(&monotonic_lock);
- last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- /*
- * It is important that these two operations happen almost at
- * the same time. We do the RDTSC stuff first, since it's
- * faster. To avoid any inconsistencies, we need interrupts
- * disabled locally.
- */
-
- /*
- * Interrupts are just disabled locally since the timer irq
- * has the SA_INTERRUPT flag set. -arca
- */
-
- /* read Pentium cycle counter */
-
- rdtsc(last_tsc_low, last_tsc_high);
-
- spin_lock(&i8253_lock);
- outb_p(0x00, PIT_MODE); /* latch the count ASAP */
-
- count = inb_p(PIT_CH0); /* read the latched count */
- count |= inb(PIT_CH0) << 8;
-
- /*
- * VIA686a test code... reset the latch if count > max + 1
- * from timer_pit.c - cjb
- */
- if (count > LATCH) {
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff, PIT_CH0);
- outb(LATCH >> 8, PIT_CH0);
- count = LATCH - 1;
- }
-
- spin_unlock(&i8253_lock);
-
- if (pit_latch_buggy) {
- /* get center value of last 3 time lutch */
- if ((count2 >= count && count >= count1)
- || (count1 >= count && count >= count2)) {
- count2 = count1; count1 = count;
- } else if ((count1 >= count2 && count2 >= count)
- || (count >= count2 && count2 >= count1)) {
- countmp = count;count = count2;
- count2 = count1;count1 = countmp;
- } else {
- count2 = count1; count1 = count; count = count1;
- }
- }
-
- /* lost tick compensation */
- delta = last_tsc_low - delta;
- {
- register unsigned long eax, edx;
- eax = delta;
- __asm__("mull %2"
- :"=a" (eax), "=d" (edx)
- :"rm" (fast_gettimeoffset_quotient),
- "0" (eax));
- delta = edx;
- }
- delta += delay_at_last_interrupt;
- lost = delta/(1000000/HZ);
- delay = delta%(1000000/HZ);
- if (lost >= 2 && detect_lost_ticks) {
- jiffies_64 += lost-1;
-
- /* sanity check to ensure we're not always losing ticks */
- if (lost_count++ > 100) {
- printk(KERN_WARNING "Losing too many ticks!\n");
- printk(KERN_WARNING "TSC cannot be used as a timesource. \n");
- printk(KERN_WARNING "Possible reasons for this are:\n");
- printk(KERN_WARNING " You're running with Speedstep,\n");
- printk(KERN_WARNING " You don't have DMA enabled for your hard disk (see hdparm),\n");
- printk(KERN_WARNING " Incorrect TSC synchronization on an SMP system (see dmesg).\n");
- printk(KERN_WARNING "Falling back to a sane timesource now.\n");
-
- clock_fallback();
- }
- } else
- lost_count = 0;
- /* update the monotonic base value */
- this_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
- monotonic_base += cycles_2_ns(this_offset - last_offset);
- write_sequnlock(&monotonic_lock);
-
- /* calculate delay_at_last_interrupt */
- count = ((LATCH-1) - count) * TICK_SIZE;
- delay_at_last_interrupt = (count + LATCH/2) / LATCH;
-
- /* catch corner case where tick rollover occured
- * between tsc and pit reads (as noted when
- * usec delta is > 90% # of usecs/tick)
- */
- if (lost && abs(delay - delay_at_last_interrupt) > (900000/HZ))
- jiffies_64++;
-}
-
-static int __init init_tsc(char* override)
-{
-
- /* check clock override */
- if (override[0] && strncmp(override,"tsc",3)) {
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled()) {
- printk(KERN_ERR "Warning: clock= override failed. Defaulting to tsc\n");
- } else
-#endif
- {
- return -ENODEV;
- }
- }
-
- /*
- * If we have APM enabled or the CPU clock speed is variable
- * (CPU stops clock on HLT or slows clock to save power)
- * then the TSC timestamps may diverge by up to 1 jiffy from
- * 'real time' but nothing will break.
- * The most frequent case is that the CPU is "woken" from a halt
- * state by the timer interrupt itself, so we get 0 error. In the
- * rare cases where a driver would "wake" the CPU and request a
- * timestamp, the maximum error is < 1 jiffy. But timestamps are
- * still perfectly ordered.
- * Note that the TSC counter will be reset if APM suspends
- * to disk; this won't break the kernel, though, 'cuz we're
- * smart. See arch/i386/kernel/apm.c.
- */
- /*
- * Firstly we have to do a CPU check for chips with
- * a potentially buggy TSC. At this point we haven't run
- * the ident/bugs checks so we must run this hook as it
- * may turn off the TSC flag.
- *
- * NOTE: this doesn't yet handle SMP 486 machines where only
- * some CPU's have a TSC. Thats never worked and nobody has
- * moaned if you have the only one in the world - you fix it!
- */
-
- count2 = LATCH; /* initialize counter for mark_offset_tsc() */
-
- if (cpu_has_tsc) {
- unsigned long tsc_quotient;
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled() && hpet_use_timer) {
- unsigned long result, remain;
- printk("Using TSC for gettimeofday\n");
- tsc_quotient = calibrate_tsc_hpet(NULL);
- timer_tsc.mark_offset = &mark_offset_tsc_hpet;
- /*
- * Math to calculate hpet to usec multiplier
- * Look for the comments at get_offset_tsc_hpet()
- */
- ASM_DIV64_REG(result, remain, hpet_tick,
- 0, KERNEL_TICK_USEC);
- if (remain > (hpet_tick >> 1))
- result++; /* rounding the result */
-
- hpet_usec_quotient = result;
- } else
-#endif
- {
- tsc_quotient = calibrate_tsc();
- }
-
- if (tsc_quotient) {
- fast_gettimeoffset_quotient = tsc_quotient;
- use_tsc = 1;
- /*
- * We could be more selective here I suspect
- * and just enable this for the next intel chips ?
- */
- /* report CPU clock rate in Hz.
- * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
- * clock/second. Our precision is about 100 ppm.
- */
- { unsigned long eax=0, edx=1000;
- __asm__("divl %2"
- :"=a" (cpu_khz), "=d" (edx)
- :"r" (tsc_quotient),
- "0" (eax), "1" (edx));
- printk("Detected %u.%03u MHz processor.\n",
- cpu_khz / 1000, cpu_khz % 1000);
- }
- set_cyc2ns_scale(cpu_khz);
- return 0;
- }
- }
- return -ENODEV;
-}
-
-static int tsc_resume(void)
-{
- write_seqlock(&monotonic_lock);
- /* Assume this is the last mark offset time */
- rdtsc(last_tsc_low, last_tsc_high);
-#ifdef CONFIG_HPET_TIMER
- if (is_hpet_enabled() && hpet_use_timer)
- hpet_last = hpet_readl(HPET_COUNTER);
-#endif
- write_sequnlock(&monotonic_lock);
- return 0;
-}
-
-
-
-
-/************************************************************/
-
-/* tsc timer_opts struct */
-static struct timer_opts timer_tsc = {
- .name = "tsc",
- .mark_offset = mark_offset_tsc,
- .get_offset = get_offset_tsc,
- .monotonic_clock = monotonic_clock_tsc,
- .delay = delay_tsc,
- .read_timer = read_timer_tsc,
- .resume = tsc_resume,
-};
-
-struct init_timer_opts __initdata timer_tsc_init = {
- .init = init_tsc,
- .opts = &timer_tsc,
-};

2006-02-21 06:22:57

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 4/11] Time: clocksource infrastructure - remove nsec_t

Removes nsec_t usage as suggested by Roman Zippel
Also moves the cycle_t definition to clocksource.h

Signed-off-by: John Stultz <[email protected]>

include/linux/clocksource.h | 21 ++++++++++++---------
1 files changed, 12 insertions(+), 9 deletions(-)

Index: mm-merge/include/linux/clocksource.h
===================================================================
--- mm-merge.orig/include/linux/clocksource.h
+++ mm-merge/include/linux/clocksource.h
@@ -15,6 +15,9 @@
#include <asm/div64.h>
#include <asm/io.h>

+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
@@ -169,14 +172,14 @@ static inline int ppm_to_mult_adj(struct
*
* XXX - This could use some mult_lxl_ll() asm optimization
*/
-static inline nsec_t cyc2ns(struct clocksource *cs, int ntp_adj, cycle_t cycles)
+static inline s64 cyc2ns(struct clocksource *cs, int ntp_adj, cycle_t cycles)
{
- u64 ret = (u64)cycles;
+ u64 ret = cycles;

ret *= (cs->mult + ntp_adj);
ret >>= cs->shift;

- return (nsec_t)ret;
+ return ret;
}

/**
@@ -192,10 +195,10 @@ static inline nsec_t cyc2ns(struct clock
*
* XXX - This could use some mult_lxl_ll() asm optimization.
*/
-static inline nsec_t cyc2ns_rem(struct clocksource *cs, int ntp_adj,
+static inline s64 cyc2ns_rem(struct clocksource *cs, int ntp_adj,
cycle_t cycles, u64* rem)
{
- u64 ret = (u64)cycles;
+ u64 ret = cycles;

ret *= (cs->mult + ntp_adj);
if (rem) {
@@ -204,7 +207,7 @@ static inline nsec_t cyc2ns_rem(struct c
}
ret >>= cs->shift;

- return (nsec_t)ret;
+ return ret;
}


@@ -224,7 +227,7 @@ static inline nsec_t cyc2ns_rem(struct c
*/
struct clocksource_interval {
cycle_t cycles;
- nsec_t nsecs;
+ s64 nsecs;
u64 remainder;
u64 remainder_ns_overflow;
};
@@ -278,10 +281,10 @@ calculate_clocksource_interval(struct cl
*
* Unless you're the timeofday_periodic_hook, you should not be using this!
*/
-static inline nsec_t cyc2ns_fixed_rem(struct clocksource_interval interval,
+static inline s64 cyc2ns_fixed_rem(struct clocksource_interval interval,
cycle_t *cycles, u64* rem)
{
- nsec_t delta_nsec = 0;
+ s64 delta_nsec = 0;

while (*cycles > interval.cycles) {
delta_nsec += interval.nsecs;

2006-02-21 06:22:20

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 2/11] Time: reduced ntp rework part 2 (updated)

This patch replaces time-reduced-ntp-rework-part2.patch in 2.6.16-rc4-mm1.

Change the interrupt time NTP code, breaking out the leapsecond processing and
introduces an accessor to a shifted ppm adjustment value so they can be
re-used by the generic timekeeping infrastructure.

For correctness, I've also introduced a new lock, the ntp_lock, which protects
the NTP state machine when accessing it from the generic timekeeping code.
Previously the xtime_lock is used to protect the NTP state variables, but
since the generic timekeeping code does not utilize that lock, the new lock is
necessary.

This should not affect the existing behavior, but just separate the logical
functionality so it can be re-used by the generic timekeeping infrastructure.

Signed-off-by: John Stultz <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Signed-off-by: Adrian Bunk <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: John Stultz <[email protected]>
Index: mm-merge/include/linux/timex.h
===================================================================
--- mm-merge.orig/include/linux/timex.h
+++ mm-merge/include/linux/timex.h
@@ -260,6 +260,8 @@ extern long pps_calcnt; /* calibration
extern long pps_errcnt; /* calibration errors */
extern long pps_stbcnt; /* stability limit exceeded */

+extern seqlock_t ntp_lock;
+
/**
* ntp_clear - Clears the NTP state variables
*
@@ -267,21 +269,40 @@ extern long pps_stbcnt; /* stability li
*/
static inline void ntp_clear(void)
{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ntp_lock, flags);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
+ write_sequnlock_irqrestore(&ntp_lock, flags);
}

/**
* ntp_synced - Returns 1 if the NTP status is not UNSYNC
- *
*/
static inline int ntp_synced(void)
{
return !(time_status & STA_UNSYNC);
}

+/**
+ * ntp_get_ppm_adjustment - Returns Shifted PPM adjustment
+ */
+extern long ntp_get_ppm_adjustment(void);
+
+/**
+ * ntp_advance - Advances the NTP state machine by interval_ns
+ */
+extern void ntp_advance(unsigned long interval_ns);
+
+/**
+ * ntp_leapsecond - NTP leapsecond processing code.
+ */
+extern int ntp_leapsecond(struct timespec now);
+
+
/* Required to safely shift negative values */
#define shift_right(x, s) ({ \
__typeof__(x) __x = (x); \
Index: mm-merge/kernel/time.c
===================================================================
--- mm-merge.orig/kernel/time.c
+++ mm-merge/kernel/time.c
@@ -259,6 +259,8 @@ int do_adjtimex(struct timex *txc)
return -EINVAL;

write_seqlock_irq(&xtime_lock);
+ write_seqlock(&ntp_lock);
+
result = time_state; /* mostly `TIME_OK' */

/* Save for later - semantics of adjtime is to return old value */
@@ -396,6 +398,7 @@ leave: if ((time_status & (STA_UNSYNC|ST
txc->calcnt = pps_calcnt;
txc->errcnt = pps_errcnt;
txc->stbcnt = pps_stbcnt;
+ write_sequnlock(&ntp_lock);
write_sequnlock_irq(&xtime_lock);
do_gettimeofday(&txc->time);
notify_arch_cmos_timer();
@@ -513,10 +516,7 @@ int do_settimeofday (struct timespec *tv
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
time_interpolator_reset();
}
write_sequnlock_irq(&xtime_lock);
Index: mm-merge/kernel/timer.c
===================================================================
--- mm-merge.orig/kernel/timer.c
+++ mm-merge/kernel/timer.c
@@ -583,16 +583,99 @@ long time_tolerance = MAXFREQ; /* frequ
long time_precision = 1; /* clock precision (us) */
long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-static long time_phase; /* phase offset (scaled us) */
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
/* frequency offset (scaled ppm)*/
static long time_adj; /* tick adjust (scaled 1 / HZ) */
long time_reftime; /* time at last adjustment (s) */
long time_adjust;
long time_next_adjust;
+static long time_adjust_step; /* per tick time_adjust step */
+
+static long total_sppm; /* shifted ppm sum of all adjustments */
+static long offset_adj_ppm;
+static long tick_adj_ppm;
+static long singleshot_adj_ppm;
+
+#define MAX_SINGLESHOT_ADJ 500 /* (ppm) */
+#define SEC_PER_DAY 86400
+#define END_OF_DAY(x) ((x) + SEC_PER_DAY - ((x) % SEC_PER_DAY) - 1)
+
+/* NTP lock, protects NTP state machine */
+seqlock_t ntp_lock = SEQLOCK_UNLOCKED;
+
+/**
+ * ntp_leapsecond - NTP leapsecond processing code.
+ * now: the current time
+ *
+ * Returns the number of seconds (-1, 0, or 1) that
+ * should be added to the current time to properly
+ * adjust for leapseconds.
+ */
+int ntp_leapsecond(struct timespec now)
+{
+ /*
+ * Leap second processing. If in leap-insert state at
+ * the end of the day, the system clock is set back one
+ * second; if in leap-delete state, the system clock is
+ * set ahead one second.
+ */
+ static time_t leaptime = 0;
+
+ unsigned long flags;
+ int ret = 0;
+
+ write_seqlock_irqsave(&ntp_lock, flags);
+
+ switch (time_state) {
+
+ case TIME_OK:
+ if (time_status & STA_INS) {
+ time_state = TIME_INS;
+ leaptime = END_OF_DAY(now.tv_sec);
+ } else if (time_status & STA_DEL) {
+ time_state = TIME_DEL;
+ leaptime = END_OF_DAY(now.tv_sec);
+ }
+ break;
+
+ case TIME_INS:
+ /* Once we are at (or past) leaptime, insert the second */
+ if (now.tv_sec >= leaptime) {
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+ ret = -1;
+ }
+ break;
+
+ case TIME_DEL:
+ /* Once we are at (or past) leaptime, delete the second */
+ if (now.tv_sec >= leaptime) {
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+ ret = 1;
+ }
+ break;
+
+ case TIME_OOP:
+ /* Wait for the end of the leap second */
+ if (now.tv_sec > (leaptime + 1))
+ time_state = TIME_WAIT;
+ time_state = TIME_WAIT;
+ break;
+
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ break;
+ }
+
+ write_sequnlock_irqrestore(&ntp_lock, flags);
+
+ return ret;
+}

/*
- * this routine handles the overflow of the microsecond field
+ * this routine handles the overflow of the nanosecond field
*
* The tricky bits of code to handle the accurate clock support
* were provided by Dave Mills ([email protected]) of NTP fame.
@@ -678,6 +761,13 @@ static void second_overflow(void)
time_offset -= ltemp;
time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

+ offset_adj_ppm = shift_right(ltemp, SHIFT_UPDATE); /* ppm */
+
+ /* first calculate usec/user_tick offset: */
+ tick_adj_ppm = ((USEC_PER_SEC + USER_HZ/2)/USER_HZ) - tick_usec;
+ /* multiply by user_hz to get usec/sec => ppm: */
+ tick_adj_ppm *= USER_HZ;
+
/*
* Compute the frequency estimate and additional phase adjustment due
* to frequency error for the next second. When the PPS signal is
@@ -742,15 +832,25 @@ static long adjtime_adjustment(void)
}

/**
+ * ntp_get_ppm_adjustment - return shifted PPM adjustment
+ */
+long ntp_get_ppm_adjustment(void)
+{
+ return total_sppm;
+}
+
+/**
* ntp_advance - increments the NTP state machine
* @interval_ns: interval, in nanoseconds
- *
- * Must be holding the xtime writelock when calling.
*/
-static void ntp_advance(unsigned long interval_ns)
+void ntp_advance(unsigned long interval_ns)
{
static unsigned long interval_sum;

+ unsigned long flags;
+
+ write_seqlock_irqsave(&ntp_lock, flags);
+
/* increment the interval sum: */
interval_sum += interval_ns;

@@ -759,6 +859,8 @@ static void ntp_advance(unsigned long in
time_adjust -= adjtime_adjustment();
interval_ns -= tick_nsec;
}
+ /* usec/tick => ppm: */
+ singleshot_adj_ppm = time_adjust_step*(1000000/HZ);

/* Changes by adjtime() do not take effect till next tick. */
if (time_next_adjust != 0) {
@@ -770,6 +872,14 @@ static void ntp_advance(unsigned long in
interval_sum -= NSEC_PER_SEC;
second_overflow();
}
+
+ /* calculate the total continuous ppm adjustment: */
+ total_sppm = time_freq; /* already shifted by SHIFT_USEC */
+ total_sppm += offset_adj_ppm << SHIFT_USEC;
+ total_sppm += tick_adj_ppm << SHIFT_USEC;
+ total_sppm += singleshot_adj_ppm << SHIFT_USEC;
+
+ write_sequnlock_irqrestore(&ntp_lock, flags);
}


@@ -780,6 +890,8 @@ static void ntp_advance(unsigned long in
*/
static inline long phase_advance(void)
{
+ static long time_phase; /* phase offset (scaled us) */
+
long delta = 0;

time_phase += time_adj;
@@ -798,12 +910,28 @@ static inline long phase_advance(void)
*/
static inline void xtime_advance(long delta_nsec)
{
+ int leapsecond;
+
xtime.tv_nsec += delta_nsec;
if (likely(xtime.tv_nsec < NSEC_PER_SEC))
return;

xtime.tv_nsec -= NSEC_PER_SEC;
xtime.tv_sec++;
+
+ /* process leapsecond: */
+ leapsecond = ntp_leapsecond(xtime);
+ if (likely(!leapsecond))
+ return;
+
+ xtime.tv_sec += leapsecond;
+ wall_to_monotonic.tv_sec -= leapsecond;
+ /*
+ * Use of time interpolator for a gradual
+ * change of time:
+ */
+ time_interpolator_update(leapsecond*NSEC_PER_SEC);
+ clock_was_set();
}

/*

2006-02-21 06:24:43

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 9/11] Time: i386 conversion part 3 - backout pmtmr changes

Backout pmtmr_ioport changes to avoid conflicting w/ x86-64

Signed-off-by: John Stultz <[email protected]>

arch/i386/kernel/acpi/boot.c | 2 +-
arch/i386/kernel/time.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)

Index: mm-merge/arch/i386/kernel/acpi/boot.c
===================================================================
--- mm-merge.orig/arch/i386/kernel/acpi/boot.c
+++ mm-merge/arch/i386/kernel/acpi/boot.c
@@ -616,7 +616,7 @@ static int __init acpi_parse_hpet(unsign
#endif

#ifdef CONFIG_X86_PM_TIMER
-u32 pmtmr_ioport;
+extern u32 pmtmr_ioport;
#endif

static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)

2006-02-21 06:22:58

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 5/11] Time: generic timekeeping infrastructure - remove nsec_t

Removes nsec_t as suggested by Roman Zippel
Also removes cycle_t addition from timekeeping-infrastructure patch

Signed-off-by: John Stultz <[email protected]>


Index: mm-merge/kernel/time/timeofday.c
===================================================================
--- mm-merge.orig/kernel/time/timeofday.c
+++ mm-merge/kernel/time/timeofday.c
@@ -114,7 +114,7 @@ static enum {
TIME_SUSPENDED
} time_suspend_state = TIME_RUNNING;

-static nsec_t suspend_start;
+static s64 suspend_start;

/* [Soft-Timers]
* timeofday_timer:
@@ -155,10 +155,10 @@ static void update_legacy_time_values(vo
* called. Returns the number of nanoseconds since the
* last call to timeofday_periodic_hook() (adjusted by NTP scaling)
*/
-static inline nsec_t __get_nsec_offset(void)
+static inline s64 __get_nsec_offset(void)
{
cycle_t cycle_now, cycle_delta;
- nsec_t ns_offset;
+ s64 ns_offset;

/* read clocksource: */
cycle_now = read_clocksource(clock);
@@ -187,7 +187,7 @@ static inline nsec_t __get_nsec_offset(v
*/
static ktime_t __get_monotonic_clock(void)
{
- nsec_t offset = __get_nsec_offset();
+ s64 offset = __get_nsec_offset();
return ktime_add_ns(system_time, offset);
}

@@ -272,7 +272,7 @@ ktime_t get_realtime_offset(void)
void get_monotonic_clock_ts(struct timespec *ts)
{
unsigned long seq;
- nsec_t offset;
+ s64 offset;

do {
seq = read_seqbegin(&system_time_lock);
@@ -294,7 +294,7 @@ void get_monotonic_clock_ts(struct times
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
unsigned long seq;
- nsec_t nsecs;
+ s64 nsecs;

do {
seq = read_seqbegin(&system_time_lock);
@@ -432,7 +439,7 @@ static int timeofday_suspend_hook(struct
*/
static int timeofday_resume_hook(struct sys_device *dev)
{
- nsec_t suspend_end, suspend_time;
+ s64 suspend_end, suspend_time;
unsigned long flags;

write_seqlock_irqsave(&system_time_lock, flags);
@@ -506,7 +513,7 @@ static void timeofday_periodic_hook(unsi
unsigned long flags;

cycle_t cycle_now, cycle_delta;
- nsec_t delta_nsec;
+ s64 delta_nsec;
static u64 remainder;

long leapsecond = 0;
@@ -517,7 +524,7 @@ static void timeofday_periodic_hook(unsi

int something_changed = 0;
struct clocksource old_clock;
- static nsec_t second_check;
+ static s64 second_check;

write_seqlock_irqsave(&system_time_lock, flags);

Index: mm-merge/include/linux/time.h
===================================================================
--- mm-merge.orig/include/linux/time.h
+++ mm-merge/include/linux/time.h
@@ -27,9 +27,6 @@ struct timezone {

#ifdef __KERNEL__

-/* timeofday base types */
-typedef u64 cycle_t;
-
/* Parameters used to convert the timespec values: */
#define MSEC_PER_SEC 1000L
#define USEC_PER_SEC 1000000L
@@ -152,9 +149,9 @@ extern struct timeval ns_to_timeval(cons
/**
* timespec_add_ns - Adds nanoseconds to a timespec
* @a: pointer to timespec to be incremented
- * @ns: the nanoseconds value to be added
+ * @ns: unsigned nanoseconds value to be added
*/
-static inline void timespec_add_ns(struct timespec *a, nsec_t ns)
+static inline void timespec_add_ns(struct timespec *a, u64 ns)
{
ns += a->tv_nsec;
while(unlikely(ns >= NSEC_PER_SEC)) {
Index: mm-merge/include/asm-generic/timeofday.h
===================================================================
--- mm-merge.orig/include/asm-generic/timeofday.h
+++ mm-merge/include/asm-generic/timeofday.h
@@ -15,7 +15,7 @@

#ifdef CONFIG_GENERIC_TIME
/* Required externs */
-extern nsec_t read_persistent_clock(void);
+extern s64 read_persistent_clock(void);
extern void sync_persistent_clock(struct timespec ts);

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
Index: mm-merge/Documentation/timekeeping.txt
===================================================================
--- mm-merge.orig/Documentation/timekeeping.txt
+++ mm-merge/Documentation/timekeeping.txt
@@ -159,7 +159,7 @@ Porting an arch usually requires the fol

1. Define CONFIG_GENERIC_TIME in the arches Kconfig
2. Implementing the following functions
- nsec_t read_persistent_clock(void)
+ s64 read_persistent_clock(void)
void sync_persistent_clock(struct timespec ts)
3. Removing all of the arch specific timekeeping code
do_gettimeofday()

2006-02-21 06:22:21

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 8/11] Time: i386 conversion part 3 - remove nsec_t

Removes nsec_t usage as suggested by Roman Zippel

Signed-off-by: John Stultz <[email protected]>


arch/i386/kernel/time.c | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)

Index: mm-merge/arch/i386/kernel/time.c
===================================================================
--- mm-merge.orig/arch/i386/kernel/time.c
+++ mm-merge/arch/i386/kernel/time.c
@@ -223,9 +223,9 @@ unsigned long get_cmos_time(void)
EXPORT_SYMBOL(get_cmos_time);

/* arch specific timeofday hooks */
-nsec_t read_persistent_clock(void)
+s64 read_persistent_clock(void)
{
- return (nsec_t)get_cmos_time() * NSEC_PER_SEC;
+ return (s64)get_cmos_time() * NSEC_PER_SEC;
}

void sync_persistent_clock(struct timespec ts)

2006-02-21 06:22:20

by john stultz

[permalink] [raw]
Subject: [-mm PATCH 7/11] Time: generic timekeeping infrastructure - wall_offset helper cleanup

Cleans up some of the wall_time offset manipulations
with a __set_wall_time_offset() helper.

Also fixes some whitespaces.

Signed-off-by: John Stultz <[email protected]>

Documentation/timekeeping.txt | 2
include/asm-generic/timeofday.h | 2
include/linux/time.h | 7 --
kernel/time/clocksource.c | 1
kernel/time/timeofday.c | 97 +++++++++++++++++++---------------------
kernel/timer.c | 1
6 files changed, 53 insertions(+), 57 deletions(-)

Index: mm-merge/kernel/timer.c
===================================================================
--- mm-merge.orig/kernel/timer.c
+++ mm-merge/kernel/timer.c
@@ -882,7 +882,6 @@ void ntp_advance(unsigned long interval_
write_sequnlock_irqrestore(&ntp_lock, flags);
}

-
#ifdef CONFIG_GENERIC_TIME
# define update_wall_time(x) do { } while (0)
#else
Index: mm-merge/kernel/time/clocksource.c
===================================================================
--- mm-merge.orig/kernel/time/clocksource.c
+++ mm-merge/kernel/time/clocksource.c
@@ -155,6 +155,7 @@ int register_clocksource(struct clocksou
spin_unlock_irqrestore(&clocksource_lock, flags);
return ret;
}
+
EXPORT_SYMBOL(register_clocksource);

/**
Index: mm-merge/kernel/time/timeofday.c
===================================================================
--- mm-merge.orig/kernel/time/timeofday.c
+++ mm-merge/kernel/time/timeofday.c
@@ -338,6 +338,35 @@ void do_gettimeofday(struct timeval *tv)
EXPORT_SYMBOL(do_gettimeofday);

/**
+ * __increment_system_time - Increments system time
+ * @delta: nanosecond delta to add to the time variables
+ *
+ * Private helper that increments system_time and related
+ * timekeeping variables.
+ */
+static void __increment_system_time(s64 delta)
+{
+ system_time = ktime_add_ns(system_time, delta);
+ timespec_add_ns(&wall_time_ts, delta);
+ timespec_add_ns(&mono_time_ts, delta);
+}
+
+/**
+ * __set_wall_time_offset - Sets the wall time offset
+ * @delta: nanosecond delta to adjust to the time variables
+ *
+ * Private helper that adjusts wall_time_offset and related
+ * timekeeping variables.
+ */
+static void __set_wall_time_offset(ktime_t val)
+{
+ wall_time_offset = val;
+ wall_time_ts = ktime_to_timespec(ktime_add(system_time,
+ wall_time_offset));
+ monotonic_time_offset_ts = ktime_to_timespec(wall_time_offset);
+}
+
+/**
* do_settimeofday - Sets the time of day
* @tv: pointer to the timespec variable containing the new time
*
@@ -356,12 +385,7 @@ int do_settimeofday(struct timespec *tv)
write_seqlock_irqsave(&system_time_lock, flags);

/* calculate the new offset from the monotonic clock */
- wall_time_offset = ktime_sub(newtime, __get_monotonic_clock());
-
- /* update the internal timespec variables */
- wall_time_ts = ktime_to_timespec(ktime_add(system_time,
- wall_time_offset));
- monotonic_time_offset_ts = ktime_to_timespec(wall_time_offset);
+ __set_wall_time_offset(ktime_sub(newtime, __get_monotonic_clock()));

ntp_clear();
update_legacy_time_values();
@@ -377,20 +401,6 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);

/**
- * __increment_system_time - Increments system time
- * @delta: nanosecond delta to add to the time variables
- *
- * Private helper that increments system_time and related
- * timekeeping variables.
- */
-static void __increment_system_time(nsec_t delta)
-{
- system_time = ktime_add_ns(system_time, delta);
- timespec_add_ns(&wall_time_ts, delta);
- timespec_add_ns(&mono_time_ts, delta);
-}
-
-/**
* timeofday_suspend_hook - allows the timeofday subsystem to be shutdown
* @dev: unused
* @state: unused
@@ -539,12 +549,9 @@ static void timeofday_periodic_hook(unsi
if (second_check >= NSEC_PER_SEC) {
/* do ntp leap second processing: */
leapsecond = ntp_leapsecond(wall_time_ts);
- if (leapsecond) {
- wall_time_offset = ktime_add_ns(wall_time_offset,
- leapsecond * NSEC_PER_SEC);
- wall_time_ts.tv_sec += leapsecond;
- monotonic_time_offset_ts.tv_sec += leapsecond;
- }
+ if (leapsecond)
+ __set_wall_time_offset(ktime_add_ns(wall_time_offset,
+ leapsecond * NSEC_PER_SEC));
second_check -= NSEC_PER_SEC;
}
/* sync the persistent clock: */
@@ -662,13 +669,8 @@ void __init timeofday_init(void)

/* initialize wall_time_offset to now: */
/* XXX - this should be something like ns_to_ktime() */
- wall_time_offset = ktime_add_ns(wall_time_offset,
- read_persistent_clock());
-
- /* initialize timespec values: */
- wall_time_ts = ktime_to_timespec(ktime_add(system_time,
- wall_time_offset));
- monotonic_time_offset_ts = ktime_to_timespec(wall_time_offset);
+ __set_wall_time_offset(ktime_add_ns(wall_time_offset,
+ read_persistent_clock()));

/* clear NTP scaling factor & state machine: */
ntp_adj = 0;