2007-05-22 15:20:00

by Jeremy Fitzhardinge

[permalink] [raw]
Subject: [patch 14/33] xen: xen time implementation

Xen maintains a base clock which measures nanoseconds since system
boot. This is provided to guests via a shared page which contains a
base time in ns, a tsc timestamp at that point and tsc frequency
parameters. Guests can compute the current time by reading the tsc
and using it to extrapolate the current time from the basetime. The
hypervisor makes sure that the frequency parameters are updated
regularly, particularly if the tsc changes rate or stops.

This is implemented as a clocksource, so the interface to the rest of
the kernel is a clocksource which simply returns the current
time directly in nanoseconds.

Xen also provides a simple timer mechanism, which allows a timeout to
be set in the future. When that time arrives, a timer event is sent
to the guest. There are two timer interfaces:
- An old one which also delivers a stream of (unused) ticks at 100Hz,
and on the same event, the actual timer events. The 100Hz ticks
cause a lot of spurious wakeups, but are basically harmless.
- The new timer interface doesn't have the 100Hz ticks, and can also
fail if the specified time is in the past.

This code presents the Xen timer as a clockevent driver, and uses the
new interface by preference.

Signed-off-by: Jeremy Fitzhardinge <[email protected]>
Signed-off-by: Chris Wright <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>

---
arch/i386/xen/Makefile | 2
arch/i386/xen/enlighten.c | 6
arch/i386/xen/time.c | 402 +++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 409 insertions(+), 1 deletion(-)

===================================================================
--- a/arch/i386/xen/Makefile
+++ b/arch/i386/xen/Makefile
@@ -1,2 +1,2 @@ obj-y := enlighten.o setup.o features.o
obj-y := enlighten.o setup.o features.o multicalls.o mmu.o \
- events.o
+ events.o time.o
===================================================================
--- a/arch/i386/xen/enlighten.c
+++ b/arch/i386/xen/enlighten.c
@@ -595,6 +595,12 @@ static const struct paravirt_ops xen_par
.arch_setup = xen_arch_setup,
.init_IRQ = xen_init_IRQ,

+ .time_init = xen_time_init,
+ .set_wallclock = xen_set_wallclock,
+ .get_wallclock = xen_get_wallclock,
+ .get_cpu_khz = xen_cpu_khz,
+ .sched_clock = xen_clocksource_read,
+
.cpuid = xen_cpuid,

.set_debugreg = xen_set_debugreg,
===================================================================
--- /dev/null
+++ b/arch/i386/xen/time.c
@@ -0,0 +1,402 @@
+/*
+ * Xen time implementation.
+ *
+ * This is implemented in terms of a clocksource driver which uses
+ * the hypervisor clock as a nanosecond timebase, and a clockevent
+ * driver which uses the hypervisor's timer mechanism.
+ *
+ * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include "xen-ops.h"
+
+#define XEN_SHIFT 22 /* clocksource mult/shift; they cancel (see xen_clocksource) */
+#define TIMER_SLOP 100000 /* Xen may fire a timer up to this many ns early */
+
+/* These are periodically updated in shared_info, and then copied here. */
+struct shadow_time_info {
+ u64 tsc_timestamp; /* TSC at last update of time vals. */
+ u64 system_timestamp; /* Time, in nanosecs, since boot. */
+ u32 tsc_to_nsec_mul; /* tsc->ns multiplier (see scale_delta). */
+ int tsc_shift; /* tsc->ns shift, applied before the multiply. */
+ u32 version; /* Snapshot of Xen's update generation count. */
+};
+
+/* Per-cpu copy of this vcpu's time parameters from the shared page. */
+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
+
+/*
+ * Report the CPU frequency in kHz, derived by inverting Xen's
+ * tsc->nanosecond scaling parameters (ns = tsc * mul >> 32, with a
+ * prior shift by tsc_shift - see scale_delta()).  Only vcpu 0's
+ * parameters are consulted.
+ */
+unsigned long xen_cpu_khz(void)
+{
+ u64 cpu_khz = 1000000ULL << 32;
+ const struct vcpu_time_info *info =
+ &HYPERVISOR_shared_info->vcpu_info[0].time;
+
+ do_div(cpu_khz, info->tsc_to_system_mul);
+ if (info->tsc_shift < 0)
+ cpu_khz <<= -info->tsc_shift;
+ else
+ cpu_khz >>= info->tsc_shift;
+
+ return cpu_khz;
+}
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area.  Xen updates vcpu_time_info seqlock-style: the version field is
+ * odd while an update is in progress and changes when one completes, so
+ * retry until a stable, even version is observed.
+ */
+static void get_time_values_from_xen(void)
+{
+ struct vcpu_time_info *src;
+ struct shadow_time_info *dst;
+
+ /* Stay on this cpu so src and dst refer to the same vcpu. */
+ preempt_disable();
+
+ src = &__get_cpu_var(xen_vcpu)->time;
+ dst = &__get_cpu_var(shadow_time);
+
+ do {
+ dst->version = src->version;
+ rmb(); /* read version before the payload */
+ dst->tsc_timestamp = src->tsc_timestamp;
+ dst->system_timestamp = src->system_time;
+ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
+ dst->tsc_shift = src->tsc_shift;
+ rmb(); /* finish the payload before re-checking version */
+ } while ((src->version & 1) | (dst->version ^ src->version));
+
+ preempt_enable();
+}
+
+/*
+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result: (delta << shift) * mul_frac >> 32
+ * (the shift is to the right when 'shift' is negative).
+ */
+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+ u64 product;
+#ifdef __i386__
+ u32 tmp1, tmp2;
+#endif
+
+ if (shift < 0)
+ delta >>= -shift;
+ else
+ delta <<= shift;
+
+#ifdef __i386__
+ /*
+ * 64x32->64 fractional multiply, done as two 32x32->64 multiplies:
+ * product = hi(delta)*mul_frac + (lo(delta)*mul_frac >> 32).
+ */
+ __asm__ (
+ "mul %5 ; "
+ "mov %4,%%eax ; "
+ "mov %%edx,%4 ; "
+ "mul %5 ; "
+ "xor %5,%5 ; "
+ "add %4,%%eax ; "
+ "adc %5,%%edx ; "
+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif __x86_64__
+ /* Full 64x64->128 multiply, then keep bits 32..95 of the result. */
+ __asm__ (
+ "mul %%rdx ; shrd $32,%%rdx,%%rax"
+ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+#else
+#error implement me!
+#endif
+
+ return product;
+}
+
+/* Nanoseconds elapsed since the shadow's tsc_timestamp, from the tsc. */
+static u64 get_nsec_offset(struct shadow_time_info *shadow)
+{
+ u64 now, delta;
+ rdtscll(now);
+ delta = now - shadow->tsc_timestamp;
+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+}
+
+/*
+ * Clocksource read callback: nanoseconds since boot, computed as Xen's
+ * last published system time plus a tsc-based extrapolation.  Refreshes
+ * the per-cpu shadow from the shared page on every call.
+ *
+ * NOTE(review): the extrapolation uses per-cpu tsc and scale factors,
+ * so values read on different cpus are not guaranteed to be mutually
+ * monotonic - confirm whether the timekeeping core can tolerate that.
+ */
+cycle_t xen_clocksource_read(void)
+{
+ struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
+ cycle_t ret;
+
+ get_time_values_from_xen();
+
+ ret = shadow->system_timestamp + get_nsec_offset(shadow);
+
+ put_cpu_var(shadow_time);
+
+ return ret;
+}
+
+/*
+ * Compute the current wallclock time: Xen publishes the wallclock as of
+ * system boot (wc_sec/wc_nsec, seqlock-versioned by wc_version), to
+ * which we add the time elapsed since boot from the clocksource.
+ */
+static void xen_read_wallclock(struct timespec *ts)
+{
+ const struct shared_info *s = HYPERVISOR_shared_info;
+ u32 version;
+ u64 delta;
+ struct timespec now;
+
+ /* get wallclock at system boot; retry while an update is in flight */
+ do {
+ version = s->wc_version;
+ rmb();
+ now.tv_sec = s->wc_sec;
+ now.tv_nsec = s->wc_nsec;
+ rmb();
+ } while ((s->wc_version & 1) | (version ^ s->wc_version));
+
+ delta = xen_clocksource_read(); /* time since system boot */
+ delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
+
+ /* split the total nanosecond count back into sec/nsec */
+ now.tv_nsec = do_div(delta, NSEC_PER_SEC);
+ now.tv_sec = delta;
+
+ set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+}
+
+/* paravirt get_wallclock hook: current wallclock time in whole seconds. */
+unsigned long xen_get_wallclock(void)
+{
+ struct timespec ts;
+
+ xen_read_wallclock(&ts);
+
+ return ts.tv_sec;
+}
+
+/*
+ * paravirt set_wallclock hook.  An unprivileged domain cannot set the
+ * machine wallclock, so simply report failure.
+ */
+int xen_set_wallclock(unsigned long now)
+{
+ /* do nothing for domU */
+ return -1;
+}
+
+static struct clocksource xen_clocksource __read_mostly = {
+ .name = "xen",
+ .rating = 400, /* high rating: prefer when available */
+ .read = xen_clocksource_read,
+ .mask = ~0,
+ /* read() already returns ns, so mult/shift cancel: (x << 22) >> 22 */
+ .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
+ .shift = XEN_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ Xen clockevent implementation
+
+ Xen has two clockevent implementations:
+
+ The old timer_op one works with all released versions of Xen prior
+ to version 3.0.4. This version of the hypervisor provides a
+ single-shot timer with nanosecond resolution. However, sharing the
+ same event channel is a 100Hz tick which is delivered while the
+ vcpu is running. We don't care about or use this tick, but it will
+ cause the core time code to think the timer fired too soon, and
+ will end up resetting it each time. It could be filtered, but
+ doing so has complications when the ktime clocksource is not yet
+ the xen clocksource (ie, at boot time).
+
+ The new vcpu_op-based timer interface allows the tick timer period
+ to be changed or turned off. The tick timer is not useful as a
+ periodic timer because events are only delivered to running vcpus.
+ The one-shot timer can report when a timeout is in the past, so
+ set_next_event is capable of returning -ETIME when appropriate.
+ This interface is used when available.
+*/
+
+
+/*
+ Get a hypervisor absolute time. In theory we could maintain an
+ offset between the kernel's time and the hypervisor's time, and
+ apply that to a kernel's absolute timeout. Unfortunately the
+ hypervisor and kernel times can drift even if the kernel is using
+ the Xen clocksource, because ntp can warp the kernel's clocksource.
+*/
+static s64 get_abs_timeout(unsigned long delta)
+{
+ return xen_clocksource_read() + delta; /* delta is in ns */
+}
+
+/* clockevent set_mode hook for the (old) timer_op interface. */
+static void xen_timerop_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch(mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* unsupported */
+ WARN_ON(1);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ break; /* nothing to do: one-shot is the only supported mode */
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ HYPERVISOR_set_timer_op(0); /* cancel timeout */
+ break;
+ }
+}
+
+/*
+ * Program the next timer event, 'delta' ns from now, via the old
+ * timer_op hypercall.  Always reports success: this interface cannot
+ * tell us whether the deadline was already past (see comment below).
+ */
+static int xen_timerop_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
+ BUG();
+
+ /* We may have missed the deadline, but there's no real way of
+ knowing for sure. If the event was in the past, then we'll
+ get an immediate interrupt. */
+
+ return 0;
+}
+
+/*
+ * Clockevent device backed by the old timer_op interface (works on all
+ * Xen versions, but also delivers an unwanted 100Hz tick - see the
+ * big comment above).
+ */
+static const struct clock_event_device xen_timerop_clockevent = {
+ .name = "xen",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+
+ .max_delta_ns = 0xffffffff, /* ~4.29 seconds */
+ .min_delta_ns = TIMER_SLOP,
+
+ /* deltas are passed through in ns untouched */
+ .mult = 1,
+ .shift = 0,
+ .rating = 500,
+
+ .set_mode = xen_timerop_set_mode,
+ .set_next_event = xen_timerop_set_next_event,
+};
+
+
+
+/* clockevent set_mode hook for the newer vcpu_op timer interface. */
+static void xen_vcpuop_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ int cpu = smp_processor_id();
+
+ switch(mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ WARN_ON(1); /* unsupported */
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* turn off the hypervisor's periodic (100Hz) tick */
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ BUG();
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ /* stop both the single-shot and the periodic timer */
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
+ HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ BUG();
+ break;
+ }
+}
+
+/*
+ * Program the next timer event via VCPUOP_set_singleshot_timer.
+ * VCPU_SSHOTTMR_future asks Xen to fail with -ETIME rather than fire
+ * immediately when the absolute timeout is already in the past; that
+ * return value is passed back to the caller.
+ */
+static int xen_vcpuop_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ int cpu = smp_processor_id();
+ struct vcpu_set_singleshot_timer single;
+ int ret;
+
+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ single.timeout_abs_ns = get_abs_timeout(delta);
+ single.flags = VCPU_SSHOTTMR_future;
+
+ ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
+
+ /* -ETIME is the only expected failure (timeout already past) */
+ BUG_ON(ret != 0 && ret != -ETIME);
+
+ return ret;
+}
+
+/*
+ * Clockevent device backed by the newer vcpu_op interface, used by
+ * preference when available - no spurious 100Hz tick.
+ */
+static const struct clock_event_device xen_vcpuop_clockevent = {
+ .name = "xen",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+
+ .max_delta_ns = 0xffffffff, /* ~4.29 seconds */
+ .min_delta_ns = TIMER_SLOP,
+
+ /* deltas are passed through in ns untouched */
+ .mult = 1,
+ .shift = 0,
+ .rating = 500,
+
+ .set_mode = xen_vcpuop_set_mode,
+ .set_next_event = xen_vcpuop_set_next_event,
+};
+
+/* Which template to clone per-cpu: timer_op until xen_time_init()
+ detects the vcpu_op interface. */
+static const struct clock_event_device *xen_clockevent =
+ &xen_timerop_clockevent;
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+
+/* VIRQ_TIMER handler: relay the event to this cpu's clockevent handler. */
+static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
+ irqreturn_t ret;
+
+ ret = IRQ_NONE;
+ if (evt->event_handler) {
+ evt->event_handler(evt);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/*
+ * Bind VIRQ_TIMER for 'cpu' and register its per-cpu clockevent device,
+ * cloned from whichever template xen_clockevent points at.
+ */
+static void xen_setup_timer(int cpu)
+{
+ const char *name;
+ struct clock_event_device *evt;
+ int irq;
+
+ printk("installing Xen timer for CPU %d\n", cpu);
+
+ /* name is handed to the irq layer and never freed here; on
+ allocation failure fall back to a static placeholder */
+ name = kasprintf(GFP_KERNEL, "timer%d", cpu);
+ if (!name)
+ name = "<timer kasprintf failed>";
+
+ /* NOTE(review): a negative return (bind failure) is not checked
+ here - confirm bind_virq_to_irqhandler cannot fail in this path */
+ irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ name, NULL);
+
+ evt = &get_cpu_var(xen_clock_events);
+ memcpy(evt, xen_clockevent, sizeof(*evt));
+
+ evt->cpumask = cpumask_of_cpu(cpu);
+ evt->irq = irq;
+ clockevents_register_device(evt);
+
+ put_cpu_var(xen_clock_events);
+}
+
+/*
+ * paravirt time_init hook: register the Xen clocksource, pick a timer
+ * interface, set the initial wallclock time, and set up this (boot)
+ * cpu's timer.
+ */
+__init void xen_time_init(void)
+{
+ int cpu = smp_processor_id();
+
+ get_time_values_from_xen();
+
+ clocksource_register(&xen_clocksource);
+
+ /* Probe for the newer interface by trying one of its operations. */
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
+ /* Successfully turned off 100hz tick, so we have the
+ vcpuop-based timer interface */
+ printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
+ xen_clockevent = &xen_vcpuop_clockevent;
+ }
+
+ /* Set initial system time with full resolution */
+ xen_read_wallclock(&xtime);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ /* keep the tsc usable: the clocksource reads it (get_nsec_offset) */
+ tsc_disable = 0;
+
+ xen_setup_timer(cpu);
+}

--


2007-06-06 08:38:29

by Jan Beulich

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

>+cycle_t xen_clocksource_read(void)
>+{
>+ struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
>+ cycle_t ret;
>+
>+ get_time_values_from_xen();
>+
>+ ret = shadow->system_timestamp + get_nsec_offset(shadow);
>+
>+ put_cpu_var(shadow_time);
>+
>+ return ret;
>+}

I'm afraid this mechanism is pretty unreliable on SMP: getnstimeofday() and
do_gettimeofday() both use the difference between the last snapshot taken
and the current value read from the clock source. Since I had added this
clocksource code to our kernel, I had reproducible hangs on one of the
systems I regularly work with (you may have seen the respective thread
on xen-devel), which recently I finally found time to look into. The issue is
that on that system, transition into ACPI mode takes over 600ms (SMM
execution, and hence no interrupts delivered during that time), and with
Xen using the PIT (PM timer support was added by Keir as a result of this,
but that doesn't cure the problem here, it just reduces the likelihood it'll
be encountered) platform time and local time got pretty much out of sync.

Xen itself knows to deal with this (by using an error correction factor to
slow down the local [TSC-based] clock), but for the kernel such a situation
may be fatal: If clocksource->cycle_last was most recently set on a CPU
with shadow->tsc_to_nsec_mul sufficiently different from that where
getnstimeofday() is being used, timekeeping.c's __get_nsec_offset() will
calculate a huge nanosecond value (due to cyc2ns() doing unsigned
operations), worth about 4000s. This value may then be used to set a
timeout that was intended to be a few milliseconds, effectively yielding
a hung app (and perhaps system).

I'm sure the time keeping code can't deal with negative values returned
from __get_nsec_offset() (timespec_add_ns() is an example, used in
__get_realtime_clock_ts()), otherwise a potential solution might have
been to set the clock source's multiplier and shift to one and zero
respectively. But I think that a clock source can be expected to be
monotonic anyway, which Xen's interpolation mechanism doesn't
guarantee across multiple CPUs. (I'm actually beginning to think that
this might also be the reason for certain test suites occasionally reporting
timeouts to fire early.)

Unfortunately so far I haven't been able to think of a reasonable solution
to this - a simplistic approach like making xen_clocksource_read() check
the value it is about to return against the last value it returned doesn't
seem to be a good idea (time might appear to have stopped over some
period of time otherwise), nor does attempting to adjust the shadowed
tsc_to_nsec_mul values (because the kernel can't know whether it should
boost the lagging CPU or throttle the rushing one).

Jan


2007-06-06 08:55:06

by Keir Fraser

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

On 6/6/07 09:39, "Jan Beulich" <[email protected]> wrote:

> The issue is
> that on that system, transition into ACPI mode takes over 600ms (SMM
> execution, and hence no interrupts delivered during that time), and with
> Xen using the PIT (PM timer support was added by Keir as a result of this,
> but that doesn't cure the problem here, it just reduces the likelihood it'll
> be encountered) platform time and local time got pretty much out of sync.

If you have an ACPI PM timer in your system (and if you have SMM then your
system is almost certainly modern enough to have one) then surely the
problem is fixed for all practical purposes? The problem was overflow of a
fixed-width platform counter. The PIT wraps every ~50ms, but the ACPI PM
timer will wrap only every ~4s. It would be quite unreasonable for SMM to
take the CPU away for multiple seconds, even as a one-time boot operation.

-- Keir

2007-06-06 09:29:54

by Jan Beulich

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

>>> Keir Fraser <[email protected]> 06.06.07 10:54 >>>
>On 6/6/07 09:39, "Jan Beulich" <[email protected]> wrote:
>
>> The issue is
>> that on that system, transition into ACPI mode takes over 600ms (SMM
>> execution, and hence no interrupts delivered during that time), and with
>> Xen using the PIT (PM timer support was added by Keir as a result of this,
>> but that doesn't cure the problem here, it just reduces the likelihood it'll
>> be encountered) platform time and local time got pretty much out of sync.
>
>If you have an ACPI PM timer in your system (and if you have SMM then your
>system is almost certainly modern enough to have one) then surely the
>problem is fixed for all practical purposes? The problem was overflow of a
>fixed-width platform counter. The PIT wraps every ~50ms, but the ACPI PM
>timer will wrap only every ~4s. It would be quite unreasonable for SMM to
>take the CPU away for multiple seconds, even as a one-time boot operation.

No, I don't think the problem's gone with the PM timer - it is just much less
likely. Since you depend on the TSC (which must generally be assumed to be
unsynchronized across CPUs) and on the error correction factor (which shows
non-zero values every few seconds), getting the interpolated times on two
CPUs out of sync is still possible, and given the way the time keeping code
works even being off by just a single nanosecond may be fatal.

Jan

2007-06-06 09:56:44

by Keir Fraser

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

On 6/6/07 10:30, "Jan Beulich" <[email protected]> wrote:

>> If you have an ACPI PM timer in your system (and if you have SMM then your
>> system is almost certainly modern enough to have one) then surely the
>> problem is fixed for all practical purposes? The problem was overflow of a
>> fixed-width platform counter. The PIT wraps every ~50ms, but the ACPI PM
>> timer will wrap only every ~4s. It would be quite unreasonable for SMM to
>> take the CPU away for multiple seconds, even as a one-time boot operation.
>
> No, I don't think the problem's gone with the PM timer - it is just much less
> likely. Since you depend on the TSC (which must generally be assumed be
> unsyncronized across CPUs) and on the error correction factor (which shows
> non-zero values every few seconds), getting the interpolated times on two
> CPUs out of sync is still possible, and given the way the time keeping code
> works even being off by just a single nanosecond may be fatal.

If the error across CPUS is +/- just a few microseconds at worst then having
the clocksource clamp to no less than the last timestamp returned seems a
reasonable fix. Time won't 'stop' for longer than the cross-CPU error, and
that should always be a tiny value.

-- Keir

2007-06-06 10:05:40

by Jeremy Fitzhardinge

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

Jan Beulich wrote:
> Xen itself knows to deal with this (by using an error correction factor to
> slow down the local [TSC-based] clock), but for the kernel such a situation
> may be fatal: If clocksource->cycle_last was most recently set on a CPU
> with shadow->tsc_to_nsec_mul sufficiently different from that where
> getnstimeofday() is being used, timekeeping.c's __get_nsec_offset() will
> calculate a huge nanosecond value (due to cyc2ns() doing unsigned
> operations), worth abut 4000s. This value may then be used to set a
> timeout that was intended to be a few milliseconds, effectively yielding
> a hung app (and perhaps system).
>

Hm. I had a similar situation in the stolen time code, and I ended up
using signed values so I could clamp at zero. Though that might have
been another bug; either way, the clamp is still there.

I wonder if cyc2ns might not be better using signed operations? Or
perhaps better, the time code should endeavour to do things on a
completely per-cpu basis (haven't really given this any thought).

> I'm sure the time keeping code can't deal with negative values returned
> from __get_nsec_offset() (timespec_add_ns() is an example, used in
> __get_realtime_clock_ts()), otherwise a potential solution might have
> been to set the clock source's multiplier and shift to one and zero
> respectively.

I don't quite follow you here, but wouldn't setting the multiplier/shift
to 1/0 preclude being able to warp the clocksource with ntp?

> But I think that a clock source can be expected to be
> monotonic anyway, which Xen's interpolation mechanism doesn't
> guarantee across multiple CPUs. (I'm actually beginning to think that
> this might also be the reason for certain test suites occasionally reporting
> timeouts to fire early.)
>

Does the kernel expect the tsc clocksource to be completely monotonic
across cpus? Any form of cpu-local clocksource is going to have this
problem; I wonder if clocksources can really only be useful if they're
always referring to a single system-wide time reference - seems like a
bit of a limitation.

> Unfortunately so far I haven't been able to think of a reasonable solution
> to this - a simplistic approach like making xen_clocksource_read() check
> the value it is about to return against the last value it returned doesn't
> seem to be a good idea (time might appear to have stopped over some
> period of time otherwise), nor does attempting to adjust the shadowed
> tsc_to_nsec_mul values (because the kernel can't know whether it should
> boost the lagging CPU or throttle the rushing one).

I once had some code in there to do that, implemented in very boneheaded
way with a spinlock to protect the "last time returned" variable. I
expect there's a better way to implement it.

J

2007-06-06 10:19:38

by Jan Beulich

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

>> But I think that a clock source can be expected to be
>> monotonic anyway, which Xen's interpolation mechanism doesn't
>> guarantee across multiple CPUs. (I'm actually beginning to think that
>> this might also be the reason for certain test suites occasionally reporting
>> timeouts to fire early.)
>>
>
>Does the kernel expect the tsc clocksource to be completely monotonic
>across cpus? Any form of cpu-local clocksource is going to have this
>problem; I wonder if clocksources can really only be useful if they're
>always referring to a single system-wide time reference - seems like a
>bit of a limitation.

I suppose so - the clock source's rating gets set to zero if it can be predicted
that the TSCs aren't all synchronized, which should pretty much exclude the
use of this clock source for any other than fallback if there's really nothing
else available.

Jan

2007-06-06 10:26:41

by Andi Kleen

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

On Wednesday 06 June 2007 12:05:22 Jeremy Fitzhardinge wrote:
> Jan Beulich wrote:
> > Xen itself knows to deal with this (by using an error correction factor to
> > slow down the local [TSC-based] clock), but for the kernel such a situation
> > may be fatal: If clocksource->cycle_last was most recently set on a CPU
> > with shadow->tsc_to_nsec_mul sufficiently different from that where
> > getnstimeofday() is being used, timekeeping.c's __get_nsec_offset() will
> > calculate a huge nanosecond value (due to cyc2ns() doing unsigned
> > operations), worth abut 4000s. This value may then be used to set a
> > timeout that was intended to be a few milliseconds, effectively yielding
> > a hung app (and perhaps system).
> >
>
> Hm. I had a similar situation in the stolen time code, and I ended up
> using signed values so I could clamp at zero. Though that might have
> been another bug; either way, the clamp is still there.
>
> I wonder if cyc2ns might not be better using signed operations? Or
> perhaps better, the time code should endevour to do things on a
> completely per-cpu basis (haven't really given this any thought).

This is being worked on.


> > Unfortunately so far I haven't been able to think of a reasonable solution
> > to this - a simplistic approach like making xen_clocksource_read() check
> > the value it is about to return against the last value it returned doesn't
> > seem to be a good idea (time might appear to have stopped over some
> > period of time otherwise), nor does attempting to adjust the shadowed
> > tsc_to_nsec_mul values (because the kernel can't know whether it should
> > boost the lagging CPU or throttle the rushing one).
>
> I once had some code in there to do that, implemented in very boneheaded
> way with a spinlock to protect the "last time returned" variable. I
> expect there's a better way to implement it.

But any per CPU setup likely needs this to avoid non monotonicity

-Andi

2007-06-06 10:59:59

by Jan Beulich

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

>>> Keir Fraser <[email protected]> 06.06.07 11:56 >>>
>On 6/6/07 10:30, "Jan Beulich" <[email protected]> wrote:
>
>>> If you have an ACPI PM timer in your system (and if you have SMM then your
>>> system is almost certainly modern enough to have one) then surely the
>>> problem is fixed for all practical purposes? The problem was overflow of a
>>> fixed-width platform counter. The PIT wraps every ~50ms, but the ACPI PM
>>> timer will wrap only every ~4s. It would be quite unreasonable for SMM to
>>> take the CPU away for multiple seconds, even as a one-time boot operation.
>>
>> No, I don't think the problem's gone with the PM timer - it is just much less
>> likely. Since you depend on the TSC (which must generally be assumed be
>> unsyncronized across CPUs) and on the error correction factor (which shows
>> non-zero values every few seconds), getting the interpolated times on two
>> CPUs out of sync is still possible, and given the way the time keeping code
>> works even being off by just a single nanosecond may be fatal.
>
>If the error across CPUS is +/- just a few microseconds at worst then having
>the clocksource clamp to no less than the last timestamp returned seems a
>reasonable fix. Time won't 'stop' for longer than the cross-CPU error, and
>that should always be a tiny value.

Are you sure this is also true when e.g. a CPU gets throttled due to thermal
conditions? It is my understanding that both the duty cycle adjustment and
the frequency reduction would yield a reduced rate TSC, which would be
accounted for only the next time the local clock gets calibrated. Otherwise,
immediate calibration (and vcpu update) would need to be forced out of the
thermal interrupt.

Jan

2007-06-06 11:53:17

by Keir Fraser

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation




On 6/6/07 12:00, "Jan Beulich" <[email protected]> wrote:

>> If the error across CPUS is +/- just a few microseconds at worst then having
>> the clocksource clamp to no less than the last timestamp returned seems a
>> reasonable fix. Time won't 'stop' for longer than the cross-CPU error, and
>> that should always be a tiny value.
>
> Are you sure this is also true when e.g. a CPU gets throttled due to thermal
> conditions? It is my understanding that both the duty cycle adjustment and
> the frequency reduction would yield a reduced rate TSC, which would be
> accounted for only the next time the local clock gets calibrated. Otherwise,
> immediate calibration (and vcpu update) would need to be forced out of the
> thermal interrupt.

Yes, this could be an issue. Is there any way to get an interrupt or MCE
when thermal throttling occurs?

-- Keir

2007-06-06 12:18:48

by Andi Kleen

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation


>
> Yes, this could be an issue. Is there any way to get an interrupt or MCE
> when thermal throttling occurs?

Yes you can get an thermal interrupt from the local APIC. See the Linux
kernel source. Of course there would be still a race window.

On the other hand some timing issues on throttling are probably
the smallest of the users' problems when it really happens.

Standard Linux just ignores it.

-Andi


2007-06-06 12:46:25

by Jan Beulich

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

>>> Andi Kleen <[email protected]> 06.06.07 14:18 >>>
>
>>
>> Yes, this could be an issue. Is there any way to get an interrupt or MCE
>> when thermal throttling occurs?
>
>Yes you can get an thermal interrupt from the local APIC. See the Linux
>kernel source. Of course there would be still a race window.
>
>On the other hand some timing issues on throttling are probably
>the smallest of the users' problems when it really happens.

Not if this results in your box hanging - I think throttling is exactly intended
to keep the box alive as long as possible (and I've seen throttling in action,
with the box happily recovering from the situation - after having seen it a
few times I checked and found the fan covered with dust).

>Standard Linux just ignores it.

Jan

2007-06-06 12:53:39

by Andi Kleen

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

On Wednesday 06 June 2007 14:46:59 Jan Beulich wrote:
> >>> Andi Kleen <[email protected]> 06.06.07 14:18 >>>
> >
> >>
> >> Yes, this could be an issue. Is there any way to get an interrupt or MCE
> >> when thermal throttling occurs?
> >
> >Yes you can get an thermal interrupt from the local APIC. See the Linux
> >kernel source. Of course there would be still a race window.
> >
> >On the other hand some timing issues on throttling are probably
> >the smallest of the users' problems when it really happens.
>
> Not if this results in your box hanging

Yes it shouldn't hang. Just saying that some non monotonicity in the returned
values under this abnormal condition is probably not the world's end.

-Andi

2007-06-06 12:54:55

by Keir Fraser

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation




On 6/6/07 13:46, "Jan Beulich" <[email protected]> wrote:

>> On the other hand some timing issues on throttling are probably
>> the smallest of the users' problems when it really happens.
>
> Not if this results in your box hanging - I think throttling is exactly
> intended
> to keep the box alive as long as possible (and I've seen throttling in action,
> with the box happily recovering from the situation - after having seen it a
> few times I checked and found the fan covered with dust).

Clamping to last returned timestamp value would be fine here. Time would go
moderately haywire for a while (lurch forwards and then stop for a while),
but wouldn't go backwards and should recover reasonably within the timescale
of the thermal event itself. I don't see an issue with just implementing
this clamping if it is needed.

-- Keir

2007-06-06 14:15:49

by Jeremy Fitzhardinge

[permalink] [raw]
Subject: Re: [Xen-devel] [patch 14/33] xen: xen time implementation

Andi Kleen wrote:
>> I once had some code in there to do that, implemented in very boneheaded
>> way with a spinlock to protect the "last time returned" variable. I
>> expect there's a better way to implement it.
>>
>
> But any per CPU setup likely needs this to avoid non monotonicity

Yeah. The point I didn't quite make was that this should be something
that the clock core should handle rather than dealing with it in every
clocksource.

J