2010-07-13 02:25:45

by Zachary Amsden

[permalink] [raw]
Subject: KVM timekeeping fixes, V2

Discovered breakage in patches due to an unresolved merge.
Also, had to move 09/18 past 08/18 to resolve a compile issue.


2010-07-13 02:25:49

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 01/18] Make TSC offset writes non-preemptible

Ensure that the storing of the offset and the reading of the TSC
are never preempted by taking a spinlock. While the lock is overkill
now, it is useful later in this patch series.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 4 +++-
arch/x86/kvm/svm.c | 31 ++++++++++++++++++-------------
arch/x86/kvm/vmx.c | 22 ++++++++--------------
arch/x86/kvm/x86.c | 17 ++++++++++++++++-
arch/x86/kvm/x86.h | 2 ++
5 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 502e53f..3b4efe2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -394,8 +394,8 @@ struct kvm_arch {
gpa_t ept_identity_map_addr;

unsigned long irq_sources_bitmap;
- u64 vm_init_tsc;
s64 kvmclock_offset;
+ spinlock_t tsc_write_lock;

struct kvm_xen_hvm_config xen_hvm_config;

@@ -522,6 +522,8 @@ struct kvm_x86_ops {

bool (*has_wbinvd_exit)(void);

+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
const struct trace_print_flags *exit_reasons_str;
};

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 56c9b6b..6671053 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2543,20 +2543,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
struct vcpu_svm *svm = to_svm(vcpu);

switch (ecx) {
- case MSR_IA32_TSC: {
- u64 tsc_offset = data - native_read_tsc();
- u64 g_tsc_offset = 0;
-
- if (is_nested(svm)) {
- g_tsc_offset = svm->vmcb->control.tsc_offset -
- svm->nested.hsave->control.tsc_offset;
- svm->nested.hsave->control.tsc_offset = tsc_offset;
- }
-
- svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
-
+ case MSR_IA32_TSC:
+ guest_write_tsc(vcpu, data);
break;
- }
case MSR_K6_STAR:
svm->vmcb->save.star = data;
break;
@@ -3429,6 +3418,20 @@ static bool svm_has_wbinvd_exit(void)
return true;
}

+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 g_tsc_offset = 0;
+
+ if (is_nested(svm)) {
+ g_tsc_offset = svm->vmcb->control.tsc_offset -
+ svm->nested.hsave->control.tsc_offset;
+ svm->nested.hsave->control.tsc_offset = offset;
+ }
+
+ svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+}
+
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3515,6 +3518,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_supported_cpuid = svm_set_supported_cpuid,

.has_wbinvd_exit = svm_has_wbinvd_exit,
+
+ .write_tsc_offset = svm_write_tsc_offset,
};

static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2fdcc98..9055ca6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1153,12 +1153,11 @@ static u64 guest_read_tsc(void)
}

/*
- * writes 'guest_tsc' into guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
+ * writes 'offset' into guest's timestamp counter offset register
*/
-static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
- vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
+ vmcs_write64(TSC_OFFSET, offset);
}

/*
@@ -1231,7 +1230,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr;
- u64 host_tsc;
int ret = 0;

switch (msr_index) {
@@ -1261,8 +1259,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_TSC:
- rdtscll(host_tsc);
- guest_write_tsc(data, host_tsc);
+ guest_write_tsc(vcpu, data);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2517,7 +2514,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
u32 host_sysenter_cs, msr_low, msr_high;
u32 junk;
- u64 host_pat, tsc_this, tsc_base;
+ u64 host_pat;
unsigned long a;
struct desc_ptr dt;
int i;
@@ -2658,12 +2655,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);

- tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
- rdtscll(tsc_this);
- if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
- tsc_base = tsc_this;
-
- guest_write_tsc(0, tsc_base);
+ guest_write_tsc(&vmx->vcpu, 0);

return 0;
}
@@ -4358,6 +4350,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_supported_cpuid = vmx_set_supported_cpuid,

.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+ .write_tsc_offset = vmx_write_tsc_offset,
};

static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6ed3176..a2ee975 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -893,6 +893,21 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

+void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 offset;
+
+ spin_lock(&kvm->arch.tsc_write_lock);
+ offset = data - native_read_tsc();
+ kvm_x86_ops->write_tsc_offset(vcpu, offset);
+ spin_unlock(&kvm->arch.tsc_write_lock);
+
+ /* Reset of TSC must disable overshoot protection below */
+ vcpu->arch.hv_clock.tsc_timestamp = 0;
+}
+EXPORT_SYMBOL_GPL(guest_write_tsc);
+
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
struct timespec ts;
@@ -5487,7 +5502,7 @@ struct kvm *kvm_arch_create_vm(void)
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

- rdtscll(kvm->arch.vm_init_tsc);
+ spin_lock_init(&kvm->arch.tsc_write_lock);

return kvm;
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b7a4047..f8d81f4 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -68,4 +68,6 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);

+void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+
#endif
--
1.7.1

2010-07-13 02:25:54

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 03/18] TSC reset compensation

Attempt to synchronize TSCs which are reset to the same value. In the
case of a reliable hardware TSC, we can just re-use the same offset, but
on non-reliable hardware, we can get closer by adjusting the offset to
match the elapsed time.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 3 +++
arch/x86/kvm/x86.c | 31 ++++++++++++++++++++++++++++++-
2 files changed, 33 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3b4efe2..4b42893 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -396,6 +396,9 @@ struct kvm_arch {
unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
spinlock_t tsc_write_lock;
+ u64 last_tsc_nsec;
+ u64 last_tsc_offset;
+ u64 last_tsc_write;

struct kvm_xen_hvm_config xen_hvm_config;

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2ee975..bb7451b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -896,10 +896,39 @@ static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
- u64 offset;
+ u64 offset, ns, elapsed;
+ struct timespec ts;

spin_lock(&kvm->arch.tsc_write_lock);
offset = data - native_read_tsc();
+ ktime_get_ts(&ts);
+ monotonic_to_bootbased(&ts);
+ ns = timespec_to_ns(&ts);
+ elapsed = ns - kvm->arch.last_tsc_nsec;
+
+ /*
+ * Special case: identical write to TSC within 5 seconds of
+ * another CPU is interpreted as an attempt to synchronize
+ * (the 5 seconds is to accommodate host load / swapping).
+ *
+ * In that case, for a reliable TSC, we can match TSC offsets,
+ * or make a best guess using the kernel_ns value.
+ */
+ if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
+ if (!check_tsc_unstable()) {
+ offset = kvm->arch.last_tsc_offset;
+ pr_debug("kvm: matched tsc offset for %llu\n", data);
+ } else {
+ u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
+ tsc_delta = tsc_delta / USEC_PER_SEC;
+ offset += tsc_delta;
+ pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
+ }
+ ns = kvm->arch.last_tsc_nsec;
+ }
+ kvm->arch.last_tsc_nsec = ns;
+ kvm->arch.last_tsc_write = data;
+ kvm->arch.last_tsc_offset = offset;
kvm_x86_ops->write_tsc_offset(vcpu, offset);
spin_unlock(&kvm->arch.tsc_write_lock);

--
1.7.1

2010-07-13 02:25:58

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 05/18] Warn about unstable TSC

If creating an SMP guest with an unstable host TSC, issue a warning.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2d5b97a..36ef649 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5449,6 +5449,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
+ if (check_tsc_unstable() && id != 0)
+ printk_once(KERN_WARNING
+ "kvm: SMP vm created on host with unstable TSC; "
+ "guest TSC will not be reliable\n");
return kvm_x86_ops->vcpu_create(kvm, id);
}

--
1.7.1

2010-07-13 02:26:04

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 07/18] Fix deep C-state TSC desynchronization

When CPUs with unstable TSCs enter a deep C-state, the TSC may stop
running. This forces us to resynchronize. Since we can't tell when
this may potentially happen, we assume the worst by forcing
re-compensation at every point the VCPU task is descheduled.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e718892..666aef3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1863,7 +1863,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}

kvm_x86_ops->vcpu_load(vcpu, cpu);
- if (unlikely(vcpu->cpu != cpu)) {
+ if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
/* Make sure TSC doesn't go backwards */
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
--
1.7.1

2010-07-13 02:26:10

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 10/18] Keep SMP VMs more in sync on unstable TSC

SMP VMs on machines with an unstable TSC have their TSC offset adjusted by the
local offset delta from the last measurement. This does not take into account how
long it has been since the measurement, leading to drift. Minimize the drift
by accounting for any time difference the kernel has observed.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/x86.c | 8 +++++++-
2 files changed, 8 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 324e892..d9e0aa0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -339,6 +339,7 @@ struct kvm_vcpu_arch {
unsigned int time_offset;
struct page *time_page;
u64 last_host_tsc;
+ u64 last_host_ns;

bool nmi_pending;
bool nmi_injected;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51d3f3e..a4d7cb9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1888,8 +1888,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
native_read_tsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
- if (check_tsc_unstable())
+ if (check_tsc_unstable()) {
+ /* Subtract elapsed cycle time */
+ u64 ns = !vcpu->arch.last_host_ns ? 0 :
+ get_kernel_ns() - vcpu->arch.last_host_ns;
+ tsc_delta -= nsec_to_cycles(ns);
kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+ }
kvm_migrate_timers(vcpu);
kvm_request_guest_time_update(vcpu);
vcpu->cpu = cpu;
@@ -1901,6 +1906,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = native_read_tsc();
+ vcpu->arch.last_host_ns = get_kernel_ns();
}

static int is_efer_nx(void)
--
1.7.1

2010-07-13 02:26:16

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 13/18] Move scale_delta into common header

Move the scale_delta function for shift / multiply with 31-bit
precision into a common header so it can be used by both the kernel
and the kvm module.
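
For readers who prefer not to decode the inline assembly, the helper shifts
delta by 'shift' (left if positive, right if negative) and then keeps the top
64 bits of the 96-bit product with the 32-bit fraction, i.e.
(delta' * mul_frac) >> 32. A rough portable equivalent (using the GCC
unsigned __int128 extension, so 64-bit only; a sketch for explanation, not
part of the patch) is:

	static inline u64 scale_delta_portable(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;

		/* 64 x 32 -> 96-bit product, keep bits [95:32] */
		return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
	}

The assembly versions exist because 32-bit x86 has no single instruction for
the widening multiply, and the 64-bit version avoids depending on compiler
extensions.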

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/include/asm/pvclock.h | 38 ++++++++++++++++++++++++++++++++++++++
arch/x86/kernel/pvclock.c | 3 ++-
2 files changed, 40 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index cd02f32..7f7e577 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -12,4 +12,42 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
struct pvclock_vcpu_time_info *vcpu,
struct timespec *ts);

+/*
+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+ u64 product;
+#ifdef __i386__
+ u32 tmp1, tmp2;
+#endif
+
+ if (shift < 0)
+ delta >>= -shift;
+ else
+ delta <<= shift;
+
+#ifdef __i386__
+ __asm__ (
+ "mul %5 ; "
+ "mov %4,%%eax ; "
+ "mov %%edx,%4 ; "
+ "mul %5 ; "
+ "xor %5,%5 ; "
+ "add %4,%%eax ; "
+ "adc %5,%%edx ; "
+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif defined(__x86_64__)
+ __asm__ (
+ "mul %%rdx ; shrd $32,%%rdx,%%rax"
+ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+#else
+#error implement me!
+#endif
+
+ return product;
+}
+
#endif /* _ASM_X86_PVCLOCK_H */
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 239427c..bab3b9e 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -82,7 +82,8 @@ static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
{
u64 delta = native_read_tsc() - shadow->tsc_timestamp;
- return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+ return pvclock_scale_delta(delta, shadow->tsc_to_nsec_mul,
+ shadow->tsc_shift);
}

/*
--
1.7.1

2010-07-13 02:26:20

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 15/18] Implement getnsboottime kernel API

Add a kernel call to get the number of nanoseconds since boot. This
is generally useful enough to make it a generic call.

Signed-off-by: Zachary Amsden <[email protected]>
---
include/linux/time.h | 1 +
kernel/time/timekeeping.c | 27 +++++++++++++++++++++++++++
2 files changed, 28 insertions(+), 0 deletions(-)

diff --git a/include/linux/time.h b/include/linux/time.h
index ea3559f..5d04108 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -145,6 +145,7 @@ extern void getnstimeofday(struct timespec *tv);
extern void getrawmonotonic(struct timespec *ts);
extern void getboottime(struct timespec *ts);
extern void monotonic_to_bootbased(struct timespec *ts);
+extern s64 getnsboottime(void);

extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index caf8d4d..d250f0a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -285,6 +285,33 @@ void ktime_get_ts(struct timespec *ts)
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

+
+/**
+ * getnsboottime - get the bootbased clock in nsec format
+ *
+ * The function calculates the bootbased clock from the realtime
+ * clock, the wall_to_monotonic offset and the total sleep time, and
+ * returns the result in nanoseconds.
+ */
+s64 getnsboottime(void)
+{
+ unsigned int seq;
+ s64 secs, nsecs;
+
+ WARN_ON(timekeeping_suspended);
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
+ secs += total_sleep_time.tv_sec;
+ nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+ nsecs += total_sleep_time.tv_nsec + timekeeping_get_ns();
+
+ } while (read_seqretry(&xtime_lock, seq));
+ return nsecs + (secs * NSEC_PER_SEC);
+}
+EXPORT_SYMBOL_GPL(getnsboottime);
+
/**
* do_gettimeofday - Returns the time of day in a timeval
* @tv: pointer to the timeval to be set
--
1.7.1

2010-07-13 02:26:31

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 18/18] Add timekeeping documentation

Basic informational document about x86 timekeeping and how KVM
is affected.

Signed-off-by: Zachary Amsden <[email protected]>
---
Documentation/kvm/timekeeping.txt | 613 +++++++++++++++++++++++++++++++++++++
1 files changed, 613 insertions(+), 0 deletions(-)
create mode 100644 Documentation/kvm/timekeeping.txt

diff --git a/Documentation/kvm/timekeeping.txt b/Documentation/kvm/timekeeping.txt
new file mode 100644
index 0000000..07dc58f
--- /dev/null
+++ b/Documentation/kvm/timekeeping.txt
@@ -0,0 +1,613 @@
+
+ Timekeeping Virtualization for X86-Based Architectures
+
+ Zachary Amsden <[email protected]>
+ Copyright (c) 2010, Red Hat. All rights reserved.
+
+1) Overview
+2) Timing Devices
+3) TSC Hardware
+4) Virtualization Problems
+
+=========================================================================
+
+1) Overview
+
+One of the most complicated parts of the X86 platform, and specifically of
+the virtualization of this platform, is the plethora of timing devices available
+and the complexity of emulating those devices. In addition, virtualization of
+time introduces a new set of challenges because it introduces a multiplexed
+division of time beyond the control of the guest CPU.
+
+First, we will describe the various timekeeping hardware available, then
+present some of the problems which arise and solutions available, giving
+specific recommendations for certain classes of KVM guests.
+
+The purpose of this document is to collect data and information relevant to
+timekeeping which may be difficult to find elsewhere, specifically,
+information relevant to KVM and hardware-based virtualization.
+
+=========================================================================
+
+2) Timing Devices
+
+First we discuss the basic hardware devices available. TSC and the related
+KVM clock are special enough to warrant a full exposition and are described in
+the following section.
+
+2.1) i8254 - PIT
+
+One of the first timer devices available is the programmable interrupt timer,
+or PIT. The PIT has a fixed frequency 1.193182 MHz base clock and three
+channels which can be programmed to deliver periodic or one-shot interrupts.
+These three channels can be configured in different modes and have individual
+counters. Channels 1 and 2 were not available for general use in the original
+IBM PC, and historically were connected to control RAM refresh and the PC
+speaker. Now the PIT is typically integrated as part of an emulated chipset
+and a separate physical PIT is not used.
+
+The PIT uses I/O ports 0x40 - 0x43. Access to the 16-bit counters is done
+using single or multiple byte access to the I/O ports. There are 6 modes
+available, but not all modes are available to all timers, as only timer 2
+has a connected gate input, required for modes 1 and 5. The gate line is
+controlled by port 61h, bit 0, as illustrated in the following diagram.
+
+ -------------- ----------------
+| | | |
+| 1.1932 MHz |---------->| CLOCK OUT | ---------> IRQ 0
+| Clock | | | |
+ -------------- | +->| GATE TIMER 0 |
+ | ----------------
+ |
+ | ----------------
+ | | |
+ |------>| CLOCK OUT | ---------> 66.3 KHZ DRAM
+ | | | (aka /dev/null)
+ | +->| GATE TIMER 1 |
+ | ----------------
+ |
+ | ----------------
+ | | |
+ |------>| CLOCK OUT | ---------> Port 61h, bit 5
+ | | |
+Port 61h, bit 0 ---------->| GATE TIMER 2 | \_.---- ____
+ ---------------- _| )--|LPF|---Speaker
+ / *---- \___/
+Port 61h, bit 1 -----------------------------------/
+
+The timer modes are now described.
+
+Mode 0: Single Timeout. This is a one-shot software timeout that counts down
+ when the gate is high (always true for timers 0 and 1). When the count
+ reaches zero, the output goes high.
+
+Mode 1: Triggered One-shot. The output is initially set high. When the gate
+ line is set high, a countdown is initiated (which does not stop if the gate is
+ lowered), during which the output is set low. When the count reaches zero,
+ the output goes high.
+
+Mode 2: Rate Generator. The output is initially set high. When the countdown
+ reaches 1, the output goes low for one count and then returns high. The value
+ is reloaded and the countdown automatically resumes. If the gate line goes
+ low, the count is halted. If the output is low when the gate is lowered, the
+ output automatically goes high (this only affects timer 2).
+
+Mode 3: Square Wave. This generates a high / low square wave. The count
+ determines the length of the pulse, which alternates between high and low
+ when zero is reached. The count only proceeds when gate is high and is
+ automatically reloaded on reaching zero. The count is decremented twice at
+ each clock to generate a full high / low cycle at the full periodic rate.
+ If the count is even, the clock remains high for N/2 counts and low for N/2
+ counts; if the count is odd, the clock is high for (N+1)/2 counts and low
+ for (N-1)/2 counts. Only even values are latched by the counter, so odd
+ values are not observed when reading. This is the intended mode for timer 2,
+ which generates sine-like tones by low-pass filtering the square wave output.
+
+Mode 4: Software Strobe. After programming this mode and loading the counter,
+ the output remains high until the counter reaches zero. Then the output
+ goes low for 1 clock cycle and returns high. The counter is not reloaded.
+ Counting only occurs when gate is high.
+
+Mode 5: Hardware Strobe. After programming and loading the counter, the
+ output remains high. When the gate is raised, a countdown is initiated
+ (which does not stop if the gate is lowered). When the counter reaches zero,
+ the output goes low for 1 clock cycle and then returns high. The counter is
+ not reloaded.
+
+In addition to normal binary counting, the PIT supports BCD counting. The
+command port, 0x43, is used to set the counter and mode for each of the three
+timers.
+
+PIT commands, issued to port 0x43, use the following bit encoding:
+
+Bit 7-4: Command (See table below)
+Bit 3-1: Mode (000 = Mode 0, 101 = Mode 5, 11X = undefined)
+Bit 0 : Binary (0) / BCD (1)
+
+Command table:
+
+0000 - Latch Timer 0 count for port 0x40
+ sample and hold the count to be read in port 0x40;
+ additional commands ignored until counter is read;
+ mode bits ignored.
+
+0001 - Set Timer 0 LSB mode for port 0x40
+ set timer to read LSB only and force MSB to zero;
+ mode bits set timer mode
+
+0010 - Set Timer 0 MSB mode for port 0x40
+ set timer to read MSB only and force LSB to zero;
+ mode bits set timer mode
+
+0011 - Set Timer 0 16-bit mode for port 0x40
+ set timer to read / write LSB first, then MSB;
+ mode bits set timer mode
+
+0100 - Latch Timer 1 count for port 0x41 - as described above
+0101 - Set Timer 1 LSB mode for port 0x41 - as described above
+0110 - Set Timer 1 MSB mode for port 0x41 - as described above
+0111 - Set Timer 1 16-bit mode for port 0x41 - as described above
+
+1000 - Latch Timer 2 count for port 0x42 - as described above
+1001 - Set Timer 2 LSB mode for port 0x42 - as described above
+1010 - Set Timer 2 MSB mode for port 0x42 - as described above
+1011 - Set Timer 2 16-bit mode for port 0x42 - as described above
+
+1101 - General counter latch
+ Latch combination of counters into corresponding ports
+ Bit 3 = Counter 2
+ Bit 2 = Counter 1
+ Bit 1 = Counter 0
+ Bit 0 = Unused
+
+1110 - Latch timer status
+ Latch combination of counter mode into corresponding ports
+ Bit 3 = Counter 2
+ Bit 2 = Counter 1
+ Bit 1 = Counter 0
+
+ The output of ports 0x40-0x42 following this command will be:
+
+ Bit 7 = Output pin
+ Bit 6 = Count loaded (0 if timer has expired)
+ Bit 5-4 = Read / Write mode
+ 01 = MSB only
+ 10 = LSB only
+ 11 = LSB / MSB (16-bit)
+ Bit 3-1 = Mode
+ Bit 0 = Binary (0) / BCD mode (1)
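+
+As an illustrative sketch (not taken from any driver; outb and u16 as used in
+the Linux kernel), programming channel 0 as a 100 Hz rate generator in 16-bit
+binary mode would look roughly like this:
+
+	#define PIT_CH0		0x40
+	#define PIT_CMD		0x43
+
+	static void pit_set_100hz(void)
+	{
+		u16 divisor = 11932;		/* 1193182 Hz / 100 Hz */
+
+		/* counter 0, LSB then MSB, mode 2 (rate generator), binary */
+		outb(0x34, PIT_CMD);
+		outb(divisor & 0xff, PIT_CH0);
+		outb(divisor >> 8, PIT_CH0);
+	}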
+
+2.2) RTC
+
+The second device which was available in the original PC was the MC146818 real
+time clock. The original device is now obsolete, and usually emulated by the
+system chipset, sometimes by an HPET and some frankenstein IRQ routing.
+
+The RTC is accessed through CMOS variables, which uses an index register to
+control which bytes are read. Since there is only one index register, read
+of the CMOS and read of the RTC require lock protection (in addition, it is
+dangerous to allow userspace utilities such as hwclock to have direct RTC
+access, as they could corrupt kernel reads and writes of CMOS memory).
+
+The RTC generates an interrupt which is usually routed to IRQ 8. The interrupt
+can function as a periodic timer, an additional once-a-day alarm, and can issue
+interrupts after an update of the CMOS registers by the MC146818 is complete.
+The type of interrupt is signalled in the RTC status registers.
+
+The RTC will update the current time fields by battery power even while the
+system is off. The current time fields should not be read while an update is
+in progress, as indicated in the status register.
+
+The clock uses a 32.768kHz crystal, so bits 6-4 of register A should be
+programmed to a 32kHz divider if the RTC is to count seconds.
+
+This is the RAM map originally used for the RTC/CMOS:
+
+Location Size Description
+------------------------------------------
+00h byte Current second (BCD)
+01h byte Seconds alarm (BCD)
+02h byte Current minute (BCD)
+03h byte Minutes alarm (BCD)
+04h byte Current hour (BCD)
+05h byte Hours alarm (BCD)
+06h byte Current day of week (BCD)
+07h byte Current day of month (BCD)
+08h byte Current month (BCD)
+09h byte Current year (BCD)
+0Ah byte Register A
+ bit 7 = Update in progress
+ bit 6-4 = Divider for clock
+ 000 = 4.194 MHz
+ 001 = 1.049 MHz
+ 010 = 32 kHz
+ 10X = test modes
+ 110 = reset / disable
+ 111 = reset / disable
+ bit 3-0 = Rate selection for periodic interrupt
+ 000 = periodic timer disabled
+ 001 = 3.90625 uS
+ 010 = 7.8125 uS
+ 011 = .122070 mS
+ 100 = .244141 mS
+ ...
+ 1101 = 125 mS
+ 1110 = 250 mS
+ 1111 = 500 mS
+0Bh byte Register B
+ bit 7 = Run (0) / Halt (1)
+ bit 6 = Periodic interrupt enable
+ bit 5 = Alarm interrupt enable
+ bit 4 = Update-ended interrupt enable
+ bit 3 = Square wave interrupt enable
+ bit 2 = BCD calendar (0) / Binary (1)
+ bit 1 = 12-hour mode (0) / 24-hour mode (1)
+ bit 0 = 0 (DST off) / 1 (DST enabled)
+0Ch byte Register C (read only)
+ bit 7 = interrupt request flag (IRQF)
+ bit 6 = periodic interrupt flag (PF)
+ bit 5 = alarm interrupt flag (AF)
+ bit 4 = update interrupt flag (UF)
+ bit 3-0 = reserved
+0Dh byte Register D (read only)
+ bit 7 = RTC has power
+ bit 6-0 = reserved
+32h byte Current century BCD (*)
+ (*) location vendor specific and now determined from ACPI global tables
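+
+As a hedged example (a raw sketch which ignores the locking and the NMI-enable
+bit in the index port that real kernel code must handle), the current second
+could be read as follows:
+
+	#define CMOS_INDEX	0x70
+	#define CMOS_DATA	0x71
+
+	static u8 cmos_read(u8 reg)
+	{
+		outb(reg, CMOS_INDEX);
+		return inb(CMOS_DATA);
+	}
+
+	static u8 rtc_read_seconds(void)
+	{
+		u8 bcd;
+
+		/* wait for "update in progress" (register A, bit 7) to clear */
+		while (cmos_read(0x0A) & 0x80)
+			;
+		bcd = cmos_read(0x00);
+		return (bcd >> 4) * 10 + (bcd & 0x0f);	/* BCD to binary */
+	}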
+
+2.3) APIC
+
+On Pentium and later processors, an on-board timer is available to each CPU
+as part of the Advanced Programmable Interrupt Controller. The APIC is
+accessed through memory-mapped registers and provides interrupt service to each
+CPU, used for IPIs and local timer interrupts.
+
+Although in theory the APIC is a safe and stable source for local interrupts,
+in practice, many bugs and glitches have occurred due to the special nature of
+the APIC CPU-local memory-mapped hardware. Beware that CPU errata may affect
+the use of the APIC and that workarounds may be required. In addition, some of
+these workarounds pose unique constraints for virtualization - requiring either
+extra overhead incurred from extra reads of memory-mapped I/O or additional
+functionality that may be more computationally expensive to implement.
+
+Since the APIC is documented quite well in the Intel and AMD manuals, we will
+avoid repetition of the detail here. It should be pointed out that the APIC
+timer is programmed through the LVT (local vector table) timer register, is
+capable of one-shot or periodic operation, and is based on the bus clock
+divided down by the programmable divider register.
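+
+As a very rough sketch (register offsets per the Intel SDM; calibration of the
+bus clock and all error handling omitted), arming a one-shot local APIC timer
+looks approximately like this:
+
+	#define APIC_LVTT	0x320	/* LVT timer register */
+	#define APIC_TMICT	0x380	/* initial count */
+	#define APIC_TDCR	0x3E0	/* divide configuration */
+
+	static void apic_arm_oneshot(void __iomem *apic, u32 vector, u32 count)
+	{
+		writel(0x3, apic + APIC_TDCR);		/* divide bus clock by 16 */
+		writel(vector, apic + APIC_LVTT);	/* one-shot, unmasked */
+		writel(count, apic + APIC_TMICT);	/* write starts the countdown */
+	}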
+
+2.4) HPET
+
+HPET is quite complex, and was originally intended to replace the PIT / RTC
+support of the X86 PC. It remains to be seen whether that will be the case, as
+the de facto standard of PC hardware is to emulate these older devices. Some
+systems designated as legacy free may support only the HPET as a hardware timer
+device.
+
+The HPET spec is rather loose and vague, requiring at least 3 hardware timers,
+but allowing implementation freedom to support many more. It also imposes no
+fixed rate on the timer frequency, but does impose some extremal values on
+frequency, error and slew.
+
+In general, the HPET is recommended as a high precision (compared to PIT / RTC)
+time source which is independent of local variation (as there is only one HPET
+in any given system). The HPET is also memory-mapped, and its presence is
+indicated through ACPI tables by the BIOS.
+
+Detailed specification of the HPET is beyond the current scope of this
+document, as it is also very well documented elsewhere.
+
+2.5) Offboard Timers
+
+Several cards, both proprietary (watchdog boards) and commonplace (e1000) have
+timing chips built into the cards which may have registers which are accessible
+to kernel or user drivers. To the author's knowledge, using these to generate
+a clocksource for a Linux or other kernel has not yet been attempted and is in
+general frowned upon as not playing by the agreed rules of the game. Such a
+timer device would require additional support to be virtualized properly and is
+not considered important at this time as no known operating system does this.
+
+=========================================================================
+
+3) TSC Hardware
+
+The TSC or time stamp counter is relatively simple in theory; it counts
+processor clock cycles, which can be used as a measure of
+time. In practice, due to a number of problems, it is the most complicated
+timekeeping device to use.
+
+The TSC is represented internally as a 64-bit MSR which can be read with the
+RDMSR, RDTSC, or RDTSCP (when available) instructions. In the past it was
+possible to write the TSC, but on old hardware generally only the low 32-bits
+of the 64-bit counter could be written, and the upper 32-bits of the counter
+were cleared. Now, however, on Intel processors family
+0Fh, for models 3, 4 and 6, and family 06h, models e and f, this restriction
+has been lifted and all 64-bits are writable. On AMD systems, the ability to
+write the TSC MSR is not an architectural guarantee.
+
+The TSC is accessible from CPL 0 and, conditionally, from CPL > 0 software by
+means of the CR4.TSD bit which, when set, disables CPL > 0 TSC access.
+
+Some vendors have implemented an additional instruction, RDTSCP, which returns
+atomically not just the TSC, but an indicator which corresponds to the
+processor number. This can be used to index into an array of TSC variables to
+determine offset information in SMP systems where TSCs are not synchronized.
+The presence of this instruction must be determined by consulting CPUID feature
+bits.
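+
+For illustration, the raw instructions can be wrapped roughly as follows (a
+sketch of the instructions themselves, not the kernel's own accessors):
+
+	static inline u64 read_tsc(void)
+	{
+		u32 lo, hi;
+
+		asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
+		return ((u64)hi << 32) | lo;
+	}
+
+	static inline u64 read_tscp(u32 *aux)
+	{
+		u32 lo, hi;
+
+		/* ECX returns IA32_TSC_AUX, typically holding the CPU number */
+		asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (*aux));
+		return ((u64)hi << 32) | lo;
+	}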
+
+Both VMX and SVM provide extension fields in the virtualization hardware which
+allow the guest-visible TSC to be offset by a constant. Newer implementations
+promise to allow the TSC to additionally be scaled, but this hardware is not
+yet widely available.
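+
+In both cases the relationship the hardware implements is, roughly:
+
+	guest_tsc = host_tsc + tsc_offset;	/* offset may be negative */
+
+so, as a simplified example, a hypervisor wishing to present a guest TSC that
+starts at zero would program tsc_offset = -host_tsc at VCPU creation time.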
+
+3.1) TSC synchronization
+
+The TSC is a CPU-local clock in most implementations. This means, on SMP
+platforms, the TSCs of different CPUs may start at different times depending
+on when the CPUs are powered on. Generally, CPUs on the same die will share
+the same clock; however, this is not always the case.
+
+The BIOS may attempt to resynchronize the TSCs during the poweron process, and
+the operating system or other system software may attempt to do this as well.
+Several hardware limitations make the problem worse: if it is not possible to
+write the full 64-bits of the TSC, it may be impossible to match the TSC in
+newly arriving CPUs to that of the rest of the system, resulting in
+unsynchronized TSCs. Whether this is attempted by BIOS or system software, in
+practice, getting a perfectly synchronized TSC will not be possible unless all
+values are read from the same clock, which generally only is possible on single
+socket systems or those with special hardware support.
+
+3.2) TSC and CPU hotplug
+
+As touched on already, CPUs which arrive later than the boot time of the system
+may not have a TSC value that is synchronized with the rest of the system.
+Either system software, BIOS, or SMM code may actually try to set the TSC
+to a value matching the rest of the system, but a perfect match is usually not
+guaranteed. This can have the effect of bringing a system from a state where
+TSC is synchronized back to a state where TSC synchronization flaws, however
+small, may be exposed to the OS and any virtualization environment.
+
+3.3) TSC and multi-socket / NUMA
+
+Multi-socket systems, especially large multi-socket systems, are likely to have
+individual clocksources rather than a single, universally distributed clock.
+Since these clocks are driven by different crystals, they will not have
+perfectly matched frequency, and temperature and electrical variations will
+cause the CPU clocks, and thus the TSCs to drift over time. Depending on the
+exact clock and bus design, the drift may or may not be fixed in absolute
+error, and may accumulate over time.
+
+In addition, very large systems may deliberately slew the clocks of individual
+cores. This technique, known as spread-spectrum clocking, reduces EMI at the
+clock frequency and harmonics of it, which may be required to pass FCC
+standards for telecommunications and computer equipment.
+
+It is recommended not to trust the TSCs to remain synchronized on NUMA or
+multiple socket systems for these reasons.
+
+3.4) TSC and C-states
+
+C-states, or idling states of the processor, especially C1E and deeper sleep
+states, may be problematic for TSC as well. The TSC may stop advancing in such
+a state, resulting in a TSC which is behind that of other CPUs when execution
+is resumed. Such CPUs must be detected and flagged by the operating system
+based on CPU and chipset identifications.
+
+The TSC in such a case may be corrected by catching it up to a known external
+clocksource.
+
+3.5) TSC frequency change / P-states
+
+To make things slightly more interesting, some CPUs may change frequency. They
+may or may not run the TSC at the same rate, and because the frequency change
+may be staggered or slewed, at some points in time, the TSC rate may not be
+known other than falling within a range of values. In this case, the TSC will
+not be a stable time source, and must be calibrated against a known, stable,
+external clock to be a usable source of time.
+
+Whether the TSC runs at a constant rate or scales with the P-state is model
+dependent and must be determined by inspecting CPUID, chipset or vendor
+specific MSR fields.
+
+In addition, some vendors have known bugs where the P-state is actually
+compensated for properly during normal operation, but when the processor is
+inactive, the P-state may be raised temporarily to service cache misses from
+other processors. In such cases, the TSC on halted CPUs could advance faster
+than that of non-halted processors. AMD Turion processors are known to have
+this problem.
+
+3.6) TSC and STPCLK / T-states
+
+External signals given to the processor may also have the effect of stopping
+the TSC. This is typically done for thermal emergency power control to prevent
+an overheating condition, and typically, there is no way to detect that this
+condition has happened.
+
+3.7) TSC virtualization - VMX
+
+VMX provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
+instructions, which is enough for full virtualization of TSC in any manner. In
+addition, VMX allows passing through the host TSC plus an additional TSC_OFFSET
+field specified in the VMCS. Special instructions must be used to read and
+write the VMCS field.
+
+3.8) TSC virtualization - SVM
+
+SVM provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
+instructions, which is enough for full virtualization of TSC in any manner. In
+addition, SVM allows passing through the host TSC plus an additional offset
+field specified in the SVM control block.
+
+3.9) TSC feature bits in Linux
+
+In summary, there is no way to guarantee the TSC remains in perfect
+synchronization unless it is explicitly guaranteed by the architecture. Even
+if so, the TSCs in multi-socket or NUMA systems may still run independently
+despite being locally consistent.
+
+The following feature bits are used by Linux to signal various TSC attributes,
+but they can only be taken to be meaningful for UP or single node systems.
+
+X86_FEATURE_TSC : The TSC is available in hardware
+X86_FEATURE_RDTSCP : The RDTSCP instruction is available
+X86_FEATURE_CONSTANT_TSC : The TSC rate is unchanged with P-states
+X86_FEATURE_NONSTOP_TSC : The TSC does not stop in C-states
+X86_FEATURE_TSC_RELIABLE : TSC sync checks are skipped (VMware)
+
+4) Virtualization Problems
+
+Timekeeping is especially problematic for virtualization because a number of
+challenges arise. The most obvious problem is that time is now shared between
+the host and, potentially, a number of virtual machines. Thus the virtual
+operating system does not run with 100% usage of the CPU, despite the fact that
+it may very well make that assumption. It may expect it to remain true to very
+exacting bounds when interrupt sources are disabled, but in reality only its
+virtual interrupt sources are disabled, and the machine may still be preempted
+at any time. This causes problems as the passage of real time, the injection
+of machine interrupts and the associated clock sources are no longer completely
+synchronized with real time.
+
+This same problem can occur to a degree on native hardware, as SMM mode may
+steal cycles from the natural operation of X86 systems when SMM mode is used by
+the BIOS, but not in such an extreme fashion. However, the fact that SMM mode
+may cause similar problems to virtualization makes it a good justification for
+solving many of these problems on bare metal.
+
+4.1) Interrupt clocking
+
+One of the most immediate problems that occurs with legacy operating systems
+is that the system timekeeping routines are often designed to keep track of
+time by counting periodic interrupts. These interrupts may come from the PIT
+or the RTC, but the problem is the same: the host virtualization engine may not
+be able to deliver the proper number of interrupts per second, and so guest
+time may fall behind. This is especially problematic if a high interrupt rate
+is selected, such as 1000 Hz, which is unfortunately the default for many Linux
+guests.
+
+There are three approaches to solving this problem; first, it may be possible
+to simply ignore it. Guests which have a separate time source for tracking
+'wall clock' or 'real time' may not need any adjustment of their interrupts to
+maintain proper time. If this is not sufficient, it may be necessary to inject
+additional interrupts into the guest in order to increase the effective
+interrupt rate. This approach leads to complications in extreme conditions,
+where host load or guest lag is too much to compensate for, and thus another
+solution to the problem has arisen: the guest may need to become aware of lost
+ticks and compensate for them internally. Although promising in theory, the
+implementation of this policy in Linux has been extremely error prone, and a
+number of buggy variants of lost tick compensation are distributed across
+commonly used Linux systems.
+
+Windows uses periodic RTC clocking as a means of keeping time internally, and
+thus requires interrupt slewing to keep proper time. It does use a low enough
+rate (ed: is it 18.2 Hz?) however that it has not yet been a problem in
+practice.
+
+4.2) TSC sampling and serialization
+
+As the highest precision time source available, the cycle counter of the CPU
+has aroused much interest from developers. As explained above, this timer has
+many problems unique to its nature as a local, potentially unstable and
+potentially unsynchronized source. One issue which is not unique to the TSC,
+but is highlighted because of its very precise nature is sampling delay. By
+definition, the counter, once read, is already old. However, it is also
+possible for the counter to be read ahead of the actual use of the result.
+This is a consequence of the superscalar execution of the instruction stream,
+which may execute instructions out of order. Such execution is called
+non-serialized. Forcing serialized execution is necessary for precise
+measurement with the TSC, and requires a serializing instruction, such as CPUID
+or an MSR read.
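+
+As a hedged, userspace-flavored illustration, a serialized read issues a
+serializing instruction immediately before RDTSC:
+
+	static inline u64 read_tsc_serialized(void)
+	{
+		u32 lo, hi, eax = 0, ecx = 0;
+
+		/* CPUID acts as a full serializing barrier */
+		asm volatile("cpuid"
+			     : "+a" (eax), "+c" (ecx) : : "ebx", "edx");
+		asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
+		return ((u64)hi << 32) | lo;
+	}
+
+which is exactly the pattern that becomes expensive when CPUID itself traps to
+the hypervisor.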
+
+Since CPUID may actually be virtualized by a trap and emulate mechanism, this
+serialization can pose a performance issue for hardware virtualization. An
+accurate time stamp counter reading may therefore not always be available, and
+it may be necessary for an implementation to guard against "backwards" reads of
+the TSC as seen from other CPUs, even in an otherwise perfectly synchronized
+system.
+
+4.3) Timespec aliasing
+
+Additionally, this lack of serialization from the TSC poses another challenge
+when using results of the TSC when measured against another time source. As
+the TSC is much higher precision, many possible values of the TSC may be read
+while another clock is still expressing the same value.
+
+That is, you may read (T,T+10) while external clock C maintains the same value.
+Due to non-serialized reads, you may actually end up with a range which
+fluctuates - from (T-1.. T+10). Thus, any time calculated from a TSC, but
+calibrated against an external value may have a range of valid values.
+Re-calibrating this computation may actually cause time, as computed after the
+calibration, to go backwards, compared with time computed before the
+calibration.
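+
+A small worked example (numbers invented purely for illustration): suppose
+kernel time advances only once per millisecond while the TSC ticks once per
+nanosecond. A calibration taken just after a kernel-time update pairs kernel
+value K with TSC value T. A second calibration taken 900,000 ns later, just
+before the next update, still reads kernel value K but pairs it with TSC value
+T + 900,000. A guest that read the clock in between, at TSC T + 800,000,
+already observed time K + 800,000 under the first pairing; immediately after
+the recalibration, a read at TSC T + 910,000 yields only K + 10,000, so time
+appears to have jumped backwards by almost 800 microseconds.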
+
+This problem is particularly pronounced with an internal time source in Linux,
+the kernel time, which is expressed in the theoretically high resolution
+timespec - but which advances in much larger granularity intervals, sometimes
+at the rate of jiffies, and possibly in catchup modes, at a much larger step.
+
+This aliasing requires care in the computation and recalibration of kvmclock
+and any other values derived from TSC computation (such as TSC virtualization
+itself).
+
+4.4) Migration
+
+Migration of a virtual machine raises problems for timekeeping in two ways.
+First, the migration itself may take time, during which interrupts cannot be
+delivered, and after which, the guest time may need to be caught up. NTP may
+be able to help to some degree here, as the clock correction required is
+typically small enough to fall in the NTP-correctable window.
+
+An additional concern is that timers based off the TSC (or HPET, if the raw bus
+clock is exposed) may now be running at different rates, requiring compensation
+in some way in the hypervisor by virtualizing these timers. In addition,
+migrating to a faster machine may preclude the use of a passthrough TSC, as a
+faster clock cannot be made visible to a guest without the potential of time
+advancing faster than usual. A slower clock is less of a problem, as it can
+always be caught up to the original rate. KVM clock avoids these problems by
+simply storing multipliers and offsets against the TSC for the guest to convert
+back into nanosecond resolution values.
+
+4.5) Scheduling
+
+Since scheduling may be based on precise timing and firing of interrupts, the
+scheduling algorithms of an operating system may be adversely affected by
+virtualization. In theory, the effect is random and should be universally
+distributed, but in contrived as well as real scenarios (guest device access,
+causes of virtualization exits, possible context switch), this may not always
+be the case. The effect of this has not been well studied.
+
+In an attempt to work around this, several implementations have provided a
+paravirtualized scheduler clock, which reveals the true amount of CPU time for
+which a virtual machine has been running.
+
+4.6) Watchdogs
+
+Watchdog timers, such as the lockup detector in Linux, may fire accidentally when
+running under hardware virtualization due to timer interrupts being delayed or
+misinterpretation of the passage of real time. Usually, these warnings are
+spurious and can be ignored, but in some circumstances it may be necessary to
+disable such detection.
+
+4.7) Delays and precision timing
+
+Precise timing and delays may not be possible in a virtualized system. This
+can happen if the system is controlling physical hardware, or issues delays to
+compensate for slower I/O to and from devices. The first issue is not solvable
+in general for a virtualized system; hardware control software can't be
+adequately virtualized without a full real-time operating system, which would
+require an RT aware virtualization platform.
+
+The second issue may cause performance problems, but this is unlikely to be a
+significant issue. In many cases these delays may be eliminated through
+configuration or paravirtualization.
+
+4.8) Covert channels and leaks
+
+In addition to the above problems, time information will inevitably leak to the
+guest about the host in anything but a perfect implementation of virtualized
+time. This may allow the guest to infer the presence of a hypervisor (as in a
+red-pill type detection), and it may allow information to leak between guests
+by using CPU utilization itself as a signalling channel. Preventing such
+problems would require completely isolated virtual time which may not track
+real time any longer. This may be useful in certain security or QA contexts,
+but in general isn't recommended for real-world deployment scenarios.
+
--
1.7.1

2010-07-13 02:26:28

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 17/18] Indicate reliable TSC in kvmclock

When no platform bugs have been detected, no TSC warps have been
detected, and the hardware guarantees to us that the TSC does not change
rate or stop across P-state or C-state changes, we can consider it reliable.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 10 +++++++++-
1 files changed, 9 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e73ddf6..b3cc186 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -55,6 +55,7 @@
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
+#include <asm/pvclock-abi.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
@@ -902,6 +903,13 @@ static inline int kvm_tsc_changes_freq(void)
return ret;
}

+static inline int kvm_tsc_reliable(void)
+{
+ return (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+ boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+ !check_tsc_unstable());
+}
+
static inline u64 nsec_to_cycles(u64 nsec)
{
WARN_ON(preemptible());
@@ -1025,7 +1033,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
- vcpu->hv_clock.flags = 0;
+ vcpu->hv_clock.flags = kvm_tsc_reliable() ? PVCLOCK_TSC_STABLE_BIT : 0;

/*
* The interface expects us to write an even number signaling that the
--
1.7.1

2010-07-13 02:26:59

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 16/18] Use getnsboottime in KVM

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 22 ++++++----------------
1 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f9994c3..e73ddf6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -891,16 +891,6 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
hv_clock->tsc_to_system_mul);
}

-static inline u64 get_kernel_ns(void)
-{
- struct timespec ts;
-
- WARN_ON(preemptible());
- ktime_get_ts(&ts);
- monotonic_to_bootbased(&ts);
- return timespec_to_ns(&ts);
-}
-
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static inline int kvm_tsc_changes_freq(void)
@@ -930,7 +920,7 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)

spin_lock(&kvm->arch.tsc_write_lock);
offset = data - native_read_tsc();
- ns = get_kernel_ns();
+ ns = getnsboottime();
elapsed = ns - kvm->arch.last_tsc_nsec;
sdiff = data - kvm->arch.last_tsc_write;
if (sdiff < 0)
@@ -983,7 +973,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
- kernel_ns = get_kernel_ns();
+ kernel_ns = getnsboottime();
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
local_irq_restore(flags);

@@ -1928,7 +1918,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (check_tsc_unstable()) {
/* Subtract elapsed cycle time */
u64 ns = !vcpu->arch.last_host_ns ? 0 :
- get_kernel_ns() - vcpu->arch.last_host_ns;
+ getnsboottime() - vcpu->arch.last_host_ns;
tsc_delta -= nsec_to_cycles(ns);
kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
}
@@ -1943,7 +1933,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = native_read_tsc();
- vcpu->arch.last_host_ns = get_kernel_ns();
+ vcpu->arch.last_host_ns = getnsboottime();
}

static int is_efer_nx(void)
@@ -3336,7 +3326,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;

r = 0;
- now_ns = get_kernel_ns();
+ now_ns = getnsboottime();
delta = user_ns.clock - now_ns;
kvm->arch.kvmclock_offset = delta;
break;
@@ -3345,7 +3335,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;

- now_ns = get_kernel_ns();
+ now_ns = getnsboottime();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
user_ns.flags = 0;

--
1.7.1

2010-07-13 02:27:26

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 14/18] Fix a possible backwards warp of kvmclock

Kernel time, which advances in discrete steps, may progress much more slowly
than the TSC. As a result, when kvmclock is adjusted to a new base, the
apparent time to the guest, which runs at a much higher, nsec scaled
rate based on the current TSC, may have already been observed to have
a larger value (kernel_ns + scaled tsc) than the value to which we are
setting it (kernel_ns + 0).

We must instead compute the clock as potentially observed by the guest
for kernel_ns to make sure it does not go backwards.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 2 +
arch/x86/kvm/x86.c | 43 +++++++++++++++++++++++++++++++++++++-
2 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d9e0aa0..94dd16b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -340,6 +340,8 @@ struct kvm_vcpu_arch {
struct page *time_page;
u64 last_host_tsc;
u64 last_host_ns;
+ u64 last_guest_tsc;
+ u64 last_kernel_ns;

bool nmi_pending;
bool nmi_injected;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 08bd15b..f9994c3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -974,14 +974,15 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
struct kvm_vcpu_arch *vcpu = &v->arch;
void *shared_kaddr;
unsigned long this_tsc_khz;
- s64 kernel_ns;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp;

if ((!vcpu->time_page))
return 0;

/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
+ kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
kernel_ns = get_kernel_ns();
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
local_irq_restore(flags);
@@ -991,13 +992,49 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
return 1;
}

+ /*
+ * Time as measured by the TSC may go backwards when resetting the base
+ * tsc_timestamp. The reason for this is that the TSC resolution is
+ * higher than the resolution of the other clock scales. Thus, many
+ * possible measurements of the TSC correspond to one measurement of any
+ * other clock, and so a spread of values is possible. This is not a
+ * problem for the computation of the nanosecond clock; with TSC rates
+ * around 1GHZ, there can only be a few cycles which correspond to one
+ * nanosecond value, and any path through this code will inevitably
+ * take longer than that. However, with the kernel_ns value itself,
+ * the precision may be much lower, down to HZ granularity. If the
+ * first sampling of TSC against kernel_ns ends in the low part of the
+ * range, and the second in the high end of the range, we can get:
+ *
+ * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
+ *
+ * As the sampling errors potentially range in the thousands of cycles,
+ * it is possible such a time value has already been observed by the
+ * guest. To protect against this, we must compute the system time as
+ * observed by the guest and ensure the new system time is greater.
+ */
+ max_kernel_ns = 0;
+ if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
+ max_kernel_ns = vcpu->last_guest_tsc -
+ vcpu->hv_clock.tsc_timestamp;
+ max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
+ vcpu->hv_clock.tsc_to_system_mul,
+ vcpu->hv_clock.tsc_shift);
+ max_kernel_ns += vcpu->last_kernel_ns;
+ }
+
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
vcpu->hw_tsc_khz = this_tsc_khz;
}

+ if (max_kernel_ns > kernel_ns)
+ kernel_ns = max_kernel_ns;
+
/* With all the info we got, fill in the values */
+ vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
+ vcpu->last_kernel_ns = kernel_ns;
vcpu->hv_clock.flags = 0;

/*
@@ -4918,6 +4955,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (hw_breakpoint_active())
hw_breakpoint_restore();

+ kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
+
atomic_set(&vcpu->guest_mode, 0);
smp_wmb();
local_irq_enable();
--
1.7.1

2010-07-13 02:27:43

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 12/18] Add clock sync request to hardware enable

If there are active VCPUs which are marked as belonging to
a particular hardware CPU, request a clock sync for them when
enabling hardware; the TSC could be desynchronized on a newly
arriving CPU, and we need to recompute the guests' system time
relative to boot after a suspend event.

This covers both cases.

Note that it is acceptable to take the spinlock, as either
no other tasks will be running and no locks held (BSP after
resume), or other tasks will be guaranteed to drop the lock
relatively quickly (AP on CPU_STARTING).

Noting we now get clock synchronization requests for VCPUs
which are starting up (or restarting), it is tempting to
attempt to remove the arch/x86/kvm/x86.c CPU hot-notifiers
at this time, however it is not correct to do so; they are
required for systems with non-constant TSC as the frequency
may not be known immediately after the processor has started
until the cpufreq driver has had a chance to run and query
the chipset.

Updated: implement better locking semantics for hardware_enable

Removed the hack of dropping and retaking the lock by adding the
semantic that we always hold kvm_lock when hardware_enable is
called. The one place that doesn't need to worry about it is
resume: when resuming a frozen CPU, the spinlock won't be taken.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 8 ++++++++
virt/kvm/kvm_main.c | 6 +++++-
2 files changed, 13 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a4d7cb9..08bd15b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5533,7 +5533,15 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void *garbage)
{
+ struct kvm *kvm;
+ struct kvm_vcpu *vcpu;
+ int i;
+
kvm_shared_msr_cpu_online();
+ list_for_each_entry(kvm, &vm_list, vm_list)
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (vcpu->cpu == smp_processor_id())
+ kvm_request_guest_time_update(vcpu);
return kvm_x86_ops->hardware_enable(garbage);
}

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d4853a5..c0f4e70 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1961,7 +1961,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
+ spin_lock(&kvm_lock);
hardware_enable(NULL);
+ spin_unlock(&kvm_lock);
break;
}
return NOTIFY_OK;
@@ -2166,8 +2168,10 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)

static int kvm_resume(struct sys_device *dev)
{
- if (kvm_usage_count)
+ if (kvm_usage_count) {
+ WARN_ON(spin_is_locked(&kvm_lock));
hardware_enable(NULL);
+ }
return 0;
}

--
1.7.1

2010-07-13 02:27:59

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 11/18] Perform hardware_enable in CPU_STARTING callback

The CPU_STARTING callback was added upstream with the intention
of being used for KVM, specifically for the hardware enablement
that must be done before we can run in hardware virt. It had
bugs on the x86_64 architecture at the time, where it was called
after CPU_ONLINE. The arches have since merged and the bug is
gone.

It might be noted that other features should probably start making
use of this callback; microcode updates in particular, which might be
fixing important errata, would be best applied before beginning to
run user tasks.

Signed-off-by: Zachary Amsden <[email protected]>
---
virt/kvm/kvm_main.c | 5 ++---
1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b78b794..d4853a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
cpu);
hardware_disable(NULL);
break;
- case CPU_ONLINE:
+ case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, hardware_enable, NULL, 1);
+ hardware_enable(NULL);
break;
}
return NOTIFY_OK;
@@ -2096,7 +2096,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,

static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
- .priority = 20, /* must be > scheduler priority */
};

static int vm_stat_get(void *_offset, u64 *val)
--
1.7.1

2010-07-13 02:28:01

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 09/18] Robust TSC compensation

Make the match of TSC find TSC writes that are close to each other
instead of perfectly identical; this allows the compensator to also
work in migration / suspend scenarios.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 14 ++++++++++----
1 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 79c4608..51d3f3e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -926,21 +926,27 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
struct timespec ts;
+ s64 sdiff;

spin_lock(&kvm->arch.tsc_write_lock);
offset = data - native_read_tsc();
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
+ sdiff = data - kvm->arch.last_tsc_write;
+ if (sdiff < 0)
+ sdiff = -sdiff;

/*
- * Special case: identical write to TSC within 5 seconds of
+ * Special case: close write to TSC within 5 seconds of
* another CPU is interpreted as an attempt to synchronize
- * (the 5 seconds is to accomodate host load / swapping).
+ * The 5 seconds is to accommodate host load / swapping as
+ * well as any reset of TSC during the boot process.
*
* In that case, for a reliable TSC, we can match TSC offsets,
- * or make a best guest using kernel_ns value.
+ * or make a best guess using the elapsed value.
*/
- if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
+ if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+ elapsed < 5ULL * NSEC_PER_SEC) {
if (!check_tsc_unstable()) {
offset = kvm->arch.last_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
--
1.7.1

2010-07-13 02:28:43

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 08/18] Add helper functions for time computation

Add helper functions to compute the kernel time and to convert
nanoseconds back to CPU-specific cycles. Note that these must not be
called in preemptible context, as the kernel could then enter software
suspend state, which would make the operation non-atomic.

Also, convert the KVM_SET_CLOCK / KVM_GET_CLOCK ioctls to use the
kernel time helper; these should be bootbased as well.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 47 ++++++++++++++++++++++++++++-------------------
1 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 666aef3..79c4608 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -891,6 +891,16 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
hv_clock->tsc_to_system_mul);
}

+static inline u64 get_kernel_ns(void)
+{
+ struct timespec ts;
+
+ WARN_ON(preemptible());
+ ktime_get_ts(&ts);
+ monotonic_to_bootbased(&ts);
+ return timespec_to_ns(&ts);
+}
+
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static inline int kvm_tsc_changes_freq(void)
@@ -902,6 +912,15 @@ static inline int kvm_tsc_changes_freq(void)
return ret;
}

+static inline u64 nsec_to_cycles(u64 nsec)
+{
+ WARN_ON(preemptible());
+ if (kvm_tsc_changes_freq())
+ printk_once(KERN_WARNING
+ "kvm: unreliable cycle conversion on adjustable rate TSC\n");
+ return (nsec * __get_cpu_var(cpu_tsc_khz)) / USEC_PER_SEC;
+}
+
void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
@@ -910,9 +929,7 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)

spin_lock(&kvm->arch.tsc_write_lock);
offset = data - native_read_tsc();
- ktime_get_ts(&ts);
- monotonic_to_bootbased(&ts);
- ns = timespec_to_ns(&ts);
+ ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;

/*
@@ -928,10 +945,9 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
offset = kvm->arch.last_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
} else {
- u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
- tsc_delta = tsc_delta / USEC_PER_SEC;
- offset += tsc_delta;
- pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
+ u64 delta = nsec_to_cycles(elapsed);
+ offset += delta;
+ pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
ns = kvm->arch.last_tsc_nsec;
}
@@ -948,11 +964,11 @@ EXPORT_SYMBOL_GPL(guest_write_tsc);

static int kvm_write_guest_time(struct kvm_vcpu *v)
{
- struct timespec ts;
unsigned long flags;
struct kvm_vcpu_arch *vcpu = &v->arch;
void *shared_kaddr;
unsigned long this_tsc_khz;
+ s64 kernel_ns;

if ((!vcpu->time_page))
return 0;
@@ -960,8 +976,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
- ktime_get_ts(&ts);
- monotonic_to_bootbased(&ts);
+ kernel_ns = get_kernel_ns();
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
local_irq_restore(flags);

@@ -976,9 +991,7 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
}

/* With all the info we got, fill in the values */
- vcpu->hv_clock.system_time = ts.tv_nsec +
- (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
-
+ vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->hv_clock.flags = 0;

/*
@@ -3261,7 +3274,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
case KVM_SET_CLOCK: {
- struct timespec now;
struct kvm_clock_data user_ns;
u64 now_ns;
s64 delta;
@@ -3275,19 +3287,16 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;

r = 0;
- ktime_get_ts(&now);
- now_ns = timespec_to_ns(&now);
+ now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
kvm->arch.kvmclock_offset = delta;
break;
}
case KVM_GET_CLOCK: {
- struct timespec now;
struct kvm_clock_data user_ns;
u64 now_ns;

- ktime_get_ts(&now);
- now_ns = timespec_to_ns(&now);
+ now_ns = get_kernel_ns();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
user_ns.flags = 0;

--
1.7.1

2010-07-13 02:29:00

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 06/18] Unify TSC logic

Move the TSC control logic from the vendor backends into x86.c
by adding adjust_tsc_offset to x86 ops. Now all TSC decisions
can be done in one place.

Also, rename some variable in the VCPU structure to more accurately
reflect their actual content.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/include/asm/kvm_host.h | 5 +++--
arch/x86/kvm/svm.c | 26 ++++++++++----------------
arch/x86/kvm/vmx.c | 22 ++++++++--------------
arch/x86/kvm/x86.c | 18 +++++++++++++++---
4 files changed, 36 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4b42893..324e892 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -255,7 +255,6 @@ struct kvm_mmu {
};

struct kvm_vcpu_arch {
- u64 host_tsc;
/*
* rip and regs accesses must go through
* kvm_{register,rip}_{read,write} functions.
@@ -336,9 +335,10 @@ struct kvm_vcpu_arch {

gpa_t time;
struct pvclock_vcpu_time_info hv_clock;
- unsigned int hv_clock_tsc_khz;
+ unsigned int hw_tsc_khz;
unsigned int time_offset;
struct page *time_page;
+ u64 last_host_tsc;

bool nmi_pending;
bool nmi_injected;
@@ -520,6 +520,7 @@ struct kvm_x86_ops {
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
+ void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);

void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 7e7889d..b52222c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -948,20 +948,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
int i;

if (unlikely(cpu != vcpu->cpu)) {
- u64 delta;
-
- if (check_tsc_unstable()) {
- /*
- * Make sure that the guest sees a monotonically
- * increasing TSC.
- */
- delta = vcpu->arch.host_tsc - native_read_tsc();
- svm->vmcb->control.tsc_offset += delta;
- if (is_nested(svm))
- svm->nested.hsave->control.tsc_offset += delta;
- }
- vcpu->cpu = cpu;
- kvm_migrate_timers(vcpu);
svm->asid_generation = 0;
}

@@ -977,8 +963,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
++vcpu->stat.host_state_reload;
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
- vcpu->arch.host_tsc = native_read_tsc();
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -3432,6 +3416,15 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
}

+static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ svm->vmcb->control.tsc_offset += adjustment;
+ if (is_nested(svm))
+ svm->nested.hsave->control.tsc_offset += adjustment;
+}
+
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3520,6 +3513,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.has_wbinvd_exit = svm_has_wbinvd_exit,

.write_tsc_offset = svm_write_tsc_offset,
+ .adjust_tsc_offset = svm_adjust_tsc_offset,
};

static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9055ca6..d8e23ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -504,7 +504,6 @@ static void __vcpu_clear(void *arg)
vmcs_clear(vmx->vmcs);
if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- rdtscll(vmx->vcpu.arch.host_tsc);
list_del(&vmx->local_vcpus_link);
vmx->vcpu.cpu = -1;
vmx->launched = 0;
@@ -887,7 +886,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 tsc_this, delta, new_offset;
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));

if (!vmm_exclusive)
@@ -904,14 +902,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct desc_ptr dt;
unsigned long sysenter_esp;

- kvm_migrate_timers(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
local_irq_disable();
list_add(&vmx->local_vcpus_link,
&per_cpu(vcpus_on_cpu, cpu));
local_irq_enable();

- vcpu->cpu = cpu;
/*
* Linux uses per-cpu TSS and GDT, so set these when switching
* processors.
@@ -922,16 +918,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-
- /*
- * Make sure the time stamp counter is monotonous.
- */
- rdtscll(tsc_this);
- if (tsc_this < vcpu->arch.host_tsc) {
- delta = vcpu->arch.host_tsc - tsc_this;
- new_offset = vmcs_read64(TSC_OFFSET) + delta;
- vmcs_write64(TSC_OFFSET, new_offset);
- }
}
}

@@ -1160,6 +1146,12 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vmcs_write64(TSC_OFFSET, offset);
}

+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+ u64 offset = vmcs_read64(TSC_OFFSET);
+ vmcs_write64(TSC_OFFSET, offset + adjustment);
+}
+
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
@@ -4113,6 +4105,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)

cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
+ vmx->vcpu.cpu = cpu;
err = vmx_vcpu_setup(vmx);
vmx_vcpu_put(&vmx->vcpu);
put_cpu();
@@ -4352,6 +4345,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

.write_tsc_offset = vmx_write_tsc_offset,
+ .adjust_tsc_offset = vmx_adjust_tsc_offset,
};

static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 36ef649..e718892 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -970,9 +970,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
return 1;
}

- if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
+ if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
- vcpu->hv_clock_tsc_khz = this_tsc_khz;
+ vcpu->hw_tsc_khz = this_tsc_khz;
}

/* With all the info we got, fill in the values */
@@ -1863,13 +1863,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}

kvm_x86_ops->vcpu_load(vcpu, cpu);
- kvm_request_guest_time_update(vcpu);
+ if (unlikely(vcpu->cpu != cpu)) {
+ /* Make sure TSC doesn't go backwards */
+ s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
+ native_read_tsc() - vcpu->arch.last_host_tsc;
+ if (tsc_delta < 0)
+ mark_tsc_unstable("KVM discovered backwards TSC");
+ if (check_tsc_unstable())
+ kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+ kvm_migrate_timers(vcpu);
+ kvm_request_guest_time_update(vcpu);
+ vcpu->cpu = cpu;
+ }
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
+ vcpu->arch.last_host_tsc = native_read_tsc();
}

static int is_efer_nx(void)
--
1.7.1

2010-07-13 02:29:39

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 04/18] Make cpu_tsc_khz updates use local CPU

This simplifies much of the init code; we can now simply always
call tsc_khz_changed, optionally passing it a new value, or letting
it figure out the existing value (while interrupts are disabled, and
thus, by inference from the rule, not racy with respect to CPU hotplug or
frequency updates, which will issue IPIs to the local CPU to perform
this very same task).

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/x86.c | 157 +++++++++++++++++++++++++++++++++++++--------------
1 files changed, 114 insertions(+), 43 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bb7451b..2d5b97a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -893,6 +893,15 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

+static inline int kvm_tsc_changes_freq(void)
+{
+ int cpu = get_cpu();
+ int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+ cpufreq_quick_get(cpu) != 0;
+ put_cpu();
+ return ret;
+}
+
void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
@@ -937,7 +946,7 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
}
EXPORT_SYMBOL_GPL(guest_write_tsc);

-static void kvm_write_guest_time(struct kvm_vcpu *v)
+static int kvm_write_guest_time(struct kvm_vcpu *v)
{
struct timespec ts;
unsigned long flags;
@@ -946,24 +955,27 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
unsigned long this_tsc_khz;

if ((!vcpu->time_page))
- return;
-
- this_tsc_khz = get_cpu_var(cpu_tsc_khz);
- if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
- kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
- vcpu->hv_clock_tsc_khz = this_tsc_khz;
- }
- put_cpu_var(cpu_tsc_khz);
+ return 0;

/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
+ this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
local_irq_restore(flags);

- /* With all the info we got, fill in the values */
+ if (unlikely(this_tsc_khz == 0)) {
+ kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
+ return 1;
+ }

+ if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
+ kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
+ vcpu->hv_clock_tsc_khz = this_tsc_khz;
+ }
+
+ /* With all the info we got, fill in the values */
vcpu->hv_clock.system_time = ts.tv_nsec +
(NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

@@ -984,6 +996,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
kunmap_atomic(shared_kaddr, KM_USER0);

mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ return 0;
}

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
@@ -1850,12 +1863,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}

kvm_x86_ops->vcpu_load(vcpu, cpu);
- if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
- unsigned long khz = cpufreq_quick_get(cpu);
- if (!khz)
- khz = tsc_khz;
- per_cpu(cpu_tsc_khz, cpu) = khz;
- }
kvm_request_guest_time_update(vcpu);
}

@@ -4131,9 +4138,23 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);

-static void bounce_off(void *info)
+static void tsc_bad(void *info)
+{
+ __get_cpu_var(cpu_tsc_khz) = 0;
+}
+
+static void tsc_khz_changed(void *data)
{
- /* nothing */
+ struct cpufreq_freqs *freq = data;
+ unsigned long khz = 0;
+
+ if (data)
+ khz = freq->new;
+ else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ khz = cpufreq_quick_get(raw_smp_processor_id());
+ if (!khz)
+ khz = tsc_khz;
+ __get_cpu_var(cpu_tsc_khz) = khz;
}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -4144,11 +4165,51 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
struct kvm_vcpu *vcpu;
int i, send_ipi = 0;

+ /*
+ * We allow guests to temporarily run on slowing clocks,
+ * provided we notify them after, or to run on accelerating
+ * clocks, provided we notify them before. Thus time never
+ * goes backwards.
+ *
+ * However, we have a problem. We can't atomically update
+ * the frequency of a given CPU from this function; it is
+ * merely a notifier, which can be called from any CPU.
+ * Changing the TSC frequency at arbitrary points in time
+ * requires a recomputation of local variables related to
+ * the TSC for each VCPU. We must flag these local variables
+ * to be updated and be sure the update takes place with the
+ * new frequency before any guests proceed.
+ *
+ * Unfortunately, the combination of hotplug CPU and frequency
+ * change creates an intractable locking scenario; the order
+ * of when these callouts happen is undefined with respect to
+ * CPU hotplug, and they can race with each other. As such,
+ * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
+ * undefined; you can actually have a CPU frequency change take
+ * place in between the computation of X and the setting of the
+ * variable. To protect against this problem, all updates of
+ * the per_cpu tsc_khz variable are done in an interrupt
+ * protected IPI, and all callers wishing to update the value
+ * must wait for a synchronous IPI to complete (which is trivial
+ * if the caller is on the CPU already). This establishes the
+ * necessary total order on variable updates.
+ *
+ * Note that because a guest time update may take place
+ * anytime after the setting of the VCPU's request bit, the
+ * correct TSC value must be set before the request. However,
+ * to ensure the update actually makes it to any guest which
+ * starts running in hardware virtualization between the set
+ * and the acquisition of the spinlock, we must also ping the
+ * CPU after setting the request bit.
+ *
+ */
+
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
return 0;
if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
return 0;
- per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
+
+ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);

spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
@@ -4158,7 +4219,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
if (!kvm_request_guest_time_update(vcpu))
continue;
if (vcpu->cpu != smp_processor_id())
- send_ipi++;
+ send_ipi = 1;
}
}
spin_unlock(&kvm_lock);
@@ -4176,32 +4237,48 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
* guest context is entered kvmclock will be updated,
* so the guest will not see stale values.
*/
- smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
+ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
}
return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
- .notifier_call = kvmclock_cpufreq_notifier
+ .notifier_call = kvmclock_cpufreq_notifier
+};
+
+static int kvmclock_cpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
+ break;
+ case CPU_DOWN_PREPARE:
+ smp_call_function_single(cpu, tsc_bad, NULL, 1);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block kvmclock_cpu_notifier_block = {
+ .notifier_call = kvmclock_cpu_notifier,
+ .priority = -INT_MAX
};

static void kvm_timer_init(void)
{
int cpu;

+ register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
- for_each_online_cpu(cpu) {
- unsigned long khz = cpufreq_get(cpu);
- if (!khz)
- khz = tsc_khz;
- per_cpu(cpu_tsc_khz, cpu) = khz;
- }
- } else {
- for_each_possible_cpu(cpu)
- per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
}
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
}

static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
@@ -4303,6 +4380,7 @@ void kvm_arch_exit(void)
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
+ unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
}
@@ -4718,8 +4796,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
- if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu))
- kvm_write_guest_time(vcpu);
+ if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) {
+ r = kvm_write_guest_time(vcpu);
+ if (unlikely(r))
+ goto out;
+ }
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
@@ -5415,17 +5496,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void *garbage)
{
- /*
- * Since this may be called from a hotplug notifcation,
- * we can't get the CPU frequency directly.
- */
- if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
- int cpu = raw_smp_processor_id();
- per_cpu(cpu_tsc_khz, cpu) = 0;
- }
-
kvm_shared_msr_cpu_online();
-
return kvm_x86_ops->hardware_enable(garbage);
}

--
1.7.1

2010-07-13 02:29:57

by Zachary Amsden

[permalink] [raw]
Subject: [PATCH 02/18] Fix SVM VMCB reset

On reset, VMCB TSC should be set to zero. Instead, code was setting
tsc_offset to zero, which passes through the underlying TSC.

Signed-off-by: Zachary Amsden <[email protected]>
---
arch/x86/kvm/svm.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6671053..7e7889d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -767,7 +767,7 @@ static void init_vmcb(struct vcpu_svm *svm)

control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
- control->tsc_offset = 0;
+ guest_write_tsc(&svm->vcpu, 0);
control->int_ctl = V_INTR_MASKING_MASK;

init_seg(&save->es);
--
1.7.1

2010-07-13 20:53:20

by Marcelo Tosatti

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On Mon, Jul 12, 2010 at 04:25:29PM -1000, Zachary Amsden wrote:
> Make the match of TSC find TSC writes that are close to each other
> instead of perfectly identical; this allows the compensator to also
> work in migration / suspend scenarios.
>
> Signed-off-by: Zachary Amsden <[email protected]>
> ---
> arch/x86/kvm/x86.c | 14 ++++++++++----
> 1 files changed, 10 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 79c4608..51d3f3e 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -926,21 +926,27 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> struct kvm *kvm = vcpu->kvm;
> u64 offset, ns, elapsed;
> struct timespec ts;
> + s64 sdiff;
>
> spin_lock(&kvm->arch.tsc_write_lock);
> offset = data - native_read_tsc();
> ns = get_kernel_ns();
> elapsed = ns - kvm->arch.last_tsc_nsec;
> + sdiff = data - kvm->arch.last_tsc_write;
> + if (sdiff < 0)
> + sdiff = -sdiff;
>
> /*
> - * Special case: identical write to TSC within 5 seconds of
> + * Special case: close write to TSC within 5 seconds of
> * another CPU is interpreted as an attempt to synchronize
> - * (the 5 seconds is to accomodate host load / swapping).
> + * The 5 seconds is to accomodate host load / swapping as
> + * well as any reset of TSC during the boot process.
> *
> * In that case, for a reliable TSC, we can match TSC offsets,
> - * or make a best guest using kernel_ns value.
> + * or make a best guest using elapsed value.
> */
> - if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
> + if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
> + elapsed < 5ULL * NSEC_PER_SEC) {
> if (!check_tsc_unstable()) {
> offset = kvm->arch.last_tsc_offset;
> pr_debug("kvm: matched tsc offset for %llu\n", data);

What prevents a vcpu from seeing its TSC go backwards, in case the first
write in the 5 second window is smaller than the victim vcpu's last
visible TSC value ?

2010-07-13 21:15:44

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On 07/13/2010 10:34 AM, Marcelo Tosatti wrote:
> On Mon, Jul 12, 2010 at 04:25:29PM -1000, Zachary Amsden wrote:
>
>> Make the match of TSC find TSC writes that are close to each other
>> instead of perfectly identical; this allows the compensator to also
>> work in migration / suspend scenarios.
>>
>> Signed-off-by: Zachary Amsden<[email protected]>
>> ---
>> arch/x86/kvm/x86.c | 14 ++++++++++----
>> 1 files changed, 10 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 79c4608..51d3f3e 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -926,21 +926,27 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
>> struct kvm *kvm = vcpu->kvm;
>> u64 offset, ns, elapsed;
>> struct timespec ts;
>> + s64 sdiff;
>>
>> spin_lock(&kvm->arch.tsc_write_lock);
>> offset = data - native_read_tsc();
>> ns = get_kernel_ns();
>> elapsed = ns - kvm->arch.last_tsc_nsec;
>> + sdiff = data - kvm->arch.last_tsc_write;
>> + if (sdiff < 0)
>> + sdiff = -sdiff;
>>
>> /*
>> - * Special case: identical write to TSC within 5 seconds of
>> + * Special case: close write to TSC within 5 seconds of
>> * another CPU is interpreted as an attempt to synchronize
>> - * (the 5 seconds is to accomodate host load / swapping).
>> + * The 5 seconds is to accomodate host load / swapping as
>> + * well as any reset of TSC during the boot process.
>> *
>> * In that case, for a reliable TSC, we can match TSC offsets,
>> - * or make a best guest using kernel_ns value.
>> + * or make a best guest using elapsed value.
>> */
>> - if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
>> + if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
>> + elapsed < 5ULL * NSEC_PER_SEC) {
>> if (!check_tsc_unstable()) {
>> offset = kvm->arch.last_tsc_offset;
>> pr_debug("kvm: matched tsc offset for %llu\n", data);
>>
> What prevents a vcpu from seeing its TSC go backwards, in case the first
> write in the 5 second window is smaller than the victim vcpu's last
> visible TSC value ?
>

Nothing, unfortunately. However, the TSC would already have to be out
of sync in order for the problem to occur. It can never happen in
normal circumstances on a stable hardware TSC except in one case;
migration. During the CPU state transfer phase of migration, however,
all the VCPUs should already be stopped, so the maximum TSC that can be
observed by any CPU is bounded.

The problem, of course, is that the TSC write will latch the first TSC to
be written, which, if you stop in order, and start in order, will be the
lowest TSC; so later VCPUs can observe a negative TSC drift (timing is
additionally complicated by the VCPU teardown / create time).

There are a couple of solutions, some of which are ugly and some of
which are very ugly.

1) Make some global state available about whether the VM is running or
not, use the global TSC lock and record the last TSC when the VM is
stopped. Return this TSC from any reads when the VM is stopped. I'm
not sure the kernel model is equipped to do this properly; in theory,
userspace could stop and start running VCPUs using the ioctls whenever
it feels like and requires no protocol to do so.

2) Make userspace deal with it; when starting up a VM, read the VCPU
state for all VCPUs in first, then take the maximum TSC and set all TSCs
to this value before starting the VCPUs. I'm not sure the userspace
model is equipped to do this properly, it could start running earlier
CPUs before reading later CPU states...

3) Drop passthrough TSC altogether and switch to trap / emulate TSC.

4) Pray to a deity of your choice.

Of course, none of these solutions work for a guest which deliberately
runs with desynchronized TSCs, but we needn't really be concerned with
that; no guest does it on purpose.
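
For illustration only, option 2 might look roughly like the sketch
below on the userspace side. get_vcpu_tsc()/set_vcpu_tsc() are assumed
wrappers around the KVM_GET_MSRS / KVM_SET_MSRS vcpu ioctls for
MSR_IA32_TSC (they are not existing calls), and error handling is
omitted:

#include <stdint.h>

/* Hypothetical wrappers, e.g. around KVM_GET_MSRS / KVM_SET_MSRS for
 * MSR_IA32_TSC on a vcpu fd. */
extern uint64_t get_vcpu_tsc(int vcpu_fd);
extern void set_vcpu_tsc(int vcpu_fd, uint64_t tsc);

/* With all VCPUs stopped: read every saved TSC first, then program the
 * maximum back into each VCPU before any of them is allowed to run
 * again, so no VCPU can observe a value lower than one it has already
 * seen. */
static void sync_guest_tscs(const int *vcpu_fd, int nr_vcpus)
{
	uint64_t tsc, max_tsc = 0;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		tsc = get_vcpu_tsc(vcpu_fd[i]);
		if (tsc > max_tsc)
			max_tsc = tsc;
	}

	for (i = 0; i < nr_vcpus; i++)
		set_vcpu_tsc(vcpu_fd[i], max_tsc);
}

The ordering is the whole point: no write (and no VCPU run) happens
until every read has completed.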

2010-07-13 21:33:36

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 01/18] Make TSC offset writes non-preemptible

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Ensure that the storing of the offset and the reading of the TSC
> are never preempted by taking a spinlock. While the lock is overkill
> now, it is useful later in this patch series.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Reviewed-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-13 21:38:02

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 02/18] Fix SVM VMCB reset

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> On reset, VMCB TSC should be set to zero. Instead, code was setting
> tsc_offset to zero, which passes through the underlying TSC.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>


--
All rights reversed

2010-07-13 21:45:55

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On 07/13/2010 11:42 AM, David S. Ahern wrote:
>
> On 07/13/10 15:15, Zachary Amsden wrote:
>
>
>>> What prevents a vcpu from seeing its TSC go backwards, in case the first
>>> write in the 5 second window is smaller than the victim vcpu's last
>>> visible TSC value ?
>>>
>>>
>> Nothing, unfortunately. However, the TSC would already have to be out
>> of sync in order for the problem to occur. It can never happen in
>> normal circumstances on a stable hardware TSC except in one case;
>> migration. During the CPU state transfer phase of migration, however,
>>
> What about across processor sockets? Aren't CPUs brought up at different
> points such that their TSCs start at different times?
>

Yes, that's called an unsynchronized TSC. In that case, the
compensation does the best it can based on time since the first TSC
write, but it will never be exact.
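
To make "the best it can" concrete, here is a worked example of the
nsec_to_cycles() adjustment from patch 08, with purely illustrative
numbers: assume a 2.5 GHz TSC (cpu_tsc_khz = 2,500,000) and 100 ms
elapsed between the two TSC writes. Then

	nsec_to_cycles(100,000,000) = 100,000,000 * 2,500,000 / USEC_PER_SEC
	                            = 250,000,000 cycles

so the second VCPU's offset is bumped by 0.1 s worth of cycles, which
puts its TSC roughly where the first VCPU's TSC is now rather than
where it was at the time of the first write.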

2010-07-13 21:50:04

by David Ahern

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation



On 07/13/10 15:15, Zachary Amsden wrote:

>> What prevents a vcpu from seeing its TSC go backwards, in case the first
>> write in the 5 second window is smaller than the victim vcpu's last
>> visible TSC value ?
>>
>
> Nothing, unfortunately. However, the TSC would already have to be out
> of sync in order for the problem to occur. It can never happen in
> normal circumstances on a stable hardware TSC except in one case;
> migration. During the CPU state transfer phase of migration, however,

What about across processor sockets? Aren't CPUs brought up at different
points such that their TSCs start at different times?

David

2010-07-13 22:11:19

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 03/18] TSC reset compensation

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Attempt to synchronize TSCs which are reset to the same value. In the
> case of a reliable hardware TSC, we can just re-use the same offset, but
> on non-reliable hardware, we can get closer by adjusting the offset to
> match the elapsed time.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-13 23:32:51

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On 07/13/2010 11:42 AM, David S. Ahern wrote:
>
> On 07/13/10 15:15, Zachary Amsden wrote:
>
>
>>> What prevents a vcpu from seeing its TSC go backwards, in case the first
>>> write in the 5 second window is smaller than the victim vcpu's last
>>> visible TSC value ?
>>>
>>>
>> Nothing, unfortunately. However, the TSC would already have to be out
>> of sync in order for the problem to occur. It can never happen in
>> normal circumstances on a stable hardware TSC except in one case;
>> migration. During the CPU state transfer phase of migration, however,
>>
> What about across processor sockets? Aren't CPUs brought up at different
> points such that their TSCs start at different times?
>

It depends on the platform. But it doesn't matter. The definition we
use is different start TSCs == out of sync. Some systems have
synchronized TSCs, some do not.

See patch 18/18 - "Timekeeping documentation" for details.

2010-07-14 07:33:04

by Takuya Yoshikawa

[permalink] [raw]
Subject: Re: [PATCH 18/18] Add timekeeping documentation

Hi,

(2010/07/13 11:25), Zachary Amsden wrote:
> +
> +2.3) APIC
> +
> +On Pentium and later processors, an on-board timer is available to each CPU
> +as part of the Advanced Programmable Interrupt Controller. The APIC is
> +accessed through memory-mapped registers and provides interrupt service to each
> +CPU, used for IPIs and local timer interrupts.
> +
> +Although in theory the APIC is a safe and stable source for local interrupts,
> +in practice, many bugs and glitches have occurred due to the special nature of
> +the APIC CPU-local memory-mapped hardware. Beware that CPU errata may affect
> +the use of the APIC and that workarounds may be required. In addition, some of
> +these workarounds pose unique constraints for virtualization - requiring either
> +extra overhead incurred from extra reads of memory-mapped I/O or additional
> +functionality that may be more computationally expensive to implement.
> +
> +Since the APIC is documented quite well in the Intel and AMD manuals, we will
> +avoid repititon of the detail here. It should be pointed out that the APIC

repetition?

> +timer is programmed through the LVT (local vector timer) register, is capable
> +of one-shot or periodic operation, and is based on the bus clock divided down
> +by the programmable divider register.
> +
> +2.4) HPET
> +
> +HPET is quite complex, and was originally intended to replace the PIT / RTC
> +support of the X86 PC. It remains to be seen whether that will be the case, as
> +the de facto standard of PC hardware is to emulate these older devices. Some
> +systems designated as legacy free may support only the HPET as a hardware timer
> +device.
> +
> +The HPET spec is rather loose and vague, requiring at least 3 hardware timers,
> +but allowing implementation freedom to support many more. It also imposes no
> +fixed rate on the timer frequency, but does impose some extremal values on
> +frequency, error and slew.
> +
> +In general, the HPET is recommended as a high precision (compared to PIT /RTC)
> +time source which is independent of local variation (as there is only one HPET
> +in any given system). The HPET is also memory-mapped, and its presence is
> +indicated through ACPI tables by the BIOS.
> +
> +Detailed specification of the HPET is beyond the current scope of this
> +document, as it is also very well documented elsewhere.
> +


> +3.6) TSC and STPCLK / T-states
> +
> +External signals given to the processor may also have the affect of stopping

effect?

> +the TSC. This is typically done for thermal emergency power control to prevent
> +an overheating condition, and typically, there is no way to detect that this
> +condition has happened.
> +


> +4.4) Migration
> +
> +Migration of a virtual machine raises problems for timekeeping in two ways.
> +First, the migration itself may take time, during which interrupts cannot be
> +delivered, and after which, the guest time may need to be caught up. NTP may
> +be able to help to some degree here, as the clock correction required is
> +typically small enough to fall in the NTP-correctable window.
> +
> +An additional concern is that timers based off the TSC (or HPET, if the raw bus
> +clock is exposed) may now be running at different rates, requiring compensation
> +in some may in the hypervisor by virtualizing these timers. In addition,

way?

> +migrating to a faster machine may preclude the use of a passthrough TSC, as a
> +faster clock cannot be made visible to a guest without the potential of time
> +advancing faster than usual. A slower clock is less of a problem, as it can
> +always be caught up to the original rate. KVM clock avoids these problems by
> +simply storing multipliers and offsets gainst the TSC for the guest to convert

against?

> +back into nanosecond resolution values.
> +


Takuya

-- I'm not English speaker, so not so sure about some places.

2010-07-14 14:41:23

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 04/18] Make cpu_tsc_khz updates use local CPU

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> This simplifies much of the init code; we can now simply always
> call tsc_khz_changed, optionally passing it a new value, or letting
> it figure out the existing value (while interrupts are disabled, and
> thus, by inference from the rule, not raceful against CPU hotplug or
> frequency updates, which will issue IPIs to the local CPU to perform
> this very same task).
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-14 15:02:35

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 05/18] Warn about unstable TSC

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> If creating an SMP guest with unstable host TSC, issue a warning
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-14 15:53:55

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 06/18] Unify TSC logic

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Move the TSC control logic from the vendor backends into x86.c
> by adding adjust_tsc_offset to x86 ops. Now all TSC decisions
> can be done in one place.
>
> Also, rename some variable in the VCPU structure to more accurately
> reflect their actual content.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-14 16:14:56

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 07/18] Fix deep C-state TSC desynchronization

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> When CPUs with unstable TSCs enter deep C-state, TSC may stop
> running. This causes us to require resynchronization. Since
> we can't tell when this may potentially happen, we assume the
> worst by forcing re-compensation for it at every point the VCPU
> task is descheduled.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Reviewed-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-14 19:02:37

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 08/18] Add helper functions for time computation

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Add a helper function to compute the kernel time and convert nanoseconds
> back to CPU specific cycles. Note that these must not be called in preemptible
> context, as that would mean the kernel could enter software suspend state,
> which would cause non-atomic operation.
>
> Also, convert the KVM_SET_CLOCK / KVM_GET_CLOCK ioctls to use the kernel
> time helper, these should be bootbased as well.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-14 20:28:09

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 18/18] Add timekeeping documentation

On 07/13/2010 09:16 PM, Takuya Yoshikawa wrote:
> Hi,
>
> (2010/07/13 11:25), Zachary Amsden wrote:
>> +
>> +2.3) APIC
>> +
>> +On Pentium and later processors, an on-board timer is available to
>> each CPU
>> +as part of the Advanced Programmable Interrupt Controller. The APIC is
>> +accessed through memory-mapped registers and provides interrupt
>> service to each
>> +CPU, used for IPIs and local timer interrupts.
>> +
>> +Although in theory the APIC is a safe and stable source for local
>> interrupts,
>> +in practice, many bugs and glitches have occurred due to the special
>> nature of
>> +the APIC CPU-local memory-mapped hardware. Beware that CPU errata
>> may affect
>> +the use of the APIC and that workarounds may be required. In
>> addition, some of
>> +these workarounds pose unique constraints for virtualization -
>> requiring either
>> +extra overhead incurred from extra reads of memory-mapped I/O or
>> additional
>> +functionality that may be more computationally expensive to implement.
>> +
>> +Since the APIC is documented quite well in the Intel and AMD
>> manuals, we will
>> +avoid repititon of the detail here. It should be pointed out that
>> the APIC
>
> repetition?
>
>> +timer is programmed through the LVT (local vector timer) register,
>> is capable
>> +of one-shot or periodic operation, and is based on the bus clock
>> divided down
>> +by the programmable divider register.
>> +
>> +2.4) HPET
>> +
>> +HPET is quite complex, and was originally intended to replace the
>> PIT / RTC
>> +support of the X86 PC. It remains to be seen whether that will be
>> the case, as
>> +the de facto standard of PC hardware is to emulate these older
>> devices. Some
>> +systems designated as legacy free may support only the HPET as a
>> hardware timer
>> +device.
>> +
>> +The HPET spec is rather loose and vague, requiring at least 3
>> hardware timers,
>> +but allowing implementation freedom to support many more. It also
>> imposes no
>> +fixed rate on the timer frequency, but does impose some extremal
>> values on
>> +frequency, error and slew.
>> +
>> +In general, the HPET is recommended as a high precision (compared to
>> PIT /RTC)
>> +time source which is independent of local variation (as there is
>> only one HPET
>> +in any given system). The HPET is also memory-mapped, and its
>> presence is
>> +indicated through ACPI tables by the BIOS.
>> +
>> +Detailed specification of the HPET is beyond the current scope of this
>> +document, as it is also very well documented elsewhere.
>> +
>
>
>> +3.6) TSC and STPCLK / T-states
>> +
>> +External signals given to the processor may also have the affect of
>> stopping
>
> effect?
>
>> +the TSC. This is typically done for thermal emergency power control
>> to prevent
>> +an overheating condition, and typically, there is no way to detect
>> that this
>> +condition has happened.
>> +
>
>
>> +4.4) Migration
>> +
>> +Migration of a virtual machine raises problems for timekeeping in
>> two ways.
>> +First, the migration itself may take time, during which interrupts
>> cannot be
>> +delivered, and after which, the guest time may need to be caught
>> up. NTP may
>> +be able to help to some degree here, as the clock correction
>> required is
>> +typically small enough to fall in the NTP-correctable window.
>> +
>> +An additional concern is that timers based off the TSC (or HPET, if
>> the raw bus
>> +clock is exposed) may now be running at different rates, requiring
>> compensation
>> +in some may in the hypervisor by virtualizing these timers. In
>> addition,
>
> way?
>
>> +migrating to a faster machine may preclude the use of a passthrough
>> TSC, as a
>> +faster clock cannot be made visible to a guest without the potential
>> of time
>> +advancing faster than usual. A slower clock is less of a problem,
>> as it can
>> +always be caught up to the original rate. KVM clock avoids these
>> problems by
>> +simply storing multipliers and offsets gainst the TSC for the guest
>> to convert
>
> against?
>
>> +back into nanosecond resolution values.
>> +
>
>
> Takuya
>
> -- I'm not English speaker, so not so sure about some places.

Thanks, you found some mistakes anyway, will fix.

Cheers,

Zach

2010-07-14 22:33:35

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Make the match of TSC find TSC writes that are close to each other
> instead of perfectly identical; this allows the compensator to also
> work in migration / suspend scenarios.
>
> Signed-off-by: Zachary Amsden<[email protected]>

I don't see a real alternative, so ...

Reviewed-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:14:05

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 10/18] Keep SMP VMs more in sync on unstable TSC

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> SMP VMs on machines with unstable TSC have their TSC offset adjusted by the
> local offset delta from last measurement. This does not take into account how
> long it has been since the measurement, leading to drift. Minimize the drift
> by accounting for any time difference the kernel has observed.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:15:36

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 11/18] Perform hardware_enable in CPU_STARTING callback

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> The CPU_STARTING callback was added upstream with the intention
> of being used for KVM, specifically for the hardware enablement
> that must be done before we can run in hardware virt. It had
> bugs on the x86_64 architecture at the time, where it was called
> after CPU_ONLINE. The arches have since merged and the bug is
> gone.
>
> It might be noted other features should probably start making
> use of this callback; microcode updates in particular which
> might be fixing important erratums would be best applied before
> beginning to run user tasks.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Reviewed-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:32:49

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 12/18] Add clock sync request to hardware enable

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> If there are active VCPUs which are marked as belonging to
> a particular hardware CPU, request a clock sync for them when
> enabling hardware; the TSC could be desynchronized on a newly
> arriving CPU, and we need to recompute guests system time
> relative to boot after a suspend event.
>
> This covers both cases.
>
> Note that it is acceptable to take the spinlock, as either
> no other tasks will be running and no locks held (BSP after
> resume), or other tasks will be guaranteed to drop the lock
> relatively quickly (AP on CPU_STARTING).

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:35:10

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 13/18] Move scale_delta into common header

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> The scale_delta function for shift / multiply with 31-bit
> precision moves to a common header so it can be used by both
> kernel and kvm module.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:37:06

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 14/18] Fix a possible backwards warp of kvmclock

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Kernel time, which advances in discrete steps may progress much slower
> than TSC. As a result, when kvmclock is adjusted to a new base, the
> apparent time to the guest, which runs at a much higher, nsec scaled
> rate based on the current TSC, may have already been observed to have
> a larger value (kernel_ns + scaled tsc) than the value to which we are
> setting it (kernel_ns + 0).
>
> We must instead compute the clock as potentially observed by the guest
> for kernel_ns to make sure it does not go backwards.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:41:36

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 15/18] Implement getnsboottime kernel API

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Add a kernel call to get the number of nanoseconds since boot. This
> is generally useful enough to make it a generic call.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:43:07

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 16/18] Use getnsboottime in KVM

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> Signed-off-by: Zachary Amsden<[email protected]>

Would be nice to have a commit message the next time you
submit this :)

> arch/x86/kvm/x86.c | 22 ++++++----------------
> 1 files changed, 6 insertions(+), 16 deletions(-)

Reviewed-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-15 02:45:01

by Rik van Riel

[permalink] [raw]
Subject: Re: [PATCH 17/18] Indicate reliable TSC in kvmclock

On 07/12/2010 10:25 PM, Zachary Amsden wrote:
> When no platform bugs have been detected, no TSC warps have been
> detected, and the hardware guarantees to us TSC does not change
> rate or stop with P-state or C-state changes, we can consider it reliable.
>
> Signed-off-by: Zachary Amsden<[email protected]>

Acked-by: Rik van Riel <[email protected]>

--
All rights reversed

2010-07-16 13:19:45

by Joerg Roedel

[permalink] [raw]
Subject: Re: KVM timekeeping fixes, V2

On Mon, Jul 12, 2010 at 04:25:20PM -1000, Zachary Amsden wrote:
> Discovered brammage in patches due to unresolved merge.
> Also, had to move 09/18 past 08/18 to resolve compile issue.

Have you tested this patchset with Nested SVM? We had TSC handling
related bugs there in the past and should make sure to keep it working.

Joerg

2010-07-16 17:20:41

by Zachary Amsden

[permalink] [raw]
Subject: Re: KVM timekeeping fixes, V2

On 07/16/2010 03:19 AM, Joerg Roedel wrote:
> On Mon, Jul 12, 2010 at 04:25:20PM -1000, Zachary Amsden wrote:
>
>> Discovered brammage in patches due to unresolved merge.
>> Also, had to move 09/18 past 08/18 to resolve compile issue.
>>
> Have you tested this patchset with Nested SVM? We had TSC handling
> related bugs there in the past and should make sure to keep it working.
>

I've been very careful to keep nested SVM safe, but I've not got a good
test for that. Is there any test suite for the nested case?

It took a lot of guessing to figure out why it works and why the code
looks imbalanced, but I now understand that the way it is now is
logical and correct.

2010-07-16 19:26:39

by Joerg Roedel

[permalink] [raw]
Subject: Re: KVM timekeeping fixes, V2

On Fri, Jul 16, 2010 at 07:20:32AM -1000, Zachary Amsden wrote:

> I've been very careful to keep nested SVM safe, but I've not got a good
> test for that. Is there any test suite for the nested case?

To test this you can boot a nested Linux guest and let both the L1 and L2
guests use kvm_clock. Then put some load into the L2 guest and see if the
L2 or the L1 freezes hard (which happens with kvm_clock when the TSC
went backwards for one of them).

Joerg

2010-07-18 14:22:33

by Avi Kivity

[permalink] [raw]
Subject: Re: KVM timekeeping fixes, V2

On 07/16/2010 10:26 PM, Joerg Roedel wrote:
> On Fri, Jul 16, 2010 at 07:20:32AM -1000, Zachary Amsden wrote:
>
>
>> I've been very careful to keep nested SVM safe, but I've not got a good
>> test for that. Is there any test suite for the nested case?
>>
> To test this you can boot a nested Linux guest and let both, L1 and L2
> guest use kvm_clock. Then put some load into the L2 guest and see if the
> L2 or the L1 freezes hard (which happens with kvm_clock when the TSC
> went backwards for one of them).
>
>

With recent guests, they won't freeze any more, since we detect the tsc
going backwards and compensate (in a brute-force way, nothing clever).

But you can printk the maximum compensation and see if it's something
unreasonable.
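
The same information is also visible on the host side; as a rough,
untested sketch, something along these lines next to the
adjust_tsc_offset() call added in patch 06 would log the worst
backwards step seen (max_tsc_backwards is a made-up name):

	if (tsc_delta < 0) {
		static s64 max_tsc_backwards;

		if (-tsc_delta > max_tsc_backwards) {
			max_tsc_backwards = -tsc_delta;
			printk(KERN_DEBUG
			       "kvm: max backwards tsc step %lld cycles\n",
			       max_tsc_backwards);
		}
	}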

--
error compiling committee.c: too many arguments to function

2010-07-18 14:28:19

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 01/18] Make TSC offset writes non-preemptible

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> Ensure that the storing of the offset and the reading of the TSC
> are never preempted by taking a spinlock. While the lock is overkill
> now, it is useful later in this patch series.
>
>
>

This patch has both code movement and semantic changes. Please split
into two so it's easier to understand which are which.

> /*
> - * writes 'guest_tsc' into guest's timestamp counter "register"
> - * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
> + * writes 'offset' into guest's timestamp counter offset register
> */
> -static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
> +static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
>

Shouldn't offset be an s64? Doesn't matter in practice, of course, but
offset would usually be negative, not some huge number that happens to
roll the guest tsc around.

> {
> - vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
> + vmcs_write64(TSC_OFFSET, offset);
> }
>
>
>

>
> +void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> +{
> + struct kvm *kvm = vcpu->kvm;
> + u64 offset;
> +
> + spin_lock(&kvm->arch.tsc_write_lock);
> + offset = data - native_read_tsc();
> + kvm_x86_ops->write_tsc_offset(vcpu, offset);
> + spin_unlock(&kvm->arch.tsc_write_lock);
> +
> + /* Reset of TSC must disable overshoot protection below */
> + vcpu->arch.hv_clock.tsc_timestamp = 0;
> +}
> +EXPORT_SYMBOL_GPL(guest_write_tsc);
> +
>

Exported symbols should start with kvm_.


--
error compiling committee.c: too many arguments to function

2010-07-18 14:30:43

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 01/18] Make TSC offset writes non-preemptible

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
>
> +void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> +{
> + struct kvm *kvm = vcpu->kvm;
> + u64 offset;
> +
> + spin_lock(&kvm->arch.tsc_write_lock);
>

Perhaps spin_lock_irq() for even more accuracy?

spin_lock_irq_nmi_smi_hypervisor(), if you can find it.

> + offset = data - native_read_tsc();
> + kvm_x86_ops->write_tsc_offset(vcpu, offset);
> + spin_unlock(&kvm->arch.tsc_write_lock);
> +
> + /* Reset of TSC must disable overshoot protection below */
> + vcpu->arch.hv_clock.tsc_timestamp = 0;
> +}
> +EXPORT_SYMBOL_GPL(guest_write_tsc);
> +
>


--
error compiling committee.c: too many arguments to function

2010-07-18 14:35:06

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 03/18] TSC reset compensation

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> Attempt to synchronize TSCs which are reset to the same value. In the
> case of a reliable hardware TSC, we can just re-use the same offset, but
> on non-reliable hardware, we can get closer by adjusting the offset to
> match the elapsed time.
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 3b4efe2..4b42893 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -396,6 +396,9 @@ struct kvm_arch {
> unsigned long irq_sources_bitmap;
> s64 kvmclock_offset;
> spinlock_t tsc_write_lock;
> + u64 last_tsc_nsec;
> + u64 last_tsc_offset;
> + u64 last_tsc_write;
>

So that we know what the lock protects, let's have

struct kvm_global_tsc {
spinlock_t lock;
...
} tsc;
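
Folding in the fields this patch adds, the grouping might look roughly
like this (a sketch only; the tsc_ prefix is dropped since the fields
would live under kvm->arch.tsc):

struct kvm_global_tsc {
	spinlock_t write_lock;	/* serializes guest TSC writes */
	u64 last_nsec;		/* boot-based ns at the last write */
	u64 last_write;		/* last guest TSC value written */
	u64 last_offset;	/* offset applied for that write */
} tsc;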

> @@ -896,10 +896,39 @@ static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
> void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> {
> struct kvm *kvm = vcpu->kvm;
> - u64 offset;
> + u64 offset, ns, elapsed;
> + struct timespec ts;
>
> spin_lock(&kvm->arch.tsc_write_lock);
> offset = data - native_read_tsc();
> + ktime_get_ts(&ts);
> + monotonic_to_bootbased(&ts);
> + ns = timespec_to_ns(&ts);
> + elapsed = ns - kvm->arch.last_tsc_nsec;
> +
> + /*
> + * Special case: identical write to TSC within 5 seconds of
> + * another CPU is interpreted as an attempt to synchronize
> + * (the 5 seconds is to accomodate host load / swapping).
> + *
> + * In that case, for a reliable TSC, we can match TSC offsets,
> + * or make a best guest using kernel_ns value.
> + */
> + if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
> + if (!check_tsc_unstable()) {
> + offset = kvm->arch.last_tsc_offset;
> + pr_debug("kvm: matched tsc offset for %llu\n", data);
> + } else {
> + u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
> + tsc_delta = tsc_delta / USEC_PER_SEC;
> + offset += tsc_delta;
> + pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
> + }
> + ns = kvm->arch.last_tsc_nsec;
> + }
> + kvm->arch.last_tsc_nsec = ns;
> + kvm->arch.last_tsc_write = data;
> + kvm->arch.last_tsc_offset = offset;
>

We'd have a false alarm here during a reset within 5 seconds of boot.
Does it matter? Easy to work around by forgetting the state during reset.

> kvm_x86_ops->write_tsc_offset(vcpu, offset);
> spin_unlock(&kvm->arch.tsc_write_lock);
>
>


--
error compiling committee.c: too many arguments to function

2010-07-18 14:45:12

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 04/18] Make cpu_tsc_khz updates use local CPU

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> This simplifies much of the init code; we can now simply always
> call tsc_khz_changed, optionally passing it a new value, or letting
> it figure out the existing value (while interrupts are disabled, and
> thus, by inference from the rule, not raceful against CPU hotplug or
> frequency updates, which will issue IPIs to the local CPU to perform
> this very same task).
>
>
> @@ -893,6 +893,15 @@ static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *
>
> static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
>
> +static inline int kvm_tsc_changes_freq(void)
> +{
> + int cpu = get_cpu();
> + int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
> + cpufreq_quick_get(cpu) != 0;
> + put_cpu();
> + return ret;
> +}
> +
> void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> {
> struct kvm *kvm = vcpu->kvm;
> @@ -937,7 +946,7 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> }
> EXPORT_SYMBOL_GPL(guest_write_tsc);
>
> -static void kvm_write_guest_time(struct kvm_vcpu *v)
> +static int kvm_write_guest_time(struct kvm_vcpu *v)
> {
> struct timespec ts;
> unsigned long flags;
> @@ -946,24 +955,27 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
> unsigned long this_tsc_khz;
>
> if ((!vcpu->time_page))
> - return;
> -
> - this_tsc_khz = get_cpu_var(cpu_tsc_khz);
> - if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
> - kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
> - vcpu->hv_clock_tsc_khz = this_tsc_khz;
> - }
> - put_cpu_var(cpu_tsc_khz);
> + return 0;
>
> /* Keep irq disabled to prevent changes to the clock */
> local_irq_save(flags);
> kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
> ktime_get_ts(&ts);
> monotonic_to_bootbased(&ts);
> + this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
> local_irq_restore(flags);
>
> - /* With all the info we got, fill in the values */
> + if (unlikely(this_tsc_khz == 0)) {
> + kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
> + return 1;
> + }
>

Presumably, this will spin until cpufreq writes the frequency?

> @@ -4131,9 +4138,23 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
> }
> EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
>
> -static void bounce_off(void *info)
> +static void tsc_bad(void *info)
> +{
> + __get_cpu_var(cpu_tsc_khz) = 0;
> +}
> +
> +static void tsc_khz_changed(void *data)
> {
> - /* nothing */
> + struct cpufreq_freqs *freq = data;
> + unsigned long khz = 0;
> +
> + if (data)
> + khz = freq->new;
> + else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
> + khz = cpufreq_quick_get(raw_smp_processor_id());
> + if (!khz)
> + khz = tsc_khz;
> + __get_cpu_var(cpu_tsc_khz) = khz;
> }
>

Do we really need to cache cpufreq_quick_get()? If it's really quick,
why not just use it everywhere instead of caching it? Not a comment on
this patch.

--
error compiling committee.c: too many arguments to function

2010-07-18 14:47:47

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 05/18] Warn about unstable TSC

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> If creating an SMP guest with unstable host TSC, issue a warning
>
> Signed-off-by: Zachary Amsden <[email protected]>
> ---
> arch/x86/kvm/x86.c | 4 ++++
> 1 files changed, 4 insertions(+), 0 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2d5b97a..36ef649 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5449,6 +5449,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
> struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
> unsigned int id)
> {
> + if (check_tsc_unstable() && id != 0)
> + printk_once(KERN_WARNING
> + "kvm: SMP vm created on host with unstable TSC; "
> + "guest TSC will not be reliable\n");
> return kvm_x86_ops->vcpu_create(kvm, id);
> }
>
>

'id' check not accurate (you can have a uniprocessor guest with id =
1). Better check online_vcpus.
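
Something like this, keyed off the number of vcpus already created
rather than the id (a sketch, not the posted patch):

	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
		printk_once(KERN_WARNING
			"kvm: SMP vm created on host with unstable TSC; "
			"guest TSC will not be reliable\n");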



--
error compiling committee.c: too many arguments to function

2010-07-18 14:52:58

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> Make the match of TSC find TSC writes that are close to each other
> instead of perfectly identical; this allows the compensator to also
> work in migration / suspend scenarios.
>
>

What scenario exactly?

> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -926,21 +926,27 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> struct kvm *kvm = vcpu->kvm;
> u64 offset, ns, elapsed;
> struct timespec ts;
> + s64 sdiff;
>
> spin_lock(&kvm->arch.tsc_write_lock);
> offset = data - native_read_tsc();
> ns = get_kernel_ns();
> elapsed = ns - kvm->arch.last_tsc_nsec;
> + sdiff = data - kvm->arch.last_tsc_write;
> + if (sdiff < 0)
> + sdiff = -sdiff;
>
> /*
> - * Special case: identical write to TSC within 5 seconds of
> + * Special case: close write to TSC within 5 seconds of
> * another CPU is interpreted as an attempt to synchronize
> - * (the 5 seconds is to accomodate host load / swapping).
> + * The 5 seconds is to accomodate host load / swapping as
> + * well as any reset of TSC during the boot process.
> *
> * In that case, for a reliable TSC, we can match TSC offsets,
> - * or make a best guest using kernel_ns value.
> + * or make a best guest using elapsed value.
> */
> - if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
> + if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
> + elapsed < 5ULL * NSEC_PER_SEC) {
> if (!check_tsc_unstable()) {
> offset = kvm->arch.last_tsc_offset;
> pr_debug("kvm: matched tsc offset for %llu\n", data);
>

Don't we have to adjust offset to the required difference between TSCs?
Or do we assume that if the guest wrote close enough values, it is
trying to cleverly compensate for IPI latency?

--
error compiling committee.c: too many arguments to function

2010-07-18 15:07:48

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 15/18] Implement getnsboottime kernel API

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> Add a kernel call to get the number of nanoseconds since boot. This
> is generally useful enough to make it a generic call.
>
>

Please copy one of the timekeeping maintainers to get their ack.

--
error compiling committee.c: too many arguments to function

2010-07-18 15:08:54

by Avi Kivity

[permalink] [raw]
Subject: Re: KVM timekeeping fixes, V2

On 07/13/2010 05:25 AM, Zachary Amsden wrote:
> Discovered brammage in patches due to unresolved merge.
> Also, had to move 09/18 past 08/18 to resolve compile issue.
>

Ok, so I made some comments out of habit, but they're all very minor
compared to the complexity of the subject.

--
error compiling committee.c: too many arguments to function

2010-07-19 08:11:48

by Zachary Amsden

[permalink] [raw]
Subject: Re: KVM timekeeping fixes, V2

On 07/18/2010 05:08 AM, Avi Kivity wrote:
> On 07/13/2010 05:25 AM, Zachary Amsden wrote:
>> Discovered brammage in patches due to unresolved merge.
>> Also, had to move 09/18 past 08/18 to resolve compile issue.
>
> Ok, so I made some comments out of habit, but they're all very minor
> compared to the complexity of the subject.
>

It's okay, there will be a V3 coming now.

2010-07-19 20:01:16

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 03/18] TSC reset compensation

On 07/18/2010 04:34 AM, Avi Kivity wrote:
> On 07/13/2010 05:25 AM, Zachary Amsden wrote:
>> Attempt to synchronize TSCs which are reset to the same value. In the
>> case of a reliable hardware TSC, we can just re-use the same offset, but
>> on non-reliable hardware, we can get closer by adjusting the offset to
>> match the elapsed time.
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h
>> b/arch/x86/include/asm/kvm_host.h
>> index 3b4efe2..4b42893 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -396,6 +396,9 @@ struct kvm_arch {
>> unsigned long irq_sources_bitmap;
>> s64 kvmclock_offset;
>> spinlock_t tsc_write_lock;
>> + u64 last_tsc_nsec;
>> + u64 last_tsc_offset;
>> + u64 last_tsc_write;
>
> So that we know what the lock protects, let's have
>
> struct kvm_global_tsc {
> spinlock_t lock;
> ...
> } tsc;
>
>> @@ -896,10 +896,39 @@ static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
>> void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
>> {
>> struct kvm *kvm = vcpu->kvm;
>> - u64 offset;
>> + u64 offset, ns, elapsed;
>> + struct timespec ts;
>>
>> spin_lock(&kvm->arch.tsc_write_lock);
>> offset = data - native_read_tsc();
>> + ktime_get_ts(&ts);
>> + monotonic_to_bootbased(&ts);
>> + ns = timespec_to_ns(&ts);
>> + elapsed = ns - kvm->arch.last_tsc_nsec;
>> +
>> + /*
>> + * Special case: identical write to TSC within 5 seconds of
>> + * another CPU is interpreted as an attempt to synchronize
>> + * (the 5 seconds is to accomodate host load / swapping).
>> + *
>> + * In that case, for a reliable TSC, we can match TSC offsets,
>> + * or make a best guest using kernel_ns value.
>> + */
>> + if (data == kvm->arch.last_tsc_write && elapsed < 5ULL *
>> NSEC_PER_SEC) {
>> + if (!check_tsc_unstable()) {
>> + offset = kvm->arch.last_tsc_offset;
>> + pr_debug("kvm: matched tsc offset for %llu\n", data);
>> + } else {
>> + u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
>> + tsc_delta = tsc_delta / USEC_PER_SEC;
>> + offset += tsc_delta;
>> + pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
>> + }
>> + ns = kvm->arch.last_tsc_nsec;
>> + }
>> + kvm->arch.last_tsc_nsec = ns;
>> + kvm->arch.last_tsc_write = data;
>> + kvm->arch.last_tsc_offset = offset;
>
> We'd have a false alarm here during a reset within 5 seconds of boot.
> Does it matter? Easy to work around by forgetting the state during
> reset.
>

Not forgetting, but ignoring; reset within 5 seconds will not reset TSC,
which normally is fine. The problem is that one CPU could reset within
5 seconds and one slightly after. Forgetting during reset is a good
solution.
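
A sketch of what forgetting could look like (hypothetical helper, not
part of the posted series), called from the reset path:

/* Hypothetical: clear the sync-detection state on reset so the next
 * TSC write is treated as a fresh baseline, not a sync attempt. */
static void kvm_clear_tsc_sync_state(struct kvm *kvm)
{
	spin_lock(&kvm->arch.tsc_write_lock);
	kvm->arch.last_tsc_nsec = 0;
	kvm->arch.last_tsc_write = 0;
	kvm->arch.last_tsc_offset = 0;
	spin_unlock(&kvm->arch.tsc_write_lock);
}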

2010-07-19 20:06:18

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 04/18] Make cpu_tsc_khz updates use local CPU

On 07/18/2010 04:45 AM, Avi Kivity wrote:
> On 07/13/2010 05:25 AM, Zachary Amsden wrote:
>> This simplifies much of the init code; we can now simply always
>> call tsc_khz_changed, optionally passing it a new value, or letting
>> it figure out the existing value (while interrupts are disabled, and
>> thus, by inference from the rule, not raceful against CPU hotplug or
>> frequency updates, which will issue IPIs to the local CPU to perform
>> this very same task).
>>
>>
>> @@ -893,6 +893,15 @@ static void kvm_set_time_scale(uint32_t tsc_khz,
>> struct pvclock_vcpu_time_info *
>>
>> static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
>>
>> +static inline int kvm_tsc_changes_freq(void)
>> +{
>> + int cpu = get_cpu();
>> + int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
>> + cpufreq_quick_get(cpu) != 0;
>> + put_cpu();
>> + return ret;
>> +}
>> +
>> void guest_write_tsc(struct kvm_vcpu *vcpu, u64 data)
>> {
>> struct kvm *kvm = vcpu->kvm;
>> @@ -937,7 +946,7 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64
>> data)
>> }
>> EXPORT_SYMBOL_GPL(guest_write_tsc);
>>
>> -static void kvm_write_guest_time(struct kvm_vcpu *v)
>> +static int kvm_write_guest_time(struct kvm_vcpu *v)
>> {
>> struct timespec ts;
>> unsigned long flags;
>> @@ -946,24 +955,27 @@ static void kvm_write_guest_time(struct
>> kvm_vcpu *v)
>> unsigned long this_tsc_khz;
>>
>> if ((!vcpu->time_page))
>> - return;
>> -
>> - this_tsc_khz = get_cpu_var(cpu_tsc_khz);
>> - if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
>> - kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
>> - vcpu->hv_clock_tsc_khz = this_tsc_khz;
>> - }
>> - put_cpu_var(cpu_tsc_khz);
>> + return 0;
>>
>> /* Keep irq disabled to prevent changes to the clock */
>> local_irq_save(flags);
>> kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
>> ktime_get_ts(&ts);
>> monotonic_to_bootbased(&ts);
>> + this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
>> local_irq_restore(flags);
>>
>> - /* With all the info we got, fill in the values */
>> + if (unlikely(this_tsc_khz == 0)) {
>> + kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
>> + return 1;
>> + }
>
> Presumably, this will spin until cpufreq writes the frequency?

Only during CPU re-add; we can only get here if running before cpu
notifiers have told us about the new CPU.

>
>> @@ -4131,9 +4138,23 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu,
>> int size, unsigned short port)
>> }
>> EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
>>
>> -static void bounce_off(void *info)
>> +static void tsc_bad(void *info)
>> +{
>> + __get_cpu_var(cpu_tsc_khz) = 0;
>> +}
>> +
>> +static void tsc_khz_changed(void *data)
>> {
>> - /* nothing */
>> + struct cpufreq_freqs *freq = data;
>> + unsigned long khz = 0;
>> +
>> + if (data)
>> + khz = freq->new;
>> + else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
>> + khz = cpufreq_quick_get(raw_smp_processor_id());
>> + if (!khz)
>> + khz = tsc_khz;
>> + __get_cpu_var(cpu_tsc_khz) = khz;
>> }
>
> Do we really need to cache cpufreq_quick_get()? If it's really quick,
> why not just use it everywhere instead of cacheing it? Not a comment
> on this patch.
>

If cpufreq is compiled in, but disabled, it returns zero, so we need
some sort of logic.

2010-07-19 20:39:57

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 09/18] Robust TSC compensation

On 07/18/2010 04:52 AM, Avi Kivity wrote:
> On 07/13/2010 05:25 AM, Zachary Amsden wrote:
>> Make the match of TSC find TSC writes that are close to each other
>> instead of perfectly identical; this allows the compensator to also
>> work in migration / suspend scenarios.
>>
>
> What scenario exactly?

After migration, qemu will write back MSRs, including TSC to the VCPUs.
They won't have exactly matching values, because they get read out at
different times (actually, because the TSC for the VCPUs never stops,
they can have wildly different times if there was some host overload /
swap / suspend event).

When restarting the CPUs, qemu will try to write back the TSC and then
we end up desynchronizing the system.

It's an ugly problem, and this is an ugly solution.

Better would be to "stop" the VCPUs (requires some kernel
synchronization to determine TSC stop point), or to simply take the
maximum TSC in qemu and write that to all of the CPUs (this assumes the
guest wants to have TSCs in sync at all).

Both methods have to assume small deltas in TSC are unintentional
effects in order to correctly resynchronize.
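
For the second option, a rough sketch of the qemu-side idea (a
hypothetical helper operating on the per-vcpu TSC values saved when the
guest is stopped):

#include <stdint.h>

/* Hypothetical: pick the largest saved TSC and give it to every vcpu,
 * so small read-out skews disappear when the MSRs are written back. */
static void level_saved_tscs(uint64_t *tscs, int nr_vcpus)
{
    uint64_t max = 0;
    int i;

    for (i = 0; i < nr_vcpus; i++)
        if (tscs[i] > max)
            max = tscs[i];
    for (i = 0; i < nr_vcpus; i++)
        tscs[i] = max;
}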

>
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -926,21 +926,27 @@ void guest_write_tsc(struct kvm_vcpu *vcpu, u64
>> data)
>> struct kvm *kvm = vcpu->kvm;
>> u64 offset, ns, elapsed;
>> struct timespec ts;
>> + s64 sdiff;
>>
>> spin_lock(&kvm->arch.tsc_write_lock);
>> offset = data - native_read_tsc();
>> ns = get_kernel_ns();
>> elapsed = ns - kvm->arch.last_tsc_nsec;
>> + sdiff = data - kvm->arch.last_tsc_write;
>> + if (sdiff < 0)
>> + sdiff = -sdiff;
>>
>> /*
>> - * Special case: identical write to TSC within 5 seconds of
>> + * Special case: close write to TSC within 5 seconds of
>> * another CPU is interpreted as an attempt to synchronize
>> - * (the 5 seconds is to accomodate host load / swapping).
>> + * The 5 seconds is to accomodate host load / swapping as
>> + * well as any reset of TSC during the boot process.
>> *
>> * In that case, for a reliable TSC, we can match TSC offsets,
>> - * or make a best guest using kernel_ns value.
>> + * or make a best guest using elapsed value.
>> */
>> - if (data == kvm->arch.last_tsc_write && elapsed < 5ULL *
>> NSEC_PER_SEC) {
>> + if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
>> + elapsed < 5ULL * NSEC_PER_SEC) {
>> if (!check_tsc_unstable()) {
>> offset = kvm->arch.last_tsc_offset;
>> pr_debug("kvm: matched tsc offset for %llu\n", data);
>
> Don't we have to adjust offset to the required different between tsc?
> Or do we assume, that if the guest wrote close enough values, it is
> trying to cleverly compensate for IPI latency?
>

No, we have to assume that any small (small being defined as < 5 second)
difference is unintentional. It's not perfect and is certainly error
prone (without one of the two assists from qemu that I mention above).

I think qemu should probably take the maximum TSC and apply it to all VCPUs.

Zach

2010-07-20 08:53:58

by Avi Kivity

[permalink] [raw]
Subject: Re: [PATCH 04/18] Make cpu_tsc_khz updates use local CPU

On 07/19/2010 11:06 PM, Zachary Amsden wrote:
>>> +static void tsc_khz_changed(void *data)
>>> {
>>> - /* nothing */
>>> + struct cpufreq_freqs *freq = data;
>>> + unsigned long khz = 0;
>>> +
>>> + if (data)
>>> + khz = freq->new;
>>> + else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
>>> + khz = cpufreq_quick_get(raw_smp_processor_id());
>>> + if (!khz)
>>> + khz = tsc_khz;
>>> + __get_cpu_var(cpu_tsc_khz) = khz;
>>> }
>>
>> Do we really need to cache cpufreq_quick_get()? If it's really
>> quick, why not just use it everywhere instead of cacheing it? Not a
>> comment on this patch.
>>
>
>
> If cpufreq is compiled in, but disabled, it returns zero, so we need
> some sort of logic.

Maybe it's better to put it into cpufreq_quick_get(). Inconsistent APIs
that appear to work are bad.

--
error compiling committee.c: too many arguments to function

2010-07-20 21:57:08

by Zachary Amsden

[permalink] [raw]
Subject: Re: [PATCH 04/18] Make cpu_tsc_khz updates use local CPU

On 07/19/2010 10:53 PM, Avi Kivity wrote:
> On 07/19/2010 11:06 PM, Zachary Amsden wrote:
>>>> +static void tsc_khz_changed(void *data)
>>>> {
>>>> - /* nothing */
>>>> + struct cpufreq_freqs *freq = data;
>>>> + unsigned long khz = 0;
>>>> +
>>>> + if (data)
>>>> + khz = freq->new;
>>>> + else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
>>>> + khz = cpufreq_quick_get(raw_smp_processor_id());
>>>> + if (!khz)
>>>> + khz = tsc_khz;
>>>> + __get_cpu_var(cpu_tsc_khz) = khz;
>>>> }
>>>
>>> Do we really need to cache cpufreq_quick_get()? If it's really
>>> quick, why not just use it everywhere instead of cacheing it? Not a
>>> comment on this patch.
>>>
>>
>>
>> If cpufreq is compiled in, but disabled, it returns zero, so we need
>> some sort of logic.
>
> Maybe it's better to put it into cpufreq_quick_get(). Inconsistent
> APIs that appear to work are bad.
>

I don't think it's quite so simple; cpufreq is platform independent and
tsc_khz is a platform specific export. It seems cpufreq is designed to
return zero when disabled and we're the unusual ones for wanting to use it.

Zach