This series adds PLE (pause loop exit) logic from VMX to SVM.
We have noticed a considerable reduction in the number of VMEXITs due to
pause interceptions after these changes. Here are the numbers from one guest
with 32 vcpus on an AMD EPYC system. We used the boot parameter idle=poll to
simulate extensive pauses in the guest.
Here are the VMEXITs in a 10-second interval.
#VMEXITs (in 10s) Before the change After the change
Pauses 810199 504
Total 882184 325415
v2:
Handled most of Radim's comments. Here are the changes:
1. Fixed the module parameters to use unsigned variants.
2. Kept the default pause_filter_count = 3000 for AMD.
3. Removed the ple_window_actual_max parameter. Added a check
in __grow_ple_window to avoid overflow.
4. Fixed the naming conventions for the SVM module parameters.
They are now called pause_filter_count, pause_filter_thresh,
pause_filter_shrink and pause_filter_grow.
5. Took care of intercept setting and clearing.
6. Made a few more text fixes.
v1:
Initial RFC version
Babu Moger (5):
KVM: VMX: Fix the module parameters for vmx
KVM: VMX: Remove ple_window_actual_max
KVM: VMX: Bring the common code to header file
KVM: SVM: Add pause filter threshold
KVM: SVM: Implement pause loop exit logic in SVM
arch/x86/include/asm/svm.h | 3 +-
arch/x86/kvm/svm.c | 108 ++++++++++++++++++++++++++++++++++++++++++++-
arch/x86/kvm/vmx.c | 82 +++++++---------------------------
arch/x86/kvm/x86.h | 37 ++++++++++++++++
4 files changed, 161 insertions(+), 69 deletions(-)
--
1.8.3.1
Get rid of ple_window_actual_max, because its benefits are really
minuscule and the logic is complicated.
The overflows (and underflows) are handled in __grow_ple_window
and __shrink_ple_window respectively.
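For illustration, here is a minimal standalone userspace sketch (not the
kernel code itself) of the grow path after this change, with the module
parameters modelled as plain variables and the VMX defaults used only as
example inputs; clamping the grown value with min() bounds the result
without a separately maintained "actual max":

#include <stdio.h>

typedef unsigned int uint;

static uint ple_window = 4096;			/* default window */
static uint ple_window_grow = 2;		/* default grow factor */
static uint ple_window_max = 0x7fffffffu / 2;	/* INT_MAX / grow factor */

static uint __grow_ple_window(uint val)
{
	if (ple_window_grow < 1)
		return ple_window;

	if (ple_window_grow < ple_window)
		val *= ple_window_grow;
	else
		val += ple_window_grow;

	return val < ple_window_max ? val : ple_window_max;	/* min() */
}

int main(void)
{
	uint w = ple_window;
	int i;

	for (i = 0; i < 64; i++)
		w = __grow_ple_window(w);

	/* Saturates at ple_window_max instead of growing without bound. */
	printf("window after 64 grows: %u\n", w);
	return 0;
}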
Suggested-by: Radim Krčmář <[email protected]>
Signed-off-by: Babu Moger <[email protected]>
---
arch/x86/kvm/vmx.c | 22 +---------------------
1 file changed, 1 insertion(+), 21 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c9a9080..b992d81 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -178,7 +178,6 @@
module_param(ple_window_shrink, uint, 0444);
/* Default is to compute the maximum so we can never overflow. */
-static uint ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
static uint ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
@@ -6645,14 +6644,12 @@ static uint __grow_ple_window(uint val)
if (ple_window_grow < 1)
return ple_window;
- val = min(val, ple_window_actual_max);
-
if (ple_window_grow < ple_window)
val *= ple_window_grow;
else
val += ple_window_grow;
- return val;
+ return min(val, ple_window_max);
}
static uint __shrink_ple_window(uint val, uint modifier, uint minimum)
@@ -6696,21 +6693,6 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
}
/*
- * ple_window_actual_max is computed to be one grow_ple_window() below
- * ple_window_max. (See __grow_ple_window for the reason.)
- * This prevents overflows, because ple_window_max is int.
- * ple_window_max effectively rounded down to a multiple of ple_window_grow in
- * this process.
- * ple_window_max is also prevented from setting vmx->ple_window < ple_window.
- */
-static void update_ple_window_actual_max(void)
-{
- ple_window_actual_max =
- __shrink_ple_window(max(ple_window_max, ple_window),
- ple_window_grow, INT_MIN);
-}
-
-/*
* Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
*/
static void wakeup_handler(void)
@@ -6876,8 +6858,6 @@ static __init int hardware_setup(void)
else
kvm_disable_tdp();
- update_ple_window_actual_max();
-
/*
* Only enable PML when hardware supports PML feature, and both EPT
* and EPT A/D bit features are enabled -- PML depends on them to work.
--
1.8.3.1
This patch adds support for the pause filter threshold. This feature
is indicated by CPUID Fn8000_000A_EDX. See AMD APM Vol 2 Section
15.14.4 Pause Intercept Filtering for more details.
In this mode, a 16-bit pause filter threshold field is added to the VMCB.
The threshold value is a cycle count that is used to reset the pause
counter. As with simple pause filtering, VMRUN loads the pause count
value from the VMCB into an internal counter. Then, on each pause instruction
the hardware checks the elapsed number of cycles since the most recent
pause instruction against the pause filter threshold. If the elapsed cycle
count is greater than the pause filter threshold, the internal pause
count is reloaded from the VMCB and execution continues. If the elapsed cycle
count is less than the pause filter threshold, the internal pause
count is decremented. If the count value is less than zero and pause
intercept is enabled, a #VMEXIT is triggered. If advanced pause filtering
is supported and the pause filter threshold field is set to zero, the filter
operates in the simpler, count-only mode.
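For illustration, the behaviour described above can be modelled with a small
standalone userspace sketch; the structure and helper names below are made up
for this sketch and are not part of the patch or of the hardware interface:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pause_filter_model {
	uint16_t vmcb_count;	/* pause_filter_count loaded at VMRUN */
	uint16_t vmcb_thresh;	/* pause_filter_thresh; 0 = count-only mode */
	int32_t internal_count;	/* internal counter reloaded from the VMCB */
	uint64_t last_pause;	/* cycle of the most recent pause instruction */
};

/* Returns true when a pause would trigger a #VMEXIT (intercept enabled). */
static bool model_pause(struct pause_filter_model *m, uint64_t now)
{
	uint64_t elapsed = now - m->last_pause;

	m->last_pause = now;

	if (m->vmcb_thresh && elapsed > m->vmcb_thresh) {
		/* Long gap since the last pause: reload and continue. */
		m->internal_count = m->vmcb_count;
		return false;
	}

	/* Tight pause loop: decrement; #VMEXIT when the count underflows. */
	if (--m->internal_count < 0) {
		m->internal_count = m->vmcb_count;
		return true;
	}
	return false;
}

int main(void)
{
	struct pause_filter_model m = {
		.vmcb_count = 3000, .vmcb_thresh = 128,
		.internal_count = 3000, .last_pause = 0,
	};
	uint64_t tsc = 0;
	int i, exits = 0;

	/* Simulate a spinning vcpu: one pause every 10 cycles. */
	for (i = 0; i < 100000; i++) {
		tsc += 10;
		exits += model_pause(&m, tsc);
	}
	printf("#VMEXITs from pauses: %d\n", exits);
	return 0;
}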
Signed-off-by: Babu Moger <[email protected]>
---
arch/x86/include/asm/svm.h | 3 ++-
arch/x86/kvm/svm.c | 2 ++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 78dd9df..7a3d9c7 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -60,7 +60,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u32 intercept_dr;
u32 intercept_exceptions;
u64 intercept;
- u8 reserved_1[42];
+ u8 reserved_1[40];
+ u16 pause_filter_thresh;
u16 pause_filter_count;
u64 iopm_base_pa;
u64 msrpm_base_pa;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f40d0da..50a4e95 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4175,6 +4175,8 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
+ pr_err("%-20s%d\n", "pause filter threshold:",
+ control->pause_filter_thresh);
pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
--
1.8.3.1
Bring the PLE (pause loop exit) logic to the AMD SVM driver.
While testing, we found this helps in situations where numerous
pauses are generated. Without these patches we could see continuous
VMEXITs due to pause interceptions. Tested on an AMD EPYC server with
the boot parameter idle=poll on a VM with 32 vcpus to simulate extensive
pause behaviour. Here are the VMEXITs in a 10-second interval.
#VMEXITs Before the change After the change
Pauses 810199 504
Total 882184 325415
Signed-off-by: Babu Moger <[email protected]>
---
arch/x86/kvm/svm.c | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
arch/x86/kvm/x86.h | 2 +
2 files changed, 106 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 50a4e95..4636504 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -263,6 +263,54 @@ struct amd_svm_iommu_ir {
static bool npt_enabled;
#endif
+/*
+ * These two parameters are used to configure the controls for Pause-Loop Exiting:
+ * pause_filter_count: On processors that support pause filtering (indicated
+ * by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
+ * count value. On VMRUN this value is loaded into an internal counter.
+ * Each time a pause instruction is executed, this counter is decremented
+ * until it reaches zero, at which time a #VMEXIT is generated if pause
+ * intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
+ * Intercept Filtering for more details.
+ * This also indicates whether PLE logic is enabled.
+ *
+ * pause_filter_thresh: In addition, some processor families support advanced
+ * pause filtering (indicated by CPUID Fn8000_000A_EDX), which provides an
+ * upper bound on the amount of time a guest is allowed to execute in a pause loop.
+ * In this mode, a 16-bit pause filter threshold field is added in the
+ * VMCB. The threshold value is a cycle count that is used to reset the
+ * pause counter. As with simple pause filtering, VMRUN loads the pause
+ * count value from VMCB into an internal counter. Then, on each pause
+ * instruction the hardware checks the elapsed number of cycles since
+ * the most recent pause instruction against the pause filter threshold.
+ * If the elapsed cycle count is greater than the pause filter threshold,
+ * then the internal pause count is reloaded from the VMCB and execution
+ * continues. If the elapsed cycle count is less than the pause filter
+ * threshold, then the internal pause count is decremented. If the count
+ * value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
+ * triggered. If advanced pause filtering is supported and pause filter
+ * threshold field is set to zero, the filter will operate in the simpler,
+ * count only mode.
+ */
+
+static ushort pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
+module_param(pause_filter_thresh, ushort, 0444);
+
+static ushort pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
+module_param(pause_filter_count, ushort, 0444);
+
+/* Default doubles per-vcpu window every exit. */
+static ushort pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
+module_param(pause_filter_count_grow, ushort, 0444);
+
+/* Default resets per-vcpu window every exit to pause_filter_count. */
+static ushort pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
+module_param(pause_filter_count_shrink, ushort, 0444);
+
+/* Default is to compute the maximum so we can never overflow. */
+static ushort pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
+module_param(pause_filter_count_max, ushort, 0444);
+
/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);
@@ -1046,6 +1094,42 @@ static int avic_ga_log_notifier(u32 ga_tag)
return 0;
}
+static void grow_ple_window(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_control_area *control = &svm->vmcb->control;
+ int old = control->pause_filter_count;
+
+ control->pause_filter_count = __grow_ple_window(old,
+ pause_filter_count,
+ pause_filter_count_grow,
+ pause_filter_count_max);
+
+ if (control->pause_filter_count != old)
+ mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
+ trace_kvm_ple_window_grow(vcpu->vcpu_id,
+ control->pause_filter_count, old);
+}
+
+static void shrink_ple_window(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_control_area *control = &svm->vmcb->control;
+ int old = control->pause_filter_count;
+
+ control->pause_filter_count =
+ __shrink_ple_window(old,
+ pause_filter_count,
+ pause_filter_count_shrink,
+ 0);
+ if (control->pause_filter_count != old)
+ mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
+ trace_kvm_ple_window_shrink(vcpu->vcpu_id,
+ control->pause_filter_count, old);
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -1076,6 +1160,14 @@ static __init int svm_hardware_setup(void)
kvm_tsc_scaling_ratio_frac_bits = 32;
}
+ /* Check for pause filtering support */
+ if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
+ pause_filter_count = 0;
+ pause_filter_thresh = 0;
+ } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
+ pause_filter_thresh = 0;
+ }
+
if (nested) {
printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
@@ -1308,9 +1400,14 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
- if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
- control->pause_filter_count = 3000;
+ /* Check for pause filtering support */
+ if (pause_filter_count) {
+ control->pause_filter_count = pause_filter_count;
+ if (pause_filter_thresh)
+ control->pause_filter_thresh = pause_filter_thresh;
set_intercept(svm, INTERCEPT_PAUSE);
+ } else {
+ clr_intercept(svm, INTERCEPT_PAUSE);
}
if (kvm_vcpu_apicv_active(&svm->vcpu))
@@ -3802,6 +3899,9 @@ static int pause_interception(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel = (svm_get_cpl(vcpu) == 0);
+ if (pause_filter_thresh)
+ grow_ple_window(vcpu);
+
kvm_vcpu_on_spin(vcpu, in_kernel);
return 1;
}
@@ -5424,6 +5524,8 @@ static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
+ if (pause_filter_thresh)
+ shrink_ple_window(vcpu);
}
static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index ca051a2..e6d0259 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -14,6 +14,8 @@
#define KVM_DEFAULT_PLE_WINDOW_SHRINK 0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
(INT_MAX / KVM_DEFAULT_PLE_WINDOW_GROW)
+#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX USHRT_MAX
+#define KVM_SVM_DEFAULT_PLE_WINDOW 3000
static inline uint __grow_ple_window(uint val, uint base,
uint modifier, uint max)
--
1.8.3.1
This patch moves some of the code from vmx.c to the x86.h header file so
that it can be shared between vmx and svm. A couple of functions were
modified to make them common.
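For illustration, a small standalone userspace sketch (not kernel code) of
the generalized shrink helper; the non-zero shrink modifier used below is
only an example value, not the default:

#include <stdio.h>

typedef unsigned int uint;

/* Mirrors the helper added to x86.h; each caller passes its own base. */
static uint __shrink_ple_window(uint val, uint base, uint modifier, uint min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return val > min ? val : min;	/* max(val, min) */
}

int main(void)
{
	/* VMX-style caller: base 4096, example shrink modifier 2. */
	printf("vmx shrink:  %u\n", __shrink_ple_window(8192, 4096, 2, 0));
	/* With the default shrink modifier of 0, the window resets to base. */
	printf("vmx default: %u\n", __shrink_ple_window(8192, 4096, 0, 0));
	/* SVM-style caller later in this series uses base 3000. */
	printf("svm shrink:  %u\n", __shrink_ple_window(6000, 3000, 2, 0));
	return 0;
}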
Signed-off-by: Babu Moger <[email protected]>
---
arch/x86/kvm/vmx.c | 48 +++++++++---------------------------------------
arch/x86/kvm/x86.h | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+), 39 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b992d81..ba826b6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -156,25 +156,18 @@
* Time is measured based on a counter that runs at the same rate as the TSC,
* refer SDM volume 3b section 21.6.13 & 22.1.3.
*/
-#define KVM_VMX_DEFAULT_PLE_GAP 128
-#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
-#define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2
-#define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
-#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
- INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
-
-static uint ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
+static uint ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);
static uint ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);
/* Default doubles per-vcpu window every exit. */
-static uint ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
+static uint ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);
/* Default resets per-vcpu window every exit to ple_window. */
-static uint ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
+static uint ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);
/* Default is to compute the maximum so we can never overflow. */
@@ -6639,38 +6632,14 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
return ret;
}
-static uint __grow_ple_window(uint val)
-{
- if (ple_window_grow < 1)
- return ple_window;
-
- if (ple_window_grow < ple_window)
- val *= ple_window_grow;
- else
- val += ple_window_grow;
-
- return min(val, ple_window_max);
-}
-
-static uint __shrink_ple_window(uint val, uint modifier, uint minimum)
-{
- if (modifier < 1)
- return ple_window;
-
- if (modifier < ple_window)
- val /= modifier;
- else
- val -= modifier;
-
- return max(val, minimum);
-}
-
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int old = vmx->ple_window;
- vmx->ple_window = __grow_ple_window(old);
+ vmx->ple_window = __grow_ple_window(old, ple_window,
+ ple_window_grow,
+ ple_window_max);
if (vmx->ple_window != old)
vmx->ple_window_dirty = true;
@@ -6683,8 +6652,9 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
int old = vmx->ple_window;
- vmx->ple_window = __shrink_ple_window(old,
- ple_window_shrink, ple_window);
+ vmx->ple_window = __shrink_ple_window(old, ple_window,
+ ple_window_shrink,
+ 0);
if (vmx->ple_window != old)
vmx->ple_window_dirty = true;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d0b95b7..ca051a2 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -8,6 +8,41 @@
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
+#define KVM_DEFAULT_PLE_GAP 128
+#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
+#define KVM_DEFAULT_PLE_WINDOW_GROW 2
+#define KVM_DEFAULT_PLE_WINDOW_SHRINK 0
+#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
+ (INT_MAX / KVM_DEFAULT_PLE_WINDOW_GROW)
+
+static inline uint __grow_ple_window(uint val, uint base,
+ uint modifier, uint max)
+{
+ if (modifier < 1)
+ return base;
+
+ if (modifier < base)
+ val *= modifier;
+ else
+ val += modifier;
+
+ return min(val, max);
+}
+
+static inline uint __shrink_ple_window(uint val, uint base,
+ uint modifier, uint min)
+{
+ if (modifier < 1)
+ return base;
+
+ if (modifier < base)
+ val /= modifier;
+ else
+ val -= modifier;
+
+ return max(val, min);
+}
+
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
--
1.8.3.1
The vmx module parameters are supposed to be unsigned variants.
Also fixed checkpatch warnings like the one below:
WARNING: Symbolic permissions 'S_IRUGO' are not preferred. Consider using octal permissions '0444'.
+module_param(ple_gap, uint, S_IRUGO);
Signed-off-by: Babu Moger <[email protected]>
---
arch/x86/kvm/vmx.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c829d89..c9a9080 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -163,24 +163,24 @@
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
-static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
-module_param(ple_gap, int, S_IRUGO);
+static uint ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
+module_param(ple_gap, uint, 0444);
-static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
-module_param(ple_window, int, S_IRUGO);
+static uint ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
+module_param(ple_window, uint, 0444);
/* Default doubles per-vcpu window every exit. */
-static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
-module_param(ple_window_grow, int, S_IRUGO);
+static uint ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
+module_param(ple_window_grow, uint, 0444);
/* Default resets per-vcpu window every exit to ple_window. */
-static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
-module_param(ple_window_shrink, int, S_IRUGO);
+static uint ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
+module_param(ple_window_shrink, uint, 0444);
/* Default is to compute the maximum so we can never overflow. */
-static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
-static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
-module_param(ple_window_max, int, S_IRUGO);
+static uint ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
+static uint ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
+module_param(ple_window_max, uint, 0444);
extern const ulong vmx_return;
@@ -6640,7 +6640,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
return ret;
}
-static int __grow_ple_window(int val)
+static uint __grow_ple_window(uint val)
{
if (ple_window_grow < 1)
return ple_window;
@@ -6655,7 +6655,7 @@ static int __grow_ple_window(int val)
return val;
}
-static int __shrink_ple_window(int val, int modifier, int minimum)
+static uint __shrink_ple_window(uint val, uint modifier, uint minimum)
{
if (modifier < 1)
return ple_window;
--
1.8.3.1
2018-03-16 16:37-0400, Babu Moger:
> Get rid of ple_window_actual_max, because its benefits are really
> minuscule and the logic is complicated.
>
> The overflows(and underflow) are controlled in __ple_window_grow
> and _ple_window_shrink respectively.
>
> Suggested-by: Radim Krčmář <[email protected]>
> Signed-off-by: Babu Moger <[email protected]>
> ---
> arch/x86/kvm/vmx.c | 22 +---------------------
> 1 file changed, 1 insertion(+), 21 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> @@ -178,7 +178,6 @@
> module_param(ple_window_shrink, uint, 0444);
>
> /* Default is to compute the maximum so we can never overflow. */
> -static uint ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
> static uint ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
> module_param(ple_window_max, uint, 0444);
>
> @@ -6645,14 +6644,12 @@ static uint __grow_ple_window(uint val)
> if (ple_window_grow < 1)
> return ple_window;
>
> - val = min(val, ple_window_actual_max);
> -
> if (ple_window_grow < ple_window)
> val *= ple_window_grow;
> else
> val += ple_window_grow;
>
> - return val;
> + return min(val, ple_window_max);
I added a bit of logic to avoid wraparounds.
2018-03-16 16:37-0400, Babu Moger:
> This patch brings some of the code from vmx to x86.h header file. Now, we
> can share this code between vmx and svm. Modified couple functions to make
> it common.
>
> Signed-off-by: Babu Moger <[email protected]>
> ---
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> @@ -6683,8 +6652,9 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
> struct vcpu_vmx *vmx = to_vmx(vcpu);
> int old = vmx->ple_window;
>
> - vmx->ple_window = __shrink_ple_window(old,
> - ple_window_shrink, ple_window);
> + vmx->ple_window = __shrink_ple_window(old, ple_window,
> + ple_window_shrink,
> + 0);
I have preserved the old minimum (ple_window), so we couldn't get
uselessly small values.
2018-03-16 16:37-0400, Babu Moger:
> Bring the PLE(pause loop exit) logic to AMD svm driver.
>
> While testing, we found this helping in situations where numerous
> pauses are generated. Without these patches we could see continuos
> VMEXITS due to pause interceptions. Tested it on AMD EPYC server with
> boot parameter idle=poll on a VM with 32 vcpus to simulate extensive
> pause behaviour. Here are VMEXITS in 10 seconds interval.
>
> #VMEXITS Before the change After the change
> Pauses 810199 504
> Total 882184 325415
>
> Signed-off-by: Babu Moger <[email protected]>
> ---
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> @@ -1046,6 +1094,42 @@ static int avic_ga_log_notifier(u32 ga_tag)
> return 0;
> }
>
> +static void grow_ple_window(struct kvm_vcpu *vcpu)
> +{
> + struct vcpu_svm *svm = to_svm(vcpu);
> + struct vmcb_control_area *control = &svm->vmcb->control;
> + int old = control->pause_filter_count;
> +
> + control->pause_filter_count = __grow_ple_window(old,
> + pause_filter_count,
> + pause_filter_count_grow,
> + pause_filter_count_max);
> +
> + if (control->pause_filter_count != old)
> + mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
> +
> + trace_kvm_ple_window_grow(vcpu->vcpu_id,
> + control->pause_filter_count, old);
> +}
> +
> +static void shrink_ple_window(struct kvm_vcpu *vcpu)
> +{
> + struct vcpu_svm *svm = to_svm(vcpu);
> + struct vmcb_control_area *control = &svm->vmcb->control;
> + int old = control->pause_filter_count;
> +
> + control->pause_filter_count =
> + __shrink_ple_window(old,
> + pause_filter_count,
> + pause_filter_count_shrink,
> + 0);
I've used pause_filter_count as the minimum here as well, and in all patches
used 'unsigned int' instead of 'uint' in the code to match the rest of
the kernel.
The series is in kvm/queue, please look at the changes and tell me if
you'd like something done differently, thanks.
> -----Original Message-----
> From: Radim Krčmář <[email protected]>
> Sent: Wednesday, March 28, 2018 3:27 PM
> To: Moger, Babu <[email protected]>
> Cc: [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]
> Subject: Re: [PATCH v2 3/5] KVM: VMX: Bring the common code to header
> file
>
> 2018-03-16 16:37-0400, Babu Moger:
> > This patch brings some of the code from vmx to x86.h header file. Now, we
> > can share this code between vmx and svm. Modified couple functions to make
> > it common.
> >
> > Signed-off-by: Babu Moger <[email protected]>
> > ---
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> > @@ -6683,8 +6652,9 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
> > struct vcpu_vmx *vmx = to_vmx(vcpu);
> > int old = vmx->ple_window;
> >
> > - vmx->ple_window = __shrink_ple_window(old,
> > - ple_window_shrink, ple_window);
> > + vmx->ple_window = __shrink_ple_window(old, ple_window,
> > + ple_window_shrink,
> > + 0);
>
> I have preserved the old minimum (ple_window), so we couldn't get
> uselessly small values.
Ok. Looks good. Thanks
> -----Original Message-----
> From: Radim Krčmář <[email protected]>
> Sent: Wednesday, March 28, 2018 3:31 PM
> To: Moger, Babu <[email protected]>
> Cc: [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]
> Subject: Re: [PATCH v2 5/5] KVM: SVM: Implement pause loop exit logic in
> SVM
>
> 2018-03-16 16:37-0400, Babu Moger:
> > Bring the PLE(pause loop exit) logic to AMD svm driver.
> >
> > While testing, we found this helping in situations where numerous
> > pauses are generated. Without these patches we could see continuos
> > VMEXITS due to pause interceptions. Tested it on AMD EPYC server with
> > boot parameter idle=poll on a VM with 32 vcpus to simulate extensive
> > pause behaviour. Here are VMEXITS in 10 seconds interval.
> >
> > #VMEXITS Before the change After the change
> > Pauses 810199 504
> > Total 882184 325415
> >
> > Signed-off-by: Babu Moger <[email protected]>
> > ---
> > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> > @@ -1046,6 +1094,42 @@ static int avic_ga_log_notifier(u32 ga_tag)
> > return 0;
> > }
> >
> > +static void grow_ple_window(struct kvm_vcpu *vcpu)
> > +{
> > + struct vcpu_svm *svm = to_svm(vcpu);
> > + struct vmcb_control_area *control = &svm->vmcb->control;
> > + int old = control->pause_filter_count;
> > +
> > + control->pause_filter_count = __grow_ple_window(old,
> > + pause_filter_count,
> > + pause_filter_count_grow,
> > + pause_filter_count_max);
> > +
> > + if (control->pause_filter_count != old)
> > + mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
> > +
> > + trace_kvm_ple_window_grow(vcpu->vcpu_id,
> > + control->pause_filter_count, old);
> > +}
> > +
> > +static void shrink_ple_window(struct kvm_vcpu *vcpu)
> > +{
> > + struct vcpu_svm *svm = to_svm(vcpu);
> > + struct vmcb_control_area *control = &svm->vmcb->control;
> > + int old = control->pause_filter_count;
> > +
> > + control->pause_filter_count =
> > + __shrink_ple_window(old,
> > + pause_filter_count,
> > + pause_filter_count_shrink,
> > + 0);
>
> I've used pause_filter_count as the minimum here as well, and in all patches
> used 'unsigned int' instead of 'uint' in the code to match the rest of
> the kernel.
>
> The series is in kvm/queue, please look at the changes and tell me if
> you'd like something done differently, thanks.
Ok. Looks good to me. Thanks.