2020-05-04 04:16:56

by Singh, Balbir

[permalink] [raw]
Subject: [PATCH v5 4/6] arch/x86/kvm: Refactor L1D flushing

Move out the initialization function to l1d_flush_init_once()
so that it can be reused for subsequent patches. The side-effect
of this patch is that the memory allocated for l1d flush pages
is no longer freed up and the memory allocated once is shared
amongst callers.

l1d_flush_sw/hw() are now abstracted under arch_l1d_flush().
vmx_l1d_flush_mutex however continues to exist as it is also used
from other code paths.

Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Balbir Singh <[email protected]>
---
arch/x86/include/asm/cacheflush.h | 12 ++++---
arch/x86/kernel/l1d_flush.c | 56 ++++++++++++++++++++++++-------
arch/x86/kvm/vmx/vmx.c | 20 ++---------
3 files changed, 53 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 21cc3b28fa63..851d8f1ab827 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -7,11 +7,13 @@
#include <asm/special_insns.h>

#define L1D_CACHE_ORDER 4
+
+enum l1d_flush_options {
+ L1D_FLUSH_POPULATE_TLB = 0x1,
+};
+
void clflush_cache_range(void *addr, unsigned int size);
-void l1d_flush_populate_tlb(void *l1d_flush_pages);
-void *l1d_flush_alloc_pages(void);
-void l1d_flush_cleanup_pages(void *l1d_flush_pages);
-void l1d_flush_sw(void *l1d_flush_pages);
-int l1d_flush_hw(void);
+int l1d_flush_init_once(void);
+void arch_l1d_flush(enum l1d_flush_options options);

#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
index 5871794f890d..7fec0cc8f85c 100644
--- a/arch/x86/kernel/l1d_flush.c
+++ b/arch/x86/kernel/l1d_flush.c
@@ -1,7 +1,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>

-void *l1d_flush_alloc_pages(void)
+static void *l1d_flush_alloc_pages(void)
{
struct page *page;
void *l1d_flush_pages = NULL;
@@ -27,20 +27,14 @@ void *l1d_flush_alloc_pages(void)
}
return l1d_flush_pages;
}
-EXPORT_SYMBOL_GPL(l1d_flush_alloc_pages);

-void l1d_flush_cleanup_pages(void *l1d_flush_pages)
-{
- free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
-}
-EXPORT_SYMBOL_GPL(l1d_flush_cleanup_pages);

/*
* Not all users of l1d flush would want to populate the TLB first
* split out the function so that callers can optionally flush the L1D
* cache via sw without prefetching the TLB.
*/
-void l1d_flush_populate_tlb(void *l1d_flush_pages)
+static void l1d_flush_populate_tlb(void *l1d_flush_pages)
{
int size = PAGE_SIZE << L1D_CACHE_ORDER;

@@ -58,9 +52,8 @@ void l1d_flush_populate_tlb(void *l1d_flush_pages)
[size] "r" (size)
: "eax", "ebx", "ecx", "edx");
}
-EXPORT_SYMBOL_GPL(l1d_flush_populate_tlb);

-int l1d_flush_hw(void)
+static int l1d_flush_hw(void)
{
if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
@@ -68,9 +61,8 @@ int l1d_flush_hw(void)
}
return -ENOTSUPP;
}
-EXPORT_SYMBOL_GPL(l1d_flush_hw);

-void l1d_flush_sw(void *l1d_flush_pages)
+static void l1d_flush_sw(void *l1d_flush_pages)
{
int size = PAGE_SIZE << L1D_CACHE_ORDER;

@@ -87,4 +79,42 @@ void l1d_flush_sw(void *l1d_flush_pages)
[size] "r" (size)
: "eax", "ecx");
}
-EXPORT_SYMBOL_GPL(l1d_flush_sw);
+
+static void *l1d_flush_pages;
+static DEFINE_MUTEX(l1d_flush_mutex);
+
+/*
+ * Initialize and setup L1D flush once, each caller will reuse the
+ * l1d_flush_pages for flushing, no per CPU allocations or NUMA aware
+ * allocations at the moment.
+ */
+int l1d_flush_init_once(void)
+{
+ int ret = 0;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENOTSUPP;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D) || l1d_flush_pages)
+ return ret;
+
+ mutex_lock(&l1d_flush_mutex);
+ if (!l1d_flush_pages)
+ l1d_flush_pages = l1d_flush_alloc_pages();
+ ret = l1d_flush_pages ? 0 : -ENOMEM;
+ mutex_unlock(&l1d_flush_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(l1d_flush_init_once);
+
+void arch_l1d_flush(enum l1d_flush_options options)
+{
+ if (!l1d_flush_hw())
+ return;
+
+ if (options & L1D_FLUSH_POPULATE_TLB)
+ l1d_flush_populate_tlb(l1d_flush_pages);
+
+ l1d_flush_sw(l1d_flush_pages);
+}
+EXPORT_SYMBOL_GPL(arch_l1d_flush);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4f95927aad4c..d56702578588 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -203,8 +203,6 @@ static const struct {
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

-static void *vmx_l1d_flush_pages;
-
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
@@ -247,12 +245,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
l1tf = VMENTER_L1D_FLUSH_ALWAYS;
}

- if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
- !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
- vmx_l1d_flush_pages = l1d_flush_alloc_pages();
- if (!vmx_l1d_flush_pages)
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+ if (l1d_flush_init_once())
return -ENOMEM;
- }

l1tf_vmx_mitigation = l1tf;

@@ -6058,12 +6053,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
}

vcpu->stat.l1d_flush++;
-
- if (!l1d_flush_hw())
- return;
-
- l1d_flush_populate_tlb(vmx_l1d_flush_pages);
- l1d_flush_sw(vmx_l1d_flush_pages);
+ arch_l1d_flush(L1D_FLUSH_POPULATE_TLB);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -8056,10 +8046,6 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {

static void vmx_cleanup_l1d_flush(void)
{
- if (vmx_l1d_flush_pages) {
- l1d_flush_cleanup_pages(vmx_l1d_flush_pages);
- vmx_l1d_flush_pages = NULL;
- }
/* Restore state so sysfs ignores VMX */
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
--
2.17.1


2020-05-05 13:34:39

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH v5 4/6] arch/x86/kvm: Refactor L1D flushing

Hi Balbir,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tip/auto-latest]
[also build test WARNING on linus/master v5.7-rc4 next-20200505]
[cannot apply to kvm/linux-next tip/x86/core]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url: https://github.com/0day-ci/linux/commits/Balbir-Singh/Optionally-flush-L1D-on-context-switch/20200505-044116
base: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 9a31ac1743a00b816d5393acf61ce16713d319a1

If you fix the issue, kindly add following tag as appropriate
Reported-by: kbuild test robot <[email protected]>


cppcheck warnings: (new ones prefixed by >>)

>> arch/x86/kernel/l1d_flush.c:7:8: warning: Local variable 'l1d_flush_pages' shadows outer variable [shadowVariable]
void *l1d_flush_pages = NULL;
^
arch/x86/kernel/l1d_flush.c:83:14: note: Shadowed declaration
static void *l1d_flush_pages;
^
arch/x86/kernel/l1d_flush.c:7:8: note: Shadow variable
void *l1d_flush_pages = NULL;
^

vim +/l1d_flush_pages +7 arch/x86/kernel/l1d_flush.c

5228007f200e15 Balbir Singh 2020-05-04 3
604f3d173bf63d Balbir Singh 2020-05-04 4 static void *l1d_flush_alloc_pages(void)
5228007f200e15 Balbir Singh 2020-05-04 5 {
5228007f200e15 Balbir Singh 2020-05-04 6 struct page *page;
5228007f200e15 Balbir Singh 2020-05-04 @7 void *l1d_flush_pages = NULL;
5228007f200e15 Balbir Singh 2020-05-04 8 int i;
5228007f200e15 Balbir Singh 2020-05-04 9
5228007f200e15 Balbir Singh 2020-05-04 10 /*
5228007f200e15 Balbir Singh 2020-05-04 11 * This allocation for l1d_flush_pages is not tied to a VM/task's
5228007f200e15 Balbir Singh 2020-05-04 12 * lifetime and so should not be charged to a memcg.
5228007f200e15 Balbir Singh 2020-05-04 13 */
5228007f200e15 Balbir Singh 2020-05-04 14 page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
5228007f200e15 Balbir Singh 2020-05-04 15 if (!page)
5228007f200e15 Balbir Singh 2020-05-04 16 return NULL;
5228007f200e15 Balbir Singh 2020-05-04 17 l1d_flush_pages = page_address(page);
5228007f200e15 Balbir Singh 2020-05-04 18
5228007f200e15 Balbir Singh 2020-05-04 19 /*
5228007f200e15 Balbir Singh 2020-05-04 20 * Initialize each page with a different pattern in
5228007f200e15 Balbir Singh 2020-05-04 21 * order to protect against KSM in the nested
5228007f200e15 Balbir Singh 2020-05-04 22 * virtualization case.
5228007f200e15 Balbir Singh 2020-05-04 23 */
5228007f200e15 Balbir Singh 2020-05-04 24 for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
5228007f200e15 Balbir Singh 2020-05-04 25 memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
5228007f200e15 Balbir Singh 2020-05-04 26 PAGE_SIZE);
5228007f200e15 Balbir Singh 2020-05-04 27 }
5228007f200e15 Balbir Singh 2020-05-04 28 return l1d_flush_pages;
5228007f200e15 Balbir Singh 2020-05-04 29 }
5228007f200e15 Balbir Singh 2020-05-04 30

:::::: The code at line 7 was first introduced by commit
:::::: 5228007f200e157bfc7a4add32def7c7f32c0550 arch/x86/kvm: Refactor l1d flush lifecycle management

:::::: TO: Balbir Singh <[email protected]>
:::::: CC: 0day robot <[email protected]>

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]