Split out the allocation and free routines to be used in a follow-up
set of patches (to reuse for L1D flushing).
Signed-off-by: Balbir Singh <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
---
arch/x86/include/asm/cacheflush.h | 3 +++
arch/x86/kernel/Makefile | 1 +
arch/x86/kernel/l1d_flush.c | 36 +++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.c | 25 +++------------------
4 files changed, 43 insertions(+), 22 deletions(-)
create mode 100644 arch/x86/kernel/l1d_flush.c
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63feaf2a5f93..bac56fcd9790 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -6,6 +6,9 @@
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
+#define L1D_CACHE_ORDER 4
void clflush_cache_range(void *addr, unsigned int size);
+void *l1d_flush_alloc_pages(void);
+void l1d_flush_cleanup_pages(void *l1d_flush_pages);
#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e77261db2391..c17c1e3c1a0b 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -157,3 +157,4 @@ ifeq ($(CONFIG_X86_64),y)
endif
obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
+obj-y += l1d_flush.o
diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
new file mode 100644
index 000000000000..d605878c8f28
--- /dev/null
+++ b/arch/x86/kernel/l1d_flush.c
@@ -0,0 +1,36 @@
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+
+void *l1d_flush_alloc_pages(void)
+{
+ struct page *page;
+ void *l1d_flush_pages = NULL;
+ int i;
+
+ /*
+ * This allocation for l1d_flush_pages is not tied to a VM/task's
+ * lifetime and so should not be charged to a memcg.
+ */
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+ if (!page)
+ return NULL;
+ l1d_flush_pages = page_address(page);
+
+ /*
+ * Initialize each page with a different pattern in
+ * order to protect against KSM in the nested
+ * virtualization case.
+ */
+ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+ memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
+ PAGE_SIZE);
+ }
+ return l1d_flush_pages;
+}
+EXPORT_SYMBOL_GPL(l1d_flush_alloc_pages);
+
+void l1d_flush_cleanup_pages(void *l1d_flush_pages)
+{
+ free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
+}
+EXPORT_SYMBOL_GPL(l1d_flush_cleanup_pages);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 00d31a5e0089..f35654db904a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -203,14 +203,10 @@ static const struct {
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};
-#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
- struct page *page;
- unsigned int i;
-
if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
return 0;
@@ -253,24 +249,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
- /*
- * This allocation for vmx_l1d_flush_pages is not tied to a VM
- * lifetime and so should not be charged to a memcg.
- */
- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
- if (!page)
+ vmx_l1d_flush_pages = l1d_flush_alloc_pages();
+ if (!vmx_l1d_flush_pages)
return -ENOMEM;
- vmx_l1d_flush_pages = page_address(page);
-
- /*
- * Initialize each page with a different pattern in
- * order to protect against KSM in the nested
- * virtualization case.
- */
- for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
- memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
- PAGE_SIZE);
- }
}
l1tf_vmx_mitigation = l1tf;
@@ -8099,7 +8080,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
static void vmx_cleanup_l1d_flush(void)
{
if (vmx_l1d_flush_pages) {
- free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ l1d_flush_cleanup_pages(vmx_l1d_flush_pages);
vmx_l1d_flush_pages = NULL;
}
/* Restore state so sysfs ignores VMX */
--
2.17.1
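
[ Editorial note: for readers skimming the thread, here is a minimal
  sketch of how a consumer of the two split-out helpers is expected to
  manage the flush buffer, mirroring the vmx.c usage in the patch above.
  The example_* names are hypothetical and not part of the patch. ]

#include <linux/errno.h>
#include <asm/cacheflush.h>

static void *example_flush_pages;

/* Allocate the flush buffer once, e.g. at feature setup time. */
static int example_setup(void)
{
	example_flush_pages = l1d_flush_alloc_pages();
	if (!example_flush_pages)
		return -ENOMEM;
	return 0;
}

/* Release the buffer on teardown, as vmx_cleanup_l1d_flush() does. */
static void example_teardown(void)
{
	if (example_flush_pages) {
		l1d_flush_cleanup_pages(example_flush_pages);
		example_flush_pages = NULL;
	}
}
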
Balbir Singh <[email protected]> writes:
> Subject: [PATCH v6 1/6] arch/x86/kvm: Refactor....
arch/x86/kvm: is really not the correct subsystem prefix...
I'll fix it up this time.
Balbir Singh <[email protected]> writes:
> +++ b/arch/x86/kernel/l1d_flush.c
> @@ -0,0 +1,36 @@
Lacks
+// SPDX-License-Identifier: GPL-2.0-only
On Wed, 2020-05-13 at 15:35 +0200, Thomas Gleixner wrote:
>
> Balbir Singh <[email protected]> writes:
>
> > Subject: [PATCH v6 1/6] arch/x86/kvm: Refactor....
>
> arch/x86/kvm: is really not the correct subsystem prefix...
>
> I'll fix it up this time.
Thanks, noted!
Balbir Singh.
On Wed, 2020-05-13 at 15:53 +0200, Thomas Gleixner wrote:
>
>
> Balbir Singh <[email protected]> writes:
> > +++ b/arch/x86/kernel/l1d_flush.c
> > @@ -0,0 +1,36 @@
>
> Lacks
>
> +// SPDX-License-Identifier: GPL-2.0-only
>
Agreed, it should match the license in arch/x86/kvm/vmx/vmx.c
Thanks,
Balbir
The following commit has been merged into the x86/mm branch of tip:
Commit-ID: b9b3bc1c30be1f056c1c0564bc7268820ea8bf70
Gitweb: https://git.kernel.org/tip/b9b3bc1c30be1f056c1c0564bc7268820ea8bf70
Author: Balbir Singh <[email protected]>
AuthorDate: Sun, 10 May 2020 11:47:58 +10:00
Committer: Thomas Gleixner <[email protected]>
CommitterDate: Wed, 13 May 2020 18:12:18 +02:00
x86/kvm: Refactor L1D flush page management
Split out the allocation and free routines and move them into builtin code
so they can be reused for the upcoming paranoid L1D flush on context switch
mitigation.
[ tglx: Add missing SPDX identifier and massage subject and changelog ]
Signed-off-by: Balbir Singh <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
arch/x86/include/asm/cacheflush.h | 3 ++-
arch/x86/kernel/Makefile | 1 +-
arch/x86/kernel/l1d_flush.c | 39 ++++++++++++++++++++++++++++++-
arch/x86/kvm/vmx/vmx.c | 25 ++-----------------
4 files changed, 46 insertions(+), 22 deletions(-)
create mode 100644 arch/x86/kernel/l1d_flush.c
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63feaf2..bac56fc 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -6,6 +6,9 @@
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
+#define L1D_CACHE_ORDER 4
void clflush_cache_range(void *addr, unsigned int size);
+void *l1d_flush_alloc_pages(void);
+void l1d_flush_cleanup_pages(void *l1d_flush_pages);
#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index ba89cab..c04d218 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -156,3 +156,4 @@ ifeq ($(CONFIG_X86_64),y)
endif
obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
+obj-y += l1d_flush.o
diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
new file mode 100644
index 0000000..4f298b7
--- /dev/null
+++ b/arch/x86/kernel/l1d_flush.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+
+void *l1d_flush_alloc_pages(void)
+{
+ struct page *page;
+ void *l1d_flush_pages = NULL;
+ int i;
+
+ /*
+ * This allocation for l1d_flush_pages is not tied to a VM/task's
+ * lifetime and so should not be charged to a memcg.
+ */
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+ if (!page)
+ return NULL;
+ l1d_flush_pages = page_address(page);
+
+ /*
+ * Initialize each page with a different pattern in
+ * order to protect against KSM in the nested
+ * virtualization case.
+ */
+ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+ memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
+ PAGE_SIZE);
+ }
+ return l1d_flush_pages;
+}
+EXPORT_SYMBOL_GPL(l1d_flush_alloc_pages);
+
+void l1d_flush_cleanup_pages(void *l1d_flush_pages)
+{
+ free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
+}
+EXPORT_SYMBOL_GPL(l1d_flush_cleanup_pages);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8305097..225aa82 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -203,14 +203,10 @@ static const struct {
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};
-#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
- struct page *page;
- unsigned int i;
-
if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
return 0;
@@ -253,24 +249,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
- /*
- * This allocation for vmx_l1d_flush_pages is not tied to a VM
- * lifetime and so should not be charged to a memcg.
- */
- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
- if (!page)
+ vmx_l1d_flush_pages = l1d_flush_alloc_pages();
+ if (!vmx_l1d_flush_pages)
return -ENOMEM;
- vmx_l1d_flush_pages = page_address(page);
-
- /*
- * Initialize each page with a different pattern in
- * order to protect against KSM in the nested
- * virtualization case.
- */
- for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
- memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
- PAGE_SIZE);
- }
}
l1tf_vmx_mitigation = l1tf;
@@ -8026,7 +8007,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
static void vmx_cleanup_l1d_flush(void)
{
if (vmx_l1d_flush_pages) {
- free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ l1d_flush_cleanup_pages(vmx_l1d_flush_pages);
vmx_l1d_flush_pages = NULL;
}
/* Restore state so sysfs ignores VMX */
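
[ Editorial sizing note, not from the thread: with L1D_CACHE_ORDER set
  to 4, the helper allocates 1 << 4 = 16 pages, i.e. 64KB with 4KB
  pages, twice the 32KB L1D of the affected CPUs, so the software fill
  sequence has enough data to displace every cache line. The init loop
  also fills page i with the byte pattern i + 1, so no two pages are
  ever identical and KSM finds nothing to merge in the nested
  virtualization case. As a back-of-the-envelope snippet, with
  illustrative names: ]

/*
 * Illustrative sizing (assumes 4KB pages and a 32KB L1D):
 *   pages  = 1 << L1D_CACHE_ORDER = 16
 *   buffer = 16 * 4KB = 64KB = 2 * the 32KB L1D capacity
 */
#define EXAMPLE_L1D_CACHE_ORDER	4
#define EXAMPLE_L1D_FLUSH_SIZE	(PAGE_SIZE << EXAMPLE_L1D_CACHE_ORDER)
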