2023-01-12 16:48:13

by Isaku Yamahata

Subject: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

From: Isaku Yamahata <[email protected]>

TDX support will need the "suppress #VE" bit (bit 63) set as the initial
value for an SPTE. To reduce the code change size, introduce a new macro
SHADOW_NONPRESENT_VALUE for the initial value of a shadow page table entry
(SPTE) and replace the hard-coded value 0 with it. Initialize shadow page
tables with this value.

The plan is to unconditionally set the "suppress #VE" bit for both AMD and
Intel because: 1) AMD hardware uses bit 63 as the NX bit for present SPTEs
and ignores it for non-present SPTEs; 2) for conventional VMX guests, KVM
never enables "EPT-violation #VE" in the VMCS controls, so the "suppress #VE"
bit is ignored by hardware.
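
For reference, this patch itself keeps SHADOW_NONPRESENT_VALUE as 0ULL; a
later patch in the series would presumably redefine it along these lines (a
hypothetical sketch, not a hunk from this series; the static_assert is an
assumption):

#ifdef CONFIG_X86_64
/* Bit 63 is "suppress #VE"; set it in every non-present SPTE. */
#define SHADOW_NONPRESENT_VALUE	BIT_ULL(63)
/* The non-present value must not overlap bits used by present SPTEs. */
static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK));
#else
#define SHADOW_NONPRESENT_VALUE	0ULL
#endif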

Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Isaku Yamahata <[email protected]>
---
arch/x86/kvm/mmu/mmu.c | 50 ++++++++++++++++++++++++++++++----
arch/x86/kvm/mmu/paging_tmpl.h | 3 +-
arch/x86/kvm/mmu/spte.h | 2 ++
arch/x86/kvm/mmu/tdp_mmu.c | 15 +++++-----
4 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 15d0e8f11d53..59befdfeec23 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -540,9 +540,9 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)

if (!is_shadow_present_pte(old_spte) ||
!spte_has_volatile_bits(old_spte))
- __update_clear_spte_fast(sptep, 0ull);
+ __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
else
- old_spte = __update_clear_spte_slow(sptep, 0ull);
+ old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);

if (!is_shadow_present_pte(old_spte))
return old_spte;
@@ -576,7 +576,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
*/
static void mmu_spte_clear_no_track(u64 *sptep)
{
- __update_clear_spte_fast(sptep, 0ull);
+ __update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
@@ -644,6 +644,39 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
}
}

+#ifdef CONFIG_X86_64
+static inline void kvm_init_shadow_page(void *page)
+{
+ memset64(page, SHADOW_NONPRESENT_VALUE, 4096 / 8);
+}
+
+static int mmu_topup_shadow_page_cache(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_memory_cache *mc = &vcpu->arch.mmu_shadow_page_cache;
+ int start, end, i, r;
+
+ start = kvm_mmu_memory_cache_nr_free_objects(mc);
+ r = kvm_mmu_topup_memory_cache(mc, PT64_ROOT_MAX_LEVEL);
+
+ /*
+ * Note, topup may have allocated objects even if it failed to allocate
+ * the minimum number of objects required to make forward progress _at
+ * this time_. Initialize newly allocated objects even on failure, as
+ * userspace can free memory and rerun the vCPU in response to -ENOMEM.
+ */
+ end = kvm_mmu_memory_cache_nr_free_objects(mc);
+ for (i = start; i < end; i++)
+ kvm_init_shadow_page(mc->objects[i]);
+ return r;
+}
+#else
+static int mmu_topup_shadow_page_cache(struct kvm_vcpu *vcpu)
+{
+ return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+ PT64_ROOT_MAX_LEVEL);
+}
+#endif /* CONFIG_X86_64 */
+
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
int r;
@@ -653,8 +686,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
if (r)
return r;
- r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
- PT64_ROOT_MAX_LEVEL);
+ r = mmu_topup_shadow_page_cache(vcpu);
if (r)
return r;
if (maybe_indirect) {
@@ -5920,7 +5952,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

- vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
+ /*
+ * When X86_64, initial SEPT entries are initialized with
+ * SHADOW_NONPRESENT_VALUE. Otherwise zeroed. See
+ * mmu_topup_shadow_page_cache().
+ */
+ if (!IS_ENABLED(CONFIG_X86_64))
+ vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 0f6455072055..42d7106c7350 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -1036,7 +1036,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gpa_t pte_gpa;
gfn_t gfn;

- if (!sp->spt[i])
+ /* spt[i] has initial value of shadow page table allocation */
+ if (sp->spt[i] == SHADOW_NONPRESENT_VALUE)
continue;

pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 0d8deefee66c..f190eaf6b2b5 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -148,6 +148,8 @@ static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);

#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)

+#define SHADOW_NONPRESENT_VALUE 0ULL
+
extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 12e430a4ebc3..9cf5844dd34a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -701,7 +701,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* here since the SPTE is going from non-present to non-present. Use
* the raw write helper to avoid an unnecessary check on volatile bits.
*/
- __kvm_tdp_mmu_write_spte(iter->sptep, 0);
+ __kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);

return 0;
}
@@ -878,8 +878,8 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
continue;

if (!shared)
- tdp_mmu_set_spte(kvm, &iter, 0);
- else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
+ tdp_mmu_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
+ else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
goto retry;
}
}
@@ -935,8 +935,9 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
return false;

- __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
- sp->gfn, sp->role.level + 1, true, true);
+ __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
+ SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1,
+ true, true);

return true;
}
@@ -970,7 +971,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
!is_last_spte(iter.old_spte, iter.level))
continue;

- tdp_mmu_set_spte(kvm, &iter, 0);
+ tdp_mmu_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
flush = true;
}

@@ -1339,7 +1340,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
* invariant that the PFN of a present leaf SPTE can never change.
* See __handle_changed_spte().
*/
- tdp_mmu_set_spte(kvm, iter, 0);
+ tdp_mmu_set_spte(kvm, iter, SHADOW_NONPRESENT_VALUE);

if (!pte_write(range->pte)) {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
--
2.25.1


2023-01-25 09:24:58

by Zhi Wang

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Thu, 12 Jan 2023 08:31:38 -0800
[email protected] wrote:

This refactor patch is quite hacky.

Why not change the purpose of vcpu->arch.mmu_shadow_page_cache.gfp_zero and let
the callers respect that the initial value of an SPTE can be configurable? That
would be generic rather than TDX-specific: kvm_init_shadow_page() would not be
required, mmu_topup_shadow_page_cache() could be left untouched, and the
refactor could cover other architectures.

1) Let it store the expected nonpresent value and rename it to nonpresent_spte.

2) Let mmu_spte_clear_track_bits(), mmu_spte_clear_no_track() and all
the other places that assume 0 as the initial value respect nonpresent_spte.

3) Let kvm_mmu_topup_memory_cache() respect nonpresent_spte: a. use __GFP_ZERO
if nonpresent_spte is zero; b. memset the page if nonpresent_spte is *not*
zero.

Now that the initial value is configurable, configure nonpresent_spte in the
TDX initialization path before the first topup in the next patch.

> From: Isaku Yamahata <[email protected]>
>
> TDX support will need the "suppress #VE" bit (bit 63) set as the initial
> value for an SPTE. To reduce the code change size, introduce a new macro
> SHADOW_NONPRESENT_VALUE for the initial value of a shadow page table entry
> (SPTE) and replace the hard-coded value 0 with it. Initialize shadow page
> tables with this value.
>
> [... full patch trimmed ...]


2023-01-25 17:22:21

by Sean Christopherson

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Wed, Jan 25, 2023, Zhi Wang wrote:
> On Thu, 12 Jan 2023 08:31:38 -0800
> [email protected] wrote:
>
> This refactor patch is quite hacky.
>
> Why not change the purpose of vcpu->arch.mmu_shadow_page_cache.gfp_zero and let
> the callers respect that the initial value of an SPTE can be configurable? That
> would be generic rather than TDX-specific: kvm_init_shadow_page() would not be
> required, mmu_topup_shadow_page_cache() could be left untouched, and the
> refactor could cover other architectures.
>
> 1) Let it store the expected nonpresent value and rename it to nonpresent_spte.


I agree that handling this in the common code would be cleaner, but repurposing
gfp_zero gets kludgy because it would require a magic value to say "don't initialize
the data", e.g. x86's mmu_shadowed_info_cache isn't pre-filled.

And supporting a custom 64-bit init value for kmem_cache-backed caches would require
restricting such caches to be a multiple of 8 bytes in size.

How about this? Lightly tested.

From: Sean Christopherson <[email protected]>
Date: Wed, 25 Jan 2023 16:55:01 +0000
Subject: [PATCH] KVM: Allow page-sized MMU caches to be initialized with
custom 64-bit values

Add support to MMU caches for initializing a page with a custom 64-bit
value, e.g. to pre-fill an entire page table with non-zero PTE values.
The functionality will be used by x86 to support Intel's TDX, which needs
to set bit 63 in all non-present PTEs in order to prevent !PRESENT page
faults from getting reflected into the guest (Intel's EPT Violation #VE
architecture made the less than brilliant decision of having the per-PTE
behavior be opt-out instead of opt-in).

Signed-off-by: Sean Christopherson <[email protected]>
---
include/linux/kvm_types.h | 1 +
virt/kvm/kvm_main.c | 16 ++++++++++++++--
2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 76de36e56cdf..67972db17b55 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -94,6 +94,7 @@ struct kvm_mmu_memory_cache {
int nobjs;
gfp_t gfp_zero;
gfp_t gfp_custom;
+ u64 init_value;
struct kmem_cache *kmem_cache;
int capacity;
void **objects;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d255964ec331..78f1e49179a7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -380,12 +380,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm)
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
{
+ void *page;
+
gfp_flags |= mc->gfp_zero;

if (mc->kmem_cache)
return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
- else
- return (void *)__get_free_page(gfp_flags);
+
+ page = (void *)__get_free_page(gfp_flags);
+ if (page && mc->init_value)
+ memset64(page, mc->init_value, PAGE_SIZE / sizeof(mc->init_value));
+ return page;
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
@@ -400,6 +405,13 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity,
if (WARN_ON_ONCE(!capacity))
return -EIO;

+ /*
+ * Custom init values can be used only for page allocations,
+ * and obviously conflict with __GFP_ZERO.
+ */
+ if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
+ return -EIO;
+
mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
if (!mc->objects)
return -ENOMEM;

base-commit: 503f0315c97739d3f8e645c500d81757dfbf76be
--


2023-01-26 21:38:40

by Kai Huang

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Wed, 2023-01-25 at 17:22 +0000, Sean Christopherson wrote:
> I agree that handling this in the common code would be cleaner, but repurposing
> gfp_zero gets kludgy because it would require a magic value to say "don't initialize
> the data", e.g. x86's mmu_shadowed_info_cache isn't pre-filled.
>
> And supporting a custom 64-bit init value for kmem_cache-backed caches would require
> restricting such caches to be a multiple of 8 bytes in size.
>
> How about this? Lightly tested.
>
> [... patch trimmed: it adds a u64 init_value to struct kvm_mmu_memory_cache
> and memset64()s newly allocated pages with it in mmu_memory_cache_alloc_obj() ...]

init_value and gfp_zero are kinda redundant. How about removing gfp_zero
completely?

mmu_memory_cache_alloc_obj(...)
{
...
if (!mc->init_value)
gfp_flags |= __GFP_ZERO;
...
}

And in kvm_mmu_create() you initialize all caches' init_value explicitly.

2023-01-26 22:01:51

by Sean Christopherson

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Thu, Jan 26, 2023, Huang, Kai wrote:
> On Wed, 2023-01-25 at 17:22 +0000, Sean Christopherson wrote:
> > I agree that handling this in the common code would be cleaner, but repurposing
> > gfp_zero gets kludgy because it would require a magic value to say "don't initialize
> > the data", e.g. x86's mmu_shadowed_info_cache isn't pre-filled.

...

> > + /*
> > + * Custom init values can be used only for page allocations,
> > + * and obviously conflict with __GFP_ZERO.
> > + */
> > + if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
> > + return -EIO;
>
> init_value and gfp_zero are kinda redundant. How about removing gfp_zero
> completely?
>
> mmu_memory_cache_alloc_obj(...)
> {
> ...
> if (!mc->init_value)
> gfp_flags |= __GFP_ZERO;
> ...
> }
>
> And in kvm_mmu_create() you initialize all caches' init_value explicitly.

No, as mentioned above there's also a "don't initialize the data" case. Leaving
init_value=0 means those users would see unnecessary zeroing, and again I don't
want to use a magic value to say "don't initialize".
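
To spell out the three per-cache configurations being distinguished (an
illustrative summary, not code from the patch):

/*
 *  zero-initialized:  mc->gfp_zero = __GFP_ZERO; mc->init_value = 0;
 *  custom pre-fill:   mc->gfp_zero = 0;          mc->init_value != 0;
 *  uninitialized:     mc->gfp_zero = 0;          mc->init_value = 0;
 *                     (e.g. x86's mmu_shadowed_info_cache)
 *
 * Folding __GFP_ZERO into "init_value == 0" would silently turn the third
 * case into the first, zeroing pages that don't need it.
 */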

2023-01-27 21:37:01

by Zhi Wang

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Wed, 25 Jan 2023 17:22:08 +0000
Sean Christopherson <[email protected]> wrote:

> On Wed, Jan 25, 2023, Zhi Wang wrote:
> > This refactor patch is quite hacky.
> > [...]
>
>
> I agree that handling this in the common code would be cleaner, but repurposing
> gfp_zero gets kludgy because it would require a magic value to say "don't initialize
> the data", e.g. x86's mmu_shadowed_info_cache isn't pre-filled.
>
> And supporting a custom 64-bit init value for kmem_cache-backed caches would require
> restricting such caches to be a multiple of 8 bytes in size.
>
> How about this? Lightly tested.
>
> From: Sean Christopherson <[email protected]>
> Date: Wed, 25 Jan 2023 16:55:01 +0000
> Subject: [PATCH] KVM: Allow page-sized MMU caches to be initialized with
> custom 64-bit values
>

It looks good enough so far, although it only supports a 64-bit init value.
But it can be extended in the future.

Just want to make sure people are thinking the same:

1) Keep the changes of SHADOW_NONPRESENT_VALUE and REMOVED_SPTE in the TDX
patch. init_value stays as a generic feature in the KVM MMU cache layer; it is
*not* going to replace SHADOW_NONPRESENT_VALUE.

2) The TDX kvm_x86_vcpu_create path sets SHADOW_NONPRESENT_VALUE as init_value.

3) The MMU cache top-up function initializes the page according to init_value,
with Sean's patch.
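
For point 2, the wiring would presumably be a one-liner in the vCPU setup
path (a hypothetical sketch; the exact hook is an assumption):

	/* Hypothetical sketch of point 2; the exact location is an assumption. */
	vcpu->arch.mmu_shadow_page_cache.init_value = SHADOW_NONPRESENT_VALUE;
	/* init_value conflicts with __GFP_ZERO, so gfp_zero must be left unset. */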

> [... Sean's patch trimmed ...]


2023-02-27 21:50:55

by Isaku Yamahata

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Fri, Jan 27, 2023 at 11:36:52PM +0200,
Zhi Wang <[email protected]> wrote:

> On Wed, 25 Jan 2023 17:22:08 +0000
> Sean Christopherson <[email protected]> wrote:
>
> > How about this? Lightly tested.
> >
> > [... patch trimmed ...]
>
> It looks good enough so far, although it only supports a 64-bit init value.
> But it can be extended in the future.
>
> Just want to make sure people are thinking the same:
>
> 1) Keep the changes of SHADOW_NONPRESENT_VALUE and REMOVED_SPTE in the TDX
> patch. init_value stays as a generic feature in the KVM MMU cache layer; it
> is *not* going to replace SHADOW_NONPRESENT_VALUE.
>
> 2) The TDX kvm_x86_vcpu_create path sets SHADOW_NONPRESENT_VALUE as init_value.
>
> 3) The MMU cache top-up function initializes the page according to init_value,
> with Sean's patch.

The patch worked for me. Included in the v12 patch series.
--
Isaku Yamahata <[email protected]>

2023-02-27 21:52:21

by Isaku Yamahata

Subject: Re: [PATCH v11 030/113] KVM: x86/mmu: Replace hardcoded value 0 for the initial value for SPTE

On Thu, Jan 26, 2023 at 10:01:41PM +0000,
Sean Christopherson <[email protected]> wrote:

> On Thu, Jan 26, 2023, Huang, Kai wrote:
> > On Wed, 2023-01-25 at 17:22 +0000, Sean Christopherson wrote:
> > > I agree that handling this in the common code would be cleaner, but repurposing
> > > gfp_zero gets kludgy because it would require a magic value to say "don't initialize
> > > the data", e.g. x86's mmu_shadowed_info_cache isn't pre-filled.
>
> ...
>
> > > + /*
> > > + * Custom init values can be used only for page allocations,
> > > + * and obviously conflict with __GFP_ZERO.
> > > + */
> > > + if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero)))
> > > + return -EIO;
> >
> > init_value and gfp_zero are kinda redundant. How about removing gfp_zero
> > completely?
> >
> > mmu_memory_cache_alloc_obj(...)
> > {
> > ...
> > if (!mc->init_value)
> > gfp_flags |= __GFP_ZERO;
> > ...
> > }
> >
> > And in kvm_mmu_create() you initialize all caches' init_value explicitly.
>
> No, as mentioned above there's also a "don't initialize the data" case. Leaving
> init_value=0 means those users would see unnecessary zeroing, and again I don't
> want to use a magic value to say "don't initialize".

Abusing gfp_flags for zeroing doesn't work for the Secure-EPT page tables,
which don't need zeroing even when no initial value is set. So I used the
patch as is.
--
Isaku Yamahata <[email protected]>