In the interest of developing a version of __make_spte that can function
without a vCPU pointer, factor shadow_zero_check out into an additional
argument to the function.

No functional change intended.

Signed-off-by: Ben Gardon <[email protected]>
---
 arch/x86/kvm/mmu/spte.c | 10 ++++++----
 arch/x86/kvm/mmu/spte.h |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 931cf93c3b7e..ef2d85577abb 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -94,7 +94,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
 		 bool can_unsync, bool host_writable, u64 mt_mask,
-		 u64 *new_spte)
+		 struct rsvd_bits_validate *shadow_zero_check, u64 *new_spte)
 {
 	int level = sp->role.level;
 	u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -177,9 +177,9 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (prefetch)
 		spte = mark_spte_for_access_track(spte);
 
-	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
+	WARN_ONCE(is_rsvd_spte(shadow_zero_check, spte, level),
 		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
-		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
+		  get_rsvd_bits(shadow_zero_check, spte, level));
 
 	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
 		/* Enforced by kvm_mmu_hugepage_adjust. */
@@ -199,10 +199,12 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
 						       kvm_is_mmio_pfn(pfn));
+	struct rsvd_bits_validate *shadow_zero_check =
+		&vcpu->arch.mmu->shadow_zero_check;
 
 	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
 			   prefetch, can_unsync, host_writable, mt_mask,
-			   new_spte);
+			   shadow_zero_check, new_spte);
 }
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index d051f955699e..e8a051188eb6 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -414,7 +414,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 const struct kvm_memory_slot *slot, unsigned int pte_access,
 		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
 		 bool can_unsync, bool host_writable, u64 mt_mask,
-		 u64 *new_spte);
+		 struct rsvd_bits_validate *shadow_zero_check, u64 *new_spte);
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       const struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
--
2.35.1.894.gb6a874cedc-goog

On Mon, Mar 21, 2022, Ben Gardon wrote:
> In the interest of developing a version of __make_spte that can function
> without a vCPU pointer, factor shadow_zero_check out into an additional
> argument to the function.
>
> No functional change intended.
>
> Signed-off-by: Ben Gardon <[email protected]>
> ---
>  arch/x86/kvm/mmu/spte.c | 10 ++++++----
>  arch/x86/kvm/mmu/spte.h |  2 +-
>  2 files changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
> index 931cf93c3b7e..ef2d85577abb 100644
> --- a/arch/x86/kvm/mmu/spte.c
> +++ b/arch/x86/kvm/mmu/spte.c
> @@ -94,7 +94,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  		 const struct kvm_memory_slot *slot, unsigned int pte_access,
>  		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
>  		 bool can_unsync, bool host_writable, u64 mt_mask,
> -		 u64 *new_spte)
> +		 struct rsvd_bits_validate *shadow_zero_check, u64 *new_spte)
Can we name the new param "rsvd_bits"? As mentioned in the other patch, it's not
a pure "are these bits zero" check.
>  {
>  	int level = sp->role.level;
>  	u64 spte = SPTE_MMU_PRESENT_MASK;
> @@ -177,9 +177,9 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  	if (prefetch)
>  		spte = mark_spte_for_access_track(spte);
> 
> -	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
> +	WARN_ONCE(is_rsvd_spte(shadow_zero_check, spte, level),
>  		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
> -		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
> +		  get_rsvd_bits(shadow_zero_check, spte, level));
> 
>  	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
>  		/* Enforced by kvm_mmu_hugepage_adjust. */
> @@ -199,10 +199,12 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  {
>  	u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
>  						       kvm_is_mmio_pfn(pfn));
> +	struct rsvd_bits_validate *shadow_zero_check =
> +		&vcpu->arch.mmu->shadow_zero_check;
> 
>  	return __make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
>  			   prefetch, can_unsync, host_writable, mt_mask,
> -			   new_spte);
> +			   shadow_zero_check, new_spte);
I don't see any reason to snapshot the reserved bits, IMO this is much more
readable overall:
	u64 mt_mask = static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
						       kvm_is_mmio_pfn(pfn));

	return __make_spte(vcpu->kvm, sp, slot, pte_access, gfn, pfn, old_spte,
			   prefetch, can_unsync, host_writable, mt_mask,
			   &vcpu->arch.mmu->shadow_zero_check, new_spte);
And it avoids propagating the shadow_zero_check naming.
> diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
> index d051f955699e..e8a051188eb6 100644
> --- a/arch/x86/kvm/mmu/spte.h
> +++ b/arch/x86/kvm/mmu/spte.h
> @@ -414,7 +414,7 @@ bool __make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  		 const struct kvm_memory_slot *slot, unsigned int pte_access,
>  		 gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch,
>  		 bool can_unsync, bool host_writable, u64 mt_mask,
> -		 u64 *new_spte);
> +		 struct rsvd_bits_validate *shadow_zero_check, u64 *new_spte);
>  bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  	       const struct kvm_memory_slot *slot,
>  	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
> --
> 2.35.1.894.gb6a874cedc-goog
>