Export kvm_is_mmio_pfn from spte.c. It will be used in a subsequent
commit for in-place lpage promotion when disabling dirty logging.

Signed-off-by: Ben Gardon <[email protected]>
---
arch/x86/kvm/mmu/spte.c | 2 +-
arch/x86/kvm/mmu/spte.h | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 45e9c0c3932e..8e9b827c4ed5 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -69,7 +69,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
return spte;
}

-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index cee02fe63429..e058a85e6c66 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -443,4 +443,5 @@ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void kvm_mmu_reset_all_pte_masks(void);

+bool kvm_is_mmio_pfn(kvm_pfn_t pfn);
#endif
--
2.35.1.894.gb6a874cedc-goog

On Mon, Mar 21, 2022, Ben Gardon wrote:
> Export kvm_is_mmio_pfn from spte.c. It will be used in a subsequent
> commit for in-place lpage promotion when disabling dirty logging.
Rather than force the promotion path to call kvm_is_mmio_pfn(), what about:
a. Truly exporting the helper, i.e. EXPORT_SYMBOL_GPL
b. Move this patch earlier in the series, before "KVM: x86/mmu: Factor out part of
vmx_get_mt_mask which does not depend on vcpu"
c. In the same patch, drop the "is_mmio" param from kvm_x86_ops.get_mt_mask()
and have vmx_get_mt_mask() call it directly.
That way the call to kvm_is_mmio_pfn() is avoided when running on AMD hosts
(ignoring the shadow_me_mask thing, which I have a separate tweak for). The
worst-case scenario for a lookup is actually quite expensive, e.g. a retpoline
and a spinlock.
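Something like the below, which is completely untested and makes up names: it
assumes the vcpu-independent helper from the "Factor out" patch can take the
pfn directly, and that the non-MMIO, coherent-DMA fallback is WB + ignore-PAT
(the noncoherent DMA and CR0.CD cases would still need the vcpu path).

  /* spte.c: truly export the helper so that kvm-intel.ko can call it. */
  EXPORT_SYMBOL_GPL(kvm_is_mmio_pfn);

  /* vmx.c: hypothetical vcpu-independent variant, name is a placeholder. */
  static u64 vmx_get_mt_mask_for_pfn(kvm_pfn_t pfn)
  {
  	/* Do the potentially expensive MMIO pfn check only on VMX. */
  	if (kvm_is_mmio_pfn(pfn))
  		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

  	return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
  }

Then the promotion path just hands the pfn to the vendor callback and doesn't
have to know anything about MMIO, and SVM never pays for the lookup.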