2020-06-23 19:44:26

by Sean Christopherson

Subject: [PATCH 0/2] KVM: x86/mmu: Optimizations for kvm_mmu_get_page()

Avoid multiple hash lookups in kvm_mmu_get_page(), and tweak the cache
loop to optimize it for TDP.
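
Patch 2's diff is not quoted in this thread, but the idea behind "optimize
it for TDP" is presumably that a fully direct MMU never creates indirect or
unsync shadow pages, so the cache-hit path can skip the sync-related checks.
A minimal standalone sketch of that shape, in toy C rather than KVM code
(all names below are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct entry {
        unsigned long gfn;
        bool unsync;            /* only ever true for indirect shadow pages */
};

/* Stand-in for the expensive part of the cache-hit path. */
static bool sync_entry(struct entry *e)
{
        e->unsync = false;
        return true;
}

static struct entry *lookup(struct entry *cache, int n, unsigned long gfn,
                            bool direct_mmu)
{
        for (int i = 0; i < n; i++) {
                if (cache[i].gfn != gfn)
                        continue;
                /*
                 * Fully direct MMU: no entry can be unsync, so the sync
                 * handling is skipped entirely on every hit.
                 */
                if (!direct_mmu && cache[i].unsync && !sync_entry(&cache[i]))
                        continue;
                return &cache[i];
        }
        return NULL;
}

int main(void)
{
        struct entry cache[2] = { { .gfn = 1 }, { .gfn = 2 } };

        printf("hit on gfn 2: %d\n", lookup(cache, 2, 2, true) != NULL);
        return 0;
}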

Sean Christopherson (2):
KVM: x86/mmu: Avoid multiple hash lookups in kvm_mmu_get_page()
KVM: x86/mmu: Optimize MMU page cache lookup for fully direct MMUs

arch/x86/kvm/mmu/mmu.c | 26 ++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)

--
2.26.0


2020-06-23 19:44:40

by Sean Christopherson

Subject: [PATCH 1/2] KVM: x86/mmu: Avoid multiple hash lookups in kvm_mmu_get_page()

Refactor for_each_valid_sp() to take the list of shadow pages instead of
retrieving it from a gfn to avoid doing the gfn->list hash and lookup
multiple times during kvm_mmu_get_page().
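
The refactor boils down to computing the hash bucket once and reusing it for
both the lookup walk and the insertion on a miss, instead of rehashing the
gfn at each step. A minimal standalone sketch of that pattern (toy C, not
the KVM code; names are illustrative only):

#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 16

struct cached_page {
        unsigned long gfn;
        struct cached_page *next;       /* hash chain link */
};

static struct cached_page *buckets[NBUCKETS];
static struct cached_page pool[8];
static size_t used;

static size_t hashfn(unsigned long gfn)
{
        return gfn % NBUCKETS;
}

static struct cached_page *cache_get_page(unsigned long gfn)
{
        struct cached_page **bucket = &buckets[hashfn(gfn)]; /* hash exactly once */
        struct cached_page *p;

        for (p = *bucket; p; p = p->next)       /* lookup reuses the bucket */
                if (p->gfn == gfn)
                        return p;

        p = &pool[used++];                      /* miss: "allocate" a page */
        p->gfn = gfn;
        p->next = *bucket;                      /* insertion reuses it too */
        *bucket = p;
        return p;
}

int main(void)
{
        struct cached_page *first = cache_get_page(5);

        printf("second lookup hits the cache: %d\n", cache_get_page(5) == first);
        return 0;
}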

Cc: Peter Feiner <[email protected]>
Cc: Jon Cargille <[email protected]>
Cc: Jim Mattson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/mmu/mmu.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3dd0af7e7515..67f8f82e9783 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2258,15 +2258,14 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list);

-
-#define for_each_valid_sp(_kvm, _sp, _gfn) \
- hlist_for_each_entry(_sp, \
- &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
+#define for_each_valid_sp(_kvm, _sp, _list) \
+ hlist_for_each_entry(_sp, _list, hash_link) \
if (is_obsolete_sp((_kvm), (_sp))) { \
} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
- for_each_valid_sp(_kvm, _sp, _gfn) \
+ for_each_valid_sp(_kvm, _sp, \
+ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

static inline bool is_ept_sp(struct kvm_mmu_page *sp)
@@ -2477,6 +2476,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
unsigned int access)
{
union kvm_mmu_page_role role;
+ struct hlist_head *sp_list;
unsigned quadrant;
struct kvm_mmu_page *sp;
bool need_sync = false;
@@ -2496,7 +2496,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
}
- for_each_valid_sp(vcpu->kvm, sp, gfn) {
+
+ sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+ for_each_valid_sp(vcpu->kvm, sp, sp_list) {
if (sp->gfn != gfn) {
collisions++;
continue;
@@ -2533,8 +2535,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,

sp->gfn = gfn;
sp->role = role;
- hlist_add_head(&sp->hash_link,
- &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
+ hlist_add_head(&sp->hash_link, sp_list);
if (!direct) {
/*
* we should do write protection before syncing pages
--
2.26.0

2020-06-23 21:17:44

by Jon Cargille

Subject: Re: [PATCH 1/2] KVM: x86/mmu: Avoid multiple hash lookups in kvm_mmu_get_page()

LGTM.

Reviewed-By: Jon Cargille <[email protected]>


On Tue, Jun 23, 2020 at 12:40 PM Sean Christopherson
<[email protected]> wrote:
>
> Refactor for_each_valid_sp() to take the list of shadow pages instead of
> retrieving it from a gfn to avoid doing the gfn->list hash and lookup
> multiple times during kvm_mmu_get_page().
>
> [...]

2020-07-03 17:18:40

by Paolo Bonzini

Subject: Re: [PATCH 0/2] KVM: x86/mmu: Optimizations for kvm_mmu_get_page()

On 23/06/20 21:40, Sean Christopherson wrote:
> Avoid multiple hash lookups in kvm_mmu_get_page(), and tweak the cache
> loop to optimize it for TDP.
>
> Sean Christopherson (2):
> KVM: x86/mmu: Avoid multiple hash lookups in kvm_mmu_get_page()
> KVM: x86/mmu: Optimize MMU page cache lookup for fully direct MMUs
>
> arch/x86/kvm/mmu/mmu.c | 26 ++++++++++++++++----------
> 1 file changed, 16 insertions(+), 10 deletions(-)
>

Queued, thanks.

Paolo