2023-09-28 16:34:53

by Paolo Bonzini

Subject: [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions

Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
whether the lock is taken for read or write. Either way, protection
is achieved via RCU and tdp_mmu_pages_lock. Remove the argument
and just assert that the lock is held.
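
Concretely, the put path relies only on primitives that are safe under
either lock mode; roughly (a sketch, not the exact code):

	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;		/* atomic; lock mode is irrelevant */

	/* list removal is serialized by tdp_mmu_pages_lock, not mmu_lock */
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	/* concurrent walkers hold rcu_read_lock(), so free via RCU */
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);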

Signed-off-by: Paolo Bonzini <[email protected]>
---
arch/x86/kvm/mmu/mmu.c | 2 +-
arch/x86/kvm/mmu/tdp_mmu.c | 34 +++++++++++++++++++++-------------
arch/x86/kvm/mmu/tdp_mmu.h | 3 +--
3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f7901cb4d2fa..64b1bdba943e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3548,7 +3548,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
return;

if (is_tdp_mmu_page(sp))
- kvm_tdp_mmu_put_root(kvm, sp, false);
+ kvm_tdp_mmu_put_root(kvm, sp);
else if (!--sp->root_count && sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 6cd4dd631a2f..ab0876015be7 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
tdp_mmu_free_sp(sp);
}

-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
- kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+ /*
+ * Either read or write is okay, but the lock is needed because
+ * writers might not take tdp_mmu_pages_lock.
+ */
+ lockdep_assert_held(&kvm->mmu_lock);

if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
return;
@@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
*/
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *prev_root,
- bool shared, bool only_valid)
+ bool only_valid)
{
struct kvm_mmu_page *next_root;

+ /*
+ * While the roots themselves are RCU-protected, fields such as
+ * role.invalid are protected by mmu_lock.
+ */
+ lockdep_assert_held(&kvm->mmu_lock);
+
rcu_read_lock();

if (prev_root)
@@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
rcu_read_unlock();

if (prev_root)
- kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+ kvm_tdp_mmu_put_root(kvm, prev_root);

return next_root;
}
@@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* recent root. (Unless keeping a live reference is desirable.)
*
* If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
+ * mode.
*/
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
- _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
kvm_mmu_page_as_id(_root) != _as_id) { \
} else
@@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
- _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, false)) \
if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
} else

diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 733a3aef3a96..20d97aa46c49 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

-void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared);
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);

bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
--
2.39.1



2023-09-29 01:01:57

by Maxim Levitsky

Subject: Re: [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions

On Thu, 2023-09-28 at 12:29 -0400, Paolo Bonzini wrote:
> Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
> whether the lock is taken for read or write. Either way, protection
> is achieved via RCU and tdp_mmu_pages_lock. Remove the argument
> and just assert that the lock is held.
>
> Signed-off-by: Paolo Bonzini <[email protected]>
> ---
> arch/x86/kvm/mmu/mmu.c | 2 +-
> arch/x86/kvm/mmu/tdp_mmu.c | 34 +++++++++++++++++++++-------------
> arch/x86/kvm/mmu/tdp_mmu.h | 3 +--
> 3 files changed, 23 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index f7901cb4d2fa..64b1bdba943e 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3548,7 +3548,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
> return;
>
> if (is_tdp_mmu_page(sp))
> - kvm_tdp_mmu_put_root(kvm, sp, false);
> + kvm_tdp_mmu_put_root(kvm, sp);
> else if (!--sp->root_count && sp->role.invalid)
> kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index 6cd4dd631a2f..ab0876015be7 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -73,10 +73,13 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
> tdp_mmu_free_sp(sp);
> }
>
> -void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> - bool shared)
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
> {
> - kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> + /*
> + * Either read or write is okay, but the lock is needed because
> + * writers might not take tdp_mmu_pages_lock.
> + */
> + lockdep_assert_held(&kvm->mmu_lock);

I double checked all callers and indeed at least the read lock is held.
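
To illustrate, the call sites fall into two flavors, roughly (a sketch,
not actual code):

	/* write-locked path, e.g. mmu_free_root_page() */
	write_lock(&kvm->mmu_lock);
	kvm_tdp_mmu_put_root(kvm, sp);
	write_unlock(&kvm->mmu_lock);

	/* read-locked path, e.g. via the yield-safe root iterators */
	read_lock(&kvm->mmu_lock);
	kvm_tdp_mmu_put_root(kvm, root);
	read_unlock(&kvm->mmu_lock);

lockdep_assert_held() only checks that mmu_lock is held at all, not the
mode it is held in, so both flavors satisfy the new assertion.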

>
> if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
> return;
> @@ -106,10 +109,16 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> */
> static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> struct kvm_mmu_page *prev_root,
> - bool shared, bool only_valid)
> + bool only_valid)
> {
> struct kvm_mmu_page *next_root;
>
> + /*
> + * While the roots themselves are RCU-protected, fields such as
> + * role.invalid are protected by mmu_lock.
> + */
> + lockdep_assert_held(&kvm->mmu_lock);
> +
> rcu_read_lock();
>
> if (prev_root)
> @@ -132,7 +141,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> rcu_read_unlock();
>
> if (prev_root)
> - kvm_tdp_mmu_put_root(kvm, prev_root, shared);
> + kvm_tdp_mmu_put_root(kvm, prev_root);
>
> return next_root;
> }
> @@ -144,13 +153,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> * recent root. (Unless keeping a live reference is desirable.)
> *
> * If shared is set, this function is operating under the MMU lock in read
> - * mode. In the unlikely event that this thread must free a root, the lock
> - * will be temporarily dropped and reacquired in write mode.
> + * mode.
> */
> #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
> - for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
> - _root; \
> - _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
> + for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
> + _root; \
> + _root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
> if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
> kvm_mmu_page_as_id(_root) != _as_id) { \
> } else
> @@ -159,9 +167,9 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
> __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
>
> #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
> - for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
> - _root; \
> - _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
> + for (_root = tdp_mmu_next_root(_kvm, NULL, false); \
> + _root; \
> + _root = tdp_mmu_next_root(_kvm, _root, false)) \
> if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
> } else
>
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
> index 733a3aef3a96..20d97aa46c49 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.h
> +++ b/arch/x86/kvm/mmu/tdp_mmu.h
> @@ -17,8 +17,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
> return refcount_inc_not_zero(&root->tdp_mmu_root_count);
> }
>
> -void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
> - bool shared);
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
>
> bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
> bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);


I don't know all of the details of the KVM MMU, so I might have missed
something; it's an area I still need to get back to reviewing.

Reviewed-by: Maxim Levitsky <[email protected]>


Best regards,
Maxim Levitsky

2023-09-29 22:48:14

by Sean Christopherson

Subject: Re: [PATCH 1/3] KVM: x86/mmu: remove unnecessary "bool shared" argument from functions

On Thu, Sep 28, 2023, Paolo Bonzini wrote:
> Neither tdp_mmu_next_root nor kvm_tdp_mmu_put_root needs to know
> whether the lock is taken for read or write. Either way, protection
> is achieved via RCU and tdp_mmu_pages_lock. Remove the argument
> and just assert that the lock is held.
>
> Signed-off-by: Paolo Bonzini <[email protected]>
> ---
> +void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
> {
> - kvm_lockdep_assert_mmu_lock_held(kvm, shared);
> + /*
> + * Either read or write is okay, but the lock is needed because
> + * writers might not take tdp_mmu_pages_lock.
> + */

Nit, I'd prefer to say mmu_lock instead of "the lock", and be very explicit about
writers not needing to take tdp_mmu_pages_lock, e.g.

/*
* Either read or write is okay, but mmu_lock must be held as writers
* are not required to take tdp_mmu_pages_lock.
*/
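
With that wording folded in, the result would presumably look like
(a sketch):

	void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
	{
		/*
		 * Either read or write is okay, but mmu_lock must be held as
		 * writers are not required to take tdp_mmu_pages_lock.
		 */
		lockdep_assert_held(&kvm->mmu_lock);
		...
	}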


> + lockdep_assert_held(&kvm->mmu_lock);
>
> if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
> return;