Add a dedicated helper for zapping a TDP MMU root, and use it in the three
flows that do "zap_all" and intentionally do not do a TLB flush if SPTEs
are zapped (zapping an entire root is safe if and only if it cannot be in
use by any vCPU). Because a TLB flush is never required, unconditionally
pass "false" to tdp_mmu_iter_cond_resched() when potentially yielding.
Opportunistically document why KVM must not yield when zapping roots that
are being zapped by kvm_tdp_mmu_put_root(), i.e. roots whose refcount has
reached zero, and further harden the flow to detect improper KVM behavior
with respect to roots that are supposed to be unreachable.

In addition to hardening zapping of roots, isolating zapping of roots
will allow future simplification of zap_gfn_range() by having it zap only
leaf SPTEs, and by removing its tricky "zap all" heuristic. By having
all paths that truly need to free _all_ SPs flow through the dedicated
root zapper, the generic zapper can be freed of those concerns.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/x86/kvm/mmu/tdp_mmu.c | 98 +++++++++++++++++++++++++++++++-------
1 file changed, 82 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 87706e9cc6f3..c5df9a552470 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -56,10 +56,6 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
rcu_barrier();
}
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield, bool flush,
- bool shared);
-
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
free_page((unsigned long)sp->spt);
@@ -82,6 +78,9 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
tdp_mmu_free_sp(sp);
}
+static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ bool shared);
+
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared)
{
@@ -104,7 +103,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
* intermediate paging structures, that may be zapped, as such entries
* are associated with the ASID on both VMX and SVM.
*/
- (void)zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
+ tdp_mmu_zap_root(kvm, root, shared);
call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
@@ -751,6 +750,76 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
return iter->yielded;
}
+static inline gfn_t tdp_mmu_max_gfn_host(void)
+{
+ /*
+ * Bound TDP MMU walks at host.MAXPHYADDR, guest accesses beyond that
+ * will hit a #PF(RSVD) and never hit an EPT Violation/Misconfig / #NPF,
+ * and so KVM will never install a SPTE for such addresses.
+ */
+ return 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+}
+
+static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ bool shared)
+{
+ bool root_is_unreachable = !refcount_read(&root->tdp_mmu_root_count);
+ struct tdp_iter iter;
+
+ gfn_t end = tdp_mmu_max_gfn_host();
+ gfn_t start = 0;
+
+ kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+
+ rcu_read_lock();
+
+ /*
+ * No need to try to step down in the iterator when zapping an entire
+ * root, zapping an upper-level SPTE will recurse on its children.
+ */
+ for_each_tdp_pte_min_level(iter, root, root->role.level, start, end) {
+retry:
+ /*
+ * Yielding isn't allowed when zapping an unreachable root as
+ * the root won't be processed by mmu_notifier callbacks. When
+ * handling an unmap/release mmu_notifier command, KVM must
+ * drop all references to relevant pages prior to completing
+ * the callback. Dropping mmu_lock can result in zapping SPTEs
+ * for an unreachable root after a relevant callback completes,
+ * which leads to use-after-free as zapping a SPTE triggers
+ * "writeback" of dirty/accessed bits to the SPTE's associated
+ * struct page.
+ */
+ if (!root_is_unreachable &&
+ tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
+ continue;
+
+ if (!is_shadow_present_pte(iter.old_spte))
+ continue;
+
+ if (!shared) {
+ tdp_mmu_set_spte(kvm, &iter, 0);
+ } else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) {
+ /*
+ * cmpxchg() shouldn't fail if the root is unreachable.
+ * Retry so as not to leak the page and its children.
+ */
+ WARN_ONCE(root_is_unreachable,
+ "Contended TDP MMU SPTE in unreachable root.");
+ goto retry;
+ }
+
+ /*
+ * WARN if the root is invalid and is unreachable, all SPTEs
+ * should've been zapped by kvm_tdp_mmu_zap_invalidated_roots(),
+ * and inserting new SPTEs under an invalid root is a KVM bug.
+ */
+ WARN_ON_ONCE(root_is_unreachable && root->role.invalid);
+ }
+
+ rcu_read_unlock();
+}
+
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
u64 old_spte;
@@ -799,8 +868,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, bool can_yield, bool flush,
bool shared)
{
- gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
- bool zap_all = (start == 0 && end >= max_gfn_host);
+ bool zap_all = (start == 0 && end >= tdp_mmu_max_gfn_host());
struct tdp_iter iter;
/*
@@ -809,12 +877,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
*/
int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
- /*
- * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
- * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
- * and so KVM will never install a SPTE for such addresses.
- */
- end = min(end, max_gfn_host);
+ end = min(end, tdp_mmu_max_gfn_host());
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
@@ -874,6 +937,7 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
+ struct kvm_mmu_page *root;
int i;
/*
@@ -881,8 +945,10 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
* is being destroyed or the userspace VMM has exited. In both cases,
* KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
*/
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- (void)kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, false);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root, i, false)
+ tdp_mmu_zap_root(kvm, root, false);
+ }
}
/*
@@ -908,7 +974,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
* will still flush on yield, but that's a minor performance
* blip and not a functional issue.
*/
- (void)zap_gfn_range(kvm, root, 0, -1ull, true, false, true);
+ tdp_mmu_zap_root(kvm, root, true);
/*
* Put the reference acquired in kvm_tdp_mmu_invalidate_roots().
--
2.35.1.574.g5d30c73bfb-goog
On Fri, Feb 25, 2022 at 4:16 PM Sean Christopherson <[email protected]> wrote:
>
> Add a dedicated helper for zapping a TDP MMU root, and use it in the three
> flows that do "zap_all" and intentionally do not do a TLB flush if SPTEs
> are zapped (zapping an entire root is safe if and only if it cannot be in
> use by any vCPU). Because a TLB flush is never required, unconditionally
> pass "false" to tdp_mmu_iter_cond_resched() when potentially yielding.
>
> Opportunistically document why KVM must not yield when zapping roots that
> are being zapped by kvm_tdp_mmu_put_root(), i.e. roots whose refcount has
> reached zero, and further harden the flow to detect improper KVM behavior
> with respect to roots that are supposed to be unreachable.
>
> In addition to hardening zapping of roots, isolating zapping of roots
> will allow future simplification of zap_gfn_range() by having it zap only
> leaf SPTEs, and by removing its tricky "zap all" heuristic. By having
> all paths that truly need to free _all_ SPs flow through the dedicated
> root zapper, the generic zapper can be freed of those concerns.
>
> Signed-off-by: Sean Christopherson <[email protected]>

Reviewed-by: Ben Gardon <[email protected]>

Nice!
On Sat, Feb 26, 2022, Sean Christopherson wrote:
> + /*
> + * No need to try to step down in the iterator when zapping an entire
> + * root, zapping an upper-level SPTE will recurse on its children.
> + */
> + for_each_tdp_pte_min_level(iter, root, root->role.level, start, end) {
> +retry:
> + /*
> + * Yielding isn't allowed when zapping an unreachable root as
> + * the root won't be processed by mmu_notifier callbacks. When
> + * handling an unmap/release mmu_notifier command, KVM must
> + * drop all references to relevant pages prior to completing
> + * the callback. Dropping mmu_lock can result in zapping SPTEs
> + * for an unreachable root after a relevant callback completes,
> + * which leads to use-after-free as zapping a SPTE triggers
> + * "writeback" of dirty/accessed bits to the SPTE's associated
> + * struct page.
> + */

I have a quick question here: when the roots are unreachable, we can't
yield; I understand that after reading the comment. However, what if
there are so many SPTEs that need to be zapped that yielding is
required? In this case, I guess we will get an RCU warning, which is
unavoidable, right?
On Thu, Mar 03, 2022, Mingwei Zhang wrote:
> I have a quick question here: when the roots are unreachable, we can't
> yield; I understand that after reading the comment. However, what if
> there are so many SPTEs that need to be zapped that yielding is
> required? In this case, I guess we will get an RCU warning, which is
> unavoidable, right?

I will take that back. I think the subsequent patches solve the problem
using two passes.
On Thu, Mar 03, 2022, Mingwei Zhang wrote:
> On Thu, Mar 03, 2022, Mingwei Zhang wrote:
> > > + /*
> > > + * No need to try to step down in the iterator when zapping an entire
> > > + * root, zapping an upper-level SPTE will recurse on its children.
> > > + */
> > > + for_each_tdp_pte_min_level(iter, root, root->role.level, start, end) {
> > > +retry:
> > > + /*
> > > + * Yielding isn't allowed when zapping an unreachable root as
> > > + * the root won't be processed by mmu_notifier callbacks. When
> > > + * handling an unmap/release mmu_notifier command, KVM must
> > > + * drop all references to relevant pages prior to completing
> > > + * the callback. Dropping mmu_lock can result in zapping SPTEs
> > > + * for an unreachable root after a relevant callback completes,
> > > + * which leads to use-after-free as zapping a SPTE triggers
> > > + * "writeback" of dirty/accessed bits to the SPTE's associated
> > > + * struct page.
> > > + */
> >
> > I have a quick question here: when the roots are unreachable, we can't
> > yield; I understand that after reading the comment. However, what if
> > there are so many SPTEs that need to be zapped that yielding is
> > required? In this case, I guess we will get an RCU warning, which is
> > unavoidable, right?
>
> I will take that back. I think the subsequent patches solve the problem
> using two passes.

Yes, but it's worth noting that the yielding problem is also solved by keeping
roots reachable while they're being zapped (also done in later patches). That
way if a mmu_notifier event comes along, it can guarantee the SPTEs it cares about
are zapped (and their metadata flushed) even if the MMU root is no longer usable
by a vCPU.
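
For completeness, this is roughly the shape things take later in the series
(hand-written sketch only: the __tdp_mmu_zap_root() helper, the PG_LEVEL_1G
split point, and the RCU/locking details here are illustrative assumptions,
not the actual later patches). Once a dying root stays reachable, yielding is
always safe, and doing the zap in two passes bounds how much a single SPTE
removal has to tear down under RCU:

/*
 * Illustrative sketch only: __tdp_mmu_zap_root() is a made-up helper and
 * PG_LEVEL_1G is just an example split point, i.e. this is the shape of
 * the two-pass idea, not the code from the later patches.
 */
static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
                               bool shared, int zap_level)
{
        struct tdp_iter iter;

        gfn_t end = tdp_mmu_max_gfn_host();
        gfn_t start = 0;

        for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
retry:
                /* Yielding is safe once dying roots remain reachable. */
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
                        continue;

                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                /*
                 * Zap only at the target level on this pass; removing a
                 * SPTE at zap_level recursively frees everything below it.
                 */
                if (iter.level > zap_level)
                        continue;

                if (!shared)
                        tdp_mmu_set_spte(kvm, &iter, 0);
                else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
                        goto retry;
        }
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
                             bool shared)
{
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);

        rcu_read_lock();

        /*
         * Pass 1 zaps at an intermediate level so that no single SPTE
         * removal tears down more than a bounded chunk of the tree, pass 2
         * mops up the handful of remaining upper-level SPTEs.  Yielding
         * between iterations keeps the RCU read-side sections short.
         */
        __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
        __tdp_mmu_zap_root(kvm, root, shared, root->role.level);

        rcu_read_unlock();
}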