2023-09-14 15:48:30

by Paul Durrant

Subject: [PATCH 2/8] KVM: pfncache: add a mark-dirty helper

From: Paul Durrant <[email protected]>

At the moment pages are marked dirty by open-coded calls to
mark_page_dirty_in_slot(), directly dereferencing the gpa and memslot
from the cache. After a subsequent patch these may not always be set,
so add a helper now so that callers are insulated from needing to know
about this detail.

Signed-off-by: Paul Durrant <[email protected]>
---
Cc: Sean Christopherson <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: David Woodhouse <[email protected]>
Cc: [email protected]
---
arch/x86/kvm/x86.c | 2 +-
arch/x86/kvm/xen.c | 13 ++++++-------
include/linux/kvm_host.h | 7 +++++++
virt/kvm/pfncache.c | 6 ++++++
4 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c9c81e82e65..d669a8801265 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3137,7 +3137,7 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,

guest_hv_clock->version = ++vcpu->hv_clock.version;

- mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+ kvm_gpc_mark_dirty(gpc);
read_unlock_irqrestore(&gpc->lock, flags);

trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 40edf4d1974c..33fddd29824b 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
smp_wmb();
}

- if (user_len2)
+ if (user_len2) {
+ kvm_gpc_mark_dirty(gpc2);
read_unlock(&gpc2->lock);
+ }

+ kvm_gpc_mark_dirty(gpc1);
read_unlock_irqrestore(&gpc1->lock, flags);
-
- mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
- if (user_len2)
- mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
}

void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -543,13 +542,13 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
: "0" (evtchn_pending_sel32));
WRITE_ONCE(vi->evtchn_upcall_pending, 1);
}
+
+ kvm_gpc_mark_dirty(gpc);
read_unlock_irqrestore(&gpc->lock, flags);

/* For the per-vCPU lapic vector, deliver it as MSI. */
if (v->arch.xen.upcall_vector)
kvm_xen_inject_vcpu_vector(v);
-
- mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fb6c6109fdca..c71e8fbccaaf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1367,6 +1367,13 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
*/
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);

+/**
+ * kvm_gpc_mark_dirty - mark a cached page as dirty.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ */
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc);
+
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 0f36acdf577f..b68ed7fa56a2 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -386,6 +386,12 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);

+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
+{
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_mark_dirty);
+
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
struct kvm *kvm = gpc->kvm;
--
2.39.2


2023-09-14 19:46:37

by David Woodhouse

Subject: Re: [PATCH 2/8] KVM: pfncache: add a mark-dirty helper

On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
> --- a/arch/x86/kvm/xen.c
> +++ b/arch/x86/kvm/xen.c
> @@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
>                 smp_wmb();
>         }
>  
> -       if (user_len2)
> +       if (user_len2) {
> +               kvm_gpc_mark_dirty(gpc2);
>                 read_unlock(&gpc2->lock);
> +       }
>  
> +       kvm_gpc_mark_dirty(gpc1);
>         read_unlock_irqrestore(&gpc1->lock, flags);
> -
> -       mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
> -       if (user_len2)
> -               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
>  }
>  
>  void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)

ISTR there was a reason why the mark_page_dirty_in_slot() was called
*after* unlocking. Although now I say it, that seems wrong... is that
because the spinlock is only protecting the uHVA→kHVA mapping, while
the memslot/gpa are going to remain valid even after unlock, because
those are protected by sRCU?



2023-09-14 22:31:10

by Paul Durrant

Subject: Re: [PATCH 2/8] KVM: pfncache: add a mark-dirty helper

On 14/09/2023 10:21, David Woodhouse wrote:
> On Thu, 2023-09-14 at 08:49 +0000, Paul Durrant wrote:
>> --- a/arch/x86/kvm/xen.c
>> +++ b/arch/x86/kvm/xen.c
>> @@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
>>                 smp_wmb();
>>         }
>>
>> -       if (user_len2)
>> +       if (user_len2) {
>> +               kvm_gpc_mark_dirty(gpc2);
>>                 read_unlock(&gpc2->lock);
>> +       }
>>
>> +       kvm_gpc_mark_dirty(gpc1);
>>         read_unlock_irqrestore(&gpc1->lock, flags);
>> -
>> -       mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
>> -       if (user_len2)
>> -               mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
>>  }
>>
>>  void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
>
> ISTR there was a reason why the mark_page_dirty_in_slot() was called
> *after* unlocking. Although now I say it, that seems wrong... is that
> because the spinlock is only protecting the uHVA→kHVA mapping, while
> the memslot/gpa are going to remain valid even after unlock, because
> those are protected by sRCU?

Without the lock you could see an inconsistent GPA and memslot, so I
think you could theoretically calculate a bogus rel_gfn and walk off the
end of the dirty bitmap. Hence moving the call inside the lock while I
was in the neighbourhood seemed like a good idea. I could call it out in
the commit message if you'd like.
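
For illustration, the dirty-tracking path essentially indexes the
slot's dirty bitmap by the gfn's offset within that slot; roughly (a
paraphrased sketch, not the exact upstream code):

	/* Paraphrased sketch of mark_page_dirty_in_slot()'s bitmap path */
	unsigned long rel_gfn = gfn - memslot->base_gfn;

	if (memslot->dirty_bitmap)
		set_bit_le(rel_gfn, memslot->dirty_bitmap);

If gpc->gpa and gpc->memslot are sampled after unlock, a concurrent
refresh could mean the gfn is computed from the new mapping while
memslot still points at the old slot, so rel_gfn may exceed the slot
size and set_bit_le() writes beyond the end of the bitmap.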

Paul