2024-01-15 12:59:16

by Paul Durrant

Subject: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

From: Paul Durrant <[email protected]>

Some pfncache pages may actually be overlays on guest memory that have a
fixed HVA within the VMM. It's pointless to invalidate such cached
mappings if the overlay is moved so allow a cache to be activated directly
with the HVA to cater for such cases. A subsequent patch will make use
of this facility.
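
As an illustration (not part of this patch), a caller with a page-aligned
overlay inside the VMM could activate a cache as follows; the helper below
is hypothetical and only sketches the intended usage:

  /* Hypothetical caller: cache a VMM-fixed overlay page by HVA. */
  static int cache_overlay_page(struct gfn_to_pfn_cache *gpc,
                                unsigned long vmm_hva)
  {
          /*
           * Assumes vmm_hva is page-aligned; activation fails with
           * -EINVAL if the mapping would cross a page boundary.
           */
          return kvm_gpc_activate_hva(gpc, vmm_hva, PAGE_SIZE);
  }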

Signed-off-by: Paul Durrant <[email protected]>
Reviewed-by: David Woodhouse <[email protected]>
---
Cc: Sean Christopherson <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: David Woodhouse <[email protected]>

v11:
- Fixed kvm_gpc_check() to ignore memslot generation if the cache is not
activated with a GPA. (This breakage occurred during the re-work for v8).

v9:
- Pass both GPA and HVA into __kvm_gpc_refresh() rather than overloading
the address parameter and using a bool flag to indicate what it is.

v8:
- Re-worked to avoid messing with struct gfn_to_pfn_cache.
---
include/linux/kvm_host.h | 20 +++++++++++++++++++-
virt/kvm/pfncache.c | 40 +++++++++++++++++++++++++++++++---------
2 files changed, 50 insertions(+), 10 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f2354f808d04..7994c4d16783 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1344,6 +1344,22 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
*/
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);

+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @hva: userspace virtual address to map.
+ * @len: sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
/**
* kvm_gpc_check - check validity of a gfn_to_pfn_cache.
*
@@ -1398,7 +1414,9 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
{
lockdep_assert_held(&gpc->lock);
- mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+
+ if (gpc->gpa != KVM_XEN_INVALID_GPA)
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 97eec8ee3449..ae822bff812f 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -48,7 +48,10 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
if (!gpc->active)
return false;

- if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+ if (gpc->gpa != KVM_XEN_INVALID_GPA && gpc->generation != slots->generation)
+ return false;
+
+ if (kvm_is_error_hva(gpc->uhva))
return false;

if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
@@ -209,11 +212,13 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
return -EFAULT;
}

-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
- unsigned long page_offset = offset_in_page(gpa);
+ unsigned long page_offset = (gpa != KVM_XEN_INVALID_GPA) ?
+ offset_in_page(gpa) :
+ offset_in_page(uhva);
bool unmap_old = false;
unsigned long old_uhva;
kvm_pfn_t old_pfn;
@@ -246,9 +251,15 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

- /* Refresh the userspace HVA if necessary */
- if (gpc->gpa != gpa || gpc->generation != slots->generation ||
- kvm_is_error_hva(gpc->uhva)) {
+ if (gpa == KVM_XEN_INVALID_GPA) {
+ gpc->gpa = KVM_XEN_INVALID_GPA;
+ gpc->uhva = PAGE_ALIGN_DOWN(uhva);
+
+ if (gpc->uhva != old_uhva)
+ hva_change = true;
+ } else if (gpc->gpa != gpa ||
+ gpc->generation != slots->generation ||
+ kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);

gpc->gpa = gpa;
@@ -319,7 +330,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,

int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
- return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+ return __kvm_gpc_refresh(gpc, gpc->gpa, gpc->uhva, len);
}

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -332,7 +343,8 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
gpc->uhva = KVM_HVA_ERR_BAD;
}

-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+ unsigned long len)
{
struct kvm *kvm = gpc->kvm;

@@ -353,7 +365,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+ return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+ return __kvm_gpc_activate(gpc, KVM_XEN_INVALID_GPA, uhva, len);
}

void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
--
2.39.2



2024-02-07 04:03:37

by Sean Christopherson

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

+s390 folks (question on kvm_is_error_gpa() for ya)

On Mon, Jan 15, 2024, Paul Durrant wrote:
> @@ -1398,7 +1414,9 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
> static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
> {
> lockdep_assert_held(&gpc->lock);
> - mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
> +
> + if (gpc->gpa != KVM_XEN_INVALID_GPA)

KVM_XEN_INVALID_GPA absolutely doesn't belong in common code. Not to mention
that it will break when Paolo (rightly) moves it to an x86 header.

https://lore.kernel.org/all/[email protected]

> + mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
> }
>
> void kvm_sigset_activate(struct kvm_vcpu *vcpu);
> diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
> index 97eec8ee3449..ae822bff812f 100644
> --- a/virt/kvm/pfncache.c
> +++ b/virt/kvm/pfncache.c
> @@ -48,7 +48,10 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
> if (!gpc->active)
> return false;
>
> - if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
> + if (gpc->gpa != KVM_XEN_INVALID_GPA && gpc->generation != slots->generation)

This needs a comment. I know what it's doing, but it wasn't obvious at first
glance, and it definitely won't be intuitive for readers that aren't intimately
familiar with memslots.

> + return false;
> +
> + if (kvm_is_error_hva(gpc->uhva))
> return false;
>
> if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
> @@ -209,11 +212,13 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
> return -EFAULT;
> }
>
> -static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
> +static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
> unsigned long len)
> {
> struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
> - unsigned long page_offset = offset_in_page(gpa);
> + unsigned long page_offset = (gpa != KVM_XEN_INVALID_GPA) ?
> + offset_in_page(gpa) :
> + offset_in_page(uhva);

This formatting is funky. I also think it would be worth adding a helper to pair
with kvm_is_error_hva().

But! kvm_is_error_gpa() already exists, and it very, very sneakily does a memslot
lookup and checks for a valid HVA.

s390 people, any objection to renaming kvm_is_error_gpa() to something like
kvm_gpa_has_memslot() or kvm_gpa_is_in_memslot()? s390 is the only code that
uses the existing helper.

That would both free up the name to pair with kvm_is_error_hva(), and would
make it obvious what the helper does; I was quite surprised that "error" means
"isn't covered by a valid memslot".

Back to this code, then we can have a slightly cleaner:

unsigned long page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
offset_in_page(gpa);


And I think it's worth asserting that exactly _one_ of GPA or HVA is valid, e.g.
to ensure KVM doesn't end up with botched offsets, and to make it a bit more
clear what's going on.


if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
return -EINVAL;
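
Putting the two together, the top of __kvm_gpc_refresh() could then look
something like this (a sketch, assuming kvm_is_error_gpa() is reworked into
a simple INVALID_GPA comparison):

  static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
                               unsigned long uhva, unsigned long len)
  {
          unsigned long page_offset;

          /* Exactly one of GPA or HVA must be valid. */
          if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
                  return -EINVAL;

          page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
                                                offset_in_page(gpa);
          ...
  }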

2024-02-07 04:15:40

by David Woodhouse

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

On Tue, 2024-02-06 at 20:03 -0800, Sean Christopherson wrote:
> +s390 folks (question on kvm_is_error_gpa() for ya)
>
> On Mon, Jan 15, 2024, Paul Durrant wrote:
> > @@ -1398,7 +1414,9 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
> >   static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
> >   {
> >         lockdep_assert_held(&gpc->lock);
> > -       mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
> > +
> > +       if (gpc->gpa != KVM_XEN_INVALID_GPA)
>
> KVM_XEN_INVALID_GPA absolutely doesn't belong in common code.  Not to mention
> that it will break when Paolo (rightly) moves it to an x86 header.
>
> https://lore.kernel.org/all/[email protected]

We can use plain INVALID_GPA for that, I think. ISTR the reason we have
a separate KVM_XEN_INVALID_GPA is because that's a userspace API.
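
A sketch of what that would look like in kvm_gpc_mark_dirty(), assuming
INVALID_GPA is (or is made) visible to common code:

  static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
  {
          lockdep_assert_held(&gpc->lock);

          /* Skip dirty tracking for caches activated by HVA only. */
          if (gpc->gpa != INVALID_GPA)
                  mark_page_dirty_in_slot(gpc->kvm, gpc->memslot,
                                          gpc->gpa >> PAGE_SHIFT);
  }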

...

> But! kvm_is_error_gpa() already exists, and it very, very sneakily
> does a memslot lookup and checks for a valid HVA.

Hm, that doesn't sound as fast as a simple comparison. We also can't do
it from kvm_gpc_check(), can we?



2024-02-14 15:23:41

by Paul Durrant

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

On 07/02/2024 04:03, Sean Christopherson wrote:
> +s390 folks (question on kvm_is_error_gpa() for ya)
>
> On Mon, Jan 15, 2024, Paul Durrant wrote:
>> @@ -1398,7 +1414,9 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
>> static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
>> {
>> lockdep_assert_held(&gpc->lock);
>> - mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
>> +
>> + if (gpc->gpa != KVM_XEN_INVALID_GPA)
>
> KVM_XEN_INVALID_GPA absolutely doesn't belong in common code. Not to mention
> that it will break when Paolo (rightly) moves it to an x86 header.
>
> https://lore.kernel.org/all/[email protected]
>
>> + mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
>> }
>>
>> void kvm_sigset_activate(struct kvm_vcpu *vcpu);
>> diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
>> index 97eec8ee3449..ae822bff812f 100644
>> --- a/virt/kvm/pfncache.c
>> +++ b/virt/kvm/pfncache.c
>> @@ -48,7 +48,10 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
>> if (!gpc->active)
>> return false;
>>
>> - if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
>> + if (gpc->gpa != KVM_XEN_INVALID_GPA && gpc->generation != slots->generation)
>
> This needs a comment. I know what it's doing, but it wasn't obvious at first
> glance, and it definitely won't be intuitive for readers that aren't intimately
> familiar with memslots.
>
>> + return false;
>> +
>> + if (kvm_is_error_hva(gpc->uhva))
>> return false;
>>
>> if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
>> @@ -209,11 +212,13 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
>> return -EFAULT;
>> }
>>
>> -static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
>> +static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
>> unsigned long len)
>> {
>> struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
>> - unsigned long page_offset = offset_in_page(gpa);
>> + unsigned long page_offset = (gpa != KVM_XEN_INVALID_GPA) ?
>> + offset_in_page(gpa) :
>> + offset_in_page(uhva);
>
> This formatting is funky. I also think it would be worth adding a helper to pair
> with kvm_is_error_hva().
>
> But! kvm_is_error_gpa() already exists, and it very, very sneakily does a memslot
> lookup and checks for a valid HVA.
>
> s390 people, any objection to renaming kvm_is_error_gpa() to something like
> kvm_gpa_has_memslot() or kvm_gpa_is_in_memslot()? s390 is the only code that
> uses the existing helper.
>
> That would both free up the name to pair with kvm_is_error_hva(), and would
> make it obvious what the helper does; I was quite surprised that "error" means
> "isn't covered by a valid memslot".
>

Seemingly no response to this; I'll define a local helper rather than
re-working the open-coded tests to check against INVALID_GPA. This can
then be trivially replaced if need be.
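
For instance, the local helper could be as small as the following (the name
is illustrative):

  /* Local stand-in until the kvm_is_error_gpa() name is freed up. */
  static bool kvm_gpc_is_error_gpa(gpa_t gpa)
  {
          return gpa == INVALID_GPA;
  }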

> Back to this code, then we can have a slightly cleaner:
>
> unsigned long page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
> offset_in_page(gpa);
>
>
> And I think it's worth asserting that exactly _one_ of GPA or HVA is valid, e.g.
> to ensure KVM doesn't end up with botched offsets, and to make it a bit more
> clear what's going on.
>
>
> if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
> return -EINVAL;

Sure.


2024-02-14 16:03:01

by Sean Christopherson

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

On Tue, Feb 06, 2024, David Woodhouse wrote:
> On Tue, 2024-02-06 at 20:03 -0800, Sean Christopherson wrote:
> > +s390 folks (question on kvm_is_error_gpa() for ya)
> >
> > On Mon, Jan 15, 2024, Paul Durrant wrote:
> > > @@ -1398,7 +1414,9 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
> > >   static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
> > >   {
> > >         lockdep_assert_held(&gpc->lock);
> > > -       mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
> > > +
> > > +       if (gpc->gpa != KVM_XEN_INVALID_GPA)
> >
> > KVM_XEN_INVALID_GPA absolutely doesn't belong in common code.  Not to mention
> > that it will break when Paolo (rightly) moves it to an x86 header.
> >
> > https://lore.kernel.org/all/[email protected]
>
> We can use plain INVALID_GPA for that, I think. ISTR the reason we have
> a separate KVM_XEN_INVALID_GPA is because that's a userspace API.
>
> ...
>
> > But! kvm_is_error_gpa() already exists, and it very, very sneakily
> > does a memslot lookup and checks for a valid HVA.
>
> Hm, that doesn't sound as fast as a simple comparison. We also can't do
> it from kvm_gpc_check(), can we?

You snipped the part where I suggested renaming the existing kvm_is_error_gpa().

I am suggesting we do the below (and obviously rename the s390 usage, too), and
then the gpc code can use kvm_is_error_gpa().

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bbfefd7e612f..e1df988e4d57 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -148,6 +148,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)

#endif

+static inline bool kvm_is_error_gpa(gpa_t gpa)
+{
+ return gpa == INVALID_GPA;
+}
+
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
@@ -1787,7 +1792,7 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}

-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline bool kvm_gpa_is_in_memslot(struct kvm *kvm, gpa_t gpa)
{
unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));


2024-02-14 16:09:32

by Paul Durrant

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

On 14/02/2024 16:01, Sean Christopherson wrote:
> On Tue, Feb 06, 2024, David Woodhouse wrote:
>> On Tue, 2024-02-06 at 20:03 -0800, Sean Christopherson wrote:
>>> +s390 folks (question on kvm_is_error_gpa() for ya)
>>>
>>> On Mon, Jan 15, 2024, Paul Durrant wrote:
>>>> @@ -1398,7 +1414,9 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
>>>>   static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
>>>>   {
>>>>         lockdep_assert_held(&gpc->lock);
>>>> -       mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
>>>> +
>>>> +       if (gpc->gpa != KVM_XEN_INVALID_GPA)
>>>
>>> KVM_XEN_INVALID_GPA absolutely doesn't belong in common code.  Not to mention
>>> that it will break when Paolo (rightly) moves it to an x86 header.
>>>
>>> https://lore.kernel.org/all/[email protected]
>>
>> We can use plain INVALID_GPA for that, I think. ISTR the reason we have
>> a separate KVM_XEN_INVALID_GPA is because that's a userspace API.
>>
>> ...
>>
>>> But! kvm_is_error_gpa() already exists, and it very, very sneakily
>>> does a memslot lookup and checks for a valid HVA.
>>
>> Hm, that doesn't sound as fast as a simple comparison. We also can't do
>> it from kvm_gpc_check(), can we?
>
> You snipped the part where I suggested renaming the existing kvm_is_error_gpa().
>
> I am suggesting we do the below (and obviously rename the s390 usage, too), and
> then the gpc code can use kvm_is_error_gpa().
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index bbfefd7e612f..e1df988e4d57 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -148,6 +148,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
>
> #endif
>
> +static inline bool kvm_is_error_gpa(gpa_t gpa)
> +{
> + return gpa == INVALID_GPA;
> +}
> +

Are you ok with a local kvm_gpc_is_error_gpa() or somesuch until there
is agreement with s390?

Paul

> #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
>
> static inline bool is_error_page(struct page *page)
> @@ -1787,7 +1792,7 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
> return (hpa_t)pfn << PAGE_SHIFT;
> }
>
> -static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
> +static inline bool kvm_gpa_is_in_memslot(struct kvm *kvm, gpa_t gpa)
> {
> unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
>


2024-02-14 16:20:36

by Sean Christopherson

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

On Wed, Feb 14, 2024, Paul Durrant wrote:
> On 07/02/2024 04:03, Sean Christopherson wrote:
> > +s390 folks (question on kvm_is_error_gpa() for ya)
> > But! kvm_is_error_gpa() already exists, and it very, very sneakily does a memslot
> > lookup and checks for a valid HVA.
> >
> > s390 people, any objection to renaming kvm_is_error_gpa() to something like
> > kvm_gpa_has_memslot() or kvm_gpa_is_in_memslot()? s390 is the only code that
> > uses the existing helper.
> >
> > That would both free up the name to pair with kvm_is_error_hva(), and would
> > make it obvious what the helper does; I was quite surprised that "error" means
> > "isn't covered by a valid memslot".
> >
>
> Seemingly no response to this; I'll define a local helper rather than
> re-working the open-coded tests to check against INVALID_GPA. This can then
> be trivially replaced if need be.

How about we force a decision with a patch? This should be easy enough to slot
in, and I would be quite surprised if s390 is overly attached to kvm_is_error_gpa().

From: Sean Christopherson <[email protected]>
Date: Wed, 14 Feb 2024 08:05:49 -0800
Subject: [PATCH] KVM: s390: Refactor kvm_is_error_gpa() into
kvm_is_gpa_in_memslot()

Rename kvm_is_error_gpa() to kvm_is_gpa_in_memslot() and invert the
polarity accordingly in order to (a) free up kvm_is_error_gpa() to match
with kvm_is_error_{hva,page}(), and (b) to make it more obvious that the
helper is doing a memslot lookup, i.e. not simply checking for INVALID_GPA.

No functional change intended.

Signed-off-by: Sean Christopherson <[email protected]>
---
arch/s390/kvm/diag.c | 2 +-
arch/s390/kvm/gaccess.c | 14 +++++++-------
arch/s390/kvm/kvm-s390.c | 4 ++--
arch/s390/kvm/priv.c | 4 ++--
arch/s390/kvm/sigp.c | 2 +-
include/linux/kvm_host.h | 4 ++--
6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3c65b8258ae6..2a32438e09ce 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -102,7 +102,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

- if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

vcpu->arch.pfault_token = parm.token_addr;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 5bfcc50c1a68..415c99649e43 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -664,7 +664,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
case ASCE_TYPE_REGION1: {
union region1_table_entry rfte;

- if (kvm_is_error_gpa(vcpu->kvm, ptr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &rfte.val))
return -EFAULT;
@@ -682,7 +682,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
case ASCE_TYPE_REGION2: {
union region2_table_entry rste;

- if (kvm_is_error_gpa(vcpu->kvm, ptr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &rste.val))
return -EFAULT;
@@ -700,7 +700,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
case ASCE_TYPE_REGION3: {
union region3_table_entry rtte;

- if (kvm_is_error_gpa(vcpu->kvm, ptr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &rtte.val))
return -EFAULT;
@@ -728,7 +728,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
case ASCE_TYPE_SEGMENT: {
union segment_table_entry ste;

- if (kvm_is_error_gpa(vcpu->kvm, ptr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &ste.val))
return -EFAULT;
@@ -748,7 +748,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
}
}
- if (kvm_is_error_gpa(vcpu->kvm, ptr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
return PGM_ADDRESSING;
if (deref_table(vcpu->kvm, ptr, &pte.val))
return -EFAULT;
@@ -770,7 +770,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
*prot = PROT_TYPE_IEP;
return PGM_PROTECTION;
}
- if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
return PGM_ADDRESSING;
*gpa = raddr.addr;
return 0;
@@ -957,7 +957,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
return rc;
} else {
gpa = kvm_s390_real_to_abs(vcpu, ga);
- if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
rc = PGM_ADDRESSING;
prot = PROT_NONE;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ea63ac769889..3e5a1d7aa81a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2878,7 +2878,7 @@ static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)

srcu_idx = srcu_read_lock(&kvm->srcu);

- if (kvm_is_error_gpa(kvm, mop->gaddr)) {
+ if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
r = PGM_ADDRESSING;
goto out_unlock;
}
@@ -2940,7 +2940,7 @@ static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *m

srcu_idx = srcu_read_lock(&kvm->srcu);

- if (kvm_is_error_gpa(kvm, mop->gaddr)) {
+ if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
r = PGM_ADDRESSING;
goto out_unlock;
}
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index f875a404a0a0..1be19cc9d73c 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -149,7 +149,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
* first page, since address is 8k aligned and memory pieces are always
* at least 1MB aligned and have at least a size of 1MB.
*/
- if (kvm_is_error_gpa(vcpu->kvm, address))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

kvm_s390_set_prefix(vcpu, address);
@@ -464,7 +464,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);

- if (kvm_is_error_gpa(vcpu->kvm, addr))
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
/*
* We don't expect errors on modern systems, and do not care
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index d9696b530064..55c34cb35428 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -172,7 +172,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
* first page, since address is 8k aligned and memory pieces are always
* at least 1MB aligned and have at least a size of 1MB.
*/
- if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
+ if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INVALID_PARAMETER;
return SIGP_CC_STATUS_STORED;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7e7fd25b09b3..d175b64488ec 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1788,11 +1788,11 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}

-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

- return kvm_is_error_hva(hva);
+ return !kvm_is_error_hva(hva);
}

enum kvm_stat_kind {

base-commit: 687d8f4c3dea0758afd748968d91288220bbe7e3
--


2024-02-14 16:36:34

by Paul Durrant

Subject: Re: [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA

On 14/02/2024 16:20, Sean Christopherson wrote:
> On Wed, Feb 14, 2024, Paul Durrant wrote:
>> On 07/02/2024 04:03, Sean Christopherson wrote:
>>> +s390 folks (question on kvm_is_error_gpa() for ya)
>>> But! kvm_is_error_gpa() already exists, and it very, very sneakily does a memslot
>>> lookup and checks for a valid HVA.
>>>
>>> s390 people, any objection to renaming kvm_is_error_gpa() to something like
>>> kvm_gpa_has_memslot() or kvm_gpa_is_in_memslot()? s390 is the only code that
>>> uses the existing helper.
>>>
>>> That would both free up the name to pair with kvm_is_error_hva(), and would
>>> make it obvious what the helper does; I was quite surprised that "error" means
>>> "isn't covered by a valid memslot".
>>>
>>
>> Seemingly no response to this; I'll define a local helper rather than
>> re-working the open-coded tests to check against INVALID_GPA. This can then
>> be trivially replaced if need be.
>
> How about we force a decision with a patch? This should be easy enough to slot
> in, and I would be quite surprised if s390 is overly attached to kvm_is_error_gpa().
>

Ok. I'll slot that in.

> From: Sean Christopherson <[email protected]>
> Date: Wed, 14 Feb 2024 08:05:49 -0800
> Subject: [PATCH] KVM: s390: Refactor kvm_is_error_gpa() into
> kvm_is_gpa_in_memslot()
>
> Rename kvm_is_error_gpa() to kvm_is_gpa_in_memslot() and invert the
> polarity accordingly in order to (a) free up kvm_is_error_gpa() to match
> with kvm_is_error_{hva,page}(), and (b) to make it more obvious that the
> helper is doing a memslot lookup, i.e. not simply checking for INVALID_GPA.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <[email protected]>
> ---
> arch/s390/kvm/diag.c | 2 +-
> arch/s390/kvm/gaccess.c | 14 +++++++-------
> arch/s390/kvm/kvm-s390.c | 4 ++--
> arch/s390/kvm/priv.c | 4 ++--
> arch/s390/kvm/sigp.c | 2 +-
> include/linux/kvm_host.h | 4 ++--
> 6 files changed, 15 insertions(+), 15 deletions(-)
>
> diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
> index 3c65b8258ae6..2a32438e09ce 100644
> --- a/arch/s390/kvm/diag.c
> +++ b/arch/s390/kvm/diag.c
> @@ -102,7 +102,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
> parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
> return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
>
> - if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
> return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
>
> vcpu->arch.pfault_token = parm.token_addr;
> diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
> index 5bfcc50c1a68..415c99649e43 100644
> --- a/arch/s390/kvm/gaccess.c
> +++ b/arch/s390/kvm/gaccess.c
> @@ -664,7 +664,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
> case ASCE_TYPE_REGION1: {
> union region1_table_entry rfte;
>
> - if (kvm_is_error_gpa(vcpu->kvm, ptr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
> return PGM_ADDRESSING;
> if (deref_table(vcpu->kvm, ptr, &rfte.val))
> return -EFAULT;
> @@ -682,7 +682,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
> case ASCE_TYPE_REGION2: {
> union region2_table_entry rste;
>
> - if (kvm_is_error_gpa(vcpu->kvm, ptr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
> return PGM_ADDRESSING;
> if (deref_table(vcpu->kvm, ptr, &rste.val))
> return -EFAULT;
> @@ -700,7 +700,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
> case ASCE_TYPE_REGION3: {
> union region3_table_entry rtte;
>
> - if (kvm_is_error_gpa(vcpu->kvm, ptr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
> return PGM_ADDRESSING;
> if (deref_table(vcpu->kvm, ptr, &rtte.val))
> return -EFAULT;
> @@ -728,7 +728,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
> case ASCE_TYPE_SEGMENT: {
> union segment_table_entry ste;
>
> - if (kvm_is_error_gpa(vcpu->kvm, ptr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
> return PGM_ADDRESSING;
> if (deref_table(vcpu->kvm, ptr, &ste.val))
> return -EFAULT;
> @@ -748,7 +748,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
> ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
> }
> }
> - if (kvm_is_error_gpa(vcpu->kvm, ptr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
> return PGM_ADDRESSING;
> if (deref_table(vcpu->kvm, ptr, &pte.val))
> return -EFAULT;
> @@ -770,7 +770,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
> *prot = PROT_TYPE_IEP;
> return PGM_PROTECTION;
> }
> - if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
> return PGM_ADDRESSING;
> *gpa = raddr.addr;
> return 0;
> @@ -957,7 +957,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
> return rc;
> } else {
> gpa = kvm_s390_real_to_abs(vcpu, ga);
> - if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
> rc = PGM_ADDRESSING;
> prot = PROT_NONE;
> }
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index ea63ac769889..3e5a1d7aa81a 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -2878,7 +2878,7 @@ static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
>
> srcu_idx = srcu_read_lock(&kvm->srcu);
>
> - if (kvm_is_error_gpa(kvm, mop->gaddr)) {
> + if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
> r = PGM_ADDRESSING;
> goto out_unlock;
> }
> @@ -2940,7 +2940,7 @@ static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *m
>
> srcu_idx = srcu_read_lock(&kvm->srcu);
>
> - if (kvm_is_error_gpa(kvm, mop->gaddr)) {
> + if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
> r = PGM_ADDRESSING;
> goto out_unlock;
> }
> diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
> index f875a404a0a0..1be19cc9d73c 100644
> --- a/arch/s390/kvm/priv.c
> +++ b/arch/s390/kvm/priv.c
> @@ -149,7 +149,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
> * first page, since address is 8k aligned and memory pieces are always
> * at least 1MB aligned and have at least a size of 1MB.
> */
> - if (kvm_is_error_gpa(vcpu->kvm, address))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
> return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
>
> kvm_s390_set_prefix(vcpu, address);
> @@ -464,7 +464,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
> return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
> addr = kvm_s390_real_to_abs(vcpu, addr);
>
> - if (kvm_is_error_gpa(vcpu->kvm, addr))
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
> return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
> /*
> * We don't expect errors on modern systems, and do not care
> diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
> index d9696b530064..55c34cb35428 100644
> --- a/arch/s390/kvm/sigp.c
> +++ b/arch/s390/kvm/sigp.c
> @@ -172,7 +172,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
> * first page, since address is 8k aligned and memory pieces are always
> * at least 1MB aligned and have at least a size of 1MB.
> */
> - if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
> + if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
> *reg &= 0xffffffff00000000UL;
> *reg |= SIGP_STATUS_INVALID_PARAMETER;
> return SIGP_CC_STATUS_STORED;
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 7e7fd25b09b3..d175b64488ec 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -1788,11 +1788,11 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
> return (hpa_t)pfn << PAGE_SHIFT;
> }
>
> -static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
> +static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
> {
> unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
>
> - return kvm_is_error_hva(hva);
> + return !kvm_is_error_hva(hva);
> }
>
> enum kvm_stat_kind {
>
> base-commit: 687d8f4c3dea0758afd748968d91288220bbe7e3