2024-02-03 00:15:25

by Sean Christopherson

Subject: [PATCH v8 04/10] KVM: selftests: Add support for allocating/managing protected guest memory

From: Peter Gonda <[email protected]>

Add support for differentiating between protected (a.k.a. private, a.k.a.
encrypted) memory and normal (a.k.a. shared) memory for VMs that support
protected guest memory, e.g. x86's SEV. Provide and manage a common
bitmap for tracking whether a given physical page resides in protected
memory, as support for protected memory isn't x86 specific, i.e. adding an
arch hook would be a net negative both now and in the future.
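
Not part of this patch, but as a rough sketch of how the hook is meant to be
consumed: an architecture that supports protected memory provides its own
vm_arch_has_protected_memory() and #defines the name so that the generic
"return false" stub below is never seen.  A minimal, hypothetical example
(the per-VM field used here is purely illustrative, not something this patch
adds):

  /* In the arch's processor.h (hypothetical override). */
  bool vm_arch_has_protected_memory(struct kvm_vm *vm);
  #define vm_arch_has_protected_memory vm_arch_has_protected_memory

  /* In the arch's processor.c. */
  bool vm_arch_has_protected_memory(struct kvm_vm *vm)
  {
          /* "has_protected_memory" is an assumed, illustrative field. */
          return vm->arch.has_protected_memory;
  }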

Cc: Paolo Bonzini <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Vishal Annapurve <[email protected]>
Cc: Ackerley Tng <[email protected]>
Cc: Andrew Jones <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Michael Roth <[email protected]>
Originally-by: Michael Roth <[email protected]>
Signed-off-by: Peter Gonda <[email protected]>
Co-developed-by: Sean Christopherson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
---
.../selftests/kvm/include/kvm_util_base.h | 25 +++++++++++++++++--
tools/testing/selftests/kvm/lib/kvm_util.c | 22 +++++++++++++---
2 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index d9dc31af2f96..a82149305349 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -46,6 +46,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
struct userspace_mem_region {
struct kvm_userspace_memory_region2 region;
struct sparsebit *unused_phy_pages;
+ struct sparsebit *protected_phy_pages;
int fd;
off_t offset;
enum vm_mem_backing_src_type backing_src_type;
@@ -573,6 +574,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
uint64_t guest_paddr, uint32_t slot, uint64_t npages,
uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

+#ifndef vm_arch_has_protected_memory
+static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
+{
+ return false;
+}
+#endif
+
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
@@ -836,10 +844,23 @@ const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot,
+ bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

+static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot)
+{
+ /*
+ * By default, allocate memory as protected for VMs that support
+ * protected memory, as the majority of memory for such VMs is
+ * protected, i.e. using shared memory is effectively opt-in.
+ */
+ return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+ vm_arch_has_protected_memory(vm));
+}
+
/*
* ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
* loads the test binary into guest memory and creates an IRQ chip (x86 only).
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index a53caf81eb87..ea677aa019ef 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -717,6 +717,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);

sparsebit_free(&region->unused_phy_pages);
+ sparsebit_free(&region->protected_phy_pages);
ret = munmap(region->mmap_start, region->mmap_size);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
if (region->fd >= 0) {
@@ -1098,6 +1099,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
}

region->unused_phy_pages = sparsebit_alloc();
+ if (vm_arch_has_protected_memory(vm))
+ region->protected_phy_pages = sparsebit_alloc();
sparsebit_set_num(region->unused_phy_pages,
guest_paddr >> vm->page_shift, npages);
region->region.slot = slot;
@@ -1924,6 +1927,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
region->host_mem);
fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
sparsebit_dump(stream, region->unused_phy_pages, 0);
+ if (region->protected_phy_pages) {
+ fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+ sparsebit_dump(stream, region->protected_phy_pages, 0);
+ }
}
fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -2025,6 +2032,7 @@ const char *exit_reason_str(unsigned int exit_reason)
* num - number of pages
* paddr_min - Physical address minimum
* memslot - Memory region to allocate page from
+ * protected - True if the pages will be used as protected/private memory
*
* Output Args: None
*
@@ -2036,8 +2044,9 @@ const char *exit_reason_str(unsigned int exit_reason)
* and their base address is returned. A TEST_ASSERT failure occurs if
* not enough pages are available at or above paddr_min.
*/
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
- vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot,
+ bool protected)
{
struct userspace_mem_region *region;
sparsebit_idx_t pg, base;
@@ -2050,8 +2059,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
paddr_min, vm->page_size);

region = memslot2region(vm, memslot);
+ TEST_ASSERT(!protected || region->protected_phy_pages,
+ "Region doesn't support protected memory");
+
base = pg = paddr_min >> vm->page_shift;
-
do {
for (; pg < base + num; ++pg) {
if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2070,8 +2081,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
abort();
}

- for (pg = base; pg < base + num; ++pg)
+ for (pg = base; pg < base + num; ++pg) {
sparsebit_clear(region->unused_phy_pages, pg);
+ if (protected)
+ sparsebit_set(region->protected_phy_pages, pg);
+ }

return base * vm->page_size;
}
--
2.43.0.594.gd9cf4e227d-goog
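
Not part of the posted patch, but a sketch of the intended usage: tests for
protected VMs get protected allocations by default via vm_phy_pages_alloc(),
and pass "protected = false" to __vm_phy_pages_alloc() when a page must be
shared.  Assuming "vm" is an already-created VM on an arch that reports
protected memory support, and using memslot 0:

  vm_paddr_t gpa_private, gpa_shared;

  /* Defaults to protected because vm_arch_has_protected_memory(vm) is true. */
  gpa_private = vm_phy_pages_alloc(vm, 1, KVM_UTIL_MIN_PFN * vm->page_size, 0);

  /* Explicitly request shared (unprotected) memory. */
  gpa_shared = __vm_phy_pages_alloc(vm, 1, KVM_UTIL_MIN_PFN * vm->page_size,
                                    0, false);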



2024-02-15 05:27:39

by Itaru Kitayama

Subject: Re: [PATCH v8 04/10] KVM: selftests: Add support for allocating/managing protected guest memory

On Fri, Feb 02, 2024 at 04:09:10PM -0800, Sean Christopherson wrote:
> From: Peter Gonda <[email protected]>
>
> Add support for differentiating between protected (a.k.a. private, a.k.a.
> encrypted) memory and normal (a.k.a. shared) memory for VMs that support
> protected guest memory, e.g. x86's SEV. Provide and manage a common
> bitmap for tracking whether a given physical page resides in protected
> memory, as support for protected memory isn't x86 specific, i.e. adding an
> arch hook would be a net negative both now and in the future.
>
Reviewed-by: Itaru Kitayama <[email protected]>
