Date: 2023-07-25 22:04:43
From: Ryan Afranji
Subject: [PATCH v4 07/28] KVM: selftests: TDX: Update load_td_memory_region for VM memory backed by guest memfd

From: Ackerley Tng <[email protected]>

If guest memory is backed by a guest memfd:

+ UPM is being used, hence the encrypted memory region has to be
  registered with KVM
+ We can avoid making a copy of guest memory before getting TDX to
  initialize the memory region, since the guest memfd provides a
  backing store separate from the shared userspace mapping

A condensed sketch of the resulting load flow follows the diffstat
below.

Signed-off-by: Ackerley Tng <[email protected]>
Signed-off-by: Ryan Afranji <[email protected]>
---
.../selftests/kvm/lib/x86_64/tdx/tdx_util.c | 41 +++++++++++++++----
1 file changed, 32 insertions(+), 9 deletions(-)
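
Not part of the patch, just an illustration for review: a condensed
sketch of the load path this change produces, shown for a single
contiguous range. The helper name load_range_sketch and its parameter
list are hypothetical; vm_set_memory_attributes(), tdx_init_mem_region(),
TEST_ASSERT() and struct userspace_mem_region are the existing selftest
helpers used in tdx_util.c, and the usual <sys/mman.h>/<string.h>
includes are assumed.

static void load_range_sketch(struct kvm_vm *vm,
			      struct userspace_mem_region *region,
			      uint64_t hva, uint64_t gpa, uint64_t size)
{
	void *source_addr = (void *)hva;

	if (region->region.gmem_fd != -1) {
		/*
		 * guest memfd backing: mark the GPA range private so TDX
		 * initializes the encrypted pages held by the gmem fd.
		 * (In the patch this registration is done once per region,
		 * before the range loop.)  The shared hva mapping is a
		 * separate backing store, so it can serve directly as the
		 * source without a copy.
		 */
		vm_set_memory_attributes(vm, region->region.guest_phys_addr,
					 region->region.memory_size,
					 KVM_MEMORY_ATTRIBUTE_PRIVATE);
	} else {
		/*
		 * Single backing source: KVM_TDX_INIT_MEM_REGION cannot
		 * encrypt memory in place, so stage the data in a temporary
		 * anonymous mapping and clear the original pages.
		 */
		source_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
				   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		TEST_ASSERT(source_addr != MAP_FAILED,
			    "Could not allocate memory for loading memory region");
		memcpy(source_addr, (void *)hva, size);
		memset((void *)hva, 0, size);
	}

	tdx_init_mem_region(vm, source_addr, gpa, size);

	if (region->region.gmem_fd == -1)
		munmap(source_addr, size);
}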

diff --git a/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c b/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c
index 95c6fb263583..c30801f4f759 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/tdx/tdx_util.c
@@ -199,6 +199,21 @@ static void tdx_td_finalizemr(struct kvm_vm *vm)
tdx_ioctl(vm->fd, KVM_TDX_FINALIZE_VM, 0, NULL);
}

+/*
+ * Other ioctls
+ */
+
+/**
+ * Register a memory region that may contain encrypted data in KVM.
+ */
+static void register_encrypted_memory_region(
+ struct kvm_vm *vm, struct userspace_mem_region *region)
+{
+ vm_set_memory_attributes(vm, region->region.guest_phys_addr,
+ region->region.memory_size,
+ KVM_MEMORY_ATTRIBUTE_PRIVATE);
+}
+
/*
* TD creation/setup/finalization
*/
@@ -383,30 +398,38 @@ static void load_td_memory_region(struct kvm_vm *vm,
if (!sparsebit_any_set(pages))
return;

+
+ if (region->region.gmem_fd != -1)
+ register_encrypted_memory_region(vm, region);
+
sparsebit_for_each_set_range(pages, i, j) {
const uint64_t size_to_load = (j - i + 1) * vm->page_size;
const uint64_t offset =
(i - lowest_page_in_region) * vm->page_size;
const uint64_t hva = hva_base + offset;
const uint64_t gpa = gpa_base + offset;
- void *source_addr;
+ void *source_addr = (void *)hva;

/*
* KVM_TDX_INIT_MEM_REGION ioctl cannot encrypt memory in place,
* hence we have to make a copy if there's only one backing
* memory source
*/
- source_addr = mmap(NULL, size_to_load, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- TEST_ASSERT(
- source_addr,
- "Could not allocate memory for loading memory region");
-
- memcpy(source_addr, (void *)hva, size_to_load);
+ if (region->region.gmem_fd == -1) {
+ source_addr = mmap(NULL, size_to_load, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ TEST_ASSERT(
+ source_addr,
+ "Could not allocate memory for loading memory region");
+
+ memcpy(source_addr, (void *)hva, size_to_load);
+ memset((void *)hva, 0, size_to_load);
+ }

tdx_init_mem_region(vm, source_addr, gpa, size_to_load);

- munmap(source_addr, size_to_load);
+ if (region->region.gmem_fd == -1)
+ munmap(source_addr, size_to_load);
}
}

--
2.41.0.487.g6d72f3e995-goog