2022-02-13 06:17:52

by Janis Schoetterl-Glausch

Subject: [PATCH v4 10/10] KVM: s390: selftests: Test memops with storage keys

Test vm and vcpu memops with storage keys, both successful accesses
and various exception conditions.

Signed-off-by: Janis Schoetterl-Glausch <[email protected]>
---
tools/testing/selftests/kvm/s390x/memop.c | 558 +++++++++++++++++++---
1 file changed, 495 insertions(+), 63 deletions(-)

diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index 9f49ead380ab..ac08fd5aa746 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -13,28 +13,304 @@
#include "test_util.h"
#include "kvm_util.h"

+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
+#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
+
#define VCPU_ID 1

+const uint64_t last_page_addr = UINT64_MAX - PAGE_SIZE + 1;
+
static uint8_t mem1[65536];
static uint8_t mem2[65536];

+static void set_storage_key_range(void *addr, size_t len, u8 key)
+{
+ uintptr_t _addr, abs, i;
+
+ _addr = (uintptr_t)addr;
+ for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
+ abs = i;
+ asm volatile (
+ "lra %[abs], 0(0,%[abs])\n"
+ " sske %[key], %[abs]\n"
+ : [abs] "+&a" (abs)
+ : [key] "r" (key)
+ : "cc"
+ );
+ }
+}
+
static void guest_code(void)
+{
+ /* Set storage key */
+ set_storage_key_range(mem1, sizeof(mem1), 0x90);
+ set_storage_key_range(mem2, sizeof(mem2), 0x90);
+ GUEST_SYNC(0);
+
+ /* Write, read back, without keys */
+ memcpy(mem2, mem1, sizeof(mem2));
+ GUEST_SYNC(10);
+
+ /* Write, read back, key 0 */
+ memcpy(mem2, mem1, sizeof(mem2));
+ GUEST_SYNC(20);
+
+ /* Write, read back, matching key, 1 page */
+ memcpy(mem2, mem1, sizeof(mem2));
+ GUEST_SYNC(30);
+
+ /* Write, read back, matching key, all pages */
+ memcpy(mem2, mem1, sizeof(mem2));
+ GUEST_SYNC(40);
+
+ /* Set fetch protection */
+ set_storage_key_range(0, 1, 0x18);
+ GUEST_SYNC(50);
+
+ /* Enable fetch protection override */
+ GUEST_SYNC(60);
+
+ /* Enable storage protection override, set fetch protection */
+ set_storage_key_range(mem1, sizeof(mem1), 0x98);
+ set_storage_key_range(mem2, sizeof(mem2), 0x98);
+ GUEST_SYNC(70);
+
+ /* Write, read back, mismatching key,
+ * storage protection override, all pages
+ */
+ memcpy(mem2, mem1, sizeof(mem2));
+ GUEST_SYNC(80);
+
+ /* VM memop, write, read back, matching key */
+ memcpy(mem2, mem1, sizeof(mem2));
+ GUEST_SYNC(90);
+
+ /* VM memop, write, read back, key 0 */
+ memcpy(mem2, mem1, sizeof(mem2));
+ /* VM memop, fail to read from 0 absolute/virtual, mismatching key,
+ * fetch protection override does not apply to VM memops
+ */
+ asm volatile ("sske %1,%0\n"
+ : : "r"(0), "r"(0x18) : "cc"
+ );
+ GUEST_SYNC(100);
+
+ /* Enable AR mode */
+ GUEST_SYNC(110);
+
+ /* Disable AR mode */
+ GUEST_SYNC(120);
+}
+
+static void reroll_mem1(void)
{
int i;

- for (;;) {
- for (i = 0; i < sizeof(mem2); i++)
- mem2[i] = mem1[i];
- GUEST_SYNC(0);
- }
+ for (i = 0; i < sizeof(mem1); i++)
+ mem1[i] = rand();
+}
+
+static int _vcpu_read_guest(struct kvm_vm *vm, void *host_addr,
+ uintptr_t guest_addr, size_t len)
+{
+ struct kvm_s390_mem_op ksmo = {
+ .gaddr = guest_addr,
+ .flags = 0,
+ .size = len,
+ .op = KVM_S390_MEMOP_LOGICAL_READ,
+ .buf = (uintptr_t)host_addr,
+ .ar = 0,
+ };
+
+ return _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vcpu_read_guest(struct kvm_vm *vm, void *host_addr,
+ uintptr_t guest_addr, size_t len)
+{
+ int rv;
+
+ rv = _vcpu_read_guest(vm, host_addr, guest_addr, len);
+ TEST_ASSERT(rv == 0, "vcpu memop read failed: reason = %d\n", rv);
+}
+
+static int _vcpu_read_guest_key(struct kvm_vm *vm, void *host_addr,
+ uintptr_t guest_addr, size_t len, u8 access_key)
+{
+ struct kvm_s390_mem_op ksmo = {0};
+
+ ksmo.gaddr = guest_addr;
+ ksmo.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.size = len;
+ ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
+ ksmo.buf = (uintptr_t)host_addr;
+ ksmo.ar = 0;
+ ksmo.key = access_key;
+
+ return _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vcpu_read_guest_key(struct kvm_vm *vm, void *host_addr,
+ uintptr_t guest_addr, size_t len, u8 access_key)
+{
+ int rv;
+
+ rv = _vcpu_read_guest_key(vm, host_addr, guest_addr, len, access_key);
+ TEST_ASSERT(rv == 0, "vcpu memop read failed: reason = %d\n", rv);
+}
+
+static int _vcpu_write_guest(struct kvm_vm *vm, uintptr_t guest_addr,
+ void *host_addr, size_t len)
+{
+ struct kvm_s390_mem_op ksmo = {
+ .gaddr = guest_addr,
+ .flags = 0,
+ .size = len,
+ .op = KVM_S390_MEMOP_LOGICAL_WRITE,
+ .buf = (uintptr_t)host_addr,
+ .ar = 0,
+ };
+ return _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vcpu_write_guest(struct kvm_vm *vm, uintptr_t guest_addr,
+ void *host_addr, size_t len)
+{
+ int rv;
+
+ rv = _vcpu_write_guest(vm, guest_addr, host_addr, len);
+ TEST_ASSERT(rv == 0, "vcpu memop write failed: reason = %d\n", rv);
+}
+
+static int _vcpu_write_guest_key(struct kvm_vm *vm, uintptr_t guest_addr,
+ void *host_addr, size_t len, u8 access_key)
+{
+ struct kvm_s390_mem_op ksmo = {0};
+
+ ksmo.gaddr = guest_addr;
+ ksmo.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.size = len;
+ ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
+ ksmo.buf = (uintptr_t)host_addr;
+ ksmo.ar = 0;
+ ksmo.key = access_key;
+
+ return _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vcpu_write_guest_key(struct kvm_vm *vm, uintptr_t guest_addr,
+ void *host_addr, size_t len, u8 access_key)
+{
+ int rv;
+
+ rv = _vcpu_write_guest_key(vm, guest_addr, host_addr, len, access_key);
+ TEST_ASSERT(rv == 0, "vcpu memop write failed: reason = %d\n", rv);
+}
+
+static int _vm_read_guest_key(struct kvm_vm *vm, void *host_addr,
+ uintptr_t guest_addr, size_t len, u8 access_key)
+{
+ struct kvm_s390_mem_op ksmo = {0};
+
+ ksmo.gaddr = guest_addr;
+ ksmo.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.size = len;
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
+ ksmo.buf = (uintptr_t)host_addr;
+ ksmo.key = access_key;
+
+ return _vm_ioctl(vm, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vm_read_guest_key(struct kvm_vm *vm, void *host_addr,
+ uintptr_t guest_addr, size_t len, u8 access_key)
+{
+ int rv;
+
+ rv = _vm_read_guest_key(vm, host_addr, guest_addr, len, access_key);
+ TEST_ASSERT(rv == 0, "vm memop read failed: reason = %d\n", rv);
+}
+
+static int _vm_write_guest_key(struct kvm_vm *vm, uintptr_t guest_addr,
+ void *host_addr, size_t len, u8 access_key)
+{
+ struct kvm_s390_mem_op ksmo = {0};
+
+ ksmo.gaddr = guest_addr;
+ ksmo.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.size = len;
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
+ ksmo.buf = (uintptr_t)host_addr;
+ ksmo.key = access_key;
+
+ return _vm_ioctl(vm, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vm_write_guest_key(struct kvm_vm *vm, uintptr_t guest_addr,
+ void *host_addr, size_t len, u8 access_key)
+{
+ int rv;
+
+ rv = _vm_write_guest_key(vm, guest_addr, host_addr, len, access_key);
+ TEST_ASSERT(rv == 0, "vm memop write failed: reason = %d\n", rv);
}

+enum access_mode {
+ ACCESS_READ,
+ ACCESS_WRITE
+};
+
+static int _vm_check_guest_key(struct kvm_vm *vm, enum access_mode mode,
+ uintptr_t guest_addr, size_t len, u8 access_key)
+{
+ struct kvm_s390_mem_op ksmo = {0};
+
+ ksmo.gaddr = guest_addr;
+ ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY | KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.size = len;
+ if (mode == ACCESS_READ)
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
+ else
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
+ ksmo.key = access_key;
+
+ return _vm_ioctl(vm, KVM_S390_MEM_OP, &ksmo);
+}
+
+static void vm_check_guest_key(struct kvm_vm *vm, enum access_mode mode,
+ uintptr_t guest_addr, size_t len, u8 access_key)
+{
+ int rv;
+
+ rv = _vm_check_guest_key(vm, mode, guest_addr, len, access_key);
+ TEST_ASSERT(rv == 0, "vm memop check failed: reason = %d\n", rv);
+}
+
+#define HOST_SYNC(vmp, stage) \
+({ \
+ struct kvm_vm *__vm = (vmp); \
+ struct ucall uc; \
+ int __stage = (stage); \
+ \
+ vcpu_run(__vm, VCPU_ID); \
+ get_ucall(__vm, VCPU_ID, &uc); \
+ ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+ ASSERT_EQ(uc.args[1], __stage); \
+}) \
+
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_s390_mem_op ksmo;
- int rv, i, maxsize;
+ bool has_skey_ext;
+ vm_vaddr_t guest_mem1;
+ vm_vaddr_t guest_mem2;
+ vm_paddr_t guest_mem1_abs;
+ int rv, maxsize;

setbuf(stdout, NULL); /* Tell stdout not to buffer its content */

@@ -45,67 +321,225 @@ int main(int argc, char *argv[])
}
if (maxsize > sizeof(mem1))
maxsize = sizeof(mem1);
+ has_skey_ext = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
+ if (!has_skey_ext)
+ print_skip("Storage key extension not supported");

/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
run = vcpu_state(vm, VCPU_ID);
+ guest_mem1 = (uintptr_t)mem1;
+ guest_mem2 = (uintptr_t)mem2;
+ guest_mem1_abs = addr_gva2gpa(vm, guest_mem1);

- for (i = 0; i < sizeof(mem1); i++)
- mem1[i] = i * i + i;
+ /* Set storage key */
+ HOST_SYNC(vm, 0);

- /* Set the first array */
- ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1);
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ /* Write, read back, without keys */
+ reroll_mem1();
+ vcpu_write_guest(vm, guest_mem1, mem1, maxsize);
+ HOST_SYNC(vm, 10); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vcpu_read_guest(vm, mem2, guest_mem2, maxsize);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");

- /* Let the guest code copy the first array to the second */
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
- "Unexpected exit reason: %u (%s)\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
+ if (has_skey_ext) {
+ vm_vaddr_t guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
+ vm_vaddr_t guest_last_page = vm_vaddr_alloc(vm, PAGE_SIZE, last_page_addr);
+ vm_paddr_t guest_mem2_abs = addr_gva2gpa(vm, guest_mem2);

- memset(mem2, 0xaa, sizeof(mem2));
+ /* Write, read back, key 0 */
+ reroll_mem1();
+ vcpu_write_guest_key(vm, guest_mem1, mem1, maxsize, 0);
+ HOST_SYNC(vm, 20); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vcpu_read_guest_key(vm, mem2, guest_mem2, maxsize, 0);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");

- /* Get the second array */
- ksmo.gaddr = (uintptr_t)mem2;
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
- ksmo.buf = (uintptr_t)mem2;
- ksmo.ar = 0;
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ /* Write, read back, matching key, 1 page */
+ reroll_mem1();
+ vcpu_write_guest_key(vm, guest_mem1, mem1, PAGE_SIZE, 9);
+ HOST_SYNC(vm, 30); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vcpu_read_guest_key(vm, mem2, guest_mem2, PAGE_SIZE, 9);
+ TEST_ASSERT(!memcmp(mem1, mem2, PAGE_SIZE),
+ "Memory contents do not match!");

- TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
- "Memory contents do not match!");
+ /* Write, read back, matching key, all pages */
+ reroll_mem1();
+ vcpu_write_guest_key(vm, guest_mem1, mem1, maxsize, 9);
+ HOST_SYNC(vm, 40); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vcpu_read_guest_key(vm, mem2, guest_mem2, maxsize, 9);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");

- /* Check error conditions - first bad size: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = -1;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ /* Fail to write, read back old value, mismatching key */
+ rv = _vcpu_write_guest_key(vm, guest_mem1, mem1, maxsize, 2);
+ TEST_ASSERT(rv == 4, "Store should result in protection exception");
+ memset(mem2, 0xaa, sizeof(mem2));
+ vcpu_read_guest_key(vm, mem2, guest_mem2, maxsize, 2);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");
+
+ /* Set fetch protection */
+ HOST_SYNC(vm, 50);
+
+ /* Write without key, read back, matching key, fetch protection */
+ reroll_mem1();
+ vcpu_write_guest(vm, guest_0_page, mem1, PAGE_SIZE);
+ memset(mem2, 0xaa, sizeof(mem2));
+ /* Let's not copy in the guest, in case guest_0_page != 0 */
+ vcpu_read_guest_key(vm, mem2, guest_0_page, PAGE_SIZE, 1);
+ TEST_ASSERT(!memcmp(mem1, mem2, PAGE_SIZE),
+ "Memory contents do not match!");
+
+ /* Fail to read, mismatching key, fetch protection */
+ rv = _vcpu_read_guest_key(vm, mem2, guest_0_page, PAGE_SIZE, 2);
+ TEST_ASSERT(rv == 4, "Fetch should result in protection exception");
+
+ /* Enable fetch protection override */
+ run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+ run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(vm, 60);
+
+ if (guest_0_page != 0)
+ print_skip("Did not allocate page at 0 for fetch protection override test");
+
+ /* Write without key, read back, mismatching key,
+ * fetch protection override, 1 page
+ */
+ if (guest_0_page == 0) {
+ reroll_mem1();
+ vcpu_write_guest(vm, guest_0_page, mem1, PAGE_SIZE);
+ memset(mem2, 0xaa, sizeof(mem2));
+ /* Let's not copy in the guest, in case guest_0_page != 0 */
+ vcpu_read_guest_key(vm, mem2, guest_0_page, 2048, 2);
+ TEST_ASSERT(!memcmp(mem1, mem2, 2048),
+ "Memory contents do not match!");
+ }
+
+ /* Fail to read, mismatching key,
+ * fetch protection override address exceeded, 1 page
+ */
+ if (guest_0_page == 0) {
+ rv = _vcpu_read_guest_key(vm, mem2, 0, 2048 + 1, 2);
+ TEST_ASSERT(rv == 4,
+ "Fetch should result in protection exception");
+ }
+
+ if (guest_last_page != last_page_addr)
+ print_skip("Did not allocate last page for fetch protection override test");
+
+ /* Write without key, read back, mismatching key,
+ * fetch protection override, 2 pages, last page not fetch protected
+ */
+ reroll_mem1();
+ vcpu_write_guest(vm, guest_last_page, mem1, PAGE_SIZE);
+ vcpu_write_guest(vm, guest_0_page, mem1 + PAGE_SIZE, PAGE_SIZE);
+ if (guest_0_page == 0 && guest_last_page == last_page_addr) {
+ memset(mem2, 0xaa, sizeof(mem2));
+ /* Let's not copy in the guest, in case guest_0_page != 0 */
+ vcpu_read_guest_key(vm, mem2, last_page_addr,
+ PAGE_SIZE + 2048, 2);
+ TEST_ASSERT(!memcmp(mem1, mem2, PAGE_SIZE + 2048),
+ "Memory contents do not match!");
+ }
+
+ /* Fail to read, mismatching key, fetch protection override address
+ * exceeded, 2 pages, last page not fetch protected
+ */
+ if (guest_0_page == 0 && guest_last_page == last_page_addr) {
+ rv = _vcpu_read_guest_key(vm, mem2, last_page_addr,
+ PAGE_SIZE + 2048 + 1, 2);
+ TEST_ASSERT(rv == 4,
+ "Fetch should result in protection exception");
+ }
+
+ /* Enable storage protection override, set fetch protection */
+ run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+ run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(vm, 70);
+
+ /* Write, read back, mismatching key,
+ * storage protection override, all pages
+ */
+ reroll_mem1();
+ vcpu_write_guest_key(vm, guest_mem1, mem1, maxsize, 2);
+ HOST_SYNC(vm, 80); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vcpu_read_guest_key(vm, mem2, guest_mem2, maxsize, 2);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");
+
+ /* VM memop, write, read back, matching key */
+ reroll_mem1();
+ vm_write_guest_key(vm, guest_mem1_abs, mem1, maxsize, 9);
+ HOST_SYNC(vm, 90); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vm_read_guest_key(vm, mem2, guest_mem2_abs, maxsize, 9);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");
+ vm_check_guest_key(vm, ACCESS_WRITE, guest_mem1_abs, maxsize, 9);
+ vm_check_guest_key(vm, ACCESS_READ, guest_mem2_abs, maxsize, 9);
+
+ /* VM memop, write, read back, key 0 */
+ reroll_mem1();
+ vm_write_guest_key(vm, guest_mem1_abs, mem1, maxsize, 0);
+ HOST_SYNC(vm, 100); // Copy in vm
+ memset(mem2, 0xaa, sizeof(mem2));
+ vm_read_guest_key(vm, mem2, guest_mem2_abs, maxsize, 0);
+ TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
+ "Memory contents do not match!");
+ rv = _vm_check_guest_key(vm, ACCESS_READ, guest_mem1_abs, maxsize, 9);
+ TEST_ASSERT(rv == 0, "Check should succeed");
+ vm_check_guest_key(vm, ACCESS_WRITE, guest_mem1_abs, maxsize, 0);
+ vm_check_guest_key(vm, ACCESS_READ, guest_mem2_abs, maxsize, 0);
+
+ /* VM memop, fail to write, fail to read, mismatching key,
+ * storage protection override does not apply to VM memops
+ */
+ rv = _vm_write_guest_key(vm, guest_mem1_abs, mem1, maxsize, 2);
+ TEST_ASSERT(rv == 4, "Store should result in protection exception");
+ rv = _vm_read_guest_key(vm, mem2, guest_mem2_abs, maxsize, 2);
+ TEST_ASSERT(rv == 4, "Fetch should result in protection exception");
+ rv = _vm_check_guest_key(vm, ACCESS_WRITE, guest_mem1_abs, maxsize, 2);
+ TEST_ASSERT(rv == 4, "Check should indicate protection exception");
+ rv = _vm_check_guest_key(vm, ACCESS_READ, guest_mem2_abs, maxsize, 2);
+ TEST_ASSERT(rv == 4, "Check should indicate protection exception");
+
+ /* VM memop, fail to read from 0 absolute/virtual, mismatching key,
+ * fetch protection override does not apply to VM memops
+ */
+ rv = _vm_read_guest_key(vm, mem2, 0, 2048, 2);
+ TEST_ASSERT(rv != 0, "Fetch should result in exception");
+ rv = _vm_read_guest_key(vm, mem2, addr_gva2gpa(vm, 0), 2048, 2);
+ TEST_ASSERT(rv == 4, "Fetch should result in protection exception");
+ } else {
+ struct ucall uc;
+
+ do {
+ vcpu_run(vm, VCPU_ID);
+ get_ucall(vm, VCPU_ID, &uc);
+ ASSERT_EQ(uc.cmd, UCALL_SYNC);
+ } while (uc.args[1] < 100);
+ }
+
+ /* Check error conditions */
+
+ /* Bad size: */
+ rv = _vcpu_write_guest(vm, (uintptr_t)mem1, mem1, -1);
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

/* Zero size: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = 0;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = _vcpu_write_guest(vm, (uintptr_t)mem1, mem1, 0);
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");

/* Bad flags: */
- ksmo.gaddr = (uintptr_t)mem1;
+ ksmo.gaddr = guest_mem1;
ksmo.flags = -1;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
@@ -115,7 +549,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

/* Bad operation: */
- ksmo.gaddr = (uintptr_t)mem1;
+ ksmo.gaddr = guest_mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = -1;
@@ -135,21 +569,17 @@ int main(int argc, char *argv[])
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

/* Bad host address: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = 0;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = _vcpu_write_guest(vm, guest_mem1, 0, maxsize);
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");

- /* Bad access register: */
+ /* Enable AR mode */
run->psw_mask &= ~(3UL << (63 - 17));
- run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
- vcpu_run(vm, VCPU_ID); /* To sync new state to SIE block */
- ksmo.gaddr = (uintptr_t)mem1;
+ run->psw_mask |= 1UL << (63 - 17);
+ HOST_SYNC(vm, 110);
+
+ /* Bad access register: */
+ ksmo.gaddr = guest_mem1;
ksmo.flags = 0;
ksmo.size = maxsize;
ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
@@ -157,8 +587,10 @@ int main(int argc, char *argv[])
ksmo.ar = 17;
rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
- run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
- vcpu_run(vm, VCPU_ID); /* Run to sync new state */
+
+ /* Disable AR mode */
+ run->psw_mask &= ~(3UL << (63 - 17));
+ HOST_SYNC(vm, 120);

kvm_vm_free(vm);

--
2.32.0

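A minimal sketch of what the vcpu_read_guest_key() helper above boils
down to at the raw ioctl level. The vcpu_fd and its setup are assumed
(the usual KVM_CREATE_VM/KVM_CREATE_VCPU path); the field and flag names
follow the uapi usage in the patch, and the key field only takes effect
when KVM_CAP_S390_MEM_OP_EXTENSION is available:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_with_key(int vcpu_fd, void *host_buf,
			       uint64_t guest_addr, uint32_t len, uint8_t key)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = guest_addr,
		.size = len,
		.op = KVM_S390_MEMOP_LOGICAL_READ,
		.buf = (uintptr_t)host_buf,
		/* request storage key checking with access key 0..15 */
		.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,
		.key = key,
	};

	/*
	 * Returns 0 on success, a positive program interruption code
	 * (4 == protection exception, as the tests assert) when the
	 * key check fails, or -1 with errno set for usage errors.
	 */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &ksmo);
}
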

2022-02-17 22:39:56

by Janis Schoetterl-Glausch

Subject: [PATCH 0/2] memop selftest for storage key checking

As previously mentioned, I rewrote the memop selftest.
It makes heavy use of macros, but that should be fine for a test.
Feedback appreciated.
Requires Thomas' recent SIDA memop selftest patch.

Janis Schoetterl-Glausch (2):
KVM: s390: selftests: Refactor memop test
KVM: s390: selftests: Test vm and vcpu memop with keys

tools/testing/selftests/kvm/s390x/memop.c | 741 ++++++++++++++++++----
1 file changed, 623 insertions(+), 118 deletions(-)

--
2.32.0

2022-02-17 22:56:03

by Janis Schoetterl-Glausch

Subject: [PATCH 2/2] KVM: s390: selftests: Test vm and vcpu memop with keys

Test storage key checking for both vm and vcpu MEM_OP ioctls.
Test both error and non-error conditions.

Signed-off-by: Janis Schoetterl-Glausch <[email protected]>
---
tools/testing/selftests/kvm/s390x/memop.c | 342 +++++++++++++++++++++-
1 file changed, 328 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index 4510418d73e6..bc12a9238967 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -201,6 +201,8 @@ static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
+#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

#define ASSERT_MEM_EQ(p1, p2, size) \
TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
@@ -235,6 +237,11 @@ static struct test_default test_default_init(void *guest_code)
return t;
}

+static vm_vaddr_t test_vaddr_alloc(struct test_vcpu vm, size_t size, vm_vaddr_t vaddr_min)
+{
+ return vm_vaddr_alloc(vm.vm, size, vaddr_min);
+}
+
static void test_vm_free(struct test_vcpu vm)
{
kvm_vm_free(vm.vm);
@@ -257,6 +264,8 @@ enum stage {
STAGE_INITED,
/* Guest did nothing */
STAGE_IDLED,
+ /* Guest set storage keys (specifics up to test case) */
+ STAGE_SKEYS_SET,
/* Guest copied memory (locations up to test case) */
STAGE_COPIED,
};
@@ -276,6 +285,20 @@ enum stage {
ASSERT_MEM_EQ(mem1, mem2, __size); \
})

+#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
+({ \
+ struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+ enum mop_target __target = (mop_target_p); \
+ uint32_t __size = (size); \
+ \
+ prepare_mem12(); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
+ GADDR_V(mem1)); \
+ HOST_SYNC(__copy_cpu, STAGE_COPIED); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
+ ASSERT_MEM_EQ(mem1, mem2, __size); \
+})
+
static void guest_copy(void)
{
GUEST_SYNC(STAGE_INITED);
@@ -294,6 +317,269 @@ static void test_copy(void)
test_vm_free(t.vm);
}

+static void set_storage_key_range(void *addr, size_t len, uint8_t key)
+{
+ uintptr_t _addr, abs, i;
+ int not_mapped = 0;
+
+ _addr = (uintptr_t)addr;
+ for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
+ abs = i;
+ asm volatile (
+ "lra %[abs], 0(0,%[abs])\n"
+ " jz 0f\n"
+ " llill %[not_mapped],1\n"
+ " j 1f\n"
+ "0: sske %[key], %[abs]\n"
+ "1:"
+ : [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
+ : [key] "r" (key)
+ : "cc"
+ );
+ GUEST_ASSERT_EQ(not_mapped, 0);
+ }
+}
+
+static void guest_copy_key(void)
+{
+ set_storage_key_range(mem1, sizeof(mem1), 0x90);
+ set_storage_key_range(mem2, sizeof(mem2), 0x90);
+ GUEST_SYNC(STAGE_SKEYS_SET);
+
+ for (;;) {
+ memcpy(&mem2, &mem1, sizeof(mem2));
+ GUEST_SYNC(STAGE_COPIED);
+ }
+}
+
+static void test_copy_key(void)
+{
+ struct test_default t = test_default_init(guest_copy_key);
+
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vm, no key */
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);
+
+ /* vm/vcpu, matching key or key 0 */
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
+ /*
+ * There used to be different code paths for key handling depending on
+ * if the region crossed a page boundary.
+ * There currently are not, but the more tests the merrier.
+ */
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));
+
+ /* vm/vcpu, mismatching keys on read, but no fetch protection */
+ DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
+ DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));
+
+ test_vm_free(t.vm);
+}
+
+static void guest_copy_key_fetch_prot(void)
+{
+ /*
+ * For some reason combining the first sync with override enablement
+ * results in an exception when calling HOST_SYNC.
+ */
+ GUEST_SYNC(STAGE_INITED);
+ /* Storage protection override applies to both store and fetch. */
+ set_storage_key_range(mem1, sizeof(mem1), 0x98);
+ set_storage_key_range(mem2, sizeof(mem2), 0x98);
+ GUEST_SYNC(STAGE_SKEYS_SET);
+
+ for (;;) {
+ memcpy(&mem2, &mem1, sizeof(mem2));
+ GUEST_SYNC(STAGE_COPIED);
+ }
+}
+
+static void test_copy_key_storage_prot_override(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+ t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vcpu, mismatching keys, storage protection override in effect */
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));
+
+ test_vm_free(t.vm);
+}
+
+static void test_copy_key_fetch_prot(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vm/vcpu, matching key, fetch protection in effect */
+ DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
+ DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));
+
+ test_vm_free(t.vm);
+}
+
+#define ERR_PROT_MOP(...) \
+({ \
+ int rv; \
+ \
+ rv = ERR_MOP(__VA_ARGS__); \
+ TEST_ASSERT(rv == 4, "Should result in protection exception"); \
+})
+
+static void test_errors_key(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vm/vcpu, mismatching keys, fetch protection in effect */
+ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
+
+ test_vm_free(t.vm);
+}
+
+static void test_errors_key_storage_prot_override(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+ t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vm, mismatching keys, storage protection override not applicable to vm */
+ CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
+
+ test_vm_free(t.vm);
+}
+
+const uint64_t last_page_addr = -PAGE_SIZE;
+
+static void guest_copy_key_fetch_prot_override(void)
+{
+ int i;
+ char *page_0 = 0;
+
+ GUEST_SYNC(STAGE_INITED);
+ set_storage_key_range(0, PAGE_SIZE, 0x18);
+ set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
+ asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
+ GUEST_SYNC(STAGE_SKEYS_SET);
+
+ for (;;) {
+ for (i = 0; i < PAGE_SIZE; i++)
+ page_0[i] = mem1[i];
+ GUEST_SYNC(STAGE_COPIED);
+ }
+}
+
+static void test_copy_key_fetch_prot_override(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+ vm_vaddr_t guest_0_page, guest_last_page;
+
+ guest_0_page = test_vaddr_alloc(t.vm, PAGE_SIZE, 0);
+ guest_last_page = test_vaddr_alloc(t.vm, PAGE_SIZE, last_page_addr);
+ if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+ print_skip("did not allocate guest pages at required positions");
+ goto out;
+ }
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+ t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vcpu, mismatching keys on fetch, fetch protection override applies */
+ prepare_mem12();
+ MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
+ HOST_SYNC(t.vcpu, STAGE_COPIED);
+ CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
+ ASSERT_MEM_EQ(mem1, mem2, 2048);
+
+ /*
+ * vcpu, mismatching keys on fetch, fetch protection override applies,
+ * wraparound
+ */
+ prepare_mem12();
+ MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
+ HOST_SYNC(t.vcpu, STAGE_COPIED);
+ CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
+ GADDR_V(guest_last_page), KEY(2));
+ ASSERT_MEM_EQ(mem1, mem2, 2048);
+
+out:
+ test_vm_free(t.vm);
+}
+
+static void test_errors_key_fetch_prot_override_not_enabled(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+ vm_vaddr_t guest_0_page, guest_last_page;
+
+ guest_0_page = test_vaddr_alloc(t.vm, PAGE_SIZE, 0);
+ guest_last_page = test_vaddr_alloc(t.vm, PAGE_SIZE, last_page_addr);
+ if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+ print_skip("did not allocate guest pages at required positions");
+ goto out;
+ }
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vcpu, mismatching keys on fetch, fetch protection override not enabled */
+ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));
+
+out:
+ test_vm_free(t.vm);
+}
+
+static void test_errors_key_fetch_prot_override_enabled(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+ vm_vaddr_t guest_0_page, guest_last_page;
+
+ guest_0_page = test_vaddr_alloc(t.vm, PAGE_SIZE, 0);
+ guest_last_page = test_vaddr_alloc(t.vm, PAGE_SIZE, last_page_addr);
+ if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+ print_skip("did not allocate guest pages at required positions");
+ goto out;
+ }
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+ t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /*
+ * vcpu, mismatching keys on fetch,
+ * fetch protection override does not apply because the memory range is exceeded
+ */
+ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
+ GADDR_V(guest_last_page), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
+ CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
+
+out:
+ test_vm_free(t.vm);
+}
+
static void guest_idle(void)
{
GUEST_SYNC(STAGE_INITED);
@@ -301,38 +587,53 @@ static void guest_idle(void)
GUEST_SYNC(STAGE_IDLED);
}

-static void test_errors(void)
+static void _test_errors_common(struct test_vcpu vcpu, enum mop_target target, int size)
{
- struct test_default t = test_default_init(guest_idle);
int rv;

- HOST_SYNC(t.vcpu, STAGE_INITED);
-
- rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, -1, GADDR_V(mem1));
+ rv = ERR_MOP(vcpu, target, WRITE, mem1, -1, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

/* Zero size: */
- rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, 0, GADDR_V(mem1));
+ rv = ERR_MOP(vcpu, target, WRITE, mem1, 0, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");

/* Bad flags: */
- rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), SET_FLAGS(-1));
+ rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

- /* Bad operation: */
- rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
- TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
-
/* Bad guest address: */
- rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR((void *)~0xfffUL), CHECK_ONLY);
+ rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

/* Bad host address: */
- rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, 0, t.size, GADDR_V(mem1));
+ rv = ERR_MOP(vcpu, target, WRITE, 0, size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");

+ /* Bad key: */
+ rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
+ TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
+}
+
+static void test_errors(void)
+{
+ struct test_default t = test_default_init(guest_idle);
+ int rv;
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+
+ _test_errors_common(t.vcpu, LOGICAL, t.size);
+ _test_errors_common(t.vm, ABSOLUTE, t.size);
+
+ /* Bad operation: */
+ rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
+ TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
+ /* virtual addresses are not translated when passing INVALID */
+ rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
+ TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
+
/* Bad access register: */
t.run->psw_mask &= ~(3UL << (63 - 17));
t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
@@ -355,17 +656,30 @@ static void test_errors(void)

int main(int argc, char *argv[])
{
- int memop_cap;
+ int memop_cap, extension_cap;

setbuf(stdout, NULL); /* Tell stdout not to buffer its content */

memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
+ extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
if (!memop_cap) {
print_skip("CAP_S390_MEM_OP not supported");
exit(KSFT_SKIP);
}

test_copy();
+ if (extension_cap > 0) {
+ test_copy_key();
+ test_copy_key_storage_prot_override();
+ test_copy_key_fetch_prot();
+ test_copy_key_fetch_prot_override();
+ test_errors_key();
+ test_errors_key_storage_prot_override();
+ test_errors_key_fetch_prot_override_not_enabled();
+ test_errors_key_fetch_prot_override_enabled();
+ } else {
+ print_skip("storage key memop extension not supported");
+ }
test_errors();

return 0;
--
2.32.0

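The boundary values in the override tests above (2048, PAGE_SIZE + 2048,
and their +1 failure cases) encode the fetch protection override window.
A small model of the per-byte rule the tests rely on, written as a
sketch of the expected semantics rather than as kernel code:

#include <stdbool.h>
#include <stdint.h>

/*
 * With CR0 bit 38 set, fetch protection is overridden for a fetched
 * byte iff its effective address lies in [0, 2048). Accesses may wrap
 * around the address space, which is why one test starts at the last
 * page: that page is given storage key 0 (no fetch protection), and
 * the wrapped bytes land back inside the override window.
 */
static bool fetch_prot_override_covers_byte(uint64_t effective_addr)
{
	return effective_addr < 2048;
}

/* Hence reading 2048 bytes at address 0 succeeds, 2048 + 1 does not. */
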
2022-02-18 00:16:35

by Janis Schoetterl-Glausch

Subject: [PATCH 1/2] KVM: s390: selftests: Refactor memop test

Introduce a macro for performing MEM_OP ioctls in a concise way.
Split test cases into multiple host/guest pairs, making them independent.
Make various minor improvements.
All in all, this lays the groundwork for future extensions.

Signed-off-by: Janis Schoetterl-Glausch <[email protected]>
---
tools/testing/selftests/kvm/s390x/memop.c | 427 ++++++++++++++++------
1 file changed, 309 insertions(+), 118 deletions(-)

diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index d19c3ffdea3f..4510418d73e6 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -13,169 +13,360 @@
#include "test_util.h"
#include "kvm_util.h"

+enum mop_target {
+ LOGICAL,
+ SIDA,
+ ABSOLUTE,
+ INVALID,
+};
+
+enum mop_access_mode {
+ READ,
+ WRITE,
+};
+
+struct mop_desc {
+ uintptr_t gaddr;
+ uintptr_t gaddr_v;
+ uint64_t set_flags;
+ unsigned int f_check : 1;
+ unsigned int f_inject : 1;
+ unsigned int f_key : 1;
+ unsigned int _gaddr_v : 1;
+ unsigned int _set_flags : 1;
+ unsigned int _sida_offset : 1;
+ unsigned int _ar : 1;
+ uint32_t size;
+ enum mop_target target;
+ enum mop_access_mode mode;
+ void *buf;
+ uint32_t sida_offset;
+ uint8_t ar;
+ uint8_t key;
+};
+
+static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
+{
+ struct kvm_s390_mem_op ksmo = {
+ .gaddr = (uintptr_t)desc.gaddr,
+ .size = desc.size,
+ .buf = ((uintptr_t)desc.buf),
+ .reserved = "ignored_ignored_ignored_ignored"
+ };
+
+ switch (desc.target) {
+ case LOGICAL:
+ if (desc.mode == READ)
+ ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
+ if (desc.mode == WRITE)
+ ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
+ break;
+ case SIDA:
+ if (desc.mode == READ)
+ ksmo.op = KVM_S390_MEMOP_SIDA_READ;
+ if (desc.mode == WRITE)
+ ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
+ break;
+ case ABSOLUTE:
+ if (desc.mode == READ)
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
+ if (desc.mode == WRITE)
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
+ break;
+ case INVALID:
+ ksmo.op = -1;
+ }
+ if (desc.f_check)
+ ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+ if (desc.f_inject)
+ ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
+ if (desc._set_flags)
+ ksmo.flags = desc.set_flags;
+ if (desc.f_key) {
+ ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.key = desc.key;
+ }
+ if (desc._ar)
+ ksmo.ar = desc.ar;
+ else
+ ksmo.ar = 0;
+ if (desc._sida_offset)
+ ksmo.sida_offset = desc.sida_offset;
+
+ return ksmo;
+}
+
+/* vcpu dummy id signifying that vm instead of vcpu ioctl is to occur */
+const uint32_t VM_VCPU_ID = (uint32_t)-1;
+
+struct test_vcpu {
+ struct kvm_vm *vm;
+ uint32_t id;
+};
+
+#define PRINT_MEMOP false
+static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
+{
+ if (!PRINT_MEMOP)
+ return;
+
+ if (vcpu_id == VM_VCPU_ID)
+ printf("vm memop(");
+ else
+ printf("vcpu memop(");
+ switch (ksmo->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ printf("LOGICAL, READ, ");
+ break;
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ printf("LOGICAL, WRITE, ");
+ break;
+ case KVM_S390_MEMOP_SIDA_READ:
+ printf("SIDA, READ, ");
+ break;
+ case KVM_S390_MEMOP_SIDA_WRITE:
+ printf("SIDA, WRITE, ");
+ break;
+ case KVM_S390_MEMOP_ABSOLUTE_READ:
+ printf("ABSOLUTE, READ, ");
+ break;
+ case KVM_S390_MEMOP_ABSOLUTE_WRITE:
+ printf("ABSOLUTE, WRITE, ");
+ break;
+ }
+ printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
+ ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
+ if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
+ printf(", CHECK_ONLY");
+ if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
+ printf(", INJECT_EXCEPTION");
+ if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
+ printf(", SKEY_PROTECTION");
+ puts(")");
+}
+
+static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+{
+ if (vcpu.id == VM_VCPU_ID)
+ vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+ else
+ vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+}
+
+static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+{
+ if (vcpu.id == VM_VCPU_ID)
+ return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+ else
+ return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+}
+
+#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
+({ \
+ struct test_vcpu __vcpu = (vcpu_p); \
+ struct mop_desc __desc = { \
+ .target = (mop_target_p), \
+ .mode = (access_mode_p), \
+ .buf = (buf_p), \
+ .size = (size_p), \
+ __VA_ARGS__ \
+ }; \
+ struct kvm_s390_mem_op __ksmo; \
+ \
+ if (__desc._gaddr_v) { \
+ if (__desc.target == ABSOLUTE) \
+ __desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v); \
+ else \
+ __desc.gaddr = __desc.gaddr_v; \
+ } \
+ __ksmo = ksmo_from_desc(__desc); \
+ print_memop(__vcpu.id, &__ksmo); \
+ err##memop_ioctl(__vcpu, &__ksmo); \
+})
+
+#define MOP(...) MEMOP(, __VA_ARGS__)
+#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)
+
+#define GADDR(a) .gaddr = ((uintptr_t)a)
+#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
+#define CHECK_ONLY .f_check = 1
+#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
+#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
+#define AR(a) ._ar = 1, .ar = (a)
+#define KEY(a) .f_key = 1, .key = (a)
+
+#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
+
#define VCPU_ID 1
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1ULL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define ASSERT_MEM_EQ(p1, p2, size) \
+ TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

static uint8_t mem1[65536];
static uint8_t mem2[65536];

-static void guest_code(void)
+static void prepare_mem12(void)
{
int i;

- for (;;) {
- for (i = 0; i < sizeof(mem2); i++)
- mem2[i] = mem1[i];
- GUEST_SYNC(0);
- }
+ for (i = 0; i < sizeof(mem1); i++)
+ mem1[i] = rand();
+ memset(mem2, 0xaa, sizeof(mem2));
}

-int main(int argc, char *argv[])
-{
- struct kvm_vm *vm;
+struct test_default {
+ struct test_vcpu vm;
+ struct test_vcpu vcpu;
struct kvm_run *run;
- struct kvm_s390_mem_op ksmo;
- int rv, i, maxsize;
+ int size;
+};

- setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
+static struct test_default test_default_init(void *guest_code)
+{
+ struct test_default t;

- maxsize = kvm_check_cap(KVM_CAP_S390_MEM_OP);
- if (!maxsize) {
- print_skip("CAP_S390_MEM_OP not supported");
- exit(KSFT_SKIP);
- }
- if (maxsize > sizeof(mem1))
- maxsize = sizeof(mem1);
+ t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
+ t.vm = (struct test_vcpu) { vm_create_default(VCPU_ID, 0, guest_code), VM_VCPU_ID };
+ t.vcpu = (struct test_vcpu) { t.vm.vm, VCPU_ID };
+ t.run = vcpu_state(t.vm.vm, VCPU_ID);
+ return t;
+}

- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+static void test_vm_free(struct test_vcpu vm)
+{
+ kvm_vm_free(vm.vm);
+}

- for (i = 0; i < sizeof(mem1); i++)
- mem1[i] = i * i + i;
-
- /* Set the first array */
- ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1);
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
-
- /* Let the guest code copy the first array to the second */
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
- "Unexpected exit reason: %u (%s)\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
+#define HOST_SYNC(vcpu_p, stage) \
+({ \
+ struct test_vcpu __vcpu = (vcpu_p); \
+ struct ucall uc; \
+ int __stage = (stage); \
+ \
+ vcpu_run(__vcpu.vm, __vcpu.id); \
+ get_ucall(__vcpu.vm, __vcpu.id, &uc); \
+ ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+ ASSERT_EQ(uc.args[1], __stage); \
+}) \

- memset(mem2, 0xaa, sizeof(mem2));
+enum stage {
+ /* Synced state set by host, e.g. DAT */
+ STAGE_INITED,
+ /* Guest did nothing */
+ STAGE_IDLED,
+ /* Guest copied memory (locations up to test case) */
+ STAGE_COPIED,
+};
+
+#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
+({ \
+ struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+ enum mop_target __target = (mop_target_p); \
+ uint32_t __size = (size); \
+ \
+ prepare_mem12(); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
+ GADDR_V(mem1), ##__VA_ARGS__); \
+ HOST_SYNC(__copy_cpu, STAGE_COPIED); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \
+ GADDR_V(mem2), ##__VA_ARGS__); \
+ ASSERT_MEM_EQ(mem1, mem2, __size); \
+})
+
+static void guest_copy(void)
+{
+ GUEST_SYNC(STAGE_INITED);
+ memcpy(&mem2, &mem1, sizeof(mem2));
+ GUEST_SYNC(STAGE_COPIED);
+}
+
+static void test_copy(void)
+{
+ struct test_default t = test_default_init(guest_copy);
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);
+
+ test_vm_free(t.vm);
+}

- /* Get the second array */
- ksmo.gaddr = (uintptr_t)mem2;
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
- ksmo.buf = (uintptr_t)mem2;
- ksmo.ar = 0;
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
-
- TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
- "Memory contents do not match!");
-
- /* Check error conditions - first bad size: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = -1;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+static void guest_idle(void)
+{
+ GUEST_SYNC(STAGE_INITED);
+ for (;;)
+ GUEST_SYNC(STAGE_IDLED);
+}
+
+static void test_errors(void)
+{
+ struct test_default t = test_default_init(guest_idle);
+ int rv;
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, -1, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

/* Zero size: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = 0;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, 0, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");

/* Bad flags: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = -1;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), SET_FLAGS(-1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

/* Bad operation: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = -1;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

/* Bad guest address: */
- ksmo.gaddr = ~0xfffUL;
- ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR((void *)~0xfffUL), CHECK_ONLY);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

/* Bad host address: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = 0;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, 0, t.size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");

/* Bad access register: */
- run->psw_mask &= ~(3UL << (63 - 17));
- run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
- vcpu_run(vm, VCPU_ID); /* To sync new state to SIE block */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = maxsize;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 17;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ t.run->psw_mask &= ~(3UL << (63 - 17));
+ t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
+ HOST_SYNC(t.vcpu, STAGE_IDLED);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR(mem1), AR(17));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
- run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
- vcpu_run(vm, VCPU_ID); /* Run to sync new state */
+ t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
+ HOST_SYNC(t.vcpu, STAGE_IDLED);

/* Check that the SIDA calls are rejected for non-protected guests */
- ksmo.gaddr = 0;
- ksmo.flags = 0;
- ksmo.size = 8;
- ksmo.op = KVM_S390_MEMOP_SIDA_READ;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.sida_offset = 0x1c0;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl does not reject SIDA_READ in non-protected mode");
- ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl does not reject SIDA_WRITE in non-protected mode");

- kvm_vm_free(vm);
+ test_vm_free(t.vm);
+}
+
+int main(int argc, char *argv[])
+{
+ int memop_cap;
+
+ setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
+
+ memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
+ if (!memop_cap) {
+ print_skip("CAP_S390_MEM_OP not supported");
+ exit(KSFT_SKIP);
+ }
+
+ test_copy();
+ test_errors();

return 0;
}
--
2.32.0

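To illustrate the central combinator introduced here: CHECK_N_DO simply
issues the same memop twice, once as a dry run and once for real. A
hand-expansion of a typical call, as it would appear inside a test
function (illustrative, following the macro definitions above):

/* CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, size, GADDR_V(mem1)) */
({
	/* dry run: CHECK_ONLY sets KVM_S390_MEMOP_F_CHECK_ONLY */
	MOP(t.vcpu, LOGICAL, WRITE, mem1, size, GADDR_V(mem1), CHECK_ONLY);
	/* then the real access */
	MOP(t.vcpu, LOGICAL, WRITE, mem1, size, GADDR_V(mem1));
});
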
2022-02-26 01:39:57

by Janis Schoetterl-Glausch

Subject: [PATCH v2 0/5] memop selftest for storage key checking

Refactor the memop selftest and add tests.
Add storage key tests, covering both success and failure cases.
Likewise, test both vcpu and vm ioctls.

v1 -> v2
* restructure commits
* get rid of test_* wrapper functions that hid vm.vm
* minor changes

v0 -> v2
* complete rewrite

v1: https://lore.kernel.org/kvm/[email protected]/
v0: https://lore.kernel.org/kvm/[email protected]/

Janis Schoetterl-Glausch (5):
KVM: s390: selftests: Split memop tests
KVM: s390: selftests: Add macro as abstraction for MEM_OP
KVM: s390: selftests: Add named stages for memop test
KVM: s390: selftests: Add more copy memop tests
KVM: s390: selftests: Add error memop tests

tools/testing/selftests/kvm/s390x/memop.c | 734 ++++++++++++++++++----
1 file changed, 616 insertions(+), 118 deletions(-)


base-commit: ee6a569d3bf64c9676eee3eecb861fb01cc11311
--
2.32.0

2022-02-26 01:44:07

by Janis Schoetterl-Glausch

Subject: [PATCH v2 4/5] KVM: s390: selftests: Add more copy memop tests

Do not just test the actual copy, but also that success is indicated
when using the check-only flag.
Add copy tests with storage key checking enabled, including tests for
storage and fetch protection override.
These tests cover both logical vcpu ioctls and absolute vm ioctls.

Signed-off-by: Janis Schoetterl-Glausch <[email protected]>
---
tools/testing/selftests/kvm/s390x/memop.c | 243 ++++++++++++++++++++--
1 file changed, 230 insertions(+), 13 deletions(-)

diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index d01e48c7c5e8..088d1cc61709 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -195,13 +195,21 @@ static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)

+#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
+
#define VCPU_ID 1
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1ULL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
+#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

static uint8_t mem1[65536];
static uint8_t mem2[65536];

struct test_default {
struct kvm_vm *kvm_vm;
+ struct test_vcpu vm;
struct test_vcpu vcpu;
struct kvm_run *run;
int size;
@@ -213,6 +221,7 @@ static struct test_default test_default_init(void *guest_code)

t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code);
+ t.vm = (struct test_vcpu) { t.kvm_vm, VM_VCPU_ID };
t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID };
t.run = vcpu_state(t.kvm_vm, VCPU_ID);
return t;
@@ -223,6 +232,8 @@ enum stage {
STAGE_INITED,
/* Guest did nothing */
STAGE_IDLED,
+ /* Guest set storage keys (specifics up to test case) */
+ STAGE_SKEYS_SET,
/* Guest copied memory (locations up to test case) */
STAGE_COPIED,
};
@@ -239,6 +250,47 @@ enum stage {
ASSERT_EQ(uc.args[1], __stage); \
}) \

+static void prepare_mem12(void)
+{
+ int i;
+
+ for (i = 0; i < sizeof(mem1); i++)
+ mem1[i] = rand();
+ memset(mem2, 0xaa, sizeof(mem2));
+}
+
+#define ASSERT_MEM_EQ(p1, p2, size) \
+ TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
+
+#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
+({ \
+ struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+ enum mop_target __target = (mop_target_p); \
+ uint32_t __size = (size); \
+ \
+ prepare_mem12(); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
+ GADDR_V(mem1), ##__VA_ARGS__); \
+ HOST_SYNC(__copy_cpu, STAGE_COPIED); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \
+ GADDR_V(mem2), ##__VA_ARGS__); \
+ ASSERT_MEM_EQ(mem1, mem2, __size); \
+})
+
+#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
+({ \
+ struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+ enum mop_target __target = (mop_target_p); \
+ uint32_t __size = (size); \
+ \
+ prepare_mem12(); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
+ GADDR_V(mem1)); \
+ HOST_SYNC(__copy_cpu, STAGE_COPIED); \
+ CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
+ ASSERT_MEM_EQ(mem1, mem2, __size); \
+})
+
static void guest_copy(void)
{
GUEST_SYNC(STAGE_INITED);
@@ -249,30 +301,186 @@ static void guest_copy(void)
static void test_copy(void)
{
struct test_default t = test_default_init(guest_copy);
- int i;

- for (i = 0; i < sizeof(mem1); i++)
- mem1[i] = i * i + i;
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);
+
+ kvm_vm_free(t.kvm_vm);
+}
+
+static void set_storage_key_range(void *addr, size_t len, uint8_t key)
+{
+ uintptr_t _addr, abs, i;
+ int not_mapped = 0;
+
+ _addr = (uintptr_t)addr;
+ for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
+ abs = i;
+ asm volatile (
+ "lra %[abs], 0(0,%[abs])\n"
+ " jz 0f\n"
+ " llill %[not_mapped],1\n"
+ " j 1f\n"
+ "0: sske %[key], %[abs]\n"
+ "1:"
+ : [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
+ : [key] "r" (key)
+ : "cc"
+ );
+ GUEST_ASSERT_EQ(not_mapped, 0);
+ }
+}
+
+static void guest_copy_key(void)
+{
+ set_storage_key_range(mem1, sizeof(mem1), 0x90);
+ set_storage_key_range(mem2, sizeof(mem2), 0x90);
+ GUEST_SYNC(STAGE_SKEYS_SET);
+
+ for (;;) {
+ memcpy(&mem2, &mem1, sizeof(mem2));
+ GUEST_SYNC(STAGE_COPIED);
+ }
+}
+
+static void test_copy_key(void)
+{
+ struct test_default t = test_default_init(guest_copy_key);
+
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vm, no key */
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);
+
+ /* vm/vcpu, matching key or key 0 */
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
+ /*
+ * There used to be different code paths for key handling depending on
+ * if the region crossed a page boundary.
+ * There currently are not, but the more tests the merrier.
+ */
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
+ DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));
+
+ /* vm/vcpu, mismatching keys on read, but no fetch protection */
+ DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
+ DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));
+
+ kvm_vm_free(t.kvm_vm);
+}
+
+static void guest_copy_key_fetch_prot(void)
+{
+ /*
+ * For some reason combining the first sync with override enablement
+ * results in an exception when calling HOST_SYNC.
+ */
+ GUEST_SYNC(STAGE_INITED);
+ /* Storage protection override applies to both store and fetch. */
+ set_storage_key_range(mem1, sizeof(mem1), 0x98);
+ set_storage_key_range(mem2, sizeof(mem2), 0x98);
+ GUEST_SYNC(STAGE_SKEYS_SET);
+
+ for (;;) {
+ memcpy(&mem2, &mem1, sizeof(mem2));
+ GUEST_SYNC(STAGE_COPIED);
+ }
+}
+
+static void test_copy_key_storage_prot_override(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot);

HOST_SYNC(t.vcpu, STAGE_INITED);
+ t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+ t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

- /* Set the first array */
- MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1));
+ /* vcpu, mismatching keys, storage protection override in effect */
+ DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));

- /* Let the guest code copy the first array to the second */
- HOST_SYNC(t.vcpu, STAGE_COPIED);
+ kvm_vm_free(t.kvm_vm);
+}

- memset(mem2, 0xaa, sizeof(mem2));
+static void test_copy_key_fetch_prot(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot);

- /* Get the second array */
- MOP(t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2));
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

- TEST_ASSERT(!memcmp(mem1, mem2, t.size),
- "Memory contents do not match!");
+ /* vm/vcpu, matching key, fetch protection in effect */
+ DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
+ DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));

kvm_vm_free(t.kvm_vm);
}

+const uint64_t last_page_addr = -PAGE_SIZE;
+
+static void guest_copy_key_fetch_prot_override(void)
+{
+ int i;
+ char *page_0 = 0;
+
+ GUEST_SYNC(STAGE_INITED);
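+	/*
+	 * Key 0x18 is access-control key 1 with the fetch-protection bit
+	 * set; the last page gets key 0, leaving it unprotected.
+	 */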
+ set_storage_key_range(0, PAGE_SIZE, 0x18);
+ set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
+ asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
+ GUEST_SYNC(STAGE_SKEYS_SET);
+
+ for (;;) {
+ for (i = 0; i < PAGE_SIZE; i++)
+ page_0[i] = mem1[i];
+ GUEST_SYNC(STAGE_COPIED);
+ }
+}
+
+static void test_copy_key_fetch_prot_override(void)
+{
+ struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+ vm_vaddr_t guest_0_page, guest_last_page;
+
+ guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
+ guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+ if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+ print_skip("did not allocate guest pages at required positions");
+ goto out;
+ }
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+ t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vcpu, mismatching keys on fetch, fetch protection override applies */
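+	/*
+	 * With CR0 bit 38 set, fetch protection is ignored for effective
+	 * addresses 0-2047, so reading 2048 bytes from page 0 with key 2
+	 * succeeds despite the mismatch.
+	 */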
+ prepare_mem12();
+ MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
+ HOST_SYNC(t.vcpu, STAGE_COPIED);
+ CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
+ ASSERT_MEM_EQ(mem1, mem2, 2048);
+
+ /*
+ * vcpu, mismatching keys on fetch, fetch protection override applies,
+ * wraparound
+ */
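+	/*
+	 * The access starts in the last page of the address space, which
+	 * has key 0 and no fetch protection, and wraps around to page 0,
+	 * where only the first 2048 bytes are fetchable via the override.
+	 */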
+ prepare_mem12();
+ MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
+ HOST_SYNC(t.vcpu, STAGE_COPIED);
+ CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
+ GADDR_V(guest_last_page), KEY(2));
+ ASSERT_MEM_EQ(mem1, mem2, 2048);
+
+out:
+ kvm_vm_free(t.kvm_vm);
+}
+
static void guest_idle(void)
{
GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
@@ -335,17 +543,26 @@ static void test_errors(void)

int main(int argc, char *argv[])
{
- int memop_cap;
+ int memop_cap, extension_cap;

setbuf(stdout, NULL); /* Tell stdout not to buffer its content */

memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
+ extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
if (!memop_cap) {
print_skip("CAP_S390_MEM_OP not supported");
exit(KSFT_SKIP);
}

test_copy();
+ if (extension_cap > 0) {
+ test_copy_key();
+ test_copy_key_storage_prot_override();
+ test_copy_key_fetch_prot();
+ test_copy_key_fetch_prot_override();
+ } else {
+ print_skip("storage key memop extension not supported");
+ }
test_errors();

return 0;
--
2.32.0

2022-02-26 02:10:37

by Janis Schoetterl-Glausch

[permalink] [raw]
Subject: [PATCH v2 2/5] KVM: s390: selftests: Add macro as abstraction for MEM_OP

In order to achieve good test coverage we need to be able to invoke the
MEM_OP ioctl with all possible parametrizations.
However, for a given test, we want to be concise and not specify a long
list of default values for parameters not relevant for the test, so the
reader's attention is not needlessly diverted.
Add a macro that enables this and convert the existing test to use it.
The macro emulates named arguments and hides some of the ioctl's
redundancy, e.g. sets the key flag if an access key is specified.
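
For illustration, a keyed logical read then reduces to a single line
(hypothetical call site; t, mem2 and the macros are those defined in
the diff below):

	rv = ERR_MOP(t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(5));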

Signed-off-by: Janis Schoetterl-Glausch <[email protected]>
---
tools/testing/selftests/kvm/s390x/memop.c | 272 ++++++++++++++++------
1 file changed, 197 insertions(+), 75 deletions(-)

diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index b9b673acb766..e2ad3d70bae4 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -13,6 +13,188 @@
#include "test_util.h"
#include "kvm_util.h"

+enum mop_target {
+ LOGICAL,
+ SIDA,
+ ABSOLUTE,
+ INVALID,
+};
+
+enum mop_access_mode {
+ READ,
+ WRITE,
+};
+
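+/*
+ * Gathers all parameters of a prospective MEM_OP ioctl; the _-prefixed
+ * bitfields record whether the corresponding optional "named argument"
+ * was supplied to the MEMOP macro further down.
+ */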
+struct mop_desc {
+ uintptr_t gaddr;
+ uintptr_t gaddr_v;
+ uint64_t set_flags;
+ unsigned int f_check : 1;
+ unsigned int f_inject : 1;
+ unsigned int f_key : 1;
+ unsigned int _gaddr_v : 1;
+ unsigned int _set_flags : 1;
+ unsigned int _sida_offset : 1;
+ unsigned int _ar : 1;
+ uint32_t size;
+ enum mop_target target;
+ enum mop_access_mode mode;
+ void *buf;
+ uint32_t sida_offset;
+ uint8_t ar;
+ uint8_t key;
+};
+
+static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
+{
+ struct kvm_s390_mem_op ksmo = {
+ .gaddr = (uintptr_t)desc.gaddr,
+ .size = desc.size,
+ .buf = ((uintptr_t)desc.buf),
+ .reserved = "ignored_ignored_ignored_ignored"
+ };
+
+ switch (desc.target) {
+ case LOGICAL:
+ if (desc.mode == READ)
+ ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
+ if (desc.mode == WRITE)
+ ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
+ break;
+ case SIDA:
+ if (desc.mode == READ)
+ ksmo.op = KVM_S390_MEMOP_SIDA_READ;
+ if (desc.mode == WRITE)
+ ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
+ break;
+ case ABSOLUTE:
+ if (desc.mode == READ)
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
+ if (desc.mode == WRITE)
+ ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
+ break;
+ case INVALID:
+ ksmo.op = -1;
+ }
+ if (desc.f_check)
+ ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+ if (desc.f_inject)
+ ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
+ if (desc._set_flags)
+ ksmo.flags = desc.set_flags;
+ if (desc.f_key) {
+ ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
+ ksmo.key = desc.key;
+ }
+ if (desc._ar)
+ ksmo.ar = desc.ar;
+ else
+ ksmo.ar = 0;
+ if (desc._sida_offset)
+ ksmo.sida_offset = desc.sida_offset;
+
+ return ksmo;
+}
+
+/* Dummy vcpu id signifying that a vm ioctl, not a vcpu ioctl, is to be issued */
+const uint32_t VM_VCPU_ID = (uint32_t)-1;
+
+struct test_vcpu {
+ struct kvm_vm *vm;
+ uint32_t id;
+};
+
+#define PRINT_MEMOP false
+static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
+{
+ if (!PRINT_MEMOP)
+ return;
+
+ if (vcpu_id == VM_VCPU_ID)
+ printf("vm memop(");
+ else
+ printf("vcpu memop(");
+ switch (ksmo->op) {
+ case KVM_S390_MEMOP_LOGICAL_READ:
+ printf("LOGICAL, READ, ");
+ break;
+ case KVM_S390_MEMOP_LOGICAL_WRITE:
+ printf("LOGICAL, WRITE, ");
+ break;
+ case KVM_S390_MEMOP_SIDA_READ:
+ printf("SIDA, READ, ");
+ break;
+ case KVM_S390_MEMOP_SIDA_WRITE:
+ printf("SIDA, WRITE, ");
+ break;
+ case KVM_S390_MEMOP_ABSOLUTE_READ:
+ printf("ABSOLUTE, READ, ");
+ break;
+ case KVM_S390_MEMOP_ABSOLUTE_WRITE:
+ printf("ABSOLUTE, WRITE, ");
+ break;
+ }
+ printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
+ ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
+ if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
+ printf(", CHECK_ONLY");
+ if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
+ printf(", INJECT_EXCEPTION");
+ if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
+ printf(", SKEY_PROTECTION");
+ puts(")");
+}
+
+static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+{
+ if (vcpu.id == VM_VCPU_ID)
+ vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+ else
+ vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+}
+
+static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+{
+ if (vcpu.id == VM_VCPU_ID)
+ return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+ else
+ return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+}
+
+#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
+({ \
+ struct test_vcpu __vcpu = (vcpu_p); \
+ struct mop_desc __desc = { \
+ .target = (mop_target_p), \
+ .mode = (access_mode_p), \
+ .buf = (buf_p), \
+ .size = (size_p), \
+ __VA_ARGS__ \
+ }; \
+ struct kvm_s390_mem_op __ksmo; \
+ \
+ if (__desc._gaddr_v) { \
+ if (__desc.target == ABSOLUTE) \
+ __desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v); \
+ else \
+ __desc.gaddr = __desc.gaddr_v; \
+ } \
+ __ksmo = ksmo_from_desc(__desc); \
+ print_memop(__vcpu.id, &__ksmo); \
+ err##memop_ioctl(__vcpu, &__ksmo); \
+})
+
+#define MOP(...) MEMOP(, __VA_ARGS__)
+#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)
+
+#define GADDR(a) .gaddr = ((uintptr_t)a)
+#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
+#define CHECK_ONLY .f_check = 1
+#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
+#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
+#define AR(a) ._ar = 1, .ar = (a)
+#define KEY(a) .f_key = 1, .key = (a)
+
#define VCPU_ID 1

static uint8_t mem1[65536];
@@ -20,6 +202,7 @@ static uint8_t mem2[65536];

struct test_default {
struct kvm_vm *kvm_vm;
+ struct test_vcpu vcpu;
struct kvm_run *run;
int size;
};
@@ -30,6 +213,7 @@ static struct test_default test_default_init(void *guest_code)

t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code);
+ t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID };
t.run = vcpu_state(t.kvm_vm, VCPU_ID);
return t;
}
@@ -43,20 +227,14 @@ static void guest_copy(void)
static void test_copy(void)
{
struct test_default t = test_default_init(guest_copy);
- struct kvm_s390_mem_op ksmo;
int i;

for (i = 0; i < sizeof(mem1); i++)
mem1[i] = i * i + i;

/* Set the first array */
- ksmo.gaddr = addr_gva2gpa(t.kvm_vm, (uintptr_t)mem1);
- ksmo.flags = 0;
- ksmo.size = t.size;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size,
+ GADDR(addr_gva2gpa(t.kvm_vm, (uintptr_t)mem1)));

/* Let the guest code copy the first array to the second */
vcpu_run(t.kvm_vm, VCPU_ID);
@@ -68,13 +246,7 @@ static void test_copy(void)
memset(mem2, 0xaa, sizeof(mem2));

/* Get the second array */
- ksmo.gaddr = (uintptr_t)mem2;
- ksmo.flags = 0;
- ksmo.size = t.size;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
- ksmo.buf = (uintptr_t)mem2;
- ksmo.ar = 0;
- vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ MOP(t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2));

TEST_ASSERT(!memcmp(mem1, mem2, t.size),
"Memory contents do not match!");
@@ -91,68 +263,31 @@ static void guest_idle(void)
static void test_errors(void)
{
struct test_default t = test_default_init(guest_idle);
- struct kvm_s390_mem_op ksmo;
int rv;

- /* Check error conditions - first bad size: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = -1;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ /* Bad size: */
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, -1, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

/* Zero size: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = 0;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, 0, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");

/* Bad flags: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = -1;
- ksmo.size = t.size;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), SET_FLAGS(-1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

/* Bad operation: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = t.size;
- ksmo.op = -1;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

/* Bad guest address: */
- ksmo.gaddr = ~0xfffUL;
- ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY;
- ksmo.size = t.size;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR((void *)~0xfffUL), CHECK_ONLY);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");

/* Bad host address: */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = t.size;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = 0;
- ksmo.ar = 0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, 0, t.size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");

@@ -160,29 +295,16 @@ static void test_errors(void)
t.run->psw_mask &= ~(3UL << (63 - 17));
t.run->psw_mask |= 1UL << (63 - 17); /* Enable AR mode */
vcpu_run(t.kvm_vm, VCPU_ID); /* To sync new state to SIE block */
- ksmo.gaddr = (uintptr_t)mem1;
- ksmo.flags = 0;
- ksmo.size = t.size;
- ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.ar = 17;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
t.run->psw_mask &= ~(3UL << (63 - 17)); /* Disable AR mode */
vcpu_run(t.kvm_vm, VCPU_ID); /* Run to sync new state */

/* Check that the SIDA calls are rejected for non-protected guests */
- ksmo.gaddr = 0;
- ksmo.flags = 0;
- ksmo.size = 8;
- ksmo.op = KVM_S390_MEMOP_SIDA_READ;
- ksmo.buf = (uintptr_t)mem1;
- ksmo.sida_offset = 0x1c0;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl does not reject SIDA_READ in non-protected mode");
- ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
- rv = _vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+ rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
TEST_ASSERT(rv == -1 && errno == EINVAL,
"ioctl does not reject SIDA_WRITE in non-protected mode");

--
2.32.0

2022-03-08 23:16:38

by Christian Borntraeger

[permalink] [raw]
Subject: Re: [PATCH v2 0/5] memop selftest for storage key checking

Can you send this as a separate thread (not inside the original thread)? Otherwise tools like b4 get confused and people might also overlook it.


Am 25.02.22 um 16:53 schrieb Janis Schoetterl-Glausch:
> Refactor memop selftest and add tests.
> Add storage key tests, both for success as well as failure cases.
> Similarly test both vcpu and vm ioctls.
>
> v1 -> v2
> * restructure commits
> * get rid of test_* wrapper functions that hid vm.vm
> * minor changes
>
> v0 -> v2
> * complete rewrite
>
> v1: https://lore.kernel.org/kvm/[email protected]/
> v0: https://lore.kernel.org/kvm/[email protected]/
>
> Janis Schoetterl-Glausch (5):
> KVM: s390: selftests: Split memop tests
> KVM: s390: selftests: Add macro as abstraction for MEM_OP
> KVM: s390: selftests: Add named stages for memop test
> KVM: s390: selftests: Add more copy memop tests
> KVM: s390: selftests: Add error memop tests
>
> tools/testing/selftests/kvm/s390x/memop.c | 734 ++++++++++++++++++----
> 1 file changed, 616 insertions(+), 118 deletions(-)
>
>
> base-commit: ee6a569d3bf64c9676eee3eecb861fb01cc11311

2022-03-08 23:28:56

by Shuah Khan

[permalink] [raw]
Subject: Re: [PATCH v2 0/5] memop selftest for storage key checking

On 3/8/22 3:16 AM, Christian Borntraeger wrote:
> Can you send this as a separate thread (not inside the original thread). Otherwise things like b4 get confused and people might also overlook this.
>
>

+1 - I missed the v2 until now.

> Am 25.02.22 um 16:53 schrieb Janis Schoetterl-Glausch:
>> Refactor memop selftest and add tests.
>> Add storage key tests, both for success as well as failure cases.
>> Similarly test both vcpu and vm ioctls.
>>
>> v1 -> v2
>>   * restructure commits
>>   * get rid of test_* wrapper functions that hid vm.vm
>>   * minor changes
>>

thanks,
-- Shuah