2022-02-08 06:46:32

by Janis Schoetterl-Glausch

Subject: [PATCH v2 04/11] KVM: s390: selftests: Test TEST PROTECTION emulation

Test the emulation of TEST PROTECTION in the presence of storage keys.
Emulation only occurs under certain conditions, one of which is the host
page being protected.
Trigger this by protecting the test pages via mprotect.

Signed-off-by: Janis Schoetterl-Glausch <[email protected]>
---
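A minimal sketch of the mechanism under test (guest_va is a hypothetical
stand-in for the guest virtual address of a test page; the helpers are the
usual kvm selftest ones used in main() below):

	/*
	 * Write-protecting the host mapping of a guest page means SIE
	 * cannot resolve the guest's TEST PROTECTION itself, so the
	 * instruction intercepts and KVM emulates it.
	 */
	mprotect(addr_gva2hva(vm, guest_va), PAGE_SIZE, PROT_READ);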
tools/testing/selftests/kvm/.gitignore | 1 +
tools/testing/selftests/kvm/Makefile | 1 +
tools/testing/selftests/kvm/s390x/tprot.c | 227 ++++++++++++++++++++++
3 files changed, 229 insertions(+)
create mode 100644 tools/testing/selftests/kvm/s390x/tprot.c

diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index dce7de7755e6..7903580a48ac 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -8,6 +8,7 @@
/s390x/memop
/s390x/resets
/s390x/sync_regs_test
+/s390x/tprot
/x86_64/amx_test
/x86_64/cpuid_test
/x86_64/cr4_cpuid_sync_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0e4926bc9a58..086f490e808d 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -121,6 +121,7 @@ TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
+TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c
new file mode 100644
index 000000000000..c097b9db495e
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/tprot.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test TEST PROTECTION emulation.
+ *
+ * Copyright IBM Corp. 2021
+ */
+
+#include <sys/mman.h>
+#include "test_util.h"
+#include "kvm_util.h"
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
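+/* s390 numbers bits big-endian, i.e. starting with the MSB as bit 0 */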
+#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
+#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
+
+#define VCPU_ID 1
+
+static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
+static uint8_t *const page_store_prot = pages[0];
+static uint8_t *const page_fetch_prot = pages[1];
+
+/* Nonzero return value indicates that the address is not mapped */
+static int set_storage_key(void *addr, uint8_t key)
+{
+ int not_mapped = 0;
+
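+ /*
+ * LRA sets condition code 0 and yields the real address if the
+ * address is mapped; only then does SSKE set the storage key of
+ * the backing frame.
+ */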
+ asm volatile (
+ "lra %[addr], 0(0,%[addr])\n"
+ " jz 0f\n"
+ " llill %[not_mapped],1\n"
+ " j 1f\n"
+ "0: sske %[key], %[addr]\n"
+ "1:"
+ : [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
+ : [key] "r" (key)
+ : "cc"
+ );
+ return -not_mapped;
+}
+
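+/*
+ * The enum values map to the TPROT condition codes:
+ * cc 0 - fetching and storing permitted
+ * cc 1 - fetching permitted, storing protected
+ * cc 2 - fetching and storing protected
+ * cc 3 - translation not available
+ */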
+enum permission {
+ READ_WRITE = 0,
+ READ = 1,
+ RW_PROTECTED = 2,
+ TRANSL_UNAVAIL = 3,
+};
+
+static enum permission test_protection(void *addr, uint8_t key)
+{
+ uint64_t mask;
+
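+ /*
+ * TPROT sets the condition code; IPM inserts the cc into bits
+ * 28-29 (counting from the LSB) of the register, so shifting
+ * right by 28 below recovers it.
+ */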
+ asm volatile (
+ "tprot %[addr], 0(%[key])\n"
+ " ipm %[mask]\n"
+ : [mask] "=r" (mask)
+ : [addr] "Q" (*(char *)addr),
+ [key] "a" (key)
+ : "cc"
+ );
+
+ return (enum permission)(mask >> 28);
+}
+
+enum stage {
+ STAGE_END,
+ STAGE_INIT_SIMPLE,
+ TEST_SIMPLE,
+ STAGE_INIT_FETCH_PROT_OVERRIDE,
+ TEST_FETCH_PROT_OVERRIDE,
+ TEST_STORAGE_PROT_OVERRIDE,
+};
+
+struct test {
+ enum stage stage;
+ void *addr;
+ uint8_t key;
+ enum permission expected;
+} tests[] = {
+ /*
+ * We perform each test in the array by executing TEST PROTECTION on
+ * the specified addr with the specified key and checking if the returned
+ * permissions match the expected value.
+ * Both guest and host cooperate to set up the required test conditions.
+ * A central condition is that the page targeted by addr must be DAT
+ * protected in the host mappings, in order for KVM to emulate the
+ * TEST PROTECTION instruction.
+ * Since the page tables are shared, the host uses mprotect to achieve
+ * this.
+ *
+ * Tests resulting in RW_PROTECTED/TRANSL_UNAVAIL will be interpreted
+ * by SIE, not KVM, but there is no harm in testing them as well.
+ * See Enhanced Suppression-on-Protection Facilities in the
+ * Interpretive-Execution Mode.
+ */
+ /*
+ * guest: set storage key of page_store_prot to 1
+ *        set storage key of page_fetch_prot to 9 and enable
+ *        fetch protection for it
+ * STAGE_INIT_SIMPLE
+ * host: write protect both via mprotect
+ */
+ /* access key 0 matches any storage key -> RW */
+ { TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
+ /* access key matches storage key -> RW */
+ { TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
+ /* mismatched keys, but no fetch protection -> RO */
+ { TEST_SIMPLE, page_store_prot, 0x20, READ },
+ /* access key 0 matches any storage key -> RW */
+ { TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
+ /* access key matches storage key -> RW */
+ { TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
+ /* mismatched keys, fetch protection -> inaccessible */
+ { TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
+ /* page 0 not mapped yet -> translation not available */
+ { TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
+ /*
+ * host: try to map page 0
+ * guest: set storage key of page 0 to 9 and enable fetch protection
+ * STAGE_INIT_FETCH_PROT_OVERRIDE
+ * host: write protect page 0
+ * enable fetch protection override
+ */
+ /* mismatched keys, fetch protection, but override applies -> RO */
+ { TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
+ /* mismatched keys, fetch protection, override applies to 0-2047 only -> inaccessible */
+ { TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
+ /*
+ * host: enable storage protection override
+ */
+ /* mismatched keys, but override applies (storage key 9) -> RW */
+ { TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
+ /* mismatched keys, no fetch protection, override doesn't apply -> RO */
+ { TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
+ /* mismatched keys, but override applies (storage key 9) -> RW */
+ { TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
+ /* end marker */
+ { STAGE_END, 0, 0, 0 },
+};
+
+static enum stage perform_next_stage(int *i, bool mapped_0)
+{
+ enum stage stage = tests[*i].stage;
+ enum permission result;
+ bool skip;
+
+ for (; tests[*i].stage == stage; (*i)++) {
+ /*
+ * Some fetch protection override tests require that page 0
+ * be mapped. However, when the host tries to map that page via
+ * vm_vaddr_alloc, some other page may get mapped instead.
+ * In order to skip these tests, we detect this inside the guest.
+ */
+ skip = tests[*i].addr < (void *)4096 &&
+ tests[*i].expected != TRANSL_UNAVAIL &&
+ !mapped_0;
+ if (!skip) {
+ result = test_protection(tests[*i].addr, tests[*i].key);
+ GUEST_ASSERT_2(result == tests[*i].expected, *i, result);
+ }
+ }
+ return stage;
+}
+
+static void guest_code(void)
+{
+ bool mapped_0;
+ int i = 0;
+
+ GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
+ GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
+ GUEST_SYNC(STAGE_INIT_SIMPLE);
+ GUEST_SYNC(perform_next_stage(&i, false));
+
+ /* Fetch-protection override */
+ mapped_0 = !set_storage_key((void *)0, 0x98);
+ GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
+ GUEST_SYNC(perform_next_stage(&i, mapped_0));
+
+ /* Storage-protection override */
+ GUEST_SYNC(perform_next_stage(&i, mapped_0));
+}
+
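+/*
+ * Run the vcpu until the next GUEST_SYNC and check that the guest
+ * reports the expected stage, converting a guest abort into a test
+ * failure.
+ */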
+#define HOST_SYNC(vmp, stage) \
+({ \
+ struct kvm_vm *__vm = (vmp); \
+ struct ucall uc; \
+ int __stage = (stage); \
+ \
+ vcpu_run(__vm, VCPU_ID); \
+ get_ucall(__vm, VCPU_ID, &uc); \
+ if (uc.cmd == UCALL_ABORT) { \
+ TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1], \
+ (const char *)uc.args[0], uc.args[2], uc.args[3]); \
+ } \
+ ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+ ASSERT_EQ(uc.args[1], __stage); \
+})
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ vm_vaddr_t guest_0_page;
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ run = vcpu_state(vm, VCPU_ID);
+
+ HOST_SYNC(vm, STAGE_INIT_SIMPLE);
+ mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
+ HOST_SYNC(vm, TEST_SIMPLE);
+
+ guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
+ if (guest_0_page != 0)
+ print_skip("Did not allocate page at 0 for fetch protection override tests");
+ HOST_SYNC(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
+ if (guest_0_page == 0)
+ mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
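+ /*
+ * Setting KVM_SYNC_CRS makes KVM load the control registers from
+ * the run struct on the next vcpu_run, enabling the override in CR0.
+ */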
+ run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+ run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(vm, TEST_FETCH_PROT_OVERRIDE);
+
+ run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+ run->kvm_dirty_regs = KVM_SYNC_CRS;
+ HOST_SYNC(vm, TEST_STORAGE_PROT_OVERRIDE);
+}
--
2.32.0



2022-02-09 10:15:24

by Janosch Frank

Subject: Re: [PATCH v2 04/11] KVM: s390: selftests: Test TEST PROTECTION emulation

On 2/7/22 17:59, Janis Schoetterl-Glausch wrote:
> Test the emulation of TEST PROTECTION in the presence of storage keys.
> Emulation only occurs under certain conditions, one of which is the host
> page being protected.
> Trigger this by protecting the test pages via mprotect.
>
> Signed-off-by: Janis Schoetterl-Glausch <[email protected]>

That was way more understandable with the additions of the comments,
thanks for taking the time to add them.

Reviewed-by: Janosch Frank <[email protected]>

> [...]