2023-06-09 02:08:09

by Haibo Xu

Subject: [PATCH v3 00/10] RISCV: Add KVM_GET_REG_LIST API

KVM_GET_REG_LIST dumps all register IDs that are available to
KVM_GET/SET_ONE_REG, which is very useful for identifying platform
regression issues during VM migration.

Patches 1-7 restructure the aarch64 get-reg-list test to turn part of
the code into a common test framework that can be shared with riscv.

Patch 8 enables the KVM_GET_REG_LIST API in riscv, and patches 9-10 add
the corresponding kselftest for checking possible register regressions.

The get-reg-list KVM selftest was ported from aarch64 and tested with
Linux 6.4-rc5 on a QEMU riscv64 virt machine.
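
For reference, this is roughly how a VMM would consume the API once it
is available (a minimal sketch, not part of this series; error handling
is kept to a minimum and the vCPU init that some architectures require
before this ioctl, e.g. KVM_ARM_VCPU_INIT on arm64, is omitted):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 };
        struct kvm_reg_list *list;

        /* First call: fails with E2BIG but reports how many IDs exist. */
        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

        list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
        list->n = probe.n;

        /* Second call: the kernel fills in the register IDs. */
        if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list))
                perror("KVM_GET_REG_LIST");

        return list;
}

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        struct kvm_reg_list *list = get_reg_list(vcpu);

        for (__u64 i = 0; i < list->n; i++)
                printf("0x%llx\n", (unsigned long long)list->reg[i]);

        free(list);
        return 0;
}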

---
Changed since v2:
* Rebase to Linux 6.4-rc5
* Filter out ZICBO* config and ISA_EXT registers from the report if the
extensions are not supported on the host
* Enable AIA CSR test
* Move vCPU extension check_supported() to finalize_vcpu() per
Andrew's suggestion
* Switch to KVM_REG_SIZE_ULONG for most register definitions

---
Changed since v1:
* Rebase to Andrew's changes
* Fix coding style

Andrew Jones (7):
KVM: arm64: selftests: Replace str_with_index with strdup_printf
KVM: arm64: selftests: Drop SVE cap check in print_reg
KVM: arm64: selftests: Remove print_reg's dependency on vcpu_config
KVM: arm64: selftests: Rename vcpu_config and add to kvm_util.h
KVM: arm64: selftests: Delete core_reg_fixup
KVM: arm64: selftests: Split get-reg-list test code
KVM: arm64: selftests: Finish generalizing get-reg-list

Haibo Xu (3):
KVM: riscv: Add KVM_GET_REG_LIST API support
KVM: riscv: selftests: Skip some registers set operation
KVM: riscv: selftests: Add get-reg-list test

Documentation/virt/kvm/api.rst | 2 +-
arch/riscv/kvm/vcpu.c | 378 +++++++++++
tools/testing/selftests/kvm/Makefile | 11 +-
.../selftests/kvm/aarch64/get-reg-list.c | 540 ++--------------
tools/testing/selftests/kvm/get-reg-list.c | 421 ++++++++++++
.../selftests/kvm/include/kvm_util_base.h | 16 +
.../selftests/kvm/include/riscv/processor.h | 3 +
.../testing/selftests/kvm/include/test_util.h | 2 +
tools/testing/selftests/kvm/lib/test_util.c | 15 +
.../selftests/kvm/riscv/get-reg-list.c | 611 ++++++++++++++++++
10 files changed, 1499 insertions(+), 500 deletions(-)
create mode 100644 tools/testing/selftests/kvm/get-reg-list.c
create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c

--
2.34.1



2023-06-09 02:08:11

by Haibo Xu

Subject: [PATCH v3 01/10] KVM: arm64: selftests: Replace str_with_index with strdup_printf

From: Andrew Jones <[email protected]>

The original author of aarch64/get-reg-list.c (me) was wearing
tunnel vision goggles when implementing str_with_index(). There's
no reason to have such a special case string function. Instead,
take inspiration from glib and implement strdup_printf. The
implementation builds on vasprintf() which requires _GNU_SOURCE,
but we require _GNU_SOURCE in most files already.
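
As a standalone illustration of the idea (not the selftest code itself,
which is in the diff below), vasprintf() makes the helper trivial:

#define _GNU_SOURCE
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate-and-format helper in the spirit of glib's g_strdup_printf(). */
static char *strdup_printf(const char *fmt, ...)
{
        va_list ap;
        char *str;

        va_start(ap, fmt);
        if (vasprintf(&str, fmt, ap) < 0)
                str = NULL;
        va_end(ap);

        return str;
}

int main(void)
{
        /* Replaces the old str_with_index("...regs[##]", idx) special case. */
        char *name = strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%d])", 30);

        if (name)
                puts(name);
        free(name);
        return 0;
}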

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
.../selftests/kvm/aarch64/get-reg-list.c | 23 ++++---------------
.../testing/selftests/kvm/include/test_util.h | 2 ++
tools/testing/selftests/kvm/lib/test_util.c | 15 ++++++++++++
3 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index d4e1f4af29d6..c152523a5ed4 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -132,19 +132,6 @@ static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
return false;
}

-static const char *str_with_index(const char *template, __u64 index)
-{
- char *str, *p;
- int n;
-
- str = strdup(template);
- p = strstr(str, "##");
- n = sprintf(p, "%lld", index);
- strcat(p + n, strstr(template, "##") + 2);
-
- return (const char *)str;
-}
-
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

#define CORE_REGS_XX_NR_WORDS 2
@@ -163,7 +150,7 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
KVM_REG_ARM_CORE_REG(regs.regs[30]):
idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
- return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
+ return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(regs.sp):
return "KVM_REG_ARM_CORE_REG(regs.sp)";
case KVM_REG_ARM_CORE_REG(regs.pc):
@@ -178,12 +165,12 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
- return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
+ return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
- return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
+ return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
@@ -212,13 +199,13 @@ static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
"%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
- return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
+ return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_PREG_BASE ...
KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
"%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
- return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
+ return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_FFR_BASE:
TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
"%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index a6e9f215ce70..7e0182f837b5 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -186,4 +186,6 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
return num;
}

+char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index b772193f6c18..3e36019eeb4a 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -5,6 +5,9 @@
* Copyright (C) 2020, Google LLC.
*/

+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdarg.h>
#include <assert.h>
#include <ctype.h>
#include <limits.h>
@@ -377,3 +380,15 @@ int atoi_paranoid(const char *num_str)

return num;
}
+
+char *strdup_printf(const char *fmt, ...)
+{
+ va_list ap;
+ char *str;
+
+ va_start(ap, fmt);
+ vasprintf(&str, fmt, ap);
+ va_end(ap);
+
+ return str;
+}
--
2.34.1


2023-06-09 02:08:13

by Haibo Xu

Subject: [PATCH v3 02/10] KVM: arm64: selftests: Drop SVE cap check in print_reg

From: Andrew Jones <[email protected]>

The check doesn't prove much anyway, as the reg lists could be
messed up too. Just drop the check to simplify making print_reg
more independent.

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
.../testing/selftests/kvm/aarch64/get-reg-list.c | 15 +--------------
1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index c152523a5ed4..915272c342f9 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -100,16 +100,6 @@ static const char *config_name(struct vcpu_config *c)
return c->name;
}

-static bool has_cap(struct vcpu_config *c, long capability)
-{
- struct reg_sublist *s;
-
- for_each_sublist(c, s)
- if (s->capability == capability)
- return true;
- return false;
-}
-
static bool filter_reg(__u64 reg)
{
/*
@@ -287,10 +277,7 @@ static void print_reg(struct vcpu_config *c, __u64 id)
printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM64_SVE:
- if (has_cap(c, KVM_CAP_ARM_SVE))
- printf("\t%s,\n", sve_id_to_str(c, id));
- else
- TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
+ printf("\t%s,\n", sve_id_to_str(c, id));
break;
default:
TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
--
2.34.1


2023-06-09 02:09:08

by Haibo Xu

Subject: [PATCH v3 05/10] KVM: arm64: selftests: Delete core_reg_fixup

From: Andrew Jones <[email protected]>

core_reg_fixup() complicates sharing the get-reg-list test with
other architectures. Rather than work at keeping it, with plenty
of #ifdeffery, just delete it, as it's unlikely anyone will run the
get-reg-list test against a kernel older than v5.2; the test is
meant to check for regressions in new kernels. (And an older
version of the test can still be used for older kernels if
necessary.)

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
.../selftests/kvm/aarch64/get-reg-list.c | 83 +++----------------
1 file changed, 10 insertions(+), 73 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index aae2056379f7..c8b44389d2ee 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -17,12 +17,10 @@
* by running the test with the --list command line argument.
*
* Note, the blessed list should be created from the oldest possible
- * kernel. We can't go older than v4.15, though, because that's the first
- * release to expose the ID system registers in KVM_GET_REG_LIST, see
- * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
- * from guests"). Also, one must use the --core-reg-fixup command line
- * option when running on an older kernel that doesn't include df205b5c6328
- * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
+ * kernel. We can't go older than v5.2, though, because that's the first
+ * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
+ * core register IDs in KVM_GET_REG_LIST"). Without that commit the core
+ * registers won't match expectations.
*/
#include <stdio.h>
#include <stdlib.h>
@@ -269,63 +267,6 @@ static void print_reg(const char *prefix, __u64 id)
}
}

-/*
- * Older kernels listed each 32-bit word of CORE registers separately.
- * For 64 and 128-bit registers we need to ignore the extra words. We
- * also need to fixup the sizes, because the older kernels stated all
- * registers were 64-bit, even when they weren't.
- */
-static void core_reg_fixup(void)
-{
- struct kvm_reg_list *tmp;
- __u64 id, core_off;
- int i;
-
- tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));
-
- for (i = 0; i < reg_list->n; ++i) {
- id = reg_list->reg[i];
-
- if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
- tmp->reg[tmp->n++] = id;
- continue;
- }
-
- core_off = id & ~REG_MASK;
-
- switch (core_off) {
- case 0x52: case 0xd2: case 0xd6:
- /*
- * These offsets are pointing at padding.
- * We need to ignore them too.
- */
- continue;
- case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
- KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
- if (core_off & 3)
- continue;
- id &= ~KVM_REG_SIZE_MASK;
- id |= KVM_REG_SIZE_U128;
- tmp->reg[tmp->n++] = id;
- continue;
- case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
- case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
- id &= ~KVM_REG_SIZE_MASK;
- id |= KVM_REG_SIZE_U32;
- tmp->reg[tmp->n++] = id;
- continue;
- default:
- if (core_off & 1)
- continue;
- tmp->reg[tmp->n++] = id;
- break;
- }
- }
-
- free(reg_list);
- reg_list = tmp;
-}
-
static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
{
struct vcpu_reg_sublist *s;
@@ -364,7 +305,6 @@ static void check_supported(struct vcpu_reg_list *c)

static bool print_list;
static bool print_filtered;
-static bool fixup_core_regs;

static void run_test(struct vcpu_reg_list *c)
{
@@ -385,9 +325,6 @@ static void run_test(struct vcpu_reg_list *c)

reg_list = vcpu_get_reg_list(vcpu);

- if (fixup_core_regs)
- core_reg_fixup();
-
if (print_list || print_filtered) {
putchar('\n');
for_each_reg(i) {
@@ -515,7 +452,7 @@ static void help(void)

printf(
"\n"
- "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
+ "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered]\n\n"
" --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
" '<selection>' may be\n");

@@ -529,7 +466,6 @@ static void help(void)
"\n"
" --list Print the register list rather than test it (requires --config)\n"
" --list-filtered Print registers that would normally be filtered out (requires --config)\n"
- " --core-reg-fixup Needed when running on old kernels with broken core reg listings\n"
"\n"
);
}
@@ -561,9 +497,7 @@ int main(int ac, char **av)
pid_t pid;

for (i = 1; i < ac; ++i) {
- if (strcmp(av[i], "--core-reg-fixup") == 0)
- fixup_core_regs = true;
- else if (strncmp(av[i], "--config", 8) == 0)
+ if (strncmp(av[i], "--config", 8) == 0)
sel = parse_config(av[i]);
else if (strcmp(av[i], "--list") == 0)
print_list = true;
@@ -606,8 +540,11 @@ int main(int ac, char **av)
}

/*
- * The current blessed list was primed with the output of kernel version
+ * The original blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
+ * (The --core-reg-fixup option and its fixup function have been removed
+ * from the test, as it's unlikely this type of test will be used on a kernel
+ * older than v5.2.)
*
* The blessed list is up to date with kernel version v6.4 (or so we hope)
*/
--
2.34.1


2023-06-09 02:16:56

by Haibo Xu

Subject: [PATCH v3 04/10] KVM: arm64: selftests: Rename vcpu_config and add to kvm_util.h

From: Andrew Jones <[email protected]>

Rename vcpu_config to vcpu_reg_list to be more specific and add
it to kvm_util.h. While it may not get used outside get-reg-list
tests, exporting it doesn't hurt, as long as it has a unique enough
name. This is a step in the direction of sharing most of the
get-reg-list test code between architectures.

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
.../selftests/kvm/aarch64/get-reg-list.c | 60 +++++++------------
.../selftests/kvm/include/kvm_util_base.h | 16 +++++
2 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index 424285d39965..aae2056379f7 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -37,23 +37,7 @@
static struct kvm_reg_list *reg_list;
static __u64 *blessed_reg, blessed_n;

-struct reg_sublist {
- const char *name;
- long capability;
- int feature;
- bool finalize;
- __u64 *regs;
- __u64 regs_n;
- __u64 *rejects_set;
- __u64 rejects_set_n;
-};
-
-struct vcpu_config {
- char *name;
- struct reg_sublist sublists[];
-};
-
-static struct vcpu_config *vcpu_configs[];
+static struct vcpu_reg_list *vcpu_configs[];
static int vcpu_configs_n;

#define for_each_sublist(c, s) \
@@ -74,9 +58,9 @@ static int vcpu_configs_n;
for_each_reg_filtered(i) \
if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))

-static const char *config_name(struct vcpu_config *c)
+static const char *config_name(struct vcpu_reg_list *c)
{
- struct reg_sublist *s;
+ struct vcpu_reg_sublist *s;
int len = 0;

if (c->name)
@@ -342,18 +326,18 @@ static void core_reg_fixup(void)
reg_list = tmp;
}

-static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
+static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
{
- struct reg_sublist *s;
+ struct vcpu_reg_sublist *s;

for_each_sublist(c, s)
if (s->capability)
init->features[s->feature / 32] |= 1 << (s->feature % 32);
}

-static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
+static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
- struct reg_sublist *s;
+ struct vcpu_reg_sublist *s;
int feature;

for_each_sublist(c, s) {
@@ -364,9 +348,9 @@ static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
}
}

-static void check_supported(struct vcpu_config *c)
+static void check_supported(struct vcpu_reg_list *c)
{
- struct reg_sublist *s;
+ struct vcpu_reg_sublist *s;

for_each_sublist(c, s) {
if (!s->capability)
@@ -382,14 +366,14 @@ static bool print_list;
static bool print_filtered;
static bool fixup_core_regs;

-static void run_test(struct vcpu_config *c)
+static void run_test(struct vcpu_reg_list *c)
{
struct kvm_vcpu_init init = { .target = -1, };
int new_regs = 0, missing_regs = 0, i, n;
int failed_get = 0, failed_set = 0, failed_reject = 0;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- struct reg_sublist *s;
+ struct vcpu_reg_sublist *s;

check_supported(c);

@@ -526,7 +510,7 @@ static void run_test(struct vcpu_config *c)

static void help(void)
{
- struct vcpu_config *c;
+ struct vcpu_reg_list *c;
int i;

printf(
@@ -550,9 +534,9 @@ static void help(void)
);
}

-static struct vcpu_config *parse_config(const char *config)
+static struct vcpu_reg_list *parse_config(const char *config)
{
- struct vcpu_config *c;
+ struct vcpu_reg_list *c;
int i;

if (config[8] != '=')
@@ -572,7 +556,7 @@ static struct vcpu_config *parse_config(const char *config)

int main(int ac, char **av)
{
- struct vcpu_config *c, *sel = NULL;
+ struct vcpu_reg_list *c, *sel = NULL;
int i, ret = 0;
pid_t pid;

@@ -1053,14 +1037,14 @@ static __u64 pauth_generic_regs[] = {
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}

-static struct vcpu_config vregs_config = {
+static struct vcpu_reg_list vregs_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
{0},
},
};
-static struct vcpu_config vregs_pmu_config = {
+static struct vcpu_reg_list vregs_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
@@ -1068,14 +1052,14 @@ static struct vcpu_config vregs_pmu_config = {
{0},
},
};
-static struct vcpu_config sve_config = {
+static struct vcpu_reg_list sve_config = {
.sublists = {
BASE_SUBLIST,
SVE_SUBLIST,
{0},
},
};
-static struct vcpu_config sve_pmu_config = {
+static struct vcpu_reg_list sve_pmu_config = {
.sublists = {
BASE_SUBLIST,
SVE_SUBLIST,
@@ -1083,7 +1067,7 @@ static struct vcpu_config sve_pmu_config = {
{0},
},
};
-static struct vcpu_config pauth_config = {
+static struct vcpu_reg_list pauth_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
@@ -1091,7 +1075,7 @@ static struct vcpu_config pauth_config = {
{0},
},
};
-static struct vcpu_config pauth_pmu_config = {
+static struct vcpu_reg_list pauth_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
@@ -1101,7 +1085,7 @@ static struct vcpu_config pauth_pmu_config = {
},
};

-static struct vcpu_config *vcpu_configs[] = {
+static struct vcpu_reg_list *vcpu_configs[] = {
&vregs_config,
&vregs_pmu_config,
&sve_config,
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index a089c356f354..ac4aaa21deee 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
+#include <linux/types.h>

#include <asm/atomic.h>

@@ -124,6 +125,21 @@ struct kvm_vm {
uint32_t memslots[NR_MEM_REGIONS];
};

+struct vcpu_reg_sublist {
+ const char *name;
+ long capability;
+ int feature;
+ bool finalize;
+ __u64 *regs;
+ __u64 regs_n;
+ __u64 *rejects_set;
+ __u64 rejects_set_n;
+};
+
+struct vcpu_reg_list {
+ char *name;
+ struct vcpu_reg_sublist sublists[];
+};

#define kvm_for_each_vcpu(vm, i, vcpu) \
for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
--
2.34.1


2023-06-09 02:21:34

by Haibo Xu

Subject: [PATCH v3 06/10] KVM: arm64: selftests: Split get-reg-list test code

From: Andrew Jones <[email protected]>

Split the arch-neutral test code out of aarch64/get-reg-list.c into
get-reg-list.c. To do this we invent a new make variable
$(SPLIT_TESTS) which expects common parts to be in the KVM selftests
root and the counterparts to have the same name, but be in
$(ARCH_DIR).

There's still some work to be done to de-aarch64 the common
get-reg-list.c, but we leave that to the next patch to avoid
modifying too much code while moving it.

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
tools/testing/selftests/kvm/Makefile | 10 +-
.../selftests/kvm/aarch64/get-reg-list.c | 361 +----------------
tools/testing/selftests/kvm/get-reg-list.c | 371 ++++++++++++++++++
3 files changed, 385 insertions(+), 357 deletions(-)
create mode 100644 tools/testing/selftests/kvm/get-reg-list.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 4761b768b773..d90cad19c9ee 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -139,7 +139,6 @@ TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
-TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
@@ -151,6 +150,7 @@ TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
+TEST_GEN_PROGS_aarch64 += get-reg-list
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
@@ -179,6 +179,8 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test

+SPLIT_TESTS += get-reg-list
+
TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
@@ -224,8 +226,12 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
+SPLIT_TESTS_TARGETS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
+SPLIT_TESTS_OBJS := $(patsubst %, $(ARCH_DIR)/%.o, $(SPLIT_TESTS))
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) $(SPLIT_TESTS_OBJS) cscope.*

-EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
+$(SPLIT_TESTS_TARGETS): $(SPLIT_TESTS_OBJS)

x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index c8b44389d2ee..aaf035c969ec 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -4,85 +4,18 @@
*
* Copyright (C) 2020, Red Hat, Inc.
*
- * When attempting to migrate from a host with an older kernel to a host
- * with a newer kernel we allow the newer kernel on the destination to
- * list new registers with get-reg-list. We assume they'll be unused, at
- * least until the guest reboots, and so they're relatively harmless.
- * However, if the destination host with the newer kernel is missing
- * registers which the source host with the older kernel has, then that's
- * a regression in get-reg-list. This test checks for that regression by
- * checking the current list against a blessed list. We should never have
- * missing registers, but if new ones appear then they can probably be
- * added to the blessed list. A completely new blessed list can be created
- * by running the test with the --list command line argument.
- *
- * Note, the blessed list should be created from the oldest possible
- * kernel. We can't go older than v5.2, though, because that's the first
+ * While the blessed list should be created from the oldest possible
+ * kernel, we can't go older than v5.2, though, because that's the first
* release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
* core register IDs in KVM_GET_REG_LIST"). Without that commit the core
* registers won't match expectations.
*/
#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/wait.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

-static struct kvm_reg_list *reg_list;
-static __u64 *blessed_reg, blessed_n;
-
-static struct vcpu_reg_list *vcpu_configs[];
-static int vcpu_configs_n;
-
-#define for_each_sublist(c, s) \
- for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
-
-#define for_each_reg(i) \
- for ((i) = 0; (i) < reg_list->n; ++(i))
-
-#define for_each_reg_filtered(i) \
- for_each_reg(i) \
- if (!filter_reg(reg_list->reg[i]))
-
-#define for_each_missing_reg(i) \
- for ((i) = 0; (i) < blessed_n; ++(i)) \
- if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))
-
-#define for_each_new_reg(i) \
- for_each_reg_filtered(i) \
- if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
-
-static const char *config_name(struct vcpu_reg_list *c)
-{
- struct vcpu_reg_sublist *s;
- int len = 0;
-
- if (c->name)
- return c->name;
-
- for_each_sublist(c, s)
- len += strlen(s->name) + 1;
-
- c->name = malloc(len);
-
- len = 0;
- for_each_sublist(c, s) {
- if (!strcmp(s->name, "base"))
- continue;
- strcat(c->name + len, s->name);
- len += strlen(s->name) + 1;
- c->name[len - 1] = '+';
- }
- c->name[len - 1] = '\0';
-
- return c->name;
-}
-
-static bool filter_reg(__u64 reg)
+bool filter_reg(__u64 reg)
{
/*
* DEMUX register presence depends on the host's CLIDR_EL1.
@@ -94,16 +27,6 @@ static bool filter_reg(__u64 reg)
return false;
}

-static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
-{
- int i;
-
- for (i = 0; i < nr_regs; ++i)
- if (reg == regs[i])
- return true;
- return false;
-}
-
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

#define CORE_REGS_XX_NR_WORDS 2
@@ -187,7 +110,7 @@ static const char *sve_id_to_str(const char *prefix, __u64 id)
return NULL;
}

-static void print_reg(const char *prefix, __u64 id)
+void print_reg(const char *prefix, __u64 id)
{
unsigned op0, op1, crn, crm, op2;
const char *reg_size = NULL;
@@ -267,278 +190,6 @@ static void print_reg(const char *prefix, __u64 id)
}
}

-static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
-{
- struct vcpu_reg_sublist *s;
-
- for_each_sublist(c, s)
- if (s->capability)
- init->features[s->feature / 32] |= 1 << (s->feature % 32);
-}
-
-static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
-{
- struct vcpu_reg_sublist *s;
- int feature;
-
- for_each_sublist(c, s) {
- if (s->finalize) {
- feature = s->feature;
- vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
- }
- }
-}
-
-static void check_supported(struct vcpu_reg_list *c)
-{
- struct vcpu_reg_sublist *s;
-
- for_each_sublist(c, s) {
- if (!s->capability)
- continue;
-
- __TEST_REQUIRE(kvm_has_cap(s->capability),
- "%s: %s not available, skipping tests\n",
- config_name(c), s->name);
- }
-}
-
-static bool print_list;
-static bool print_filtered;
-
-static void run_test(struct vcpu_reg_list *c)
-{
- struct kvm_vcpu_init init = { .target = -1, };
- int new_regs = 0, missing_regs = 0, i, n;
- int failed_get = 0, failed_set = 0, failed_reject = 0;
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- struct vcpu_reg_sublist *s;
-
- check_supported(c);
-
- vm = vm_create_barebones();
- prepare_vcpu_init(c, &init);
- vcpu = __vm_vcpu_add(vm, 0);
- aarch64_vcpu_setup(vcpu, &init);
- finalize_vcpu(vcpu, c);
-
- reg_list = vcpu_get_reg_list(vcpu);
-
- if (print_list || print_filtered) {
- putchar('\n');
- for_each_reg(i) {
- __u64 id = reg_list->reg[i];
- if ((print_list && !filter_reg(id)) ||
- (print_filtered && filter_reg(id)))
- print_reg(config_name(c), id);
- }
- putchar('\n');
- return;
- }
-
- /*
- * We only test that we can get the register and then write back the
- * same value. Some registers may allow other values to be written
- * back, but others only allow some bits to be changed, and at least
- * for ID registers set will fail if the value does not exactly match
- * what was returned by get. If registers that allow other values to
- * be written need to have the other values tested, then we should
- * create a new set of tests for those in a new independent test
- * executable.
- */
- for_each_reg(i) {
- uint8_t addr[2048 / 8];
- struct kvm_one_reg reg = {
- .id = reg_list->reg[i],
- .addr = (__u64)&addr,
- };
- bool reject_reg = false;
- int ret;
-
- ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
- if (ret) {
- printf("%s: Failed to get ", config_name(c));
- print_reg(config_name(c), reg.id);
- putchar('\n');
- ++failed_get;
- }
-
- /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
- for_each_sublist(c, s) {
- if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
- reject_reg = true;
- ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
- if (ret != -1 || errno != EPERM) {
- printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
- print_reg(config_name(c), reg.id);
- putchar('\n');
- ++failed_reject;
- }
- break;
- }
- }
-
- if (!reject_reg) {
- ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
- if (ret) {
- printf("%s: Failed to set ", config_name(c));
- print_reg(config_name(c), reg.id);
- putchar('\n');
- ++failed_set;
- }
- }
- }
-
- for_each_sublist(c, s)
- blessed_n += s->regs_n;
- blessed_reg = calloc(blessed_n, sizeof(__u64));
-
- n = 0;
- for_each_sublist(c, s) {
- for (i = 0; i < s->regs_n; ++i)
- blessed_reg[n++] = s->regs[i];
- }
-
- for_each_new_reg(i)
- ++new_regs;
-
- for_each_missing_reg(i)
- ++missing_regs;
-
- if (new_regs || missing_regs) {
- n = 0;
- for_each_reg_filtered(i)
- ++n;
-
- printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
- printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
- config_name(c), reg_list->n, reg_list->n - n);
- }
-
- if (new_regs) {
- printf("\n%s: There are %d new registers.\n"
- "Consider adding them to the blessed reg "
- "list with the following lines:\n\n", config_name(c), new_regs);
- for_each_new_reg(i)
- print_reg(config_name(c), reg_list->reg[i]);
- putchar('\n');
- }
-
- if (missing_regs) {
- printf("\n%s: There are %d missing registers.\n"
- "The following lines are missing registers:\n\n", config_name(c), missing_regs);
- for_each_missing_reg(i)
- print_reg(config_name(c), blessed_reg[i]);
- putchar('\n');
- }
-
- TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
- "%s: There are %d missing registers; "
- "%d registers failed get; %d registers failed set; %d registers failed reject",
- config_name(c), missing_regs, failed_get, failed_set, failed_reject);
-
- pr_info("%s: PASS\n", config_name(c));
- blessed_n = 0;
- free(blessed_reg);
- free(reg_list);
- kvm_vm_free(vm);
-}
-
-static void help(void)
-{
- struct vcpu_reg_list *c;
- int i;
-
- printf(
- "\n"
- "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered]\n\n"
- " --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
- " '<selection>' may be\n");
-
- for (i = 0; i < vcpu_configs_n; ++i) {
- c = vcpu_configs[i];
- printf(
- " '%s'\n", config_name(c));
- }
-
- printf(
- "\n"
- " --list Print the register list rather than test it (requires --config)\n"
- " --list-filtered Print registers that would normally be filtered out (requires --config)\n"
- "\n"
- );
-}
-
-static struct vcpu_reg_list *parse_config(const char *config)
-{
- struct vcpu_reg_list *c;
- int i;
-
- if (config[8] != '=')
- help(), exit(1);
-
- for (i = 0; i < vcpu_configs_n; ++i) {
- c = vcpu_configs[i];
- if (strcmp(config_name(c), &config[9]) == 0)
- break;
- }
-
- if (i == vcpu_configs_n)
- help(), exit(1);
-
- return c;
-}
-
-int main(int ac, char **av)
-{
- struct vcpu_reg_list *c, *sel = NULL;
- int i, ret = 0;
- pid_t pid;
-
- for (i = 1; i < ac; ++i) {
- if (strncmp(av[i], "--config", 8) == 0)
- sel = parse_config(av[i]);
- else if (strcmp(av[i], "--list") == 0)
- print_list = true;
- else if (strcmp(av[i], "--list-filtered") == 0)
- print_filtered = true;
- else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
- help(), exit(0);
- else
- help(), exit(1);
- }
-
- if (print_list || print_filtered) {
- /*
- * We only want to print the register list of a single config.
- */
- if (!sel)
- help(), exit(1);
- }
-
- for (i = 0; i < vcpu_configs_n; ++i) {
- c = vcpu_configs[i];
- if (sel && c != sel)
- continue;
-
- pid = fork();
-
- if (!pid) {
- run_test(c);
- exit(0);
- } else {
- int wstatus;
- pid_t wpid = wait(&wstatus);
- TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
- if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
- ret = KSFT_FAIL;
- }
- }
-
- return ret;
-}
-
/*
* The original blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
@@ -1022,7 +673,7 @@ static struct vcpu_reg_list pauth_pmu_config = {
},
};

-static struct vcpu_reg_list *vcpu_configs[] = {
+struct vcpu_reg_list *vcpu_configs[] = {
&vregs_config,
&vregs_pmu_config,
&sve_config,
@@ -1030,4 +681,4 @@ static struct vcpu_reg_list *vcpu_configs[] = {
&pauth_config,
&pauth_pmu_config,
};
-static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
+int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
new file mode 100644
index 000000000000..69bb91087081
--- /dev/null
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ *
+ * When attempting to migrate from a host with an older kernel to a host
+ * with a newer kernel we allow the newer kernel on the destination to
+ * list new registers with get-reg-list. We assume they'll be unused, at
+ * least until the guest reboots, and so they're relatively harmless.
+ * However, if the destination host with the newer kernel is missing
+ * registers which the source host with the older kernel has, then that's
+ * a regression in get-reg-list. This test checks for that regression by
+ * checking the current list against a blessed list. We should never have
+ * missing registers, but if new ones appear then they can probably be
+ * added to the blessed list. A completely new blessed list can be created
+ * by running the test with the --list command line argument.
+ *
+ * The blessed list should be created from the oldest possible kernel.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+static struct kvm_reg_list *reg_list;
+static __u64 *blessed_reg, blessed_n;
+
+extern struct vcpu_reg_list *vcpu_configs[];
+extern int vcpu_configs_n;
+
+#define for_each_sublist(c, s) \
+ for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
+
+#define for_each_reg(i) \
+ for ((i) = 0; (i) < reg_list->n; ++(i))
+
+#define for_each_reg_filtered(i) \
+ for_each_reg(i) \
+ if (!filter_reg(reg_list->reg[i]))
+
+#define for_each_missing_reg(i) \
+ for ((i) = 0; (i) < blessed_n; ++(i)) \
+ if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))
+
+#define for_each_new_reg(i) \
+ for_each_reg_filtered(i) \
+ if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
+static const char *config_name(struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+ int len = 0;
+
+ if (c->name)
+ return c->name;
+
+ for_each_sublist(c, s)
+ len += strlen(s->name) + 1;
+
+ c->name = malloc(len);
+
+ len = 0;
+ for_each_sublist(c, s) {
+ if (!strcmp(s->name, "base"))
+ continue;
+ strcat(c->name + len, s->name);
+ len += strlen(s->name) + 1;
+ c->name[len - 1] = '+';
+ }
+ c->name[len - 1] = '\0';
+
+ return c->name;
+}
+
+bool __weak filter_reg(__u64 reg)
+{
+ return false;
+}
+
+static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
+{
+ int i;
+
+ for (i = 0; i < nr_regs; ++i)
+ if (reg == regs[i])
+ return true;
+ return false;
+}
+
+void __weak print_reg(const char *prefix, __u64 id)
+{
+ printf("\t0x%llx,\n", id);
+}
+
+static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
+{
+ struct vcpu_reg_sublist *s;
+
+ for_each_sublist(c, s)
+ if (s->capability)
+ init->features[s->feature / 32] |= 1 << (s->feature % 32);
+}
+
+static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+ int feature;
+
+ for_each_sublist(c, s) {
+ if (s->finalize) {
+ feature = s->feature;
+ vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
+ }
+ }
+}
+
+static void check_supported(struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+
+ for_each_sublist(c, s) {
+ if (!s->capability)
+ continue;
+
+ __TEST_REQUIRE(kvm_has_cap(s->capability),
+ "%s: %s not available, skipping tests\n",
+ config_name(c), s->name);
+ }
+}
+
+static bool print_list;
+static bool print_filtered;
+
+static void run_test(struct vcpu_reg_list *c)
+{
+ struct kvm_vcpu_init init = { .target = -1, };
+ int new_regs = 0, missing_regs = 0, i, n;
+ int failed_get = 0, failed_set = 0, failed_reject = 0;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct vcpu_reg_sublist *s;
+
+ check_supported(c);
+
+ vm = vm_create_barebones();
+ prepare_vcpu_init(c, &init);
+ vcpu = __vm_vcpu_add(vm, 0);
+ aarch64_vcpu_setup(vcpu, &init);
+ finalize_vcpu(vcpu, c);
+
+ reg_list = vcpu_get_reg_list(vcpu);
+
+ if (print_list || print_filtered) {
+ putchar('\n');
+ for_each_reg(i) {
+ __u64 id = reg_list->reg[i];
+ if ((print_list && !filter_reg(id)) ||
+ (print_filtered && filter_reg(id)))
+ print_reg(config_name(c), id);
+ }
+ putchar('\n');
+ return;
+ }
+
+ /*
+ * We only test that we can get the register and then write back the
+ * same value. Some registers may allow other values to be written
+ * back, but others only allow some bits to be changed, and at least
+ * for ID registers set will fail if the value does not exactly match
+ * what was returned by get. If registers that allow other values to
+ * be written need to have the other values tested, then we should
+ * create a new set of tests for those in a new independent test
+ * executable.
+ */
+ for_each_reg(i) {
+ uint8_t addr[2048 / 8];
+ struct kvm_one_reg reg = {
+ .id = reg_list->reg[i],
+ .addr = (__u64)&addr,
+ };
+ bool reject_reg = false;
+ int ret;
+
+ ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
+ if (ret) {
+ printf("%s: Failed to get ", config_name(c));
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_get;
+ }
+
+ /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
+ for_each_sublist(c, s) {
+ if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
+ reject_reg = true;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ if (ret != -1 || errno != EPERM) {
+ printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_reject;
+ }
+ break;
+ }
+ }
+
+ if (!reject_reg) {
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ printf("%s: Failed to set ", config_name(c));
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_set;
+ }
+ }
+ }
+
+ for_each_sublist(c, s)
+ blessed_n += s->regs_n;
+ blessed_reg = calloc(blessed_n, sizeof(__u64));
+
+ n = 0;
+ for_each_sublist(c, s) {
+ for (i = 0; i < s->regs_n; ++i)
+ blessed_reg[n++] = s->regs[i];
+ }
+
+ for_each_new_reg(i)
+ ++new_regs;
+
+ for_each_missing_reg(i)
+ ++missing_regs;
+
+ if (new_regs || missing_regs) {
+ n = 0;
+ for_each_reg_filtered(i)
+ ++n;
+
+ printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
+ printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
+ config_name(c), reg_list->n, reg_list->n - n);
+ }
+
+ if (new_regs) {
+ printf("\n%s: There are %d new registers.\n"
+ "Consider adding them to the blessed reg "
+ "list with the following lines:\n\n", config_name(c), new_regs);
+ for_each_new_reg(i)
+ print_reg(config_name(c), reg_list->reg[i]);
+ putchar('\n');
+ }
+
+ if (missing_regs) {
+ printf("\n%s: There are %d missing registers.\n"
+ "The following lines are missing registers:\n\n", config_name(c), missing_regs);
+ for_each_missing_reg(i)
+ print_reg(config_name(c), blessed_reg[i]);
+ putchar('\n');
+ }
+
+ TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
+ "%s: There are %d missing registers; "
+ "%d registers failed get; %d registers failed set; %d registers failed reject",
+ config_name(c), missing_regs, failed_get, failed_set, failed_reject);
+
+ pr_info("%s: PASS\n", config_name(c));
+ blessed_n = 0;
+ free(blessed_reg);
+ free(reg_list);
+ kvm_vm_free(vm);
+}
+
+static void help(void)
+{
+ struct vcpu_reg_list *c;
+ int i;
+
+ printf(
+ "\n"
+ "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered]\n\n"
+ " --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
+ " '<selection>' may be\n");
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ printf(
+ " '%s'\n", config_name(c));
+ }
+
+ printf(
+ "\n"
+ " --list Print the register list rather than test it (requires --config)\n"
+ " --list-filtered Print registers that would normally be filtered out (requires --config)\n"
+ "\n"
+ );
+}
+
+static struct vcpu_reg_list *parse_config(const char *config)
+{
+ struct vcpu_reg_list *c = NULL;
+ int i;
+
+ if (config[8] != '=')
+ help(), exit(1);
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ if (strcmp(config_name(c), &config[9]) == 0)
+ break;
+ }
+
+ if (i == vcpu_configs_n)
+ help(), exit(1);
+
+ return c;
+}
+
+int main(int ac, char **av)
+{
+ struct vcpu_reg_list *c, *sel = NULL;
+ int i, ret = 0;
+ pid_t pid;
+
+ for (i = 1; i < ac; ++i) {
+ if (strncmp(av[i], "--config", 8) == 0)
+ sel = parse_config(av[i]);
+ else if (strcmp(av[i], "--list") == 0)
+ print_list = true;
+ else if (strcmp(av[i], "--list-filtered") == 0)
+ print_filtered = true;
+ else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
+ help(), exit(0);
+ else
+ help(), exit(1);
+ }
+
+ if (print_list || print_filtered) {
+ /*
+ * We only want to print the register list of a single config.
+ */
+ if (!sel)
+ help(), exit(1);
+ }
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ if (sel && c != sel)
+ continue;
+
+ pid = fork();
+
+ if (!pid) {
+ run_test(c);
+ exit(0);
+ } else {
+ int wstatus;
+ pid_t wpid = wait(&wstatus);
+ TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
+ if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
+ ret = KSFT_FAIL;
+ }
+ }
+
+ return ret;
+}
--
2.34.1


2023-06-09 02:23:02

by Haibo Xu

Subject: [PATCH v3 07/10] KVM: arm64: selftests: Finish generalizing get-reg-list

From: Andrew Jones <[email protected]>

Add some unfortunate #ifdeffery to ensure the common get-reg-list.c
can be compiled and run with other architectures. The next
architecture to support get-reg-list should now only need to provide
$(ARCH_DIR)/get-reg-list.c where arch-specific print_reg() and
vcpu_configs[] get defined.
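
Concretely, a hypothetical $(ARCH_DIR)/get-reg-list.c could be as small
as the sketch below (the register IDs are placeholders; filter_reg()
could also be overridden, otherwise the __weak default filters nothing):

#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

/* Strong definition overrides the __weak fallback in the common file. */
void print_reg(const char *prefix, __u64 id)
{
        /* A real port would decode the ID; the fallback format works too. */
        printf("\t0x%llx,\n", id);
}

static __u64 base_regs[] = {
        0x8030000000000000ULL,  /* placeholder IDs, for illustration only */
        0x8030000000000001ULL,
};

static struct vcpu_reg_list base_config = {
        .sublists = {
                {
                        .name   = "base",
                        .regs   = base_regs,
                        .regs_n = ARRAY_SIZE(base_regs),
                },
                {0},    /* sentinel: for_each_sublist() stops at .regs == NULL */
        },
};

struct vcpu_reg_list *vcpu_configs[] = {
        &base_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);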

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
tools/testing/selftests/kvm/get-reg-list.c | 24 ++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 69bb91087081..c4bd5a5259da 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -98,6 +98,7 @@ void __weak print_reg(const char *prefix, __u64 id)
printf("\t0x%llx,\n", id);
}

+#ifdef __aarch64__
static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
{
struct vcpu_reg_sublist *s;
@@ -120,6 +121,24 @@ static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
}
}

+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+ struct kvm_vcpu_init init = { .target = -1, };
+ struct kvm_vcpu *vcpu;
+
+ prepare_vcpu_init(c, &init);
+ vcpu = __vm_vcpu_add(vm, 0);
+ aarch64_vcpu_setup(vcpu, &init);
+
+ return vcpu;
+}
+#else
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+ return __vm_vcpu_add(vm, 0);
+}
+#endif
+
static void check_supported(struct vcpu_reg_list *c)
{
struct vcpu_reg_sublist *s;
@@ -139,7 +158,6 @@ static bool print_filtered;

static void run_test(struct vcpu_reg_list *c)
{
- struct kvm_vcpu_init init = { .target = -1, };
int new_regs = 0, missing_regs = 0, i, n;
int failed_get = 0, failed_set = 0, failed_reject = 0;
struct kvm_vcpu *vcpu;
@@ -149,9 +167,7 @@ static void run_test(struct vcpu_reg_list *c)
check_supported(c);

vm = vm_create_barebones();
- prepare_vcpu_init(c, &init);
- vcpu = __vm_vcpu_add(vm, 0);
- aarch64_vcpu_setup(vcpu, &init);
+ vcpu = vcpu_config_get_vcpu(c, vm);
finalize_vcpu(vcpu, c);

reg_list = vcpu_get_reg_list(vcpu);
--
2.34.1


2023-06-09 02:23:12

by Haibo Xu

Subject: [PATCH v3 03/10] KVM: arm64: selftests: Remove print_reg's dependency on vcpu_config

From: Andrew Jones <[email protected]>

print_reg() and its helpers only use the vcpu_config pointer for
config_name(). So just pass the config name in instead, which is used
as a prefix in asserts. print_reg() can now be compiled independently
of config_name().

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Haibo Xu <[email protected]>
---
.../selftests/kvm/aarch64/get-reg-list.c | 52 +++++++++----------
1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index 915272c342f9..424285d39965 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -128,7 +128,7 @@ static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
#define CORE_SPSR_XX_NR_WORDS 2
#define CORE_FPREGS_XX_NR_WORDS 4

-static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
+static const char *core_id_to_str(const char *prefix, __u64 id)
{
__u64 core_off = id & ~REG_MASK, idx;

@@ -139,7 +139,7 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
KVM_REG_ARM_CORE_REG(regs.regs[30]):
idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
+ TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(regs.sp):
return "KVM_REG_ARM_CORE_REG(regs.sp)";
@@ -154,12 +154,12 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
case KVM_REG_ARM_CORE_REG(spsr[0]) ...
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
- TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
+ TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
+ TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
@@ -167,11 +167,11 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
}

- TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
+ TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
return NULL;
}

-static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
+static const char *sve_id_to_str(const char *prefix, __u64 id)
{
__u64 sve_off, n, i;

@@ -181,37 +181,37 @@ static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

- TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);
+ TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);

switch (sve_off) {
case KVM_REG_ARM64_SVE_ZREG_BASE ...
KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
- "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_PREG_BASE ...
KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
- "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_FFR_BASE:
TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
- "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
return "KVM_REG_ARM64_SVE_FFR(0)";
}

return NULL;
}

-static void print_reg(struct vcpu_config *c, __u64 id)
+static void print_reg(const char *prefix, __u64 id)
{
unsigned op0, op1, crn, crm, op2;
const char *reg_size = NULL;

TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
- "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);
+ "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);

switch (id & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U8:
@@ -243,16 +243,16 @@ static void print_reg(struct vcpu_config *c, __u64 id)
break;
default:
TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
- config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+ prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
}

switch (id & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE:
- printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
+ printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
break;
case KVM_REG_ARM_DEMUX:
TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
- "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
break;
@@ -263,25 +263,25 @@ static void print_reg(struct vcpu_config *c, __u64 id)
crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
- "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
break;
case KVM_REG_ARM_FW:
TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
- "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM_FW_FEAT_BMAP:
TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
- "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM64_SVE:
- printf("\t%s,\n", sve_id_to_str(c, id));
+ printf("\t%s,\n", sve_id_to_str(prefix, id));
break;
default:
TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
- config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
+ prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
}
}

@@ -410,7 +410,7 @@ static void run_test(struct vcpu_config *c)
__u64 id = reg_list->reg[i];
if ((print_list && !filter_reg(id)) ||
(print_filtered && filter_reg(id)))
- print_reg(c, id);
+ print_reg(config_name(c), id);
}
putchar('\n');
return;
@@ -438,7 +438,7 @@ static void run_test(struct vcpu_config *c)
ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
if (ret) {
printf("%s: Failed to get ", config_name(c));
- print_reg(c, reg.id);
+ print_reg(config_name(c), reg.id);
putchar('\n');
++failed_get;
}
@@ -450,7 +450,7 @@ static void run_test(struct vcpu_config *c)
ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
if (ret != -1 || errno != EPERM) {
printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
- print_reg(c, reg.id);
+ print_reg(config_name(c), reg.id);
putchar('\n');
++failed_reject;
}
@@ -462,7 +462,7 @@ static void run_test(struct vcpu_config *c)
ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
if (ret) {
printf("%s: Failed to set ", config_name(c));
- print_reg(c, reg.id);
+ print_reg(config_name(c), reg.id);
putchar('\n');
++failed_set;
}
@@ -500,7 +500,7 @@ static void run_test(struct vcpu_config *c)
"Consider adding them to the blessed reg "
"list with the following lines:\n\n", config_name(c), new_regs);
for_each_new_reg(i)
- print_reg(c, reg_list->reg[i]);
+ print_reg(config_name(c), reg_list->reg[i]);
putchar('\n');
}

@@ -508,7 +508,7 @@ static void run_test(struct vcpu_config *c)
printf("\n%s: There are %d missing registers.\n"
"The following lines are missing registers:\n\n", config_name(c), missing_regs);
for_each_missing_reg(i)
- print_reg(c, blessed_reg[i]);
+ print_reg(config_name(c), blessed_reg[i]);
putchar('\n');
}

--
2.34.1


2023-06-09 02:23:29

by Haibo Xu

Subject: [PATCH v3 09/10] KVM: riscv: selftests: Skip some registers set operation

The set operation on some riscv registers (mostly pseudo ones) is not
supported, so those registers should be skipped in the get-reg-list
test. Just reuse the rejects_set utilities to handle this on riscv.
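
For illustration, a riscv sublist would simply reuse the existing
fields; the register IDs below are placeholders, not real encodings:

static __u64 demo_pseudo_regs[] = {
        0x8000000000000010ULL,  /* placeholder IDs, for illustration only */
        0x8000000000000011ULL,
};

static struct vcpu_reg_sublist demo_sublist = {
        .name           = "demo-pseudo",
        .regs           = demo_pseudo_regs,
        .regs_n         = ARRAY_SIZE(demo_pseudo_regs),
        /*
         * On aarch64 these would be expected to fail set with EPERM after
         * KVM_ARM_VCPU_FINALIZE; on riscv the set call is skipped entirely.
         */
        .rejects_set    = demo_pseudo_regs,
        .rejects_set_n  = ARRAY_SIZE(demo_pseudo_regs),
};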

Signed-off-by: Haibo Xu <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
---
tools/testing/selftests/kvm/get-reg-list.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index c4bd5a5259da..abacb95c21c6 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -211,16 +211,22 @@ static void run_test(struct vcpu_reg_list *c)
++failed_get;
}

- /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
+ /*
+ * rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE on aarch64,
+ * or, on riscv, are registers whose set operation should simply be skipped.
+ */
for_each_sublist(c, s) {
if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
reject_reg = true;
- ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
- if (ret != -1 || errno != EPERM) {
- printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
- print_reg(config_name(c), reg.id);
- putchar('\n');
- ++failed_reject;
+ if ((reg.id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64) {
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ if (ret != -1 || errno != EPERM) {
+ printf("%s: Failed to reject (ret=%d, errno=%d) ",
+ config_name(c), ret, errno);
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_reject;
+ }
}
break;
}
--
2.34.1


2023-06-09 02:23:29

by Haibo Xu

[permalink] [raw]
Subject: [PATCH v3 08/10] KVM: riscv: Add KVM_GET_REG_LIST API support

The KVM_GET_REG_LIST API returns all registers that are available to
the KVM_GET/SET_ONE_REG APIs. It's very useful for identifying platform
regression issues during VM migration.

Since this API is already supported on arm64, it is straightforward to
enable it on riscv with a similar code structure.
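
For reference, userspace consumes this ioctl with the usual two-call
pattern. A minimal sketch (not part of this series; vcpu_fd is assumed
to be an already-created vCPU file descriptor):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_reg_ids(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	__u64 i;

	/* First call: n is too small, so KVM returns E2BIG and writes back the count. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return;
	list->n = probe.n;

	/* Second call: fetch the register IDs themselves. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0) {
		for (i = 0; i < list->n; i++)
			printf("0x%llx\n", (unsigned long long)list->reg[i]);
	}

	free(list);
}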

Signed-off-by: Haibo Xu <[email protected]>
Reviewed-by: Andrew Jones <[email protected]>
---
Documentation/virt/kvm/api.rst | 2 +-
arch/riscv/kvm/vcpu.c | 378 +++++++++++++++++++++++++++++++++
2 files changed, 379 insertions(+), 1 deletion(-)

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index add067793b90..280e89abd004 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -3499,7 +3499,7 @@ VCPU matching underlying host.
---------------------

:Capability: basic
-:Architectures: arm64, mips
+:Architectures: arm64, mips, riscv
:Type: vcpu ioctl
:Parameters: struct kvm_reg_list (in/out)
:Returns: 0 on success; -1 on error
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 8bd9f2a8a0b9..24ee4a1635a3 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -657,6 +657,366 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
return 0;
}

+static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int i;
+ int n = 0;
+
+ for (i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
+ i++) {
+ u64 size;
+ u64 reg;
+
+ /*
+ * Avoid reporting config reg if the corresponding extension
+ * was not available.
+ */
+ if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
+ !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ continue;
+ else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
+ !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ continue;
+
+ size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
+{
+ return copy_config_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_core_regs(void)
+{
+ return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
+}
+
+static int copy_core_reg_indices(u64 __user *uindices)
+{
+ unsigned int i;
+ int n = num_core_regs();
+
+ for (i = 0; i < n; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return n;
+}
+
+static inline unsigned long num_csr_regs(void)
+{
+ unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+
+ if (kvm_riscv_aia_available())
+ n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+ return n;
+}
+
+static int copy_csr_reg_indices(u64 __user *uindices)
+{
+ unsigned int i;
+ int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+ int n2 = 0;
+
+ /* copy general csr regs */
+ for (i = 0; i < n1; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+ KVM_REG_RISCV_CSR_GENERAL | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ /* copy AIA csr regs */
+ if (kvm_riscv_aia_available()) {
+ n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+ for (i = 0; i < n2; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+ KVM_REG_RISCV_CSR_AIA | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+ }
+
+ return n1 + n2;
+}
+
+static inline unsigned long num_timer_regs(void)
+{
+ return sizeof(struct kvm_riscv_timer) / sizeof(u64);
+}
+
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+ unsigned int i;
+ int n = num_timer_regs();
+
+ for (i = 0; i < n; i++) {
+ u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return n;
+}
+
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+ const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+ if (riscv_isa_extension_available(vcpu->arch.isa, f))
+ return sizeof(cntx->fp.f) / sizeof(u32);
+ else
+ return 0;
+}
+
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int i;
+ int n = num_fp_f_regs(vcpu);
+
+ for (i = 0; i < n; i++) {
+ u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return n;
+}
+
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+ const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+ if (riscv_isa_extension_available(vcpu->arch.isa, d))
+ return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+ else
+ return 0;
+}
+
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int i;
+ int n = num_fp_d_regs(vcpu);
+ u64 reg;
+
+ /* copy fp.d.f indices */
+ for (i = 0; i < n-1; i++) {
+ reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ /* copy fp.d.fcsr indices */
+ reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ }
+
+ return n;
+}
+
+static int copy_isa_ext_reg_indices(u64 __user *uindices)
+{
+ unsigned int i, n = 0;
+ unsigned long host_isa_ext;
+
+ for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
+
+ host_isa_ext = kvm_isa_ext_arr[i];
+ if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+ continue;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static inline unsigned long num_isa_ext_regs(void)
+{
+ return copy_isa_ext_reg_indices(NULL);
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+ /*
+ * number of KVM_REG_RISCV_SBI_SINGLE +
+ * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
+ */
+ return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
+}
+
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+ unsigned int i;
+ int n;
+
+ /* copy KVM_REG_RISCV_SBI_SINGLE */
+ n = KVM_RISCV_SBI_EXT_MAX;
+ for (i = 0; i < n; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_SINGLE | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ /* copy KVM_REG_RISCV_SBI_MULTI */
+ n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+ for (i = 0; i < n; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return num_sbi_ext_regs();
+}
+
+/*
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+static unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long res = 0;
+
+ res += num_config_regs(vcpu);
+ res += num_core_regs();
+ res += num_csr_regs();
+ res += num_timer_regs();
+ res += num_fp_f_regs(vcpu);
+ res += num_fp_d_regs(vcpu);
+ res += num_isa_ext_regs();
+ res += num_sbi_ext_regs();
+
+ return res;
+}
+
+/*
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+static int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ int ret;
+
+ ret = copy_config_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_core_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_csr_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_timer_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_fp_f_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_fp_d_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_isa_ext_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_sbi_ext_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -758,6 +1118,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
break;
}
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned int n;
+
+ r = -EFAULT;
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ break;
+ n = reg_list.n;
+ reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ break;
+ r = -E2BIG;
+ if (n < reg_list.n)
+ break;
+ r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
default:
break;
}
--
2.34.1


2023-06-09 02:25:19

by Haibo Xu

[permalink] [raw]
Subject: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

The get-reg-list test is used to check for KVM register regressions
during VM migration, which happen when the destination host kernel is
missing registers that the source host kernel has. The blessed register
list was created by running on v6.4-rc5.
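
As later kernels expose more registers, the blessed list grows by adding
another sublist/config pair in riscv/get-reg-list.c, along the same lines
as the Zicbom/Zicboz/AIA/FP sublists below. A hypothetical sketch for a
future Zbb entry (illustrative only, not part of this patch):

static __u64 zbb_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
};

#define ZBB_REGS_SUBLIST \
	{"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, \
	 .regs_n = ARRAY_SIZE(zbb_regs),}

static struct vcpu_reg_list zbb_config = {
	.sublists = {
	BASE_SUBLIST,
	ZBB_REGS_SUBLIST,
	{0},
	},
};

zbb_config would then be appended to vcpu_configs[].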

Signed-off-by: Haibo Xu <[email protected]>
---
tools/testing/selftests/kvm/Makefile | 1 +
tools/testing/selftests/kvm/get-reg-list.c | 28 +
.../selftests/kvm/include/riscv/processor.h | 3 +
.../selftests/kvm/riscv/get-reg-list.c | 611 ++++++++++++++++++
4 files changed, 643 insertions(+)
create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index d90cad19c9ee..f7bcda903dd9 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -174,6 +174,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test

TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
+TEST_GEN_PROGS_riscv += get-reg-list
TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index abacb95c21c6..73f40e0842b8 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -133,6 +133,34 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm
return vcpu;
}
#else
+static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
+{
+ int ret;
+ unsigned long value;
+
+ ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
+ if (ret) {
+ printf("Failed to get ext %d", ext);
+ return false;
+ }
+
+ return !!value;
+}
+
+static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+
+ for_each_sublist(c, s) {
+ if (!s->feature)
+ continue;
+
+ __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
+ "%s: %s not available, skipping tests\n",
+ config_name(c), s->name);
+ }
+}
+
static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
{
return __vm_vcpu_add(vm, 0);
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index d00d213c3805..5b62a3d2aa9b 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
KVM_REG_RISCV_TIMER_REG(name), \
KVM_REG_SIZE_U64)

+#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
+ idx, KVM_REG_SIZE_ULONG)
+
/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT 39
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
new file mode 100644
index 000000000000..0f371d99d471
--- /dev/null
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ * Copyright (c) 2023 Intel Corporation
+ *
+ */
+#include <stdio.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
+
+static const char *config_id_to_str(__u64 id)
+{
+ /* reg_off is the offset into struct kvm_riscv_config */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_CONFIG_REG(isa):
+ return "KVM_REG_RISCV_CONFIG_REG(isa)";
+ case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+ return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
+ case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+ return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
+ case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+ return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
+ case KVM_REG_RISCV_CONFIG_REG(marchid):
+ return "KVM_REG_RISCV_CONFIG_REG(marchid)";
+ case KVM_REG_RISCV_CONFIG_REG(mimpid):
+ return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
+ }
+
+ /*
+ * Config regs would grow regularly with new pseudo reg added, so
+ * just show raw id to indicate a new pseudo config reg.
+ */
+ return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
+}
+
+static const char *core_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct kvm_riscv_core */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_CORE_REG(regs.pc):
+ return "KVM_REG_RISCV_CORE_REG(regs.pc)";
+ case KVM_REG_RISCV_CORE_REG(regs.ra):
+ return "KVM_REG_RISCV_CORE_REG(regs.ra)";
+ case KVM_REG_RISCV_CORE_REG(regs.sp):
+ return "KVM_REG_RISCV_CORE_REG(regs.sp)";
+ case KVM_REG_RISCV_CORE_REG(regs.gp):
+ return "KVM_REG_RISCV_CORE_REG(regs.gp)";
+ case KVM_REG_RISCV_CORE_REG(regs.tp):
+ return "KVM_REG_RISCV_CORE_REG(regs.tp)";
+ case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
+ case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
+ case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
+ case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
+ case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
+ case KVM_REG_RISCV_CORE_REG(mode):
+ return "KVM_REG_RISCV_CORE_REG(mode)";
+ }
+
+ TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+#define RISCV_CSR_GENERAL(csr) \
+ "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
+#define RISCV_CSR_AIA(csr) \
+ "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
+
+static const char *general_csr_id_to_str(__u64 reg_off)
+{
+ /* reg_off is the offset into struct kvm_riscv_csr */
+ switch (reg_off) {
+ case KVM_REG_RISCV_CSR_REG(sstatus):
+ return RISCV_CSR_GENERAL(sstatus);
+ case KVM_REG_RISCV_CSR_REG(sie):
+ return RISCV_CSR_GENERAL(sie);
+ case KVM_REG_RISCV_CSR_REG(stvec):
+ return RISCV_CSR_GENERAL(stvec);
+ case KVM_REG_RISCV_CSR_REG(sscratch):
+ return RISCV_CSR_GENERAL(sscratch);
+ case KVM_REG_RISCV_CSR_REG(sepc):
+ return RISCV_CSR_GENERAL(sepc);
+ case KVM_REG_RISCV_CSR_REG(scause):
+ return RISCV_CSR_GENERAL(scause);
+ case KVM_REG_RISCV_CSR_REG(stval):
+ return RISCV_CSR_GENERAL(stval);
+ case KVM_REG_RISCV_CSR_REG(sip):
+ return RISCV_CSR_GENERAL(sip);
+ case KVM_REG_RISCV_CSR_REG(satp):
+ return RISCV_CSR_GENERAL(satp);
+ case KVM_REG_RISCV_CSR_REG(scounteren):
+ return RISCV_CSR_GENERAL(scounteren);
+ }
+
+ TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
+ return NULL;
+}
+
+static const char *aia_csr_id_to_str(__u64 reg_off)
+{
+ /* reg_off is the offset into struct kvm_riscv_aia_csr */
+ switch (reg_off) {
+ case KVM_REG_RISCV_CSR_AIA_REG(siselect):
+ return RISCV_CSR_AIA(siselect);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
+ return RISCV_CSR_AIA(iprio1);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
+ return RISCV_CSR_AIA(iprio2);
+ case KVM_REG_RISCV_CSR_AIA_REG(sieh):
+ return RISCV_CSR_AIA(sieh);
+ case KVM_REG_RISCV_CSR_AIA_REG(siph):
+ return RISCV_CSR_AIA(siph);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
+ return RISCV_CSR_AIA(iprio1h);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
+ return RISCV_CSR_AIA(iprio2h);
+ }
+
+ TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
+ return NULL;
+}
+
+static const char *csr_id_to_str(const char *prefix, __u64 id)
+{
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
+ __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+ reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ return general_csr_id_to_str(reg_off);
+ case KVM_REG_RISCV_CSR_AIA:
+ return aia_csr_id_to_str(reg_off);
+ }
+
+ TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
+ return NULL;
+}
+
+static const char *timer_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct kvm_riscv_timer */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_TIMER_REG(frequency):
+ return "KVM_REG_RISCV_TIMER_REG(frequency)";
+ case KVM_REG_RISCV_TIMER_REG(time):
+ return "KVM_REG_RISCV_TIMER_REG(time)";
+ case KVM_REG_RISCV_TIMER_REG(compare):
+ return "KVM_REG_RISCV_TIMER_REG(compare)";
+ case KVM_REG_RISCV_TIMER_REG(state):
+ return "KVM_REG_RISCV_TIMER_REG(state)";
+ }
+
+ TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+static const char *fp_f_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct __riscv_f_ext_state */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_FP_F_REG(f[0]) ...
+ KVM_REG_RISCV_FP_F_REG(f[31]):
+ return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
+ case KVM_REG_RISCV_FP_F_REG(fcsr):
+ return "KVM_REG_RISCV_FP_F_REG(fcsr)";
+ }
+
+ TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+static const char *fp_d_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct __riscv_d_ext_state */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_FP_D_REG(f[0]) ...
+ KVM_REG_RISCV_FP_D_REG(f[31]):
+ return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
+ case KVM_REG_RISCV_FP_D_REG(fcsr):
+ return "KVM_REG_RISCV_FP_D_REG(fcsr)";
+ }
+
+ TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+static const char *isa_ext_id_to_str(__u64 id)
+{
+ /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
+
+ static const char * const kvm_isa_ext_reg_name[] = {
+ "KVM_RISCV_ISA_EXT_A",
+ "KVM_RISCV_ISA_EXT_C",
+ "KVM_RISCV_ISA_EXT_D",
+ "KVM_RISCV_ISA_EXT_F",
+ "KVM_RISCV_ISA_EXT_H",
+ "KVM_RISCV_ISA_EXT_I",
+ "KVM_RISCV_ISA_EXT_M",
+ "KVM_RISCV_ISA_EXT_SVPBMT",
+ "KVM_RISCV_ISA_EXT_SSTC",
+ "KVM_RISCV_ISA_EXT_SVINVAL",
+ "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
+ "KVM_RISCV_ISA_EXT_ZICBOM",
+ "KVM_RISCV_ISA_EXT_ZICBOZ",
+ "KVM_RISCV_ISA_EXT_ZBB",
+ "KVM_RISCV_ISA_EXT_SSAIA",
+ };
+
+ if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
+ /*
+ * isa_ext regs would grow regularly with new isa extension added, so
+ * just show "reg" to indicate a new extension.
+ */
+ return strdup_printf("%lld /* UNKNOWN */", reg_off);
+ }
+
+ return kvm_isa_ext_reg_name[reg_off];
+}
+
+static const char *sbi_ext_single_id_to_str(__u64 reg_off)
+{
+ /* reg_off is KVM_RISCV_SBI_EXT_ID */
+ static const char * const kvm_sbi_ext_reg_name[] = {
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
+ };
+
+ if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
+ /*
+ * sbi_ext regs would grow regularly with new sbi extension added, so
+ * just show "reg" to indicate a new extension.
+ */
+ return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
+ }
+
+ return kvm_sbi_ext_reg_name[reg_off];
+}
+
+static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
+{
+ if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
+ /*
+ * sbi_ext regs would grow regularly with new sbi extension added, so
+ * just show "reg" to indicate a new extension.
+ */
+ return strdup_printf("%lld /* UNKNOWN */", reg_off);
+ }
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_SBI_MULTI_EN:
+ return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
+ case KVM_REG_RISCV_SBI_MULTI_DIS:
+ return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
+ }
+
+ return NULL;
+}
+
+static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
+{
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
+ __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+ reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_SBI_SINGLE:
+ return sbi_ext_single_id_to_str(reg_off);
+ case KVM_REG_RISCV_SBI_MULTI_EN:
+ case KVM_REG_RISCV_SBI_MULTI_DIS:
+ return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
+ }
+
+ TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
+ return NULL;
+}
+
+void print_reg(const char *prefix, __u64 id)
+{
+ const char *reg_size = NULL;
+
+ TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
+ "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
+
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ reg_size = "KVM_REG_SIZE_U32";
+ break;
+ case KVM_REG_SIZE_U64:
+ reg_size = "KVM_REG_SIZE_U64";
+ break;
+ case KVM_REG_SIZE_U128:
+ reg_size = "KVM_REG_SIZE_U128";
+ break;
+ default:
+ TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
+ prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+ }
+
+ switch (id & KVM_REG_RISCV_TYPE_MASK) {
+ case KVM_REG_RISCV_CONFIG:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
+ reg_size, config_id_to_str(id));
+ break;
+ case KVM_REG_RISCV_CORE:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
+ reg_size, core_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_CSR:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
+ reg_size, csr_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_TIMER:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
+ reg_size, timer_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_FP_F:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
+ reg_size, fp_f_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_FP_D:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
+ reg_size, fp_d_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_ISA_EXT:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
+ reg_size, isa_ext_id_to_str(id));
+ break;
+ case KVM_REG_RISCV_SBI_EXT:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
+ reg_size, sbi_ext_id_to_str(prefix, id));
+ break;
+ default:
+ TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
+ (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
+ }
+}
+
+/*
+ * The current blessed list was primed with the output of kernel version
+ * v6.4-rc5 and then later updated with new registers.
+ */
+static __u64 base_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
+};
+
+/*
+ * The rejects_set list contains registers that should skip the set test.
+ * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
+ * - KVM_REG_RISCV_TIMER_REG(frequency): set not supported
+ * - KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): set not supported
+ * - KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): set not supported
+ * - KVM_RISCV_ISA_EXT_SVPBMT: set not supported
+ * - KVM_RISCV_ISA_EXT_SVINVAL: set not supported
+ * - KVM_RISCV_ISA_EXT_SSAIA: set not supported
+ */
+static __u64 base_rejects_set[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
+};
+
+static __u64 zicbom_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
+};
+
+static __u64 zicboz_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
+};
+
+static __u64 aia_csr_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
+};
+
+static __u64 fp_f_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
+};
+
+static __u64 fp_d_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
+};
+
+#define BASE_SUBLIST \
+ {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
+ .rejects_set = base_rejects_set, .rejects_set_n = ARRAY_SIZE(base_rejects_set),}
+#define ZICBOM_REGS_SUBLIST \
+ {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, \
+ .regs_n = ARRAY_SIZE(zicbom_regs),}
+#define ZICBOZ_REGS_SUBLIST \
+ {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, \
+ .regs_n = ARRAY_SIZE(zicboz_regs),}
+#define AIA_REGS_SUBLIST \
+ {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_csr_regs, \
+ .regs_n = ARRAY_SIZE(aia_csr_regs),}
+#define FP_F_REGS_SUBLIST \
+ {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
+ .regs_n = ARRAY_SIZE(fp_f_regs),}
+#define FP_D_REGS_SUBLIST \
+ {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
+ .regs_n = ARRAY_SIZE(fp_d_regs),}
+
+static struct vcpu_reg_list zicbo_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZICBOM_REGS_SUBLIST,
+ ZICBOZ_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list aia_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ AIA_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list fp_f_d_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ FP_F_REGS_SUBLIST,
+ FP_D_REGS_SUBLIST,
+ {0},
+ },
+};
+
+struct vcpu_reg_list *vcpu_configs[] = {
+ &zicbo_config,
+ &aia_config,
+ &fp_f_d_config,
+};
+int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
--
2.34.1


2023-06-09 09:51:33

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] KVM: riscv: selftests: Skip some registers set operation

On Fri, Jun 09, 2023 at 10:12:17AM +0800, Haibo Xu wrote:
> Set operations on some riscv registers (mostly pseudo ones) are not
> supported and should be skipped in the get-reg-list test. Just reuse
> the rejects_set utilities to handle them on riscv.
>
> Signed-off-by: Haibo Xu <[email protected]>
> Reviewed-by: Andrew Jones <[email protected]>
> ---
> tools/testing/selftests/kvm/get-reg-list.c | 20 +++++++++++++-------
> 1 file changed, 13 insertions(+), 7 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> index c4bd5a5259da..abacb95c21c6 100644
> --- a/tools/testing/selftests/kvm/get-reg-list.c
> +++ b/tools/testing/selftests/kvm/get-reg-list.c
> @@ -211,16 +211,22 @@ static void run_test(struct vcpu_reg_list *c)
> ++failed_get;
> }
>
> - /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
> + /*
> + * rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE on aarch64,
> + * or registers that should skip set operation on riscv.
> + */
> for_each_sublist(c, s) {
> if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
> reject_reg = true;
> - ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> - if (ret != -1 || errno != EPERM) {
> - printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
> - print_reg(config_name(c), reg.id);
> - putchar('\n');
> - ++failed_reject;
> + if ((reg.id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64) {
> + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> + if (ret != -1 || errno != EPERM) {
> + printf("%s: Failed to reject (ret=%d, errno=%d) ",
> + config_name(c), ret, errno);
> + print_reg(config_name(c), reg.id);
> + putchar('\n');
> + ++failed_reject;
> + }

Thinking about this some more, shouldn't we attempt the set ioctl for
riscv reject registers as well, but look for different error numbers?
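
Something along these lines, perhaps (just a sketch; the riscv errno is a
placeholder until we know what KVM actually returns for these registers):

	int expect;

	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	if ((reg.id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64)
		expect = EPERM;
	else
		expect = EPERM; /* placeholder: substitute the errno riscv returns */
	if (ret != -1 || errno != expect) {
		printf("%s: Failed to reject (ret=%d, errno=%d) ",
		       config_name(c), ret, errno);
		print_reg(config_name(c), reg.id);
		putchar('\n');
		++failed_reject;
	}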

Thanks,
drew

2023-06-09 12:35:21

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 07/10] KVM: arm64: selftests: Finish generalizing get-reg-list

On Fri, Jun 09, 2023 at 10:12:15AM +0800, Haibo Xu wrote:
> From: Andrew Jones <[email protected]>
>
> Add some unfortunate #ifdeffery to ensure the common get-reg-list.c
> can be compiled and run with other architectures. The next
> architecture to support get-reg-list should now only need to provide
> $(ARCH_DIR)/get-reg-list.c where arch-specific print_reg() and
> vcpu_configs[] get defined.
>
> Signed-off-by: Andrew Jones <[email protected]>
> Signed-off-by: Haibo Xu <[email protected]>
> ---
> tools/testing/selftests/kvm/get-reg-list.c | 24 ++++++++++++++++++----
> 1 file changed, 20 insertions(+), 4 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> index 69bb91087081..c4bd5a5259da 100644
> --- a/tools/testing/selftests/kvm/get-reg-list.c
> +++ b/tools/testing/selftests/kvm/get-reg-list.c
> @@ -98,6 +98,7 @@ void __weak print_reg(const char *prefix, __u64 id)
> printf("\t0x%llx,\n", id);
> }
>
> +#ifdef __aarch64__
> static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
> {
> struct vcpu_reg_sublist *s;
> @@ -120,6 +121,24 @@ static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
> }
> }
>
> +static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> +{
> + struct kvm_vcpu_init init = { .target = -1, };
> + struct kvm_vcpu *vcpu;
> +
> + prepare_vcpu_init(c, &init);
> + vcpu = __vm_vcpu_add(vm, 0);
> + aarch64_vcpu_setup(vcpu, &init);
> +
> + return vcpu;
> +}
> +#else
> +static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> +{
> + return __vm_vcpu_add(vm, 0);
> +}
> +#endif
> +
> static void check_supported(struct vcpu_reg_list *c)
> {
> struct vcpu_reg_sublist *s;
> @@ -139,7 +158,6 @@ static bool print_filtered;
>
> static void run_test(struct vcpu_reg_list *c)
> {
> - struct kvm_vcpu_init init = { .target = -1, };
> int new_regs = 0, missing_regs = 0, i, n;
> int failed_get = 0, failed_set = 0, failed_reject = 0;
> struct kvm_vcpu *vcpu;
> @@ -149,9 +167,7 @@ static void run_test(struct vcpu_reg_list *c)
> check_supported(c);
>
> vm = vm_create_barebones();
> - prepare_vcpu_init(c, &init);
> - vcpu = __vm_vcpu_add(vm, 0);
> - aarch64_vcpu_setup(vcpu, &init);
> + vcpu = vcpu_config_get_vcpu(c, vm);
> finalize_vcpu(vcpu, c);

I just noticed that this has been modified from what I posted to leave
the finalize_vcpu() call here, despite the function itself now being
defined inside the #ifdef __aarch64__. That defeats the purpose of the
patch. Please make sure this file compiles for other architectures
without requiring additional patches, which would keep the commit
message honest. You can either revert this to what I posted, and then
re-add the finalize_vcpu() call in another patch, or you can add a
finalize_vcpu() stub to the #else part of the ifdef in this patch.
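
i.e. for the second option, a stub like this in the #else branch would be
enough to keep the common run_test() compiling on !aarch64:

	static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
	{
	}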

Also please don't modify patches authored by others without calling out
the modifications somewhere, either the commit message or under the ---
of the patch or in the cover letter.

Thanks,
drew

2023-06-09 13:50:45

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> The get-reg-list test is used to check for KVM register regressions
> during VM migration, which happen when the destination host kernel is
> missing registers that the source host kernel has. The blessed register
> list was created by running on v6.4-rc5.
>
> Signed-off-by: Haibo Xu <[email protected]>
> ---
> tools/testing/selftests/kvm/Makefile | 1 +
> tools/testing/selftests/kvm/get-reg-list.c | 28 +
> .../selftests/kvm/include/riscv/processor.h | 3 +
> .../selftests/kvm/riscv/get-reg-list.c | 611 ++++++++++++++++++
> 4 files changed, 643 insertions(+)
> create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c
>
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index d90cad19c9ee..f7bcda903dd9 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -174,6 +174,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
>
> TEST_GEN_PROGS_riscv += demand_paging_test
> TEST_GEN_PROGS_riscv += dirty_log_test
> +TEST_GEN_PROGS_riscv += get-reg-list
> TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
> TEST_GEN_PROGS_riscv += kvm_page_table_test
> TEST_GEN_PROGS_riscv += set_memory_region_test
> diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> index abacb95c21c6..73f40e0842b8 100644
> --- a/tools/testing/selftests/kvm/get-reg-list.c
> +++ b/tools/testing/selftests/kvm/get-reg-list.c
> @@ -133,6 +133,34 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm
> return vcpu;
> }
> #else
> +static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
> +{
> + int ret;
> + unsigned long value;
> +
> + ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
> + if (ret) {
> + printf("Failed to get ext %d", ext);
> + return false;
> + }
> +
> + return !!value;
> +}
> +
> +static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
> +{
> + struct vcpu_reg_sublist *s;
> +
> + for_each_sublist(c, s) {
> + if (!s->feature)
> + continue;

Using zero to mean "not specified" means we can't test for
KVM_RISCV_ISA_EXT_A, but that's probably OK, since Linux always has 'a',
so we'll never need to check for it.
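
E.g. a sublist written as

	{"a", .feature = KVM_RISCV_ISA_EXT_A, .regs = a_regs, .regs_n = ARRAY_SIZE(a_regs),},

(with a_regs purely hypothetical) would be silently skipped by the
"if (!s->feature)" check, exactly as if .feature had been left out.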

> +
> + __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
> + "%s: %s not available, skipping tests\n",
> + config_name(c), s->name);
> + }
> +}
> +
> static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> {
> return __vm_vcpu_add(vm, 0);
> diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
> index d00d213c3805..5b62a3d2aa9b 100644
> --- a/tools/testing/selftests/kvm/include/riscv/processor.h
> +++ b/tools/testing/selftests/kvm/include/riscv/processor.h
> @@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
> KVM_REG_RISCV_TIMER_REG(name), \
> KVM_REG_SIZE_U64)
>
> +#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
> + idx, KVM_REG_SIZE_ULONG)
> +
> /* L3 index Bit[47:39] */
> #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
> #define PGTBL_L3_INDEX_SHIFT 39
> diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> new file mode 100644
> index 000000000000..0f371d99d471
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> @@ -0,0 +1,611 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Check for KVM_GET_REG_LIST regressions.
> + *
> + * Copyright (C) 2020, Red Hat, Inc.

I don't think we need the Red Hat copyright. This is a completely new
work.

> + * Copyright (c) 2023 Intel Corporation
> + *
> + */
> +#include <stdio.h>
> +#include "kvm_util.h"
> +#include "test_util.h"
> +#include "processor.h"
> +
> +#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
> +
> +static const char *config_id_to_str(__u64 id)
> +{
> + /* reg_off is the offset into struct kvm_riscv_config */
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
> +
> + switch (reg_off) {
> + case KVM_REG_RISCV_CONFIG_REG(isa):
> + return "KVM_REG_RISCV_CONFIG_REG(isa)";
> + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
> + return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
> + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
> + return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
> + case KVM_REG_RISCV_CONFIG_REG(mvendorid):
> + return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
> + case KVM_REG_RISCV_CONFIG_REG(marchid):
> + return "KVM_REG_RISCV_CONFIG_REG(marchid)";
> + case KVM_REG_RISCV_CONFIG_REG(mimpid):
> + return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
> + }
> +
> + /*
> + * Config regs would grow regularly with new pseudo reg added, so
> + * just show raw id to indicate a new pseudo config reg.
> + */
> + return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
> +}
> +
> +static const char *core_id_to_str(const char *prefix, __u64 id)
> +{
> + /* reg_off is the offset into struct kvm_riscv_core */
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
> +
> + switch (reg_off) {
> + case KVM_REG_RISCV_CORE_REG(regs.pc):
> + return "KVM_REG_RISCV_CORE_REG(regs.pc)";
> + case KVM_REG_RISCV_CORE_REG(regs.ra):
> + return "KVM_REG_RISCV_CORE_REG(regs.ra)";
> + case KVM_REG_RISCV_CORE_REG(regs.sp):
> + return "KVM_REG_RISCV_CORE_REG(regs.sp)";
> + case KVM_REG_RISCV_CORE_REG(regs.gp):
> + return "KVM_REG_RISCV_CORE_REG(regs.gp)";
> + case KVM_REG_RISCV_CORE_REG(regs.tp):
> + return "KVM_REG_RISCV_CORE_REG(regs.tp)";
> + case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
> + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> + reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
> + case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
> + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> + reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
> + case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
> + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
> + reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
> + case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
> + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> + reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
> + case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
> + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> + reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
> + case KVM_REG_RISCV_CORE_REG(mode):
> + return "KVM_REG_RISCV_CORE_REG(mode)";
> + }
> +
> + TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
> + return NULL;
> +}
> +
> +#define RISCV_CSR_GENERAL(csr) \
> + "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
> +#define RISCV_CSR_AIA(csr) \
> + "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
> +
> +static const char *general_csr_id_to_str(__u64 reg_off)
> +{
> + /* reg_off is the offset into struct kvm_riscv_csr */
> + switch (reg_off) {
> + case KVM_REG_RISCV_CSR_REG(sstatus):
> + return RISCV_CSR_GENERAL(sstatus);
> + case KVM_REG_RISCV_CSR_REG(sie):
> + return RISCV_CSR_GENERAL(sie);
> + case KVM_REG_RISCV_CSR_REG(stvec):
> + return RISCV_CSR_GENERAL(stvec);
> + case KVM_REG_RISCV_CSR_REG(sscratch):
> + return RISCV_CSR_GENERAL(sscratch);
> + case KVM_REG_RISCV_CSR_REG(sepc):
> + return RISCV_CSR_GENERAL(sepc);
> + case KVM_REG_RISCV_CSR_REG(scause):
> + return RISCV_CSR_GENERAL(scause);
> + case KVM_REG_RISCV_CSR_REG(stval):
> + return RISCV_CSR_GENERAL(stval);
> + case KVM_REG_RISCV_CSR_REG(sip):
> + return RISCV_CSR_GENERAL(sip);
> + case KVM_REG_RISCV_CSR_REG(satp):
> + return RISCV_CSR_GENERAL(satp);
> + case KVM_REG_RISCV_CSR_REG(scounteren):
> + return RISCV_CSR_GENERAL(scounteren);
> + }
> +
> + TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
> + return NULL;
> +}
> +
> +static const char *aia_csr_id_to_str(__u64 reg_off)
> +{
> + /* reg_off is the offset into struct kvm_riscv_aia_csr */
> + switch (reg_off) {
> + case KVM_REG_RISCV_CSR_AIA_REG(siselect):
> + return RISCV_CSR_AIA(siselect);
> + case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
> + return RISCV_CSR_AIA(iprio1);
> + case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
> + return RISCV_CSR_AIA(iprio2);
> + case KVM_REG_RISCV_CSR_AIA_REG(sieh):
> + return RISCV_CSR_AIA(sieh);
> + case KVM_REG_RISCV_CSR_AIA_REG(siph):
> + return RISCV_CSR_AIA(siph);
> + case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
> + return RISCV_CSR_AIA(iprio1h);
> + case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
> + return RISCV_CSR_AIA(iprio2h);
> + }
> +
> + TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
> + return NULL;
> +}
> +
> +static const char *csr_id_to_str(const char *prefix, __u64 id)
> +{
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
> + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> +
> + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> +
> + switch (reg_subtype) {
> + case KVM_REG_RISCV_CSR_GENERAL:
> + return general_csr_id_to_str(reg_off);
> + case KVM_REG_RISCV_CSR_AIA:
> + return aia_csr_id_to_str(reg_off);
> + }
> +
> + TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
> + return NULL;
> +}
> +
> +static const char *timer_id_to_str(const char *prefix, __u64 id)
> +{
> + /* reg_off is the offset into struct kvm_riscv_timer */
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
> +
> + switch (reg_off) {
> + case KVM_REG_RISCV_TIMER_REG(frequency):
> + return "KVM_REG_RISCV_TIMER_REG(frequency)";
> + case KVM_REG_RISCV_TIMER_REG(time):
> + return "KVM_REG_RISCV_TIMER_REG(time)";
> + case KVM_REG_RISCV_TIMER_REG(compare):
> + return "KVM_REG_RISCV_TIMER_REG(compare)";
> + case KVM_REG_RISCV_TIMER_REG(state):
> + return "KVM_REG_RISCV_TIMER_REG(state)";
> + }
> +
> + TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
> + return NULL;
> +}
> +
> +static const char *fp_f_id_to_str(const char *prefix, __u64 id)
> +{
> + /* reg_off is the offset into struct __riscv_f_ext_state */
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
> +
> + switch (reg_off) {
> + case KVM_REG_RISCV_FP_F_REG(f[0]) ...
> + KVM_REG_RISCV_FP_F_REG(f[31]):
> + return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
> + case KVM_REG_RISCV_FP_F_REG(fcsr):
> + return "KVM_REG_RISCV_FP_F_REG(fcsr)";
> + }
> +
> + TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
> + return NULL;
> +}
> +
> +static const char *fp_d_id_to_str(const char *prefix, __u64 id)
> +{
> + /* reg_off is the offset into struct __riscv_d_ext_state */
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
> +
> + switch (reg_off) {
> + case KVM_REG_RISCV_FP_D_REG(f[0]) ...
> + KVM_REG_RISCV_FP_D_REG(f[31]):
> + return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
> + case KVM_REG_RISCV_FP_D_REG(fcsr):
> + return "KVM_REG_RISCV_FP_D_REG(fcsr)";
> + }
> +
> + TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
> + return NULL;
> +}
> +
> +static const char *isa_ext_id_to_str(__u64 id)
> +{
> + /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
> +
> + static const char * const kvm_isa_ext_reg_name[] = {
> + "KVM_RISCV_ISA_EXT_A",
> + "KVM_RISCV_ISA_EXT_C",
> + "KVM_RISCV_ISA_EXT_D",
> + "KVM_RISCV_ISA_EXT_F",
> + "KVM_RISCV_ISA_EXT_H",
> + "KVM_RISCV_ISA_EXT_I",
> + "KVM_RISCV_ISA_EXT_M",
> + "KVM_RISCV_ISA_EXT_SVPBMT",
> + "KVM_RISCV_ISA_EXT_SSTC",
> + "KVM_RISCV_ISA_EXT_SVINVAL",
> + "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
> + "KVM_RISCV_ISA_EXT_ZICBOM",
> + "KVM_RISCV_ISA_EXT_ZICBOZ",
> + "KVM_RISCV_ISA_EXT_ZBB",
> + "KVM_RISCV_ISA_EXT_SSAIA",
> + };
> +
> + if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
> + /*
> + * isa_ext regs would grow regularly with new isa extension added, so
> + * just show "reg" to indicate a new extension.
> + */
> + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> + }
> +
> + return kvm_isa_ext_reg_name[reg_off];
> +}
> +
> +static const char *sbi_ext_single_id_to_str(__u64 reg_off)
> +{
> + /* reg_off is KVM_RISCV_SBI_EXT_ID */
> + static const char * const kvm_sbi_ext_reg_name[] = {
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
> + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
> + };
> +
> + if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
> + /*
> + * sbi_ext regs would grow regularly with new sbi extension added, so
> + * just show "reg" to indicate a new extension.
> + */
> + return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
> + }
> +
> + return kvm_sbi_ext_reg_name[reg_off];
> +}
> +
> +static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
> +{
> + if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
> + /*
> + * sbi_ext regs would grow regularly with new sbi extension added, so
> + * just show "reg" to indicate a new extension.
> + */
> + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> + }
> +
> + switch (reg_subtype) {
> + case KVM_REG_RISCV_SBI_MULTI_EN:
> + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
> + case KVM_REG_RISCV_SBI_MULTI_DIS:
> + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
> + }
> +
> + return NULL;
> +}
> +
> +static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
> +{
> + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
> + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> +
> + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> +
> + switch (reg_subtype) {
> + case KVM_REG_RISCV_SBI_SINGLE:
> + return sbi_ext_single_id_to_str(reg_off);
> + case KVM_REG_RISCV_SBI_MULTI_EN:
> + case KVM_REG_RISCV_SBI_MULTI_DIS:
> + return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
> + }
> +
> + TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
> + return NULL;
> +}
> +
> +void print_reg(const char *prefix, __u64 id)
> +{
> + const char *reg_size = NULL;
> +
> + TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
> + "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
> +
> + switch (id & KVM_REG_SIZE_MASK) {
> + case KVM_REG_SIZE_U32:
> + reg_size = "KVM_REG_SIZE_U32";
> + break;
> + case KVM_REG_SIZE_U64:
> + reg_size = "KVM_REG_SIZE_U64";
> + break;
> + case KVM_REG_SIZE_U128:
> + reg_size = "KVM_REG_SIZE_U128";
> + break;
> + default:
> + TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
> + prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
> + }
> +
> + switch (id & KVM_REG_RISCV_TYPE_MASK) {
> + case KVM_REG_RISCV_CONFIG:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",

All the work to try and use KVM_REG_SIZE_ULONG in the right places will be
lost if we print a reg list and then copy+paste it as a blessed list. On
64-bit, the only thing supported now, we'll get U64, but if we ever
supported 32-bit, then we'd get U32. This is unfortunate, but there's
nothing we can do about it. Either we can't have a true print+copy+paste
workflow or we should assume we'll only support 64-bit and only use U64
in the blessed lists (from a copy+paste). But, we've already got ULONG
in there now, so we can just leave it and burn this bridge later.
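
For reference, the riscv uapi header defines the alias roughly like
this (which is why the ULONG-ness is gone by the time an id reaches
print_reg()):

  #if __riscv_xlen == 64
  #define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U64
  #else
  #define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U32
  #endif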

> + reg_size, config_id_to_str(id));
> + break;
> + case KVM_REG_RISCV_CORE:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
> + reg_size, core_id_to_str(prefix, id));
> + break;
> + case KVM_REG_RISCV_CSR:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
> + reg_size, csr_id_to_str(prefix, id));
> + break;
> + case KVM_REG_RISCV_TIMER:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
> + reg_size, timer_id_to_str(prefix, id));
> + break;
> + case KVM_REG_RISCV_FP_F:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
> + reg_size, fp_f_id_to_str(prefix, id));
> + break;
> + case KVM_REG_RISCV_FP_D:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
> + reg_size, fp_d_id_to_str(prefix, id));
> + break;
> + case KVM_REG_RISCV_ISA_EXT:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
> + reg_size, isa_ext_id_to_str(id));
> + break;
> + case KVM_REG_RISCV_SBI_EXT:
> + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
> + reg_size, sbi_ext_id_to_str(prefix, id));
> + break;
> + default:
> + TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
> + (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
> + }
> +}
> +
> +/*
> + * The current blessed list was primed with the output of kernel version
> + * v6.4-rc5 and then later updated with new registers.
> + */
> +static __u64 base_regs[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
> +};
> +
> +/*
> + * The rejects_set list contains registers that should skip the set test.
> + * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
> + * - KVM_REG_RISCV_TIMER_REG(frequency): set not supported
> + * - KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): set not supported
> + * - KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): set not supported
> + * - KVM_RISCV_ISA_EXT_SVPBMT: set not supported
> + * - KVM_RISCV_ISA_EXT_SVINVAL: set not supported
> + * - KVM_RISCV_ISA_EXT_SSAIA: set not supported
> + */
> +static __u64 base_rejects_set[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,

These aren't all base registers. I think we should divide the reject lists
up too, especially considering the idea I wrote in the last patch, which
is to test setting the rejects to ensure the expected error is returned.
The error may be different for a rejected set of a supported register vs.
that of an unsupported register.
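
For example (only a sketch of the direction, reusing the existing
sublist fields), the AIA reject could live next to the aia sublist:

static __u64 aia_rejects_set[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
};

#define AIA_REGS_SUBLIST \
	{"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_csr_regs, \
	 .regs_n = ARRAY_SIZE(aia_csr_regs), .rejects_set = aia_rejects_set, \
	 .rejects_set_n = ARRAY_SIZE(aia_rejects_set),}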

> +};
> +
> +static __u64 zicbom_regs[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
> +};
> +
> +static __u64 zicboz_regs[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
> +};
> +
> +static __u64 aia_csr_regs[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
> + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
> +};
> +
> +static __u64 fp_f_regs[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
> +};
> +
> +static __u64 fp_d_regs[] = {
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
> + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
> +};
> +
> +#define BASE_SUBLIST \
> + {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
> + .rejects_set = base_rejects_set, .rejects_set_n = ARRAY_SIZE(base_rejects_set),}
> +#define ZICBOM_REGS_SUBLIST \
> + {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, \
> + .regs_n = ARRAY_SIZE(zicbom_regs),}
> +#define ZICBOZ_REGS_SUBLIST \
> + {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, \
> + .regs_n = ARRAY_SIZE(zicboz_regs),}
> +#define AIA_REGS_SUBLIST \
> + {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_csr_regs, \
> + .regs_n = ARRAY_SIZE(aia_csr_regs),}
> +#define FP_F_REGS_SUBLIST \
> + {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
> + .regs_n = ARRAY_SIZE(fp_f_regs),}
> +#define FP_D_REGS_SUBLIST \
> + {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
> + .regs_n = ARRAY_SIZE(fp_d_regs),}
> +
> +static struct vcpu_reg_list zicbo_config = {
> + .sublists = {
> + BASE_SUBLIST,
> + ZICBOM_REGS_SUBLIST,
> + ZICBOZ_REGS_SUBLIST,

It's possible to have zicbom without zicboz and vice-versa. Since
finalize_vcpu() will skip the whole test when it detects a missing
feature for a config, then we won't be able to test one without the
other. It's a bit annoying, but I think we may need a separate config
for each independent extension.
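
Something like this, perhaps (just sketching the idea):

static struct vcpu_reg_list zicbom_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICBOM_REGS_SUBLIST,
	{0},
	},
};

static struct vcpu_reg_list zicboz_config = {
	.sublists = {
	BASE_SUBLIST,
	ZICBOZ_REGS_SUBLIST,
	{0},
	},
};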

> + {0},
> + },
> +};
> +
> +static struct vcpu_reg_list aia_config = {
> + .sublists = {
> + BASE_SUBLIST,
> + AIA_REGS_SUBLIST,
> + {0},
> + },
> +};
> +
> +static struct vcpu_reg_list fp_f_d_config = {
> + .sublists = {
> + BASE_SUBLIST,
> + FP_F_REGS_SUBLIST,
> + FP_D_REGS_SUBLIST,
> + {0},
> + },
> +};
> +
> +struct vcpu_reg_list *vcpu_configs[] = {
> + &zicbo_config,
> + &aia_config,
> + &fp_f_d_config,
> +};
> +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> --
> 2.34.1
>

I see we have a bit of a problem with the configs for riscv. Since we
don't disable anything we're not testing, for any test that is missing,
for example, the f and d registers, we'll get output like
"There are 66 new registers. Consider adding them to the blessed reg
list with the following lines:" and then a dump of all the f and d
registers. The test doesn't fail, but it's messy and confusing. Ideally
we'd disable all registers of all sublists not in the config, probably
by starting by disabling everything and then only reenabling the ones
in the config.
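
Untested sketch of the idea (assuming the sublist .feature values are
KVM_RISCV_ISA_EXT_* ids and that KVM_RISCV_ISA_EXT_MAX from the uapi
enum can be used as the loop bound; SBI ext registers would need
similar treatment):

static void disable_unlisted_isa_exts(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	struct vcpu_reg_sublist *s;
	bool listed;
	int ext;

	for (ext = 0; ext < KVM_RISCV_ISA_EXT_MAX; ext++) {
		listed = false;
		for_each_sublist(c, s) {
			if (s->feature == ext)
				listed = true;
		}
		/* Ignore failures; some extensions can never be disabled. */
		if (!listed)
			__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(ext), 0);
	}
}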

Anything that can't be disabled is either a KVM bug, i.e. we should
be able to disable it, because we can't expect every host to have it,
or it needs to be in the base register sublist (meaning every host
will always have it).

Thanks,
drew

2023-06-10 03:01:34

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] KVM: riscv: selftests: Skip some registers set operation

On Fri, Jun 9, 2023 at 5:24 PM Andrew Jones <[email protected]> wrote:
>
> On Fri, Jun 09, 2023 at 10:12:17AM +0800, Haibo Xu wrote:
> > Set operation on some riscv registers (mostly pseudo ones) is not
> > supported and should be skipped in the get-reg-list test. Just
> > reuse the rejects_set utilities to handle this in riscv.
> >
> > Signed-off-by: Haibo Xu <[email protected]>
> > Reviewed-by: Andrew Jones <[email protected]>
> > ---
> > tools/testing/selftests/kvm/get-reg-list.c | 20 +++++++++++++-------
> > 1 file changed, 13 insertions(+), 7 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > index c4bd5a5259da..abacb95c21c6 100644
> > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > @@ -211,16 +211,22 @@ static void run_test(struct vcpu_reg_list *c)
> > ++failed_get;
> > }
> >
> > - /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
> > + /*
> > + * rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE on aarch64,
> > + * or registers that should skip set operation on riscv.
> > + */
> > for_each_sublist(c, s) {
> > if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
> > reject_reg = true;
> > - ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > - if (ret != -1 || errno != EPERM) {
> > - printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
> > - print_reg(config_name(c), reg.id);
> > - putchar('\n');
> > - ++failed_reject;
> > + if ((reg.id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64) {
> > + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > + if (ret != -1 || errno != EPERM) {
> > + printf("%s: Failed to reject (ret=%d, errno=%d) ",
> > + config_name(c), ret, errno);
> > + print_reg(config_name(c), reg.id);
> > + putchar('\n');
> > + ++failed_reject;
> > + }
>
> Thinking about this some more, shouldn't we attempt the set ioctl for
> riscv reject registers as well, but look for different error numbers?
>

Yes, we can. Currently, two different errnos (EOPNOTSUPP/EINVAL) are
reported for the rejected registers on riscv. These two errnos can be
handled specially, like below:

diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
index 73f40e0842b8..f3f2c4519318 100644
--- a/tools/testing/selftests/kvm/get-reg-list.c
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -255,6 +255,15 @@ static void run_test(struct vcpu_reg_list *c)
 						putchar('\n');
 						++failed_reject;
 					}
+				} else {
+					ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+					if (ret != -1 || (errno != EINVAL && errno != EOPNOTSUPP)) {
+						printf("%s: Failed to reject (ret=%d, errno=%d) ",
+						       config_name(c), ret, errno);
+						print_reg(config_name(c), reg.id);
+						putchar('\n');
+						++failed_reject;
+					}

One possible issue with the above change is that when new registers
that don't support set are added, we need to add them to the reject
registers list, or the test will fail.

Initially, in the v1 patch, the design was to just skip the EOPNOTSUPP
errno in set operations for all registers, since it's a known errno for
registers that don't support set. That approach covers all registers,
even future new ones.
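
i.e. roughly this shape inside the register loop (not the exact v1
code, just the idea):

		ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
		if (ret == -1 && errno == EOPNOTSUPP)
			continue;	/* register doesn't support set, skip it */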

What's your opinion?

Thanks,
Haibo
> Thanks,
> drew

2023-06-10 03:05:29

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 07/10] KVM: arm64: selftests: Finish generalizing get-reg-list

On Fri, Jun 9, 2023 at 8:30 PM Andrew Jones <[email protected]> wrote:
>
> On Fri, Jun 09, 2023 at 10:12:15AM +0800, Haibo Xu wrote:
> > From: Andrew Jones <[email protected]>
> >
> > Add some unfortunate #ifdeffery to ensure the common get-reg-list.c
> > can be compiled and run with other architectures. The next
> > architecture to support get-reg-list should now only need to provide
> > $(ARCH_DIR)/get-reg-list.c where arch-specific print_reg() and
> > vcpu_configs[] get defined.
> >
> > Signed-off-by: Andrew Jones <[email protected]>
> > Signed-off-by: Haibo Xu <[email protected]>
> > ---
> > tools/testing/selftests/kvm/get-reg-list.c | 24 ++++++++++++++++++----
> > 1 file changed, 20 insertions(+), 4 deletions(-)
> >
> > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > index 69bb91087081..c4bd5a5259da 100644
> > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > @@ -98,6 +98,7 @@ void __weak print_reg(const char *prefix, __u64 id)
> > printf("\t0x%llx,\n", id);
> > }
> >
> > +#ifdef __aarch64__
> > static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
> > {
> > struct vcpu_reg_sublist *s;
> > @@ -120,6 +121,24 @@ static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
> > }
> > }
> >
> > +static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> > +{
> > + struct kvm_vcpu_init init = { .target = -1, };
> > + struct kvm_vcpu *vcpu;
> > +
> > + prepare_vcpu_init(c, &init);
> > + vcpu = __vm_vcpu_add(vm, 0);
> > + aarch64_vcpu_setup(vcpu, &init);
> > +
> > + return vcpu;
> > +}
> > +#else
> > +static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> > +{
> > + return __vm_vcpu_add(vm, 0);
> > +}
> > +#endif
> > +
> > static void check_supported(struct vcpu_reg_list *c)
> > {
> > struct vcpu_reg_sublist *s;
> > @@ -139,7 +158,6 @@ static bool print_filtered;
> >
> > static void run_test(struct vcpu_reg_list *c)
> > {
> > - struct kvm_vcpu_init init = { .target = -1, };
> > int new_regs = 0, missing_regs = 0, i, n;
> > int failed_get = 0, failed_set = 0, failed_reject = 0;
> > struct kvm_vcpu *vcpu;
> > @@ -149,9 +167,7 @@ static void run_test(struct vcpu_reg_list *c)
> > check_supported(c);
> >
> > vm = vm_create_barebones();
> > - prepare_vcpu_init(c, &init);
> > - vcpu = __vm_vcpu_add(vm, 0);
> > - aarch64_vcpu_setup(vcpu, &init);
> > + vcpu = vcpu_config_get_vcpu(c, vm);
> > finalize_vcpu(vcpu, c);
>
> I just noticed that this has been modified from what I posted to leave
> the finalize_vcpu() call here, despite it now being inside the #ifdef
> __aarch64__. That breaks the purpose of the patch. Please make sure this
> file compiles for other architectures without requiring additional
> patches, which would keep the commit message honest. You can either
> revert this to what I posted, and then readd the finalize_vcpu() call in
> another patch, or you can add a finalize_vcpu() stub to the #else part
> of the ifdef in this patch.
>
> Also please don't modify patches authored by others without calling out
> the modifications somewhere, either the commit message or under the ---
> of the patch or in the cover letter.
>

Thanks for pointing it out! I will look into it.

> Thanks,
> drew

2023-06-10 03:49:53

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
>
> On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > get-reg-list test is used to check for KVM registers regressions
> > during VM migration which happens when destination host kernel
> > missing registers that the source host kernel has. The blessed
> > list registers was created by running on v6.4-rc5.
> >
> > Signed-off-by: Haibo Xu <[email protected]>
> > ---
> > tools/testing/selftests/kvm/Makefile | 1 +
> > tools/testing/selftests/kvm/get-reg-list.c | 28 +
> > .../selftests/kvm/include/riscv/processor.h | 3 +
> > .../selftests/kvm/riscv/get-reg-list.c | 611 ++++++++++++++++++
> > 4 files changed, 643 insertions(+)
> > create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c
> >
> > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > index d90cad19c9ee..f7bcda903dd9 100644
> > --- a/tools/testing/selftests/kvm/Makefile
> > +++ b/tools/testing/selftests/kvm/Makefile
> > @@ -174,6 +174,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
> >
> > TEST_GEN_PROGS_riscv += demand_paging_test
> > TEST_GEN_PROGS_riscv += dirty_log_test
> > +TEST_GEN_PROGS_riscv += get-reg-list
> > TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
> > TEST_GEN_PROGS_riscv += kvm_page_table_test
> > TEST_GEN_PROGS_riscv += set_memory_region_test
> > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > index abacb95c21c6..73f40e0842b8 100644
> > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > @@ -133,6 +133,34 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm
> > return vcpu;
> > }
> > #else
> > +static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
> > +{
> > + int ret;
> > + unsigned long value;
> > +
> > + ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
> > + if (ret) {
> > + printf("Failed to get ext %d", ext);
> > + return false;
> > + }
> > +
> > + return !!value;
> > +}
> > +
> > +static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
> > +{
> > + struct vcpu_reg_sublist *s;
> > +
> > + for_each_sublist(c, s) {
> > + if (!s->feature)
> > + continue;
>
> Using zero to mean "not specified" means we can't test for
> KVM_RISCV_ISA_EXT_A, but that's probably OK, since Linux always has 'a',
> so we'll never need to check for it.
>

Almost all the features are enabled by default for a RISC-V vCPU if
the host has the corresponding extension. We just leave it to
finalize_vcpu() to act as the gatekeeper and check whether a feature
is supported before running the test.

> > +
> > + __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
> > + "%s: %s not available, skipping tests\n",
> > + config_name(c), s->name);
> > + }
> > +}
> > +
> > static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> > {
> > return __vm_vcpu_add(vm, 0);
> > diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
> > index d00d213c3805..5b62a3d2aa9b 100644
> > --- a/tools/testing/selftests/kvm/include/riscv/processor.h
> > +++ b/tools/testing/selftests/kvm/include/riscv/processor.h
> > @@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
> > KVM_REG_RISCV_TIMER_REG(name), \
> > KVM_REG_SIZE_U64)
> >
> > +#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
> > + idx, KVM_REG_SIZE_ULONG)
> > +
> > /* L3 index Bit[47:39] */
> > #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
> > #define PGTBL_L3_INDEX_SHIFT 39
> > diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> > new file mode 100644
> > index 000000000000..0f371d99d471
> > --- /dev/null
> > +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> > @@ -0,0 +1,611 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/*
> > + * Check for KVM_GET_REG_LIST regressions.
> > + *
> > + * Copyright (C) 2020, Red Hat, Inc.
>
> I don't think we need the Red Hat copyright. This is a completely new
> work.
>

Sure, will remove it.

> > + * Copyright (c) 2023 Intel Corporation
> > + *
> > + */
> > +#include <stdio.h>
> > +#include "kvm_util.h"
> > +#include "test_util.h"
> > +#include "processor.h"
> > +
> > +#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
> > +
> > +static const char *config_id_to_str(__u64 id)
> > +{
> > + /* reg_off is the offset into struct kvm_riscv_config */
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
> > +
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_CONFIG_REG(isa):
> > + return "KVM_REG_RISCV_CONFIG_REG(isa)";
> > + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
> > + return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
> > + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
> > + return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
> > + case KVM_REG_RISCV_CONFIG_REG(mvendorid):
> > + return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
> > + case KVM_REG_RISCV_CONFIG_REG(marchid):
> > + return "KVM_REG_RISCV_CONFIG_REG(marchid)";
> > + case KVM_REG_RISCV_CONFIG_REG(mimpid):
> > + return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
> > + }
> > +
> > + /*
> > + * Config regs would grow regularly with new pseudo reg added, so
> > + * just show raw id to indicate a new pseudo config reg.
> > + */
> > + return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
> > +}
> > +
> > +static const char *core_id_to_str(const char *prefix, __u64 id)
> > +{
> > + /* reg_off is the offset into struct kvm_riscv_core */
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
> > +
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_CORE_REG(regs.pc):
> > + return "KVM_REG_RISCV_CORE_REG(regs.pc)";
> > + case KVM_REG_RISCV_CORE_REG(regs.ra):
> > + return "KVM_REG_RISCV_CORE_REG(regs.ra)";
> > + case KVM_REG_RISCV_CORE_REG(regs.sp):
> > + return "KVM_REG_RISCV_CORE_REG(regs.sp)";
> > + case KVM_REG_RISCV_CORE_REG(regs.gp):
> > + return "KVM_REG_RISCV_CORE_REG(regs.gp)";
> > + case KVM_REG_RISCV_CORE_REG(regs.tp):
> > + return "KVM_REG_RISCV_CORE_REG(regs.tp)";
> > + case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
> > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> > + reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
> > + case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
> > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> > + reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
> > + case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
> > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
> > + reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
> > + case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
> > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> > + reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
> > + case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
> > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> > + reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
> > + case KVM_REG_RISCV_CORE_REG(mode):
> > + return "KVM_REG_RISCV_CORE_REG(mode)";
> > + }
> > +
> > + TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
> > + return NULL;
> > +}
> > +
> > +#define RISCV_CSR_GENERAL(csr) \
> > + "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
> > +#define RISCV_CSR_AIA(csr) \
> > + "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
> > +
> > +static const char *general_csr_id_to_str(__u64 reg_off)
> > +{
> > + /* reg_off is the offset into struct kvm_riscv_csr */
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_CSR_REG(sstatus):
> > + return RISCV_CSR_GENERAL(sstatus);
> > + case KVM_REG_RISCV_CSR_REG(sie):
> > + return RISCV_CSR_GENERAL(sie);
> > + case KVM_REG_RISCV_CSR_REG(stvec):
> > + return RISCV_CSR_GENERAL(stvec);
> > + case KVM_REG_RISCV_CSR_REG(sscratch):
> > + return RISCV_CSR_GENERAL(sscratch);
> > + case KVM_REG_RISCV_CSR_REG(sepc):
> > + return RISCV_CSR_GENERAL(sepc);
> > + case KVM_REG_RISCV_CSR_REG(scause):
> > + return RISCV_CSR_GENERAL(scause);
> > + case KVM_REG_RISCV_CSR_REG(stval):
> > + return RISCV_CSR_GENERAL(stval);
> > + case KVM_REG_RISCV_CSR_REG(sip):
> > + return RISCV_CSR_GENERAL(sip);
> > + case KVM_REG_RISCV_CSR_REG(satp):
> > + return RISCV_CSR_GENERAL(satp);
> > + case KVM_REG_RISCV_CSR_REG(scounteren):
> > + return RISCV_CSR_GENERAL(scounteren);
> > + }
> > +
> > + TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
> > + return NULL;
> > +}
> > +
> > +static const char *aia_csr_id_to_str(__u64 reg_off)
> > +{
> > + /* reg_off is the offset into struct kvm_riscv_aia_csr */
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_CSR_AIA_REG(siselect):
> > + return RISCV_CSR_AIA(siselect);
> > + case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
> > + return RISCV_CSR_AIA(iprio1);
> > + case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
> > + return RISCV_CSR_AIA(iprio2);
> > + case KVM_REG_RISCV_CSR_AIA_REG(sieh):
> > + return RISCV_CSR_AIA(sieh);
> > + case KVM_REG_RISCV_CSR_AIA_REG(siph):
> > + return RISCV_CSR_AIA(siph);
> > + case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
> > + return RISCV_CSR_AIA(iprio1h);
> > + case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
> > + return RISCV_CSR_AIA(iprio2h);
> > + }
> > +
> > + TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
> > + return NULL;
> > +}
> > +
> > +static const char *csr_id_to_str(const char *prefix, __u64 id)
> > +{
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
> > + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> > +
> > + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> > +
> > + switch (reg_subtype) {
> > + case KVM_REG_RISCV_CSR_GENERAL:
> > + return general_csr_id_to_str(reg_off);
> > + case KVM_REG_RISCV_CSR_AIA:
> > + return aia_csr_id_to_str(reg_off);
> > + }
> > +
> > + TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
> > + return NULL;
> > +}
> > +
> > +static const char *timer_id_to_str(const char *prefix, __u64 id)
> > +{
> > + /* reg_off is the offset into struct kvm_riscv_timer */
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
> > +
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_TIMER_REG(frequency):
> > + return "KVM_REG_RISCV_TIMER_REG(frequency)";
> > + case KVM_REG_RISCV_TIMER_REG(time):
> > + return "KVM_REG_RISCV_TIMER_REG(time)";
> > + case KVM_REG_RISCV_TIMER_REG(compare):
> > + return "KVM_REG_RISCV_TIMER_REG(compare)";
> > + case KVM_REG_RISCV_TIMER_REG(state):
> > + return "KVM_REG_RISCV_TIMER_REG(state)";
> > + }
> > +
> > + TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
> > + return NULL;
> > +}
> > +
> > +static const char *fp_f_id_to_str(const char *prefix, __u64 id)
> > +{
> > + /* reg_off is the offset into struct __riscv_f_ext_state */
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
> > +
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_FP_F_REG(f[0]) ...
> > + KVM_REG_RISCV_FP_F_REG(f[31]):
> > + return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
> > + case KVM_REG_RISCV_FP_F_REG(fcsr):
> > + return "KVM_REG_RISCV_FP_F_REG(fcsr)";
> > + }
> > +
> > + TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
> > + return NULL;
> > +}
> > +
> > +static const char *fp_d_id_to_str(const char *prefix, __u64 id)
> > +{
> > + /* reg_off is the offset into struct __riscv_d_ext_state */
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
> > +
> > + switch (reg_off) {
> > + case KVM_REG_RISCV_FP_D_REG(f[0]) ...
> > + KVM_REG_RISCV_FP_D_REG(f[31]):
> > + return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
> > + case KVM_REG_RISCV_FP_D_REG(fcsr):
> > + return "KVM_REG_RISCV_FP_D_REG(fcsr)";
> > + }
> > +
> > + TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
> > + return NULL;
> > +}
> > +
> > +static const char *isa_ext_id_to_str(__u64 id)
> > +{
> > + /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
> > +
> > + static const char * const kvm_isa_ext_reg_name[] = {
> > + "KVM_RISCV_ISA_EXT_A",
> > + "KVM_RISCV_ISA_EXT_C",
> > + "KVM_RISCV_ISA_EXT_D",
> > + "KVM_RISCV_ISA_EXT_F",
> > + "KVM_RISCV_ISA_EXT_H",
> > + "KVM_RISCV_ISA_EXT_I",
> > + "KVM_RISCV_ISA_EXT_M",
> > + "KVM_RISCV_ISA_EXT_SVPBMT",
> > + "KVM_RISCV_ISA_EXT_SSTC",
> > + "KVM_RISCV_ISA_EXT_SVINVAL",
> > + "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
> > + "KVM_RISCV_ISA_EXT_ZICBOM",
> > + "KVM_RISCV_ISA_EXT_ZICBOZ",
> > + "KVM_RISCV_ISA_EXT_ZBB",
> > + "KVM_RISCV_ISA_EXT_SSAIA",
> > + };
> > +
> > + if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
> > + /*
> > + * isa_ext regs would grow regularly with new isa extension added, so
> > + * just show "reg" to indicate a new extension.
> > + */
> > + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> > + }
> > +
> > + return kvm_isa_ext_reg_name[reg_off];
> > +}
> > +
> > +static const char *sbi_ext_single_id_to_str(__u64 reg_off)
> > +{
> > + /* reg_off is KVM_RISCV_SBI_EXT_ID */
> > + static const char * const kvm_sbi_ext_reg_name[] = {
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
> > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
> > + };
> > +
> > + if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
> > + /*
> > + * sbi_ext regs would grow regularly with new sbi extension added, so
> > + * just show "reg" to indicate a new extension.
> > + */
> > + return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
> > + }
> > +
> > + return kvm_sbi_ext_reg_name[reg_off];
> > +}
> > +
> > +static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
> > +{
> > + if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
> > + /*
> > + * sbi_ext regs would grow regularly with new sbi extension added, so
> > + * just show "reg" to indicate a new extension.
> > + */
> > + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> > + }
> > +
> > + switch (reg_subtype) {
> > + case KVM_REG_RISCV_SBI_MULTI_EN:
> > + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
> > + case KVM_REG_RISCV_SBI_MULTI_DIS:
> > + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
> > + }
> > +
> > + return NULL;
> > +}
> > +
> > +static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
> > +{
> > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
> > + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> > +
> > + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> > +
> > + switch (reg_subtype) {
> > + case KVM_REG_RISCV_SBI_SINGLE:
> > + return sbi_ext_single_id_to_str(reg_off);
> > + case KVM_REG_RISCV_SBI_MULTI_EN:
> > + case KVM_REG_RISCV_SBI_MULTI_DIS:
> > + return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
> > + }
> > +
> > + TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
> > + return NULL;
> > +}
> > +
> > +void print_reg(const char *prefix, __u64 id)
> > +{
> > + const char *reg_size = NULL;
> > +
> > + TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
> > + "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
> > +
> > + switch (id & KVM_REG_SIZE_MASK) {
> > + case KVM_REG_SIZE_U32:
> > + reg_size = "KVM_REG_SIZE_U32";
> > + break;
> > + case KVM_REG_SIZE_U64:
> > + reg_size = "KVM_REG_SIZE_U64";
> > + break;
> > + case KVM_REG_SIZE_U128:
> > + reg_size = "KVM_REG_SIZE_U128";
> > + break;
> > + default:
> > + TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
> > + prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
> > + }
> > +
> > + switch (id & KVM_REG_RISCV_TYPE_MASK) {
> > + case KVM_REG_RISCV_CONFIG:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
>
> All the work to try and use KVM_REG_SIZE_ULONG in the right places will be
> lost if we print a reg list and then copy+paste it as a blessed list. On
> 64-bit, the only thing supported now, we'll get U64, but if we ever
> supported 32-bit, then we'd get U32. This is unfortunate, but there's
> nothing we can do about it. Either we can't have a true print+copy+paste
> workflow or we should assume we'll only support 64-bit and only use U64
> in the blessed lists (from a copy+paste). But, we've already got ULONG
> in there now, so we can just leave it and burn this bridge later.
>

Yes, print_reg would print U64 on a riscv64 system and U32 on riscv32.
As commented in the v2 patch, it seems 32-bit is not supported on other
architectures. If riscv follows this assumption, I think U64 is better
for print_reg.


> > + reg_size, config_id_to_str(id));
> > + break;
> > + case KVM_REG_RISCV_CORE:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
> > + reg_size, core_id_to_str(prefix, id));
> > + break;
> > + case KVM_REG_RISCV_CSR:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
> > + reg_size, csr_id_to_str(prefix, id));
> > + break;
> > + case KVM_REG_RISCV_TIMER:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
> > + reg_size, timer_id_to_str(prefix, id));
> > + break;
> > + case KVM_REG_RISCV_FP_F:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
> > + reg_size, fp_f_id_to_str(prefix, id));
> > + break;
> > + case KVM_REG_RISCV_FP_D:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
> > + reg_size, fp_d_id_to_str(prefix, id));
> > + break;
> > + case KVM_REG_RISCV_ISA_EXT:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
> > + reg_size, isa_ext_id_to_str(id));
> > + break;
> > + case KVM_REG_RISCV_SBI_EXT:
> > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
> > + reg_size, sbi_ext_id_to_str(prefix, id));
> > + break;
> > + default:
> > + TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
> > + (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
> > + }
> > +}
> > +
> > +/*
> > + * The current blessed list was primed with the output of kernel version
> > + * v6.4-rc5 and then later updated with new registers.
> > + */
> > +static __u64 base_regs[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
> > +};
> > +
> > +/*
> > + * The rejects_set lists registers that should skip the set test.
> > + * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
> > + * - KVM_REG_RISCV_TIMER_REG(frequency): set not supported
> > + * - KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): set not supported
> > + * - KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): set not supported
> > + * - KVM_RISCV_ISA_EXT_SVPBMT: set not supported
> > + * - KVM_RISCV_ISA_EXT_SVINVAL: set not supported
> > + * - KVM_RISCV_ISA_EXT_SSAIA: set not supported
> > + */
> > +static __u64 base_rejects_set[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
>
> These aren't all base registers. I think we should divide the reject lists
> up too, especially considering the idea I wrote in the last patch, which
> is to test setting the rejects to ensure the expected error is returned.
> The error may be different for a rejected set of a supported register vs.
> that of an unsupported register.
>

Yes, the reject lists should be divided up too. Will change it in V4.

> > +};
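
A minimal sketch of what that split could look like, reusing only the
identifiers already quoted in this patch (illustrative only, not part of
the posted series):

static __u64 zicbom_rejects_set[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
};

#define ZICBOM_REGS_SUBLIST \
	{"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, \
	 .regs_n = ARRAY_SIZE(zicbom_regs), \
	 .rejects_set = zicbom_rejects_set, \
	 .rejects_set_n = ARRAY_SIZE(zicbom_rejects_set),}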
> > +
> > +static __u64 zicbom_regs[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
> > +};
> > +
> > +static __u64 zicboz_regs[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
> > +};
> > +
> > +static __u64 aia_csr_regs[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
> > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
> > +};
> > +
> > +static __u64 fp_f_regs[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
> > +};
> > +
> > +static __u64 fp_d_regs[] = {
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
> > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
> > +};
> > +
> > +#define BASE_SUBLIST \
> > + {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
> > + .rejects_set = base_rejects_set, .rejects_set_n = ARRAY_SIZE(base_rejects_set),}
> > +#define ZICBOM_REGS_SUBLIST \
> > + {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, \
> > + .regs_n = ARRAY_SIZE(zicbom_regs),}
> > +#define ZICBOZ_REGS_SUBLIST \
> > + {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, \
> > + .regs_n = ARRAY_SIZE(zicboz_regs),}
> > +#define AIA_REGS_SUBLIST \
> > + {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_csr_regs, \
> > + .regs_n = ARRAY_SIZE(aia_csr_regs),}
> > +#define FP_F_REGS_SUBLIST \
> > + {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
> > + .regs_n = ARRAY_SIZE(fp_f_regs),}
> > +#define FP_D_REGS_SUBLIST \
> > + {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
> > + .regs_n = ARRAY_SIZE(fp_d_regs),}
> > +
> > +static struct vcpu_reg_list zicbo_config = {
> > + .sublists = {
> > + BASE_SUBLIST,
> > + ZICBOM_REGS_SUBLIST,
> > + ZICBOZ_REGS_SUBLIST,
>
> It's possible to have zicbom without zicboz and vice-versa. Since
> finalize_vcpu() will skip the whole test when it detects a missing
> feature for a config, then we won't be able to test one without the
> other. It's a bit annoying, but I think we may need a separate config
> for each independent extension.
>

Sure.

> > + {0},
> > + },
> > +};
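
As a rough illustration of the per-extension configs suggested above
(sketch only; the sublist macros are the ones defined in this patch):

static struct vcpu_reg_list zicbom_config = {
	.sublists = {
		BASE_SUBLIST,
		ZICBOM_REGS_SUBLIST,
		{0},
	},
};

static struct vcpu_reg_list zicboz_config = {
	.sublists = {
		BASE_SUBLIST,
		ZICBOZ_REGS_SUBLIST,
		{0},
	},
};

Each config then only requires its own extension, so a host with zicbom
but not zicboz (or vice versa) can still run one of the tests.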
> > +
> > +static struct vcpu_reg_list aia_config = {
> > + .sublists = {
> > + BASE_SUBLIST,
> > + AIA_REGS_SUBLIST,
> > + {0},
> > + },
> > +};
> > +
> > +static struct vcpu_reg_list fp_f_d_config = {
> > + .sublists = {
> > + BASE_SUBLIST,
> > + FP_F_REGS_SUBLIST,
> > + FP_D_REGS_SUBLIST,
> > + {0},
> > + },
> > +};
> > +
> > +struct vcpu_reg_list *vcpu_configs[] = {
> > + &zicbo_config,
> > + &aia_config,
> > + &fp_f_d_config,
> > +};
> > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > --
> > 2.34.1
> >
>
> I see we have a bit of a problem with the configs for riscv. Since we
> don't disable anything we're not testing, then for any test that is
> missing, for example, the f and d registers, we'll get output like
> "There are 66 new registers. Consider adding them to the blessed reg
> list with the following lines:" and then a dump of all the f and d
> registers. The test doesn't fail, but it's messy and confusing. Ideally
> we'd disable all registers of all sublists not in the config, probably
> by starting by disabling everything and then only reenabling the ones
> in the config.
>
> Anything that can't be disabled is either a KVM bug, i.e. we should
> be able to disable it, because we can't expect every host to have it,
> or it needs to be in the base register sublist (meaning every host
> will always have it).
>

Yes, as mentioned above, all the features are enabled for the vCPU by
default if they are available on the host. I think we can disable all
the feature bits at the start of finalize_vcpu() and only enable the
feature bits corresponding to the specified config.
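
A rough sketch of that idea, assuming that writing 0/1 to an ISA_EXT
register disables/enables the extension (when the host allows it) and
using the RISCV_ISA_EXT_REG() helper added by this series; the loop and
the details are illustrative, not a posted implementation:

static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
	struct vcpu_reg_sublist *s;
	int i;

	/* Start from a minimal vCPU: try to disable every extension */
	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
		__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);

	for_each_sublist(c, s) {
		if (!s->feature)
			continue;

		/* Re-enable only what this config wants to test */
		__vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);

		__TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
			       "%s: %s not available, skipping tests\n",
			       config_name(c), s->name);
	}
}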

Thanks,
Haibo

> Thanks,
> drew

2023-06-12 09:32:59

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Sat, Jun 10, 2023 at 11:12:15AM +0800, Haibo Xu wrote:
> On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
> >
> > On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > > The get-reg-list test is used to check for KVM register regressions
> > > during VM migration, which happen when the destination host kernel
> > > is missing registers that the source host kernel has. The blessed
> > > list of registers was created by running on v6.4-rc5.
> > >
> > > Signed-off-by: Haibo Xu <[email protected]>
> > > ---
> > > tools/testing/selftests/kvm/Makefile | 1 +
> > > tools/testing/selftests/kvm/get-reg-list.c | 28 +
> > > .../selftests/kvm/include/riscv/processor.h | 3 +
> > > .../selftests/kvm/riscv/get-reg-list.c | 611 ++++++++++++++++++
> > > 4 files changed, 643 insertions(+)
> > > create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c
> > >
> > > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > > index d90cad19c9ee..f7bcda903dd9 100644
> > > --- a/tools/testing/selftests/kvm/Makefile
> > > +++ b/tools/testing/selftests/kvm/Makefile
> > > @@ -174,6 +174,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
> > >
> > > TEST_GEN_PROGS_riscv += demand_paging_test
> > > TEST_GEN_PROGS_riscv += dirty_log_test
> > > +TEST_GEN_PROGS_riscv += get-reg-list
> > > TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
> > > TEST_GEN_PROGS_riscv += kvm_page_table_test
> > > TEST_GEN_PROGS_riscv += set_memory_region_test
> > > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > > index abacb95c21c6..73f40e0842b8 100644
> > > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > > @@ -133,6 +133,34 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm
> > > return vcpu;
> > > }
> > > #else
> > > +static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
> > > +{
> > > + int ret;
> > > + unsigned long value;
> > > +
> > > + ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
> > > + if (ret) {
> > > + printf("Failed to get ext %d", ext);
> > > + return false;
> > > + }
> > > +
> > > + return !!value;
> > > +}
> > > +
> > > +static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
> > > +{
> > > + struct vcpu_reg_sublist *s;
> > > +
> > > + for_each_sublist(c, s) {
> > > + if (!s->feature)
> > > + continue;
> >
> > Using zero to mean "not specified" means we can't test for
> > KVM_RISCV_ISA_EXT_A, but that's probably OK, since Linux always has 'a',
> > so we'll never need to check for it.
> >
>
> Almost all the features are enabled by default for a RISC-V vCPU if
> the host has the corresponding extension.
> Just leave the gatekeeping to finalize_vcpu(), which checks whether
> the feature is supported before triggering the test.

Yes, but you can't check if 'a' is available here, because
KVM_RISCV_ISA_EXT_A is 0 and 0 also means "not specified", as
shown above with the "if 0, continue". But, as I said, it
doesn't matter. 'a' will always be present.

>
> > > +
> > > + __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
> > > + "%s: %s not available, skipping tests\n",
> > > + config_name(c), s->name);
> > > + }
> > > +}
> > > +
> > > static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> > > {
> > > return __vm_vcpu_add(vm, 0);
> > > diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
> > > index d00d213c3805..5b62a3d2aa9b 100644
> > > --- a/tools/testing/selftests/kvm/include/riscv/processor.h
> > > +++ b/tools/testing/selftests/kvm/include/riscv/processor.h
> > > @@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
> > > KVM_REG_RISCV_TIMER_REG(name), \
> > > KVM_REG_SIZE_U64)
> > >
> > > +#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
> > > + idx, KVM_REG_SIZE_ULONG)
> > > +
> > > /* L3 index Bit[47:39] */
> > > #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
> > > #define PGTBL_L3_INDEX_SHIFT 39
> > > diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> > > new file mode 100644
> > > index 000000000000..0f371d99d471
> > > --- /dev/null
> > > +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> > > @@ -0,0 +1,611 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +/*
> > > + * Check for KVM_GET_REG_LIST regressions.
> > > + *
> > > + * Copyright (C) 2020, Red Hat, Inc.
> >
> > I don't think we need the Red Hat copyright. This is a completely new
> > work.
> >
>
> Sure, will remove it.
>
> > > + * Copyright (c) 2023 Intel Corporation
> > > + *
> > > + */
> > > +#include <stdio.h>
> > > +#include "kvm_util.h"
> > > +#include "test_util.h"
> > > +#include "processor.h"
> > > +
> > > +#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
> > > +
> > > +static const char *config_id_to_str(__u64 id)
> > > +{
> > > + /* reg_off is the offset into struct kvm_riscv_config */
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
> > > +
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_CONFIG_REG(isa):
> > > + return "KVM_REG_RISCV_CONFIG_REG(isa)";
> > > + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
> > > + return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
> > > + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
> > > + return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
> > > + case KVM_REG_RISCV_CONFIG_REG(mvendorid):
> > > + return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
> > > + case KVM_REG_RISCV_CONFIG_REG(marchid):
> > > + return "KVM_REG_RISCV_CONFIG_REG(marchid)";
> > > + case KVM_REG_RISCV_CONFIG_REG(mimpid):
> > > + return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
> > > + }
> > > +
> > > + /*
> > > + * Config regs would grow regularly with new pseudo reg added, so
> > > + * just show raw id to indicate a new pseudo config reg.
> > > + */
> > > + return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
> > > +}
> > > +
> > > +static const char *core_id_to_str(const char *prefix, __u64 id)
> > > +{
> > > + /* reg_off is the offset into struct kvm_riscv_core */
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
> > > +
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_CORE_REG(regs.pc):
> > > + return "KVM_REG_RISCV_CORE_REG(regs.pc)";
> > > + case KVM_REG_RISCV_CORE_REG(regs.ra):
> > > + return "KVM_REG_RISCV_CORE_REG(regs.ra)";
> > > + case KVM_REG_RISCV_CORE_REG(regs.sp):
> > > + return "KVM_REG_RISCV_CORE_REG(regs.sp)";
> > > + case KVM_REG_RISCV_CORE_REG(regs.gp):
> > > + return "KVM_REG_RISCV_CORE_REG(regs.gp)";
> > > + case KVM_REG_RISCV_CORE_REG(regs.tp):
> > > + return "KVM_REG_RISCV_CORE_REG(regs.tp)";
> > > + case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
> > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
> > > + case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
> > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
> > > + case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
> > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
> > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
> > > + case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
> > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
> > > + case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
> > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
> > > + case KVM_REG_RISCV_CORE_REG(mode):
> > > + return "KVM_REG_RISCV_CORE_REG(mode)";
> > > + }
> > > +
> > > + TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
> > > + return NULL;
> > > +}
> > > +
> > > +#define RISCV_CSR_GENERAL(csr) \
> > > + "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
> > > +#define RISCV_CSR_AIA(csr) \
> > > + "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
> > > +
> > > +static const char *general_csr_id_to_str(__u64 reg_off)
> > > +{
> > > + /* reg_off is the offset into struct kvm_riscv_csr */
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_CSR_REG(sstatus):
> > > + return RISCV_CSR_GENERAL(sstatus);
> > > + case KVM_REG_RISCV_CSR_REG(sie):
> > > + return RISCV_CSR_GENERAL(sie);
> > > + case KVM_REG_RISCV_CSR_REG(stvec):
> > > + return RISCV_CSR_GENERAL(stvec);
> > > + case KVM_REG_RISCV_CSR_REG(sscratch):
> > > + return RISCV_CSR_GENERAL(sscratch);
> > > + case KVM_REG_RISCV_CSR_REG(sepc):
> > > + return RISCV_CSR_GENERAL(sepc);
> > > + case KVM_REG_RISCV_CSR_REG(scause):
> > > + return RISCV_CSR_GENERAL(scause);
> > > + case KVM_REG_RISCV_CSR_REG(stval):
> > > + return RISCV_CSR_GENERAL(stval);
> > > + case KVM_REG_RISCV_CSR_REG(sip):
> > > + return RISCV_CSR_GENERAL(sip);
> > > + case KVM_REG_RISCV_CSR_REG(satp):
> > > + return RISCV_CSR_GENERAL(satp);
> > > + case KVM_REG_RISCV_CSR_REG(scounteren):
> > > + return RISCV_CSR_GENERAL(scounteren);
> > > + }
> > > +
> > > + TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *aia_csr_id_to_str(__u64 reg_off)
> > > +{
> > > + /* reg_off is the offset into struct kvm_riscv_aia_csr */
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_CSR_AIA_REG(siselect):
> > > + return RISCV_CSR_AIA(siselect);
> > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
> > > + return RISCV_CSR_AIA(iprio1);
> > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
> > > + return RISCV_CSR_AIA(iprio2);
> > > + case KVM_REG_RISCV_CSR_AIA_REG(sieh):
> > > + return RISCV_CSR_AIA(sieh);
> > > + case KVM_REG_RISCV_CSR_AIA_REG(siph):
> > > + return RISCV_CSR_AIA(siph);
> > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
> > > + return RISCV_CSR_AIA(iprio1h);
> > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
> > > + return RISCV_CSR_AIA(iprio2h);
> > > + }
> > > +
> > > + TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *csr_id_to_str(const char *prefix, __u64 id)
> > > +{
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
> > > + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> > > +
> > > + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> > > +
> > > + switch (reg_subtype) {
> > > + case KVM_REG_RISCV_CSR_GENERAL:
> > > + return general_csr_id_to_str(reg_off);
> > > + case KVM_REG_RISCV_CSR_AIA:
> > > + return aia_csr_id_to_str(reg_off);
> > > + }
> > > +
> > > + TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *timer_id_to_str(const char *prefix, __u64 id)
> > > +{
> > > + /* reg_off is the offset into struct kvm_riscv_timer */
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
> > > +
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_TIMER_REG(frequency):
> > > + return "KVM_REG_RISCV_TIMER_REG(frequency)";
> > > + case KVM_REG_RISCV_TIMER_REG(time):
> > > + return "KVM_REG_RISCV_TIMER_REG(time)";
> > > + case KVM_REG_RISCV_TIMER_REG(compare):
> > > + return "KVM_REG_RISCV_TIMER_REG(compare)";
> > > + case KVM_REG_RISCV_TIMER_REG(state):
> > > + return "KVM_REG_RISCV_TIMER_REG(state)";
> > > + }
> > > +
> > > + TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *fp_f_id_to_str(const char *prefix, __u64 id)
> > > +{
> > > + /* reg_off is the offset into struct __riscv_f_ext_state */
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
> > > +
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_FP_F_REG(f[0]) ...
> > > + KVM_REG_RISCV_FP_F_REG(f[31]):
> > > + return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
> > > + case KVM_REG_RISCV_FP_F_REG(fcsr):
> > > + return "KVM_REG_RISCV_FP_F_REG(fcsr)";
> > > + }
> > > +
> > > + TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *fp_d_id_to_str(const char *prefix, __u64 id)
> > > +{
> > > + /* reg_off is the offset into struct __riscv_d_ext_state */
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
> > > +
> > > + switch (reg_off) {
> > > + case KVM_REG_RISCV_FP_D_REG(f[0]) ...
> > > + KVM_REG_RISCV_FP_D_REG(f[31]):
> > > + return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
> > > + case KVM_REG_RISCV_FP_D_REG(fcsr):
> > > + return "KVM_REG_RISCV_FP_D_REG(fcsr)";
> > > + }
> > > +
> > > + TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *isa_ext_id_to_str(__u64 id)
> > > +{
> > > + /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
> > > +
> > > + static const char * const kvm_isa_ext_reg_name[] = {
> > > + "KVM_RISCV_ISA_EXT_A",
> > > + "KVM_RISCV_ISA_EXT_C",
> > > + "KVM_RISCV_ISA_EXT_D",
> > > + "KVM_RISCV_ISA_EXT_F",
> > > + "KVM_RISCV_ISA_EXT_H",
> > > + "KVM_RISCV_ISA_EXT_I",
> > > + "KVM_RISCV_ISA_EXT_M",
> > > + "KVM_RISCV_ISA_EXT_SVPBMT",
> > > + "KVM_RISCV_ISA_EXT_SSTC",
> > > + "KVM_RISCV_ISA_EXT_SVINVAL",
> > > + "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
> > > + "KVM_RISCV_ISA_EXT_ZICBOM",
> > > + "KVM_RISCV_ISA_EXT_ZICBOZ",
> > > + "KVM_RISCV_ISA_EXT_ZBB",
> > > + "KVM_RISCV_ISA_EXT_SSAIA",
> > > + };
> > > +
> > > + if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
> > > + /*
> > > + * isa_ext regs would grow regularly with new isa extension added, so
> > > + * just show "reg" to indicate a new extension.
> > > + */
> > > + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> > > + }
> > > +
> > > + return kvm_isa_ext_reg_name[reg_off];
> > > +}
> > > +
> > > +static const char *sbi_ext_single_id_to_str(__u64 reg_off)
> > > +{
> > > + /* reg_off is KVM_RISCV_SBI_EXT_ID */
> > > + static const char * const kvm_sbi_ext_reg_name[] = {
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
> > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
> > > + };
> > > +
> > > + if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
> > > + /*
> > > + * sbi_ext regs would grow regularly with new sbi extension added, so
> > > + * just show "reg" to indicate a new extension.
> > > + */
> > > + return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
> > > + }
> > > +
> > > + return kvm_sbi_ext_reg_name[reg_off];
> > > +}
> > > +
> > > +static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
> > > +{
> > > + if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
> > > + /*
> > > + * sbi_ext regs would grow regularly with new sbi extension added, so
> > > + * just show "reg" to indicate a new extension.
> > > + */
> > > + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> > > + }
> > > +
> > > + switch (reg_subtype) {
> > > + case KVM_REG_RISCV_SBI_MULTI_EN:
> > > + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
> > > + case KVM_REG_RISCV_SBI_MULTI_DIS:
> > > + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
> > > + }
> > > +
> > > + return NULL;
> > > +}
> > > +
> > > +static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
> > > +{
> > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
> > > + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> > > +
> > > + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> > > +
> > > + switch (reg_subtype) {
> > > + case KVM_REG_RISCV_SBI_SINGLE:
> > > + return sbi_ext_single_id_to_str(reg_off);
> > > + case KVM_REG_RISCV_SBI_MULTI_EN:
> > > + case KVM_REG_RISCV_SBI_MULTI_DIS:
> > > + return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
> > > + }
> > > +
> > > + TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
> > > + return NULL;
> > > +}
> > > +
> > > +void print_reg(const char *prefix, __u64 id)
> > > +{
> > > + const char *reg_size = NULL;
> > > +
> > > + TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
> > > + "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
> > > +
> > > + switch (id & KVM_REG_SIZE_MASK) {
> > > + case KVM_REG_SIZE_U32:
> > > + reg_size = "KVM_REG_SIZE_U32";
> > > + break;
> > > + case KVM_REG_SIZE_U64:
> > > + reg_size = "KVM_REG_SIZE_U64";
> > > + break;
> > > + case KVM_REG_SIZE_U128:
> > > + reg_size = "KVM_REG_SIZE_U128";
> > > + break;
> > > + default:
> > > + TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
> > > + prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
> > > + }
> > > +
> > > + switch (id & KVM_REG_RISCV_TYPE_MASK) {
> > > + case KVM_REG_RISCV_CONFIG:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
> >
> > All the work to try and use KVM_REG_SIZE_ULONG in the right places will be
> > lost if we print a reg list and then copy+paste it as a blessed list. On
> > 64-bit, the only thing supported now, we'll get U64, but if we ever
> > supported 32-bit, then we'd get U32. This is unfortunate, but there's
> > nothing we can do about it. Either we can't have a true print+copy+paste
> > workflow or we should assume we'll only support 64-bit and only use U64
> > in the blessed lists (from a copy+paste). But, we've already got ULONG
> > in there now, so we can just leave it and burn this bridge later.
> >
>
> Yes, print_reg() would print U64 on a riscv64 system and U32 on riscv32.
> As commented in the v2 patch, it seems 32-bit is not supported on other arches.
> If riscv follows this assumption, I think U64 is better for print_reg().

It's not better, but there's no way to avoid it. The information that it
was a ULONG, instead of U64, has been lost at this point. The only way
to print it correctly is to create a reg-size mapping and then look it up,
rather than decode it. We can add a lookup when/if adding 32-bit support.
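
For example (purely illustrative), the mapping could be a side table of
the ids whose natural size is ULONG, with the size field masked off, and
print_reg() would consult it before decoding the size bits; the names
below are hypothetical:

static const __u64 ulong_reg_ids[] = {
	KVM_REG_RISCV | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
	KVM_REG_RISCV | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
	/* ... every reg defined with KVM_REG_SIZE_ULONG ... */
};

static bool reg_size_is_ulong(__u64 id)
{
	__u64 masked = id & ~KVM_REG_SIZE_MASK;
	int i;

	for (i = 0; i < ARRAY_SIZE(ulong_reg_ids); i++) {
		if (ulong_reg_ids[i] == masked)
			return true;
	}

	return false;
}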

>
>
> > > + reg_size, config_id_to_str(id));
> > > + break;
> > > + case KVM_REG_RISCV_CORE:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
> > > + reg_size, core_id_to_str(prefix, id));
> > > + break;
> > > + case KVM_REG_RISCV_CSR:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
> > > + reg_size, csr_id_to_str(prefix, id));
> > > + break;
> > > + case KVM_REG_RISCV_TIMER:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
> > > + reg_size, timer_id_to_str(prefix, id));
> > > + break;
> > > + case KVM_REG_RISCV_FP_F:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
> > > + reg_size, fp_f_id_to_str(prefix, id));
> > > + break;
> > > + case KVM_REG_RISCV_FP_D:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
> > > + reg_size, fp_d_id_to_str(prefix, id));
> > > + break;
> > > + case KVM_REG_RISCV_ISA_EXT:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
> > > + reg_size, isa_ext_id_to_str(id));
> > > + break;
> > > + case KVM_REG_RISCV_SBI_EXT:
> > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
> > > + reg_size, sbi_ext_id_to_str(prefix, id));
> > > + break;
> > > + default:
> > > + TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
> > > + (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
> > > + }
> > > +}
> > > +
> > > +/*
> > > + * The current blessed list was primed with the output of kernel version
> > > + * v6.4-rc5 and then later updated with new registers.
> > > + */
> > > +static __u64 base_regs[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
> > > +};
> > > +
> > > +/*
> > > + * The rejects_set lists registers that should skip the set test.
> > > + * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
> > > + * - KVM_REG_RISCV_TIMER_REG(frequency): set not supported
> > > + * - KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): set not supported
> > > + * - KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): set not supported
> > > + * - KVM_RISCV_ISA_EXT_SVPBMT: set not supported
> > > + * - KVM_RISCV_ISA_EXT_SVINVAL: set not supported
> > > + * - KVM_RISCV_ISA_EXT_SSAIA: set not supported
> > > + */
> > > +static __u64 base_rejects_set[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
> >
> > These aren't all base registers. I think we should divide the reject lists
> > up too, especially considering the idea I wrote in the last patch, which
> > is to test setting the rejects to ensure the expected error is returned.
> > The error may be different for a rejected set of a supported register vs.
> > that of an unsupported register.
> >
>
> Yes, the reject lists should be divided up too. Will change it in V4.
>
> > > +};
> > > +
> > > +static __u64 zicbom_regs[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
> > > +};
> > > +
> > > +static __u64 zicboz_regs[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
> > > +};
> > > +
> > > +static __u64 aia_csr_regs[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
> > > +};
> > > +
> > > +static __u64 fp_f_regs[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
> > > +};
> > > +
> > > +static __u64 fp_d_regs[] = {
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
> > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
> > > +};
> > > +
> > > +#define BASE_SUBLIST \
> > > + {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
> > > + .rejects_set = base_rejects_set, .rejects_set_n = ARRAY_SIZE(base_rejects_set),}
> > > +#define ZICBOM_REGS_SUBLIST \
> > > + {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, \
> > > + .regs_n = ARRAY_SIZE(zicbom_regs),}
> > > +#define ZICBOZ_REGS_SUBLIST \
> > > + {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, \
> > > + .regs_n = ARRAY_SIZE(zicboz_regs),}
> > > +#define AIA_REGS_SUBLIST \
> > > + {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_csr_regs, \
> > > + .regs_n = ARRAY_SIZE(aia_csr_regs),}
> > > +#define FP_F_REGS_SUBLIST \
> > > + {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
> > > + .regs_n = ARRAY_SIZE(fp_f_regs),}
> > > +#define FP_D_REGS_SUBLIST \
> > > + {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
> > > + .regs_n = ARRAY_SIZE(fp_d_regs),}
> > > +
> > > +static struct vcpu_reg_list zicbo_config = {
> > > + .sublists = {
> > > + BASE_SUBLIST,
> > > + ZICBOM_REGS_SUBLIST,
> > > + ZICBOZ_REGS_SUBLIST,
> >
> > It's possible to have zicbom without zicboz and vice-versa. Since
> > finalize_vcpu() will skip the whole test when it detects a missing
> > feature for a config, then we won't be able to test one without the
> > other. It's a bit annoying, but I think we may need a separate config
> > for each independent extension.
> >
>
> Sure.
>
> > > + {0},
> > > + },
> > > +};
> > > +
> > > +static struct vcpu_reg_list aia_config = {
> > > + .sublists = {
> > > + BASE_SUBLIST,
> > > + AIA_REGS_SUBLIST,
> > > + {0},
> > > + },
> > > +};
> > > +
> > > +static struct vcpu_reg_list fp_f_d_config = {
> > > + .sublists = {
> > > + BASE_SUBLIST,
> > > + FP_F_REGS_SUBLIST,
> > > + FP_D_REGS_SUBLIST,
> > > + {0},
> > > + },
> > > +};
> > > +
> > > +struct vcpu_reg_list *vcpu_configs[] = {
> > > + &zicbo_config,
> > > + &aia_config,
> > > + &fp_f_d_config,
> > > +};
> > > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > > --
> > > 2.34.1
> > >
> >
> > I see we have a bit of a problem with the configs for riscv. Since we
> > don't disable anything we're not testing, then for any test that is
> > missing, for example, the f and d registers, we'll get output like
> > "There are 66 new registers. Consider adding them to the blessed reg
> > list with the following lines:" and then a dump of all the f and d
> > registers. The test doesn't fail, but it's messy and confusing. Ideally
> > we'd disable all registers of all sublists not in the config, probably
> > by starting by disabling everything and then only reenabling the ones
> > in the config.
> >
> > Anything that can't be disabled is either a KVM bug, i.e. we should
> > be able to disable it, because we can't expect every host to have it,
> > or it needs to be in the base register sublist (meaning every host
> > will always have it).
> >
>
> Yes, as mentioned above, all the features are enabled for the vCPU by
> default if they are available on the host. I think we can disable all
> the feature bits at the start of finalize_vcpu() and only enable the
> feature bits corresponding to the specified config.
>

Yup.

Thanks,
drew

2023-06-12 09:55:59

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] KVM: riscv: selftests: Skip some registers set operation

On Sat, Jun 10, 2023 at 10:35:24AM +0800, Haibo Xu wrote:
> On Fri, Jun 9, 2023 at 5:24 PM Andrew Jones <[email protected]> wrote:
> >
> > On Fri, Jun 09, 2023 at 10:12:17AM +0800, Haibo Xu wrote:
> > > Set operation on some riscv registers (mostly pseudo ones) was not
> > > supported and should be skipped in the get-reg-list test. Just
> > > reuse the rejects_set utilities to handle it in riscv.
> > >
> > > Signed-off-by: Haibo Xu <[email protected]>
> > > Reviewed-by: Andrew Jones <[email protected]>
> > > ---
> > > tools/testing/selftests/kvm/get-reg-list.c | 20 +++++++++++++-------
> > > 1 file changed, 13 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > > index c4bd5a5259da..abacb95c21c6 100644
> > > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > > @@ -211,16 +211,22 @@ static void run_test(struct vcpu_reg_list *c)
> > > ++failed_get;
> > > }
> > >
> > > - /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
> > > + /*
> > > + * rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE on aarch64,
> > > + * or registers that should skip set operation on riscv.
> > > + */
> > > for_each_sublist(c, s) {
> > > if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
> > > reject_reg = true;
> > > - ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > > - if (ret != -1 || errno != EPERM) {
> > > - printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
> > > - print_reg(config_name(c), reg.id);
> > > - putchar('\n');
> > > - ++failed_reject;
> > > + if ((reg.id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64) {
> > > + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > > + if (ret != -1 || errno != EPERM) {
> > > + printf("%s: Failed to reject (ret=%d, errno=%d) ",
> > > + config_name(c), ret, errno);
> > > + print_reg(config_name(c), reg.id);
> > > + putchar('\n');
> > > + ++failed_reject;
> > > + }
> >
> > Thinking about this some more, shouldn't we attempt the set ioctl for
> > riscv reject registers as well, but look for different error numbers?
> >
>
> Yes, we can. Currently, two different errnos (EOPNOTSUPP/EINVAL) are
> reported for the rejected registers on RISC-V.
> These two errnos can be handled specially like below:
>
> diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> index 73f40e0842b8..f3f2c4519318 100644
> --- a/tools/testing/selftests/kvm/get-reg-list.c
> +++ b/tools/testing/selftests/kvm/get-reg-list.c
> @@ -255,6 +255,15 @@ static void run_test(struct vcpu_reg_list *c)
>  						putchar('\n');
>  						++failed_reject;
>  					}
> +				} else {
> +					ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> +					if (ret != -1 || (errno != EINVAL && errno != EOPNOTSUPP)) {
> +						printf("%s: Failed to reject (ret=%d, errno=%d) ",
> +						       config_name(c), ret, errno);
> +						print_reg(config_name(c), reg.id);
> +						putchar('\n');
> +						++failed_reject;
> +					}

Instead of duplicating the code Arm uses, we just need an errno check
function, preferably one that takes the register as an input, so we
can check for specific errnos for specific registers.
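
Roughly something like this (only a sketch; the per-arch errno expectations
below are just what we've discussed so far, not a final list):

	/* Expected failure mode when setting a reject-listed register. */
	static bool reject_set_failed_as_expected(__u64 reg_id, int ret, int err)
	{
		if (ret != -1)
			return false;

		if ((reg_id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64)
			return err == EPERM;

		/* riscv pseudo registers reject set with EOPNOTSUPP or EINVAL */
		return err == EOPNOTSUPP || err == EINVAL;
	}

Then run_test() only needs one call site:

	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	if (!reject_set_failed_as_expected(reg.id, ret, errno)) {
		printf("%s: Failed to reject (ret=%d, errno=%d) ",
		       config_name(c), ret, errno);
		print_reg(config_name(c), reg.id);
		putchar('\n');
		++failed_reject;
	}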

>
> One possible issue with the above change is that whenever new registers
> that don't support set are added, we need to add them to the reject
> registers list, or the test would fail.
>
> Initially, in the v1 patch, the design was to just skip the EOPNOTSUPP
> errno on set operations for all registers, since it's a known errno for
> registers that don't support set. That change covers all the registers,
> even future new ones.
>
> What's your opinion?

I think we should only do the get/set tests on present, blessed list
registers, since if it's a new register we don't know its capabilities.

So, instead of

	for_each_reg(i) {
		/* get/set tests */
	}

we do

	for_each_present_blessed_reg(i) {
		/* get/set tests */
	}

where we have

#define for_each_present_blessed_reg(i)					\
	for ((i) = 0; (i) < blessed_n; ++(i))				\
		if (find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))


Changing run_test() to work this way should be a separate patch.
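
i.e. the get/set loop in run_test() would become something like the sketch
below (the buffer handling is hand-waved here):

	for_each_present_blessed_reg(i) {
		uint8_t addr[2048 / 8];
		struct kvm_one_reg reg = {
			.id = blessed_reg[i],
			.addr = (__u64)&addr,
		};

		ret = __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
		if (ret) {
			printf("%s: Failed to get ", config_name(c));
			print_reg(config_name(c), reg.id);
			putchar('\n');
			++failed_get;
		}

		/* ...followed by the same set / reject-set checks on reg.id... */
	}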

Thanks,
drew

2023-06-12 10:13:43

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 09/10] KVM: riscv: selftests: Skip some registers set operation

On Mon, Jun 12, 2023 at 4:57 PM Andrew Jones <[email protected]> wrote:
>
> On Sat, Jun 10, 2023 at 10:35:24AM +0800, Haibo Xu wrote:
> > On Fri, Jun 9, 2023 at 5:24 PM Andrew Jones <[email protected]> wrote:
> > >
> > > On Fri, Jun 09, 2023 at 10:12:17AM +0800, Haibo Xu wrote:
> > > > Set operation on some riscv registers (mostly pseudo ones) is not
> > > > supported and should be skipped in the get-reg-list test. Just
> > > > reuse the rejects_set utilities to handle it in riscv.
> > > >
> > > > Signed-off-by: Haibo Xu <[email protected]>
> > > > Reviewed-by: Andrew Jones <[email protected]>
> > > > ---
> > > > tools/testing/selftests/kvm/get-reg-list.c | 20 +++++++++++++-------
> > > > 1 file changed, 13 insertions(+), 7 deletions(-)
> > > >
> > > > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > > > index c4bd5a5259da..abacb95c21c6 100644
> > > > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > > > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > > > @@ -211,16 +211,22 @@ static void run_test(struct vcpu_reg_list *c)
> > > > ++failed_get;
> > > > }
> > > >
> > > > - /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
> > > > + /*
> > > > + * rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE on aarch64,
> > > > + * or registers that should skip set operation on riscv.
> > > > + */
> > > > for_each_sublist(c, s) {
> > > > if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
> > > > reject_reg = true;
> > > > - ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > > > - if (ret != -1 || errno != EPERM) {
> > > > - printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
> > > > - print_reg(config_name(c), reg.id);
> > > > - putchar('\n');
> > > > - ++failed_reject;
> > > > + if ((reg.id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64) {
> > > > + ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > > > + if (ret != -1 || errno != EPERM) {
> > > > + printf("%s: Failed to reject (ret=%d, errno=%d) ",
> > > > + config_name(c), ret, errno);
> > > > + print_reg(config_name(c), reg.id);
> > > > + putchar('\n');
> > > > + ++failed_reject;
> > > > + }
> > >
> > > Thinking about this some more, shouldn't we attempt the set ioctl for
> > > riscv reject registers as well, but look for different error numbers?
> > >
> >
> > Yes, we can. Currently, two different errnos (EOPNOTSUPP/EINVAL) are
> > reported for the rejected registers on RISC-V.
> > These two errnos can be handled specially like below:
> >
> > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > index 73f40e0842b8..f3f2c4519318 100644
> > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > @@ -255,6 +255,15 @@ static void run_test(struct vcpu_reg_list *c)
> >  						putchar('\n');
> >  						++failed_reject;
> >  					}
> > +				} else {
> > +					ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
> > +					if (ret != -1 || (errno != EINVAL && errno != EOPNOTSUPP)) {
> > +						printf("%s: Failed to reject (ret=%d, errno=%d) ",
> > +						       config_name(c), ret, errno);
> > +						print_reg(config_name(c), reg.id);
> > +						putchar('\n');
> > +						++failed_reject;
> > +					}
>
> Instead of duplicating the code Arm uses, we just need an errno check
> function, preferably one that takes the register as an input, so we
> can check for specific errnos for specific registers.
>
> >
> > One possible issue with the above change is that whenever new registers
> > that don't support set are added, we need to add them to the reject
> > registers list, or the test would fail.
> >
> > Initially, in the v1 patch, the design was to just skip the EOPNOTSUPP
> > errno on set operations for all registers, since it's a known errno for
> > registers that don't support set. That change covers all the registers,
> > even future new ones.
> >
> > What's your opinion?
>
> I think we should only do the get/set tests on present, blessed list
> registers, since if it's a new register we don't know its capabilities.
>
> So, instead of
>
> 	for_each_reg(i) {
> 		/* get/set tests */
> 	}
>
> we do
>
> 	for_each_present_blessed_reg(i) {
> 		/* get/set tests */
> 	}
>
> where we have
>
> #define for_each_present_blessed_reg(i)					\
> 	for ((i) = 0; (i) < blessed_n; ++(i))				\
> 		if (find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))
>
>
> Changing run_test() to work this way should be a separate patch.
>

Good idea! Let me give it a try.

Thanks,
Haibo

> Thanks,
> drew

2023-06-12 10:16:57

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Mon, Jun 12, 2023 at 5:11 PM Andrew Jones <[email protected]> wrote:
>
> On Sat, Jun 10, 2023 at 11:12:15AM +0800, Haibo Xu wrote:
> > On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
> > >
> > > On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > > > The get-reg-list test is used to check for KVM register regressions
> > > > during VM migration, which happen when the destination host kernel
> > > > is missing registers that the source host kernel has. The blessed
> > > > register list was created by running on v6.4-rc5.
> > > >
> > > > Signed-off-by: Haibo Xu <[email protected]>
> > > > ---
> > > > tools/testing/selftests/kvm/Makefile | 1 +
> > > > tools/testing/selftests/kvm/get-reg-list.c | 28 +
> > > > .../selftests/kvm/include/riscv/processor.h | 3 +
> > > > .../selftests/kvm/riscv/get-reg-list.c | 611 ++++++++++++++++++
> > > > 4 files changed, 643 insertions(+)
> > > > create mode 100644 tools/testing/selftests/kvm/riscv/get-reg-list.c
> > > >
> > > > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > > > index d90cad19c9ee..f7bcda903dd9 100644
> > > > --- a/tools/testing/selftests/kvm/Makefile
> > > > +++ b/tools/testing/selftests/kvm/Makefile
> > > > @@ -174,6 +174,7 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
> > > >
> > > > TEST_GEN_PROGS_riscv += demand_paging_test
> > > > TEST_GEN_PROGS_riscv += dirty_log_test
> > > > +TEST_GEN_PROGS_riscv += get-reg-list
> > > > TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
> > > > TEST_GEN_PROGS_riscv += kvm_page_table_test
> > > > TEST_GEN_PROGS_riscv += set_memory_region_test
> > > > diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
> > > > index abacb95c21c6..73f40e0842b8 100644
> > > > --- a/tools/testing/selftests/kvm/get-reg-list.c
> > > > +++ b/tools/testing/selftests/kvm/get-reg-list.c
> > > > @@ -133,6 +133,34 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm
> > > > return vcpu;
> > > > }
> > > > #else
> > > > +static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
> > > > +{
> > > > + int ret;
> > > > + unsigned long value;
> > > > +
> > > > + ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
> > > > + if (ret) {
> > > > + printf("Failed to get ext %d", ext);
> > > > + return false;
> > > > + }
> > > > +
> > > > + return !!value;
> > > > +}
> > > > +
> > > > +static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
> > > > +{
> > > > + struct vcpu_reg_sublist *s;
> > > > +
> > > > + for_each_sublist(c, s) {
> > > > + if (!s->feature)
> > > > + continue;
> > >
> > > Using zero to mean "not specified" means we can't test for
> > > KVM_RISCV_ISA_EXT_A, but that's probably OK, since Linux always has 'a',
> > > so we'll never need to check for it.
> > >
> >
> > Almost all the features are enabled by default for a RISC-V vCPU if
> > the host has the corresponding extension.
> > Just leave it to finalize_vcpu() as the gatekeeper to check whether the
> > feature is supported before triggering the test.
>
> Yes, but you can't check if 'a' is available here, because
> KVM_RISCV_ISA_EXT_A is 0 and 0 also means "not specified", as
> shown above with the "if 0, continue". But, as I said, it
> doesn't matter. 'a' will always be present.
>

Yes, hit a corner case.

> >
> > > > +
> > > > + __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
> > > > + "%s: %s not available, skipping tests\n",
> > > > + config_name(c), s->name);
> > > > + }
> > > > +}
> > > > +
> > > > static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
> > > > {
> > > > return __vm_vcpu_add(vm, 0);
> > > > diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
> > > > index d00d213c3805..5b62a3d2aa9b 100644
> > > > --- a/tools/testing/selftests/kvm/include/riscv/processor.h
> > > > +++ b/tools/testing/selftests/kvm/include/riscv/processor.h
> > > > @@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
> > > > KVM_REG_RISCV_TIMER_REG(name), \
> > > > KVM_REG_SIZE_U64)
> > > >
> > > > +#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
> > > > + idx, KVM_REG_SIZE_ULONG)
> > > > +
> > > > /* L3 index Bit[47:39] */
> > > > #define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
> > > > #define PGTBL_L3_INDEX_SHIFT 39
> > > > diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> > > > new file mode 100644
> > > > index 000000000000..0f371d99d471
> > > > --- /dev/null
> > > > +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
> > > > @@ -0,0 +1,611 @@
> > > > +// SPDX-License-Identifier: GPL-2.0
> > > > +/*
> > > > + * Check for KVM_GET_REG_LIST regressions.
> > > > + *
> > > > + * Copyright (C) 2020, Red Hat, Inc.
> > >
> > > I don't think we need the Red Hat copyright. This is a completely new
> > > work.
> > >
> >
> > Sure, will remove it.
> >
> > > > + * Copyright (c) 2023 Intel Corporation
> > > > + *
> > > > + */
> > > > +#include <stdio.h>
> > > > +#include "kvm_util.h"
> > > > +#include "test_util.h"
> > > > +#include "processor.h"
> > > > +
> > > > +#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
> > > > +
> > > > +static const char *config_id_to_str(__u64 id)
> > > > +{
> > > > + /* reg_off is the offset into struct kvm_riscv_config */
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
> > > > +
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_CONFIG_REG(isa):
> > > > + return "KVM_REG_RISCV_CONFIG_REG(isa)";
> > > > + case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
> > > > + return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
> > > > + case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
> > > > + return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
> > > > + case KVM_REG_RISCV_CONFIG_REG(mvendorid):
> > > > + return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
> > > > + case KVM_REG_RISCV_CONFIG_REG(marchid):
> > > > + return "KVM_REG_RISCV_CONFIG_REG(marchid)";
> > > > + case KVM_REG_RISCV_CONFIG_REG(mimpid):
> > > > + return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
> > > > + }
> > > > +
> > > > + /*
> > > > + * Config regs would grow regularly with new pseudo reg added, so
> > > > + * just show raw id to indicate a new pseudo config reg.
> > > > + */
> > > > + return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
> > > > +}
> > > > +
> > > > +static const char *core_id_to_str(const char *prefix, __u64 id)
> > > > +{
> > > > + /* reg_off is the offset into struct kvm_riscv_core */
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
> > > > +
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_CORE_REG(regs.pc):
> > > > + return "KVM_REG_RISCV_CORE_REG(regs.pc)";
> > > > + case KVM_REG_RISCV_CORE_REG(regs.ra):
> > > > + return "KVM_REG_RISCV_CORE_REG(regs.ra)";
> > > > + case KVM_REG_RISCV_CORE_REG(regs.sp):
> > > > + return "KVM_REG_RISCV_CORE_REG(regs.sp)";
> > > > + case KVM_REG_RISCV_CORE_REG(regs.gp):
> > > > + return "KVM_REG_RISCV_CORE_REG(regs.gp)";
> > > > + case KVM_REG_RISCV_CORE_REG(regs.tp):
> > > > + return "KVM_REG_RISCV_CORE_REG(regs.tp)";
> > > > + case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
> > > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> > > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
> > > > + case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
> > > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> > > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
> > > > + case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
> > > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
> > > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
> > > > + case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
> > > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
> > > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
> > > > + case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
> > > > + return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
> > > > + reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
> > > > + case KVM_REG_RISCV_CORE_REG(mode):
> > > > + return "KVM_REG_RISCV_CORE_REG(mode)";
> > > > + }
> > > > +
> > > > + TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +#define RISCV_CSR_GENERAL(csr) \
> > > > + "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
> > > > +#define RISCV_CSR_AIA(csr) \
> > > > + "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
> > > > +
> > > > +static const char *general_csr_id_to_str(__u64 reg_off)
> > > > +{
> > > > + /* reg_off is the offset into struct kvm_riscv_csr */
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_CSR_REG(sstatus):
> > > > + return RISCV_CSR_GENERAL(sstatus);
> > > > + case KVM_REG_RISCV_CSR_REG(sie):
> > > > + return RISCV_CSR_GENERAL(sie);
> > > > + case KVM_REG_RISCV_CSR_REG(stvec):
> > > > + return RISCV_CSR_GENERAL(stvec);
> > > > + case KVM_REG_RISCV_CSR_REG(sscratch):
> > > > + return RISCV_CSR_GENERAL(sscratch);
> > > > + case KVM_REG_RISCV_CSR_REG(sepc):
> > > > + return RISCV_CSR_GENERAL(sepc);
> > > > + case KVM_REG_RISCV_CSR_REG(scause):
> > > > + return RISCV_CSR_GENERAL(scause);
> > > > + case KVM_REG_RISCV_CSR_REG(stval):
> > > > + return RISCV_CSR_GENERAL(stval);
> > > > + case KVM_REG_RISCV_CSR_REG(sip):
> > > > + return RISCV_CSR_GENERAL(sip);
> > > > + case KVM_REG_RISCV_CSR_REG(satp):
> > > > + return RISCV_CSR_GENERAL(satp);
> > > > + case KVM_REG_RISCV_CSR_REG(scounteren):
> > > > + return RISCV_CSR_GENERAL(scounteren);
> > > > + }
> > > > +
> > > > + TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *aia_csr_id_to_str(__u64 reg_off)
> > > > +{
> > > > + /* reg_off is the offset into struct kvm_riscv_aia_csr */
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(siselect):
> > > > + return RISCV_CSR_AIA(siselect);
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
> > > > + return RISCV_CSR_AIA(iprio1);
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
> > > > + return RISCV_CSR_AIA(iprio2);
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(sieh):
> > > > + return RISCV_CSR_AIA(sieh);
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(siph):
> > > > + return RISCV_CSR_AIA(siph);
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
> > > > + return RISCV_CSR_AIA(iprio1h);
> > > > + case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
> > > > + return RISCV_CSR_AIA(iprio2h);
> > > > + }
> > > > +
> > > > + TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *csr_id_to_str(const char *prefix, __u64 id)
> > > > +{
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
> > > > + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> > > > +
> > > > + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> > > > +
> > > > + switch (reg_subtype) {
> > > > + case KVM_REG_RISCV_CSR_GENERAL:
> > > > + return general_csr_id_to_str(reg_off);
> > > > + case KVM_REG_RISCV_CSR_AIA:
> > > > + return aia_csr_id_to_str(reg_off);
> > > > + }
> > > > +
> > > > + TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *timer_id_to_str(const char *prefix, __u64 id)
> > > > +{
> > > > + /* reg_off is the offset into struct kvm_riscv_timer */
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
> > > > +
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_TIMER_REG(frequency):
> > > > + return "KVM_REG_RISCV_TIMER_REG(frequency)";
> > > > + case KVM_REG_RISCV_TIMER_REG(time):
> > > > + return "KVM_REG_RISCV_TIMER_REG(time)";
> > > > + case KVM_REG_RISCV_TIMER_REG(compare):
> > > > + return "KVM_REG_RISCV_TIMER_REG(compare)";
> > > > + case KVM_REG_RISCV_TIMER_REG(state):
> > > > + return "KVM_REG_RISCV_TIMER_REG(state)";
> > > > + }
> > > > +
> > > > + TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *fp_f_id_to_str(const char *prefix, __u64 id)
> > > > +{
> > > > + /* reg_off is the offset into struct __riscv_f_ext_state */
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
> > > > +
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_FP_F_REG(f[0]) ...
> > > > + KVM_REG_RISCV_FP_F_REG(f[31]):
> > > > + return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
> > > > + case KVM_REG_RISCV_FP_F_REG(fcsr):
> > > > + return "KVM_REG_RISCV_FP_F_REG(fcsr)";
> > > > + }
> > > > +
> > > > + TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *fp_d_id_to_str(const char *prefix, __u64 id)
> > > > +{
> > > > + /* reg_off is the offset into struct __riscv_d_ext_state */
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
> > > > +
> > > > + switch (reg_off) {
> > > > + case KVM_REG_RISCV_FP_D_REG(f[0]) ...
> > > > + KVM_REG_RISCV_FP_D_REG(f[31]):
> > > > + return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
> > > > + case KVM_REG_RISCV_FP_D_REG(fcsr):
> > > > + return "KVM_REG_RISCV_FP_D_REG(fcsr)";
> > > > + }
> > > > +
> > > > + TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *isa_ext_id_to_str(__u64 id)
> > > > +{
> > > > + /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
> > > > +
> > > > + static const char * const kvm_isa_ext_reg_name[] = {
> > > > + "KVM_RISCV_ISA_EXT_A",
> > > > + "KVM_RISCV_ISA_EXT_C",
> > > > + "KVM_RISCV_ISA_EXT_D",
> > > > + "KVM_RISCV_ISA_EXT_F",
> > > > + "KVM_RISCV_ISA_EXT_H",
> > > > + "KVM_RISCV_ISA_EXT_I",
> > > > + "KVM_RISCV_ISA_EXT_M",
> > > > + "KVM_RISCV_ISA_EXT_SVPBMT",
> > > > + "KVM_RISCV_ISA_EXT_SSTC",
> > > > + "KVM_RISCV_ISA_EXT_SVINVAL",
> > > > + "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
> > > > + "KVM_RISCV_ISA_EXT_ZICBOM",
> > > > + "KVM_RISCV_ISA_EXT_ZICBOZ",
> > > > + "KVM_RISCV_ISA_EXT_ZBB",
> > > > + "KVM_RISCV_ISA_EXT_SSAIA",
> > > > + };
> > > > +
> > > > + if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
> > > > + /*
> > > > + * isa_ext regs would grow regularly with new isa extension added, so
> > > > + * just show "reg" to indicate a new extension.
> > > > + */
> > > > + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> > > > + }
> > > > +
> > > > + return kvm_isa_ext_reg_name[reg_off];
> > > > +}
> > > > +
> > > > +static const char *sbi_ext_single_id_to_str(__u64 reg_off)
> > > > +{
> > > > + /* reg_off is KVM_RISCV_SBI_EXT_ID */
> > > > + static const char * const kvm_sbi_ext_reg_name[] = {
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
> > > > + "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
> > > > + };
> > > > +
> > > > + if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
> > > > + /*
> > > > + * sbi_ext regs would grow regularly with new sbi extension added, so
> > > > + * just show "reg" to indicate a new extension.
> > > > + */
> > > > + return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
> > > > + }
> > > > +
> > > > + return kvm_sbi_ext_reg_name[reg_off];
> > > > +}
> > > > +
> > > > +static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
> > > > +{
> > > > + if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
> > > > + /*
> > > > + * sbi_ext regs would grow regularly with new sbi extension added, so
> > > > + * just show "reg" to indicate a new extension.
> > > > + */
> > > > + return strdup_printf("%lld /* UNKNOWN */", reg_off);
> > > > + }
> > > > +
> > > > + switch (reg_subtype) {
> > > > + case KVM_REG_RISCV_SBI_MULTI_EN:
> > > > + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
> > > > + case KVM_REG_RISCV_SBI_MULTI_DIS:
> > > > + return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
> > > > + }
> > > > +
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
> > > > +{
> > > > + __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
> > > > + __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
> > > > +
> > > > + reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
> > > > +
> > > > + switch (reg_subtype) {
> > > > + case KVM_REG_RISCV_SBI_SINGLE:
> > > > + return sbi_ext_single_id_to_str(reg_off);
> > > > + case KVM_REG_RISCV_SBI_MULTI_EN:
> > > > + case KVM_REG_RISCV_SBI_MULTI_DIS:
> > > > + return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
> > > > + }
> > > > +
> > > > + TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
> > > > + return NULL;
> > > > +}
> > > > +
> > > > +void print_reg(const char *prefix, __u64 id)
> > > > +{
> > > > + const char *reg_size = NULL;
> > > > +
> > > > + TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
> > > > + "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
> > > > +
> > > > + switch (id & KVM_REG_SIZE_MASK) {
> > > > + case KVM_REG_SIZE_U32:
> > > > + reg_size = "KVM_REG_SIZE_U32";
> > > > + break;
> > > > + case KVM_REG_SIZE_U64:
> > > > + reg_size = "KVM_REG_SIZE_U64";
> > > > + break;
> > > > + case KVM_REG_SIZE_U128:
> > > > + reg_size = "KVM_REG_SIZE_U128";
> > > > + break;
> > > > + default:
> > > > + TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
> > > > + prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
> > > > + }
> > > > +
> > > > + switch (id & KVM_REG_RISCV_TYPE_MASK) {
> > > > + case KVM_REG_RISCV_CONFIG:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
> > >
> > > All the work to try and use KVM_REG_SIZE_ULONG in the right places will be
> > > lost if we print a reg list and then copy+paste it as a blessed list. On
> > > 64-bit, the only thing supported now, we'll get U64, but if we ever
> > > supported 32-bit, then we'd get U32. This is unfortunate, but there's
> > > nothing we can do about it. Either we can't have a true print+copy+paste
> > > workflow or we should assume we'll only support 64-bit and only use U64
> > > in the blessed lists (from a copy+paste). But, we've already got ULONG
> > > in there now, so we can just leave it and burn this bridge later.
> > >
> >
> > Yes, print_reg would print U64 on a riscv64 system and U32 on riscv32.
> > As commented in the v2 patch, it seems 32-bit is not supported by the other arches.
> > If riscv follows this assumption, I think U64 is better for print_reg.
>
> It's not better, but there's no way to avoid it. The information that it
> was a ULONG, instead of U64, has been lost at this point. The only way
> to print it correctly is to create a reg-size mapping and then look it up,
> rather than decode it. We can add a lookup when/if adding 32-bit support.
>

Sure, just leave it as ULONG here.
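
For reference, such a lookup could be as simple as the sketch below. The
table and helper names here are made up, and nothing like this is needed
until 32-bit support actually shows up.

	/* Register ids (size bits cleared) that are KVM_REG_SIZE_ULONG. */
	static const __u64 ulong_regs[] = {
		KVM_REG_RISCV | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
		/* ...every ULONG-sized id from the blessed lists... */
	};

	static const char *reg_size_to_str(__u64 id)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(ulong_regs); i++)
			if (ulong_regs[i] == (id & ~KVM_REG_SIZE_MASK))
				return "KVM_REG_SIZE_ULONG";

		return NULL; /* fall back to decoding U32/U64/U128 from the id */
	}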

> >
> >
> > > > + reg_size, config_id_to_str(id));
> > > > + break;
> > > > + case KVM_REG_RISCV_CORE:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
> > > > + reg_size, core_id_to_str(prefix, id));
> > > > + break;
> > > > + case KVM_REG_RISCV_CSR:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
> > > > + reg_size, csr_id_to_str(prefix, id));
> > > > + break;
> > > > + case KVM_REG_RISCV_TIMER:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
> > > > + reg_size, timer_id_to_str(prefix, id));
> > > > + break;
> > > > + case KVM_REG_RISCV_FP_F:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
> > > > + reg_size, fp_f_id_to_str(prefix, id));
> > > > + break;
> > > > + case KVM_REG_RISCV_FP_D:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
> > > > + reg_size, fp_d_id_to_str(prefix, id));
> > > > + break;
> > > > + case KVM_REG_RISCV_ISA_EXT:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
> > > > + reg_size, isa_ext_id_to_str(id));
> > > > + break;
> > > > + case KVM_REG_RISCV_SBI_EXT:
> > > > + printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
> > > > + reg_size, sbi_ext_id_to_str(prefix, id));
> > > > + break;
> > > > + default:
> > > > + TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
> > > > + (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
> > > > + }
> > > > +}
> > > > +
> > > > +/*
> > > > + * The current blessed list was primed with the output of kernel version
> > > > + * v6.4-rc5 and then later updated with new registers.
> > > > + */
> > > > +static __u64 base_regs[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
> > > > +};
> > > > +
> > > > +/*
> > > > + * The rejects_set list contains registers that should skip the set test.
> > > > + * - KVM_REG_RISCV_TIMER_REG(state): set would fail if it was not initialized properly.
> > > > + * - KVM_REG_RISCV_TIMER_REG(frequency): set not supported
> > > > + * - KVM_REG_RISCV_CONFIG_REG(zicbom_block_size): set not supported
> > > > + * - KVM_REG_RISCV_CONFIG_REG(zicboz_block_size): set not supported
> > > > + * - KVM_RISCV_ISA_EXT_SVPBMT: set not supported
> > > > + * - KVM_RISCV_ISA_EXT_SVINVAL: set not supported
> > > > + * - KVM_RISCV_ISA_EXT_SSAIA: set not supported
> > > > + */
> > > > +static __u64 base_rejects_set[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
> > >
> > > These aren't all base registers. I think we should divide the reject lists
> > > up too, especially considering the idea I wrote in the last patch, which
> > > is to test setting the rejects to ensure the expected error is returned.
> > > The error may be different for a rejected set of a supported register vs.
> > > that of an unsupported register.
> > >
> >
> > Yes, the reject list should be divided up too. Will change it in V4.
> >
> > > > +};
> > > > +
> > > > +static __u64 zicbom_regs[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
> > > > +};
> > > > +
> > > > +static __u64 zicboz_regs[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
> > > > +};
> > > > +
> > > > +static __u64 aia_csr_regs[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
> > > > +};
> > > > +
> > > > +static __u64 fp_f_regs[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
> > > > +};
> > > > +
> > > > +static __u64 fp_d_regs[] = {
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
> > > > + KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
> > > > +};
> > > > +
> > > > +#define BASE_SUBLIST \
> > > > + {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
> > > > + .rejects_set = base_rejects_set, .rejects_set_n = ARRAY_SIZE(base_rejects_set),}
> > > > +#define ZICBOM_REGS_SUBLIST \
> > > > + {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, \
> > > > + .regs_n = ARRAY_SIZE(zicbom_regs),}
> > > > +#define ZICBOZ_REGS_SUBLIST \
> > > > + {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, \
> > > > + .regs_n = ARRAY_SIZE(zicboz_regs),}
> > > > +#define AIA_REGS_SUBLIST \
> > > > + {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_csr_regs, \
> > > > + .regs_n = ARRAY_SIZE(aia_csr_regs),}
> > > > +#define FP_F_REGS_SUBLIST \
> > > > + {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
> > > > + .regs_n = ARRAY_SIZE(fp_f_regs),}
> > > > +#define FP_D_REGS_SUBLIST \
> > > > + {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
> > > > + .regs_n = ARRAY_SIZE(fp_d_regs),}
> > > > +
> > > > +static struct vcpu_reg_list zicbo_config = {
> > > > + .sublists = {
> > > > + BASE_SUBLIST,
> > > > + ZICBOM_REGS_SUBLIST,
> > > > + ZICBOZ_REGS_SUBLIST,
> > >
> > > It's possible to have zicbom without zicboz and vice-versa. Since
> > > finalize_vcpu() will skip the whole test when it detects a missing
> > > feature for a config, then we won't be able to test one without the
> > > other. It's a bit annoying, but I think we may need a separate config
> > > for each independent extension.
> > >
> >
> > Sure.
> >
> > > > + {0},
> > > > + },
> > > > +};
> > > > +
> > > > +static struct vcpu_reg_list aia_config = {
> > > > + .sublists = {
> > > > + BASE_SUBLIST,
> > > > + AIA_REGS_SUBLIST,
> > > > + {0},
> > > > + },
> > > > +};
> > > > +
> > > > +static struct vcpu_reg_list fp_f_d_config = {
> > > > + .sublists = {
> > > > + BASE_SUBLIST,
> > > > + FP_F_REGS_SUBLIST,
> > > > + FP_D_REGS_SUBLIST,
> > > > + {0},
> > > > + },
> > > > +};
> > > > +
> > > > +struct vcpu_reg_list *vcpu_configs[] = {
> > > > + &zicbo_config,
> > > > + &aia_config,
> > > > + &fp_f_d_config,
> > > > +};
> > > > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > > > --
> > > > 2.34.1
> > > >
> > >
> > > I see we have a bit of a problem with the configs for riscv. Since we
> > > don't disable anything we're not testing, then for any test that is
> > > missing, for example, the f and d registers, we'll get output like
> > > "There are 66 new registers. Consider adding them to the blessed reg
> > > list with the following lines:" and then a dump of all the f and d
> > > registers. The test doesn't fail, but it's messy and confusing. Ideally
> > > we'd disable all registers of all sublists not in the config, probably
> > > by starting by disabling everything and then only reenabling the ones
> > > in the config.
> > >
> > > Anything that can't be disabled is either a KVM bug, i.e. we should
> > > be able to disable it, because we can't expect every host to have it,
> > > or it needs to be in the base register sublist (meaning every host
> > > will always have it).
> > >
> >
> > Yes, as mentioned above, all the features are enabled for the vCPU by
> > default if they're available on the host. I think we can disable all the
> > feature bits at the start of finalize_vcpu() and only enable the feature
> > bits corresponding to the specified config.
> >
>
> Yup.
>
> Thanks,
> drew

2023-06-20 10:09:15

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
>
> On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > +static struct vcpu_reg_list aia_config = {
> > + .sublists = {
> > + BASE_SUBLIST,
> > + AIA_REGS_SUBLIST,
> > + {0},
> > + },
> > +};
> > +
> > +static struct vcpu_reg_list fp_f_d_config = {
> > + .sublists = {
> > + BASE_SUBLIST,
> > + FP_F_REGS_SUBLIST,
> > + FP_D_REGS_SUBLIST,
> > + {0},
> > + },
> > +};
> > +
> > +struct vcpu_reg_list *vcpu_configs[] = {
> > + &zicbo_config,
> > + &aia_config,
> > + &fp_f_d_config,
> > +};
> > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > --
> > 2.34.1
> >
>
> I see we have a bit of a problem with the configs for riscv. Since we
> don't disable anything we're not testing, then for any test that is
> missing, for example, the f and d registers, we'll get output like
> "There are 66 new registers. Consider adding them to the blessed reg
> list with the following lines:" and then a dump of all the f and d
> registers. The test doesn't fail, but it's messy and confusing. Ideally
> we'd disable all registers of all sublists not in the config, probably
> by starting by disabling everything and then only reenabling the ones
> in the config.
>
> Anything that can't be disabled is either a KVM bug, i.e. we should
> be able to disable it, because we can't expect every host to have it,
> or it needs to be in the base register sublist (meaning every host
> will always have it).
>

Hi Andrew,

I found that several multi-letter ISA extensions (AIA/SSTC, etc.) are not
allowed to be disabled.
Is that a bug? Shall we fix it?

Thanks,
Haibo

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZBB:
		return false;
	default:
		break;
	}

	return true;
}

> Thanks,
> drew

2023-06-20 11:04:50

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Tue, Jun 20, 2023 at 06:05:59PM +0800, Haibo Xu wrote:
> On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
> >
> > On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > > +static struct vcpu_reg_list aia_config = {
> > > + .sublists = {
> > > + BASE_SUBLIST,
> > > + AIA_REGS_SUBLIST,
> > > + {0},
> > > + },
> > > +};
> > > +
> > > +static struct vcpu_reg_list fp_f_d_config = {
> > > + .sublists = {
> > > + BASE_SUBLIST,
> > > + FP_F_REGS_SUBLIST,
> > > + FP_D_REGS_SUBLIST,
> > > + {0},
> > > + },
> > > +};
> > > +
> > > +struct vcpu_reg_list *vcpu_configs[] = {
> > > + &zicbo_config,
> > > + &aia_config,
> > > + &fp_f_d_config,
> > > +};
> > > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > > --
> > > 2.34.1
> > >
> >
> > I see we have a bit of a problem with the configs for riscv. Since we
> > don't disable anything we're not testing, then for any test that is
> > missing, for example, the f and d registers, we'll get output like
> > "There are 66 new registers. Consider adding them to the blessed reg
> > list with the following lines:" and then a dump of all the f and d
> > registers. The test doesn't fail, but it's messy and confusing. Ideally
> > we'd disable all registers of all sublists not in the config, probably
> > by starting by disabling everything and then only reenabling the ones
> > in the config.
> >
> > Anything that can't be disabled is either a KVM bug, i.e. we should
> > be able to disable it, because we can't expect every host to have it,
> > or it needs to be in the base register sublist (meaning every host
> > will always have it).
> >
>
> Hi Andrew,
>
> I found that several multi-letter ISA extensions (AIA/SSTC, etc.) are not
> allowed to be disabled.
> Is that a bug? Shall we fix it?

Extensions that a guest could use regardless of whether or not the host
described them in the guest's ISA string, because their instructions or CSR
accesses don't trap, can't truly be disabled. So it's not a bug to
prohibit disabling them, and indeed the test cases should actually ensure
that disabling them fails.
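
For example, once the per-register set checks are in place, something along
these lines could go with them. This is a sketch; which errno KVM returns
for a prohibited disable is an assumption that needs checking.

	/* Sstc is always on when the host has it; disabling it must fail. */
	unsigned long zero = 0;
	struct kvm_one_reg reg = {
		.id = RISCV_ISA_EXT_REG(KVM_RISCV_ISA_EXT_SSTC),
		.addr = (unsigned long)&zero,
	};

	ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
	TEST_ASSERT(ret == -1 && (errno == EINVAL || errno == EOPNOTSUPP),
		    "Disabling Sstc unexpectedly succeeded (ret=%d, errno=%d)",
		    ret, errno);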

Thanks,
drew

2023-06-21 02:12:57

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Tue, Jun 20, 2023 at 6:44 PM Andrew Jones <[email protected]> wrote:
>
> On Tue, Jun 20, 2023 at 06:05:59PM +0800, Haibo Xu wrote:
> > On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
> > >
> > > On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > > > +static struct vcpu_reg_list aia_config = {
> > > > + .sublists = {
> > > > + BASE_SUBLIST,
> > > > + AIA_REGS_SUBLIST,
> > > > + {0},
> > > > + },
> > > > +};
> > > > +
> > > > +static struct vcpu_reg_list fp_f_d_config = {
> > > > + .sublists = {
> > > > + BASE_SUBLIST,
> > > > + FP_F_REGS_SUBLIST,
> > > > + FP_D_REGS_SUBLIST,
> > > > + {0},
> > > > + },
> > > > +};
> > > > +
> > > > +struct vcpu_reg_list *vcpu_configs[] = {
> > > > + &zicbo_config,
> > > > + &aia_config,
> > > > + &fp_f_d_config,
> > > > +};
> > > > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > > > --
> > > > 2.34.1
> > > >
> > >
> > > I see we have a bit of a problem with the configs for riscv. Since we
> > > don't disable anything we're not testing, then for any test that is
> > > missing, for example, the f and d registers, we'll get output like
> > > "There are 66 new registers. Consider adding them to the blessed reg
> > > list with the following lines:" and then a dump of all the f and d
> > > registers. The test doesn't fail, but it's messy and confusing. Ideally
> > > we'd disable all registers of all sublists not in the config, probably
> > > by starting by disabling everything and then only reenabling the ones
> > > in the config.
> > >
> > > Anything that can't be disabled is either a KVM bug, i.e. we should
> > > be able to disable it, because we can't expect every host to have it,
> > > or it needs to be in the base register sublist (meaning every host
> > > will always have it).
> > >
> >
> > Hi Andrew,
> >
> > I found that several multi-letter ISA extensions (AIA/SSTC, etc.) are not
> > allowed to be disabled.
> > Is that a bug? Shall we fix it?
>
> Extensions that a guest could use regardless of whether or not the host
> described them in the guest's ISA string, because their instructions or CSR
> accesses don't trap, can't truly be disabled. So it's not a bug to
> prohibit disabling them, and indeed the test cases should actually ensure
> that disabling them fails.
>

So these kinds of ISA_EXT_* regs should be in the base reg list, right?

Thanks,
Haibo

> Thanks,
> drew

2023-06-21 07:47:07

by Andrew Jones

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Wed, Jun 21, 2023 at 09:55:13AM +0800, Haibo Xu wrote:
> On Tue, Jun 20, 2023 at 6:44 PM Andrew Jones <[email protected]> wrote:
> >
> > On Tue, Jun 20, 2023 at 06:05:59PM +0800, Haibo Xu wrote:
> > > On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
> > > >
> > > > On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > > > > +static struct vcpu_reg_list aia_config = {
> > > > > + .sublists = {
> > > > > + BASE_SUBLIST,
> > > > > + AIA_REGS_SUBLIST,
> > > > > + {0},
> > > > > + },
> > > > > +};
> > > > > +
> > > > > +static struct vcpu_reg_list fp_f_d_config = {
> > > > > + .sublists = {
> > > > > + BASE_SUBLIST,
> > > > > + FP_F_REGS_SUBLIST,
> > > > > + FP_D_REGS_SUBLIST,
> > > > > + {0},
> > > > > + },
> > > > > +};
> > > > > +
> > > > > +struct vcpu_reg_list *vcpu_configs[] = {
> > > > > + &zicbo_config,
> > > > > + &aia_config,
> > > > > + &fp_f_d_config,
> > > > > +};
> > > > > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > > > > --
> > > > > 2.34.1
> > > > >
> > > >
> > > > I see we have a bit of a problem with the configs for riscv. Since we
> > > > don't disable anything we're not testing, then for any test that is
> > > > missing, for example, the f and d registers, we'll get output like
> > > > "There are 66 new registers. Consider adding them to the blessed reg
> > > > list with the following lines:" and then a dump of all the f and d
> > > > registers. The test doesn't fail, but it's messy and confusing. Ideally
> > > > we'd disable all registers of all sublists not in the config, probably
> > > > by starting by disabling everything and then only reenabling the ones
> > > > in the config.
> > > >
> > > > Anything that can't be disabled is either a KVM bug, i.e. we should
> > > > be able to disable it, because we can't expect every host to have it,
> > > > or it needs to be in the base register sublist (meaning every host
> > > > will always have it).
> > > >
> > >
> > > Hi Andrew,
> > >
> > > I found that several multi-letter ISA EXTs (AIA/SSTC, etc.) were not
> > > allowed to be disabled.
> > > Is it a bug? Shall we fix it?
> >
> > Extensions whose instructions or CSR accesses don't trap can be used by a
> > guest regardless of whether or not the host described them in the guest's
> > isa string, so they can't truly be disabled. It's not a bug to prohibit
> > disabling them, and indeed the test cases should actually ensure that
> > disabling them fails.
> >
>
> So these kinds of ISA_EXT_* regs should be in the base reg list, right?
>

Ah, this is getting a bit messy. We don't want all these extensions in a
"base", which represents extensions for all possible hosts, because the
extensions are optional, but we can't remove them from get-reg-list
output by disabling them, since they can't be disabled. It seems we
need the concept of "base", which is the common set expected on all hosts,
and also the concept of "this host's base". I'm struggling to think of
a nice way to deal with that. A first thought is to both add these types
of registers to their own extension-specific sublists and to filter_reg().
I think that will keep them from being reported as new registers in every
test, but also allow detection of them going missing when their
extension is present.

Thanks,
drew
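
On the riscv side, that suggestion might look roughly like the sketch
below (same includes as the earlier sketches). The sublist macro
follows the style of the existing ones, filter_reg() is assumed to be
overridable by the arch-specific test code, and Sstc/Ssaia are only
examples of such extensions:

/* Give an optional-but-not-disableable extension its own sublist... */
static __u64 sstc_regs[] = {
        KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
        KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
};

#define SSTC_REGS_SUBLIST \
        {"sstc", .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}

/*
 * ...and also filter its ISA_EXT register, so hosts that happen to
 * have the extension don't report it as a "new" register for configs
 * that don't include the sublist.
 */
bool filter_reg(__u64 reg)
{
        switch (reg) {
        case KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
             KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
        case KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
             KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
                return true;
        default:
                return false;
        }
}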

2023-06-21 09:08:11

by Haibo Xu

[permalink] [raw]
Subject: Re: [PATCH v3 10/10] KVM: riscv: selftests: Add get-reg-list test

On Wed, Jun 21, 2023 at 3:30 PM Andrew Jones <[email protected]> wrote:
>
> On Wed, Jun 21, 2023 at 09:55:13AM +0800, Haibo Xu wrote:
> > On Tue, Jun 20, 2023 at 6:44 PM Andrew Jones <[email protected]> wrote:
> > >
> > > On Tue, Jun 20, 2023 at 06:05:59PM +0800, Haibo Xu wrote:
> > > > On Fri, Jun 9, 2023 at 9:35 PM Andrew Jones <[email protected]> wrote:
> > > > >
> > > > > On Fri, Jun 09, 2023 at 10:12:18AM +0800, Haibo Xu wrote:
> > > > > > +static struct vcpu_reg_list aia_config = {
> > > > > > + .sublists = {
> > > > > > + BASE_SUBLIST,
> > > > > > + AIA_REGS_SUBLIST,
> > > > > > + {0},
> > > > > > + },
> > > > > > +};
> > > > > > +
> > > > > > +static struct vcpu_reg_list fp_f_d_config = {
> > > > > > + .sublists = {
> > > > > > + BASE_SUBLIST,
> > > > > > + FP_F_REGS_SUBLIST,
> > > > > > + FP_D_REGS_SUBLIST,
> > > > > > + {0},
> > > > > > + },
> > > > > > +};
> > > > > > +
> > > > > > +struct vcpu_reg_list *vcpu_configs[] = {
> > > > > > + &zicbo_config,
> > > > > > + &aia_config,
> > > > > > + &fp_f_d_config,
> > > > > > +};
> > > > > > +int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
> > > > > > --
> > > > > > 2.34.1
> > > > > >
> > > > >
> > > > > I see we have a bit of a problem with the configs for riscv. Since we
> > > > > don't disable anything we're not testing, then for any test that is
> > > > > missing, for example, the f and d registers, we'll get output like
> > > > > "There are 66 new registers. Consider adding them to the blessed reg
> > > > > list with the following lines:" and then a dump of all the f and d
> > > > > registers. The test doesn't fail, but it's messy and confusing. Ideally
> > > > > we'd disable all registers of all sublists not in the config, probably
> > > > > by starting by disabling everything and then only reenabling the ones
> > > > > in the config.
> > > > >
> > > > > Anything that can't be disabled is either a KVM bug, i.e. we should
> > > > > be able to disable it, because we can't expect every host to have it,
> > > > > or it needs to be in the base register sublist (meaning every host
> > > > > will always have it).
> > > > >
> > > >
> > > > Hi Andrew,
> > > >
> > > > I found that several multi-letter ISA EXTs (AIA/SSTC, etc.) were not
> > > > allowed to be disabled.
> > > > Is it a bug? Shall we fix it?
> > >
> > > Extensions whose instructions or CSR accesses don't trap can be used by a
> > > guest regardless of whether or not the host described them in the guest's
> > > isa string, so they can't truly be disabled. It's not a bug to prohibit
> > > disabling them, and indeed the test cases should actually ensure that
> > > disabling them fails.
> > >
> >
> > So these kinds of ISA_EXT_* regs should be in the base reg list, right?
> >
>
> Ah, this is getting a bit messy. We don't want all these extensions in a
> "base", which represents extensions for all possible hosts, because the
> extensions are optional, but we can't remove them from get-reg-list
> output by disabling them, since they can't be disabled. It seems we
> need the concept of "base", which is the common set expected on all hosts,
> and also the concept of "this host's base". I'm struggling to think of
> a nice way to deal with that. A first thought is to both add these types
> of registers to their own extension-specific sublists and to filter_reg().
> I think that will keep them from being reported as new registers in every
> test, but also allow detection of them going missing when their
> extension is present.
>

Yes, I was also stuck with the mess!

I was trying an approach that only includes the regs that can't be
disabled in the base reg list and only checks the errno of the
disable (set to 0) operation.

I will also try your suggestion and let you know the results soon!

Thanks,
Haibo

> Thanks,
> drew
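
If that approach works out, a config exercising such a sublist would
presumably mirror the existing ones, reusing the hypothetical
SSTC_REGS_SUBLIST from the sketch further up and getting an entry
appended to vcpu_configs[]:

static struct vcpu_reg_list sstc_config = {
        .sublists = {
                BASE_SUBLIST,
                SSTC_REGS_SUBLIST,
                {0},
        },
};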