Rename 'struct vcpu' to 'struct kvm_vcpu' to align with 'struct kvm_vm'
in the selftest, and to give readers a hint that the struct is specific
to KVM.
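E.g. a test that wants a single-vCPU VM can then do (guest_code standing in
for whatever guest entry point the test uses):

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);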
Signed-off-by: Sean Christopherson <[email protected]>
---
.../selftests/kvm/include/kvm_util_base.h | 11 +++---
tools/testing/selftests/kvm/lib/kvm_util.c | 36 +++++++++----------
.../selftests/kvm/lib/s390x/processor.c | 2 +-
3 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index b83c3327d0e4..d2c7fb391fc7 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -16,6 +16,7 @@
#include <linux/kvm.h>
#include "linux/rbtree.h"
+
#include <sys/ioctl.h>
#include "sparsebit.h"
@@ -43,7 +44,7 @@ struct userspace_mem_region {
struct hlist_node slot_node;
};
-struct vcpu {
+struct kvm_vcpu {
struct list_head list;
uint32_t id;
int fd;
@@ -92,7 +93,7 @@ struct kvm_vm {
continue; \
else
-struct vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid);
/*
* Virtual Translation Tables Dump
@@ -644,17 +645,17 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
* Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
* additional pages of guest memory. Returns the VM and vCPU (via out param).
*/
-struct kvm_vm *__vm_create_with_one_vcpu(struct vcpu **vcpu,
+struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
uint64_t extra_mem_pages,
void *guest_code);
-static inline struct kvm_vm *vm_create_with_one_vcpu(struct vcpu **vcpu,
+static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
void *guest_code)
{
return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
-struct vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
+struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
/*
* Adds a vCPU with reasonable defaults (e.g. a stack)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index aca9ebffdc0e..99d6c5a8659e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -349,7 +349,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
(uint32_t []){ vcpuid });
}
-struct kvm_vm *__vm_create_with_one_vcpu(struct vcpu **vcpu,
+struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
uint64_t extra_mem_pages,
void *guest_code)
{
@@ -393,7 +393,7 @@ void kvm_vm_restart(struct kvm_vm *vmp)
}
}
-struct vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
+struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
kvm_vm_restart(vm);
@@ -472,23 +472,23 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
return &region->region;
}
-static struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
+static struct kvm_vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpu_id)
{
- struct vcpu *vcpu;
+ struct kvm_vcpu *vcpu;
list_for_each_entry(vcpu, &vm->vcpus, list) {
- if (vcpu->id == vcpuid)
+ if (vcpu->id == vcpu_id)
return vcpu;
}
return NULL;
}
-struct vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpu_id)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct kvm_vcpu *vcpu = vcpu_find(vm, vcpu_id);
- TEST_ASSERT(vcpu, "vCPU %d does not exist", vcpuid);
+ TEST_ASSERT(vcpu, "vCPU %d does not exist", vcpu_id);
return vcpu;
}
@@ -504,7 +504,7 @@ struct vcpu *vcpu_get(struct kvm_vm *vm, uint32_t vcpuid)
*
* Removes a vCPU from a VM and frees its resources.
*/
-static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
+static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
int ret;
@@ -526,7 +526,7 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
void kvm_vm_release(struct kvm_vm *vmp)
{
- struct vcpu *vcpu, *tmp;
+ struct kvm_vcpu *vcpu, *tmp;
int ret;
list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
@@ -1078,7 +1078,7 @@ static int vcpu_mmap_sz(void)
*/
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
- struct vcpu *vcpu;
+ struct kvm_vcpu *vcpu;
/* Confirm a vcpu with the specified id doesn't already exist. */
TEST_ASSERT(!vcpu_find(vm, vcpuid), "vCPU%d already exists\n", vcpuid);
@@ -1452,7 +1452,7 @@ void vm_create_irqchip(struct kvm_vm *vm)
*/
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
- struct vcpu *vcpu = vcpu_get(vm, vcpuid);
+ struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
return vcpu->run;
}
@@ -1493,7 +1493,7 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
- struct vcpu *vcpu = vcpu_get(vm, vcpuid);
+ struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
int ret;
vcpu->run->immediate_exit = 1;
@@ -1537,7 +1537,7 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
int __vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
unsigned long cmd, void *arg)
{
- struct vcpu *vcpu = vcpu_get(vm, vcpuid);
+ struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
return ioctl(vcpu->fd, cmd, arg);
}
@@ -1552,7 +1552,7 @@ void _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long cmd,
void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
{
- struct vcpu *vcpu = vcpu_get(vm, vcpuid);
+ struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
uint32_t size = vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
@@ -1684,9 +1684,7 @@ void vcpu_device_attr_set(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
int __vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr)
{
- struct vcpu *vcpu = vcpu_get(vm, vcpuid);
-
- return __kvm_has_device_attr(vcpu->fd, group, attr);
+ return __kvm_has_device_attr(vcpu_get(vm, vcpuid)->fd, group, attr);
}
/*
@@ -1779,7 +1777,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int ctr;
struct userspace_mem_region *region;
- struct vcpu *vcpu;
+ struct kvm_vcpu *vcpu;
fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index df9d9650d916..aec15ca9d887 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -207,7 +207,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
- struct vcpu *vcpu = vcpu_get(vm, vcpuid);
+ struct kvm_vcpu *vcpu = vcpu_get(vm, vcpuid);
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
--
2.36.1.255.ge46751e96f-goog
On Fri, Jun 03, 2022 at 12:41:55AM +0000, Sean Christopherson wrote:
> Rename 'struct vcpu' to 'struct kvm_vcpu' to align with 'struct kvm_vm'
> in the selftest, and to give readers a hint that the struct is specific
> to KVM.
I'm not completely sold on this change. I don't mind that the selftest
vcpu struct isn't named the same as the KVM vcpu struct, since they're
different structs. I also don't mind avoiding 'kvm_' prefixes in "KVM
selftests" (indeed I wonder if we really need the kvm_ prefix for the
vm struct). If we do need prefixes for the kvm selftest framework
code to avoid collisions with test code, then maybe we should invent
something else, rather than use the somewhat ambiguous 'kvm', which
could also collide with stuff in the kvm uapi.
Thanks,
drew
On Wed, Jun 08, 2022, Andrew Jones wrote:
> On Fri, Jun 03, 2022 at 12:41:55AM +0000, Sean Christopherson wrote:
> > Rename 'struct vcpu' to 'struct kvm_vcpu' to align with 'struct kvm_vm'
> > in the selftest, and to give readers a hint that the struct is specific
> > to KVM.
>
> I'm not completely sold on this change. I don't mind that the selftest
> vcpu struct isn't named the same as the KVM vcpu struct, since they're
> different structs.
I don't care about matching KVM's internal naming exactly, but I do care
about not having a bare "vcpu"; it makes searching for usage a pain because it's
impossible to differentiate between instances of the struct and variables of the
same name without additional qualifiers.
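E.g. with the old name, a line like

	struct vcpu *vcpu = vcpu_get(vm, vcpuid);

makes a search for "vcpu" hit the struct tag, the local variable, and the helper
all at once.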
> I also don't mind avoiding 'kvm_' prefixes in "KVM selftests" (indeed I
> wonder if we really need the kvm_ prefix for the vm struct).
Same as above, "struct vm *vm" will drive me bonkers :-)
> If we do need prefixes for the kvm selftest framework code to avoid
> collisions with test code, then maybe we should invent something else, rather
> than use the somewhat ambiguous 'kvm', which could also collide with stuff in
> the kvm uapi.
Potential collisions with the KVM uAPI are a feature of sorts, e.g. tests shouldn't
be redefining kvm_* structures (I'd prefer _tests_ not use kvm_* at all, and only
use kvm_* in the library), and I gotta imagine KVM would break at least one real
world userspace if it defined "kvm_vcpu".
That said, I don't have a super strong preference for kvm_ versus something else,
though I think it will be difficult to come up with something that's unique,
intuitive, and doesn't look like a typo.
On Wed, Jun 08, 2022 at 04:01:38PM +0000, Sean Christopherson wrote:
> On Wed, Jun 08, 2022, Andrew Jones wrote:
> > On Fri, Jun 03, 2022 at 12:41:55AM +0000, Sean Christopherson wrote:
> > > Rename 'struct vcpu' to 'struct kvm_vcpu' to align with 'struct kvm_vm'
> > > in the selftest, and to give readers a hint that the struct is specific
> > > to KVM.
> >
> > I'm not completely sold on this change. I don't mind that the selftest
> > vcpu struct isn't named the same as the KVM vcpu struct, since they're
> > different structs.
>
> I don't care about matching KVM's internal naming exactly, but I do care
> about not having a bare "vcpu"; it makes searching for usage a pain because it's
> impossible to differentiate between instances of the struct and variables of the
> same name without additional qualifiers.
>
> > I also don't mind avoiding 'kvm_' prefixes in "KVM selftests" (indeed I
> > wonder if we really need the kvm_ prefix for the vm struct).
>
> Same as above, "struct vm *vm" will drive me bonkers :-)
Yes, that is a good point.
>
> > If we do need prefixes for the kvm selftest framework code to avoid
> > collisions with test code, then maybe we should invent something else, rather
> > than use the somewhat ambiguous 'kvm', which could also collide with stuff in
> > the kvm uapi.
>
> Potential collisions with the KVM uAPI are a feature of sorts, e.g. tests shouldn't
> be redefining kvm_* structures (I'd prefer _tests_ not use kvm_* at all, and only
> use kvm_* in the library), and I gotta imagine KVM would break at least one real
> world userspace if it defined "kvm_vcpu".
>
> That said, I don't have a super strong preference for kvm_ versus something else,
> though I think it will be difficult to come up with something that's unique,
> intuitive, and doesn't look like a typo.
>
Maybe just abbreviated "Kvm Selftests", i.e. 'ks_'? I won't harp on this
any longer though, so if that doesn't look good, then we can proceed with
'kvm_'.
Thanks,
drew
On Thu, Jun 09, 2022, Andrew Jones wrote:
> On Wed, Jun 08, 2022 at 04:01:38PM +0000, Sean Christopherson wrote:
> > On Wed, Jun 08, 2022, Andrew Jones wrote:
> > > If we do need prefixes for the kvm selftest framework code to avoid
> > > collisions with test code, then maybe we should invent something else, rather
> > > than use the somewhat ambiguous 'kvm', which could also collide with stuff in
> > > the kvm uapi.
> >
> > Potential collisions with the KVM uAPI are a feature of sorts, e.g. tests shouldn't
> > be redefining kvm_* structures (I'd prefer _tests_ not use kvm_* at all, and only
> > use kvm_* in the library), and I gotta imagine KVM would break at least one real
> > world userspace if it defined "kvm_vcpu".
> >
> > That said, I don't have a super strong preference for kvm_ versus something else,
> > though I think it will be difficult to come up with something that's unique,
> > intuitive, and doesn't look like a typo.
> >
>
> Maybe just abbreviated "Kvm Selftests", i.e. 'ks_'? I won't harp on this
> any longer though, so if that doesn't look good, then we can proceed with
> 'kvm_'.
ks_ was the best I could come up with too. But looking at it in code, I find it
more distracting than helpful. It's kind of like the uncanny valley effect, where
ks_ is *just* close enough to kvm_ that it makes me think something is wrong:
	struct kvm_sregs sregs;
	struct kvm_regs regs;
	struct ks_vcpu *vcpu;
	struct kvm_run *run;