2022-10-24 12:33:20

by Wang, Wei W

Subject: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm

Each vcpu has an id associated with it, so referencing a vcpu by
indexing into an array with "vcpu->id" is intrinsically faster and
easier than walking the list of vcpus used by the current
implementation. Change the vcpu list to an array of vcpu pointers.
Users then don't need to allocate such a vcpu array on their own;
instead, they can reuse the one maintained in kvm_vm.

Signed-off-by: Wei Wang <[email protected]>
---
.../testing/selftests/kvm/include/kvm_util.h | 4 +++
.../selftests/kvm/include/kvm_util_base.h | 3 +-
tools/testing/selftests/kvm/lib/kvm_util.c | 34 ++++++-------------
tools/testing/selftests/kvm/lib/x86_64/vmx.c | 2 +-
4 files changed, 17 insertions(+), 26 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index c9286811a4cb..5d5c8968fb06 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -10,4 +10,8 @@
#include "kvm_util_base.h"
#include "ucall_common.h"

+#define vm_iterate_over_vcpus(vm, vcpu, i) \
+ for (i = 0, vcpu = vm->vcpus[0]; \
+ vcpu && i < KVM_MAX_VCPUS; vcpu = vm->vcpus[++i])
+
#endif /* SELFTEST_KVM_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index e42a09cd24a0..c90a9609b853 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -45,7 +45,6 @@ struct userspace_mem_region {
};

struct kvm_vcpu {
- struct list_head list;
uint32_t id;
int fd;
struct kvm_vm *vm;
@@ -75,7 +74,6 @@ struct kvm_vm {
unsigned int pa_bits;
unsigned int va_bits;
uint64_t max_gfn;
- struct list_head vcpus;
struct userspace_mem_regions regions;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
@@ -92,6 +90,7 @@ struct kvm_vm {
int stats_fd;
struct kvm_stats_header stats_header;
struct kvm_stats_desc *stats_desc;
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};


diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index f1cb1627161f..941f6c3ea9dc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -195,7 +195,6 @@ struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");

- INIT_LIST_HEAD(&vm->vcpus);
vm->regions.gpa_tree = RB_ROOT;
vm->regions.hva_tree = RB_ROOT;
hash_init(vm->regions.slot_hash);
@@ -534,6 +533,10 @@ __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
int ret;
+ uint32_t vcpu_id = vcpu->id;
+
+ TEST_ASSERT(!!vm->vcpus[vcpu_id], "vCPU%d wasn't added\n", vcpu_id);
+ vm->vcpus[vcpu_id] = NULL;

if (vcpu->dirty_gfns) {
ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
@@ -547,18 +550,16 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
ret = close(vcpu->fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));

- list_del(&vcpu->list);
-
vcpu_arch_free(vcpu);
free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
- struct kvm_vcpu *vcpu, *tmp;
- int ret;
+ struct kvm_vcpu *vcpu;
+ int i, ret;

- list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
+ vm_iterate_over_vcpus(vmp, vcpu, i)
vm_vcpu_rm(vmp, vcpu);

ret = close(vmp->fd);
@@ -1085,18 +1086,6 @@ static int vcpu_mmap_sz(void)
return ret;
}

-static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
-{
- struct kvm_vcpu *vcpu;
-
- list_for_each_entry(vcpu, &vm->vcpus, list) {
- if (vcpu->id == vcpu_id)
- return true;
- }
-
- return false;
-}
-
/*
* Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
* No additional vCPU setup is done. Returns the vCPU.
@@ -1106,7 +1095,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
struct kvm_vcpu *vcpu;

/* Confirm a vcpu with the specified id doesn't already exist. */
- TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
+ TEST_ASSERT(!vm->vcpus[vcpu_id], "vCPU%d already exists\n", vcpu_id);

/* Allocate and initialize new vcpu structure. */
vcpu = calloc(1, sizeof(*vcpu));
@@ -1125,8 +1114,7 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT(vcpu->run != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));

- /* Add to linked-list of VCPUs. */
- list_add(&vcpu->list, &vm->vcpus);
+ vm->vcpus[vcpu_id] = vcpu;

return vcpu;
}
@@ -1684,7 +1672,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
*/
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
- int ctr;
+ int i, ctr;
struct userspace_mem_region *region;
struct kvm_vcpu *vcpu;

@@ -1712,7 +1700,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
fprintf(stream, "%*sVCPUs:\n", indent, "");

- list_for_each_entry(vcpu, &vm->vcpus, list)
+ vm_iterate_over_vcpus(vm, vcpu, i)
vcpu_dump(stream, vcpu, indent + 2);
}

diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d21049c38fc5..77812dd03647 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -549,7 +549,7 @@ bool kvm_vm_has_ept(struct kvm_vm *vm)
struct kvm_vcpu *vcpu;
uint64_t ctrl;

- vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+ vcpu = vm->vcpus[0];
TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");

ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
--
2.27.0


2022-10-27 00:52:41

by Sean Christopherson

Subject: Re: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm

On Mon, Oct 24, 2022, Wei Wang wrote:
> Each vcpu has an id associated with it, so referencing a vcpu by
> indexing into an array with "vcpu->id" is intrinsically faster and
> easier than walking the list of vcpus used by the current
> implementation. Change the vcpu list to an array of vcpu pointers.
> Users then don't need to allocate such a vcpu array on their own;
> instead, they can reuse the one maintained in kvm_vm.
>
> Signed-off-by: Wei Wang <[email protected]>
> ---
> .../testing/selftests/kvm/include/kvm_util.h | 4 +++
> .../selftests/kvm/include/kvm_util_base.h | 3 +-
> tools/testing/selftests/kvm/lib/kvm_util.c | 34 ++++++-------------
> tools/testing/selftests/kvm/lib/x86_64/vmx.c | 2 +-
> 4 files changed, 17 insertions(+), 26 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
> index c9286811a4cb..5d5c8968fb06 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util.h
> @@ -10,4 +10,8 @@
> #include "kvm_util_base.h"
> #include "ucall_common.h"
>
> +#define vm_iterate_over_vcpus(vm, vcpu, i) \

vm_for_each_vcpu() is more aligned with existing KVM terminology.

> + for (i = 0, vcpu = vm->vcpus[0]; \
> + vcpu && i < KVM_MAX_VCPUS; vcpu = vm->vcpus[++i])

I hate pointer arithmetic more than most people, but in this case it avoids the
need to pass in 'i', which in turn cuts down on boilerplate and churn.
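
For illustration, one shape the pointer-based iterator could take (a
hypothetical sketch, not merged code; it assumes the selftests build with
a C dialect that permits declarations in a for-initializer):

/*
 * Hypothetical sketch: the helper index lives inside the macro, so
 * callers only declare "vcpu". Iteration stops at the first NULL slot,
 * i.e. this assumes vCPU ids are densely packed.
 */
#define vm_for_each_vcpu(vm, vcpu)					\
	for (struct kvm_vcpu **__pp = &(vm)->vcpus[0];			\
	     __pp < &(vm)->vcpus[KVM_MAX_VCPUS] && ((vcpu) = *__pp);	\
	     __pp++)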

> #endif /* SELFTEST_KVM_UTIL_H */
> diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
> index e42a09cd24a0..c90a9609b853 100644
> --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> @@ -45,7 +45,6 @@ struct userspace_mem_region {
> };
>
> struct kvm_vcpu {
> - struct list_head list;
> uint32_t id;
> int fd;
> struct kvm_vm *vm;
> @@ -75,7 +74,6 @@ struct kvm_vm {
> unsigned int pa_bits;
> unsigned int va_bits;
> uint64_t max_gfn;
> - struct list_head vcpus;
> struct userspace_mem_regions regions;
> struct sparsebit *vpages_valid;
> struct sparsebit *vpages_mapped;
> @@ -92,6 +90,7 @@ struct kvm_vm {
> int stats_fd;
> struct kvm_stats_header stats_header;
> struct kvm_stats_desc *stats_desc;
> + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

We can dynamically allocate the array without too much trouble, though I'm not
sure it's worth shaving the few KiB of memory. For __vm_create(), the number of
vCPUs is known when the VM is created. For vm_create_barebones(), we could do
the simple thing of allocating KVM_MAX_VCPUS.
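
For illustration, a minimal sketch of the creation-time allocation being
described (hypothetical; assumes vm->vcpus becomes a "struct kvm_vcpu **"
and a new "max_vcpus" field tracks the array size):

	/*
	 * Hypothetical: size the array from nr_vcpus when it is known at
	 * VM creation; fall back to KVM_MAX_VCPUS for barebones VMs.
	 */
	vm->max_vcpus = nr_vcpus ? nr_vcpus : KVM_MAX_VCPUS;
	vm->vcpus = calloc(vm->max_vcpus, sizeof(*vm->vcpus));
	TEST_ASSERT(vm->vcpus != NULL, "Insufficient Memory");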

> @@ -534,6 +533,10 @@ __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
> static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
> {
> int ret;
> + uint32_t vcpu_id = vcpu->id;
> +
> + TEST_ASSERT(!!vm->vcpus[vcpu_id], "vCPU%d wasn't added\n", vcpu_id);

This is unnecessary; there's one caller, and it's iterating over the array of vCPUs.

2022-10-27 12:41:34

by Wang, Wei W

Subject: RE: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm

On Thursday, October 27, 2022 7:48 AM, Sean Christopherson wrote:
> > + for (i = 0, vcpu = vm->vcpus[0]; \
> > + vcpu && i < KVM_MAX_VCPUS; vcpu = vm->vcpus[++i])
>
> I hate pointer arithmetic more than most people, but in this case it avoids the
> need to pass in 'i', which in turn cuts down on boilerplate and churn.

Hmm, indeed, this can be improved. How about this one:

+#define vm_iterate_over_vcpus(vm, vcpu) \
+ for (vcpu = vm->vcpus[0]; vcpu; vcpu = vm->vcpus[vcpu->id + 1]) \


>
> > #endif /* SELFTEST_KVM_UTIL_H */
> > diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h
> > b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > index e42a09cd24a0..c90a9609b853 100644
> > --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> > +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > @@ -45,7 +45,6 @@ struct userspace_mem_region { };
> >
> > struct kvm_vcpu {
> > - struct list_head list;
> > uint32_t id;
> > int fd;
> > struct kvm_vm *vm;
> > @@ -75,7 +74,6 @@ struct kvm_vm {
> > unsigned int pa_bits;
> > unsigned int va_bits;
> > uint64_t max_gfn;
> > - struct list_head vcpus;
> > struct userspace_mem_regions regions;
> > struct sparsebit *vpages_valid;
> > struct sparsebit *vpages_mapped;
> > @@ -92,6 +90,7 @@ struct kvm_vm {
> > int stats_fd;
> > struct kvm_stats_header stats_header;
> > struct kvm_stats_desc *stats_desc;
> > + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
>
> We can dynamically allocate the array without too much trouble, though I'm not
> sure it's worth shaving the few KiB of memory. For __vm_create(), the number
> of vCPUs is known when the VM is created. For vm_create_barebones(), we
> could do the simple thing of allocating KVM_MAX_VCPUS.

The issue with dynamic allocation is that some users start with __vm_create(nr_vcpus), and
could later add more vcpus with vm_vcpu_add() (e.g. x86_64/xapic_ipi_test.c). To support
this we may need to re-allocate the array on a later vm_vcpu_add(), and also add an
nr_vcpus field to indicate the array size.
It's userspace memory, so it's not a problem to use a bit more virtual memory (the memory
isn't actually allocated until we have that many vcpus touching the array entries), I think.

>
> > @@ -534,6 +533,10 @@ __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
> > static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) {
> > int ret;
> > + uint32_t vcpu_id = vcpu->id;
> > +
> > + TEST_ASSERT(!!vm->vcpus[vcpu_id], "vCPU%d wasn't added\n", vcpu_id);
>
> This is unnecessary; there's one caller, and it's iterating over the array of vCPUs.

That's right, thanks.

2022-10-27 16:51:31

by Sean Christopherson

Subject: Re: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm

On Thu, Oct 27, 2022, Wang, Wei W wrote:
> On Thursday, October 27, 2022 7:48 AM, Sean Christopherson wrote:
> > > + for (i = 0, vcpu = vm->vcpus[0]; \
> > > + vcpu && i < KVM_MAX_VCPUS; vcpu = vm->vcpus[++i])
> >
> > I hate pointer arithmetic more than most people, but in this case it avoids the
> > need to pass in 'i', which in turn cuts down on boilerplate and churn.
>
> Hmm, indeed, this can be improved. How about this one:
>
> +#define vm_iterate_over_vcpus(vm, vcpu) \
> + for (vcpu = vm->vcpus[0]; vcpu; vcpu = vm->vcpus[vcpu->id + 1]) \

Needs to be bounded by the size of the array.
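
A bounded version might look like this (sketch):

/*
 * Sketch with the bound added: never index past the end of the array.
 * Iteration still stops at the first NULL slot, so densely packed vCPU
 * ids are still assumed.
 */
#define vm_iterate_over_vcpus(vm, vcpu)					\
	for (vcpu = vm->vcpus[0];					\
	     vcpu;							\
	     vcpu = (vcpu->id + 1 < KVM_MAX_VCPUS) ?			\
		    vm->vcpus[vcpu->id + 1] : NULL)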

> > > #endif /* SELFTEST_KVM_UTIL_H */
> > > diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > index e42a09cd24a0..c90a9609b853 100644
> > > --- a/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
> > > @@ -45,7 +45,6 @@ struct userspace_mem_region { };
> > >
> > > struct kvm_vcpu {
> > > - struct list_head list;
> > > uint32_t id;
> > > int fd;
> > > struct kvm_vm *vm;
> > > @@ -75,7 +74,6 @@ struct kvm_vm {
> > > unsigned int pa_bits;
> > > unsigned int va_bits;
> > > uint64_t max_gfn;
> > > - struct list_head vcpus;
> > > struct userspace_mem_regions regions;
> > > struct sparsebit *vpages_valid;
> > > struct sparsebit *vpages_mapped;
> > > @@ -92,6 +90,7 @@ struct kvm_vm {
> > > int stats_fd;
> > > struct kvm_stats_header stats_header;
> > > struct kvm_stats_desc *stats_desc;
> > > + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> >
> > We can dynamically allocate the array without too much trouble, though I'm not
> > sure it's worth shaving the few KiB of memory. For __vm_create(), the number
> > of vCPUs is known when the VM is created. For vm_create_barebones(), we
> > could do the simple thing of allocating KVM_MAX_VCPUS.
>
> The issue with dynamic allocation is that some users start with
> __vm_create(nr_vcpus), and could later add more vcpus with vm_vcpu_add() (e.g.
> x86_64/xapic_ipi_test.c). To support this we may need to re-allocate the
> array on a later vm_vcpu_add(), and also add an nr_vcpus field to indicate
> the array size.

Hrm, right, the number of runnable CPUs isn't a hard upper bound. Ideally it
would be, as the number of pages required for guest memory will fail to account
for the "extra" vcpus. E.g. that test should really do vm_create(2) and then
manually add each vCPU.
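
Something like this (a sketch; "halter_guest_code" and "sender_guest_code"
stand in for the test's actual guest functions):

	struct kvm_vcpu *halter, *sender;
	struct kvm_vm *vm;

	/* Sketch: account for both runnable vCPUs at creation time. */
	vm = vm_create(2);
	halter = vm_vcpu_add(vm, 0, halter_guest_code);
	sender = vm_vcpu_add(vm, 1, sender_guest_code);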

> It's userspace memory, so it's not a problem to use a bit more virtual memory
> (the memory isn't actually allocated until we have that many vcpus touching
> the array entries), I think.

Yeah, just allocate the max for now, though the array still needs to be dynamically
allocated based on the actual maximum number of vCPUs. Oh, duh, we can do the
easy thing and just bump KVM_MAX_VCPUS to 1024 to match KVM. And then assert that
kvm_check_cap(KVM_CAP_MAX_VCPUS) == KVM_MAX_VCPUS in kvm_create_max_vcpus.c.
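
The assertion could be as simple as (a sketch; kvm_check_cap() is the
existing selftests helper):

	/*
	 * Sketch: verify the selftests' compile-time KVM_MAX_VCPUS matches
	 * the maximum the kernel actually reports, so a fixed-size vcpus[]
	 * array can never be outgrown.
	 */
	TEST_ASSERT(kvm_check_cap(KVM_CAP_MAX_VCPUS) == KVM_MAX_VCPUS,
		    "KVM_CAP_MAX_VCPUS (%d) != KVM_MAX_VCPUS (%d)",
		    kvm_check_cap(KVM_CAP_MAX_VCPUS), KVM_MAX_VCPUS);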

2022-10-28 02:31:54

by Wang, Wei W

Subject: RE: [PATCH v1 01/18] KVM: selftests/kvm_util: use array of pointers to maintain vcpus in kvm_vm

On Thursday, October 27, 2022 11:27 PM, Sean Christopherson wrote:
> Yeah, just allocate the max for now, though the array still needs to be
> dynamically allocated based on the actual maximum number of vCPUs. Oh, duh,
> we can do the easy thing and just bump KVM_MAX_VCPUS to 1024 to match
> KVM. And then assert that
> kvm_check_cap(KVM_CAP_MAX_VCPUS) == KVM_MAX_VCPUS in
> kvm_create_max_vcpus.c.

Right. I thought about the same thing; we should update KVM_MAX_VCPUS anyway.