Remove the unnecessary definition of the threads[] array, and use the
helper functions to create and join threads.
Also move the setting of the thread affinity into __vcpu_thread_create
using a pthread attribute. This avoids an explicit step to set the
affinity after thread creation.
Signed-off-by: Wei Wang <[email protected]>
---
.../selftests/kvm/hardware_disable_test.c | 56 +++++--------------
1 file changed, 15 insertions(+), 41 deletions(-)
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index f5d59b9934f1..c212d34a6714 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -8,7 +8,6 @@
#define _GNU_SOURCE
#include <fcntl.h>
-#include <pthread.h>
#include <semaphore.h>
#include <stdint.h>
#include <stdlib.h>
@@ -59,64 +58,39 @@ static void *sleeping_thread(void *arg)
pthread_exit(NULL);
}
-static inline void check_create_thread(pthread_t *thread, pthread_attr_t *attr,
- void *(*f)(void *), void *arg)
-{
- int r;
-
- r = pthread_create(thread, attr, f, arg);
- TEST_ASSERT(r == 0, "%s: failed to create thread", __func__);
-}
-
-static inline void check_set_affinity(pthread_t thread, cpu_set_t *cpu_set)
-{
- int r;
-
- r = pthread_setaffinity_np(thread, sizeof(cpu_set_t), cpu_set);
- TEST_ASSERT(r == 0, "%s: failed set affinity", __func__);
-}
-
-static inline void check_join(pthread_t thread, void **retval)
-{
- int r;
-
- r = pthread_join(thread, retval);
- TEST_ASSERT(r == 0, "%s: failed to join thread", __func__);
-}
-
static void run_test(uint32_t run)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
cpu_set_t cpu_set;
- pthread_t threads[VCPU_NUM];
pthread_t throw_away;
- void *b;
+ pthread_attr_t attr;
uint32_t i, j;
+ int r;
CPU_ZERO(&cpu_set);
for (i = 0; i < VCPU_NUM; i++)
CPU_SET(i, &cpu_set);
+ r = pthread_attr_init(&attr);
+ TEST_ASSERT(!r, "%s: failed to init thread attr, r = %d", __func__, r);
+ r = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpu_set);
+ TEST_ASSERT(!r, "%s: failed to set affinity, r = %d", __func__, r);
- vm = vm_create(VCPU_NUM);
+ vm = vm_create_with_vcpus(VCPU_NUM, guest_code, NULL);
pr_debug("%s: [%d] start vcpus\n", __func__, run);
- for (i = 0; i < VCPU_NUM; ++i) {
- vcpu = vm_vcpu_add(vm, i, guest_code);
+ vm_iterate_over_vcpus(vm, vcpu, i) {
+ __vcpu_thread_create(vcpu, &attr, run_vcpu, 0);
- check_create_thread(&threads[i], NULL, run_vcpu, vcpu);
- check_set_affinity(threads[i], &cpu_set);
-
- for (j = 0; j < SLEEPING_THREAD_NUM; ++j) {
- check_create_thread(&throw_away, NULL, sleeping_thread,
- (void *)NULL);
- check_set_affinity(throw_away, &cpu_set);
- }
+ for (j = 0; j < SLEEPING_THREAD_NUM; ++j)
+ __pthread_create_with_name(&throw_away, &attr,
+ sleeping_thread, (void *)NULL,
+ "sleeping-thread");
}
pr_debug("%s: [%d] all threads launched\n", __func__, run);
sem_post(sem);
- for (i = 0; i < VCPU_NUM; ++i)
- check_join(threads[i], &b);
+
+ vm_vcpu_threads_join(vm);
/* Should not be reached */
TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run);
}
--
2.27.0
On Mon, Oct 24, 2022, Wei Wang wrote:
> Remove the unnecessary definition of the threads[] array, and use the
> helper functions to create and join threads.
>
> Also move setting of the thread affinity to __vcpu_thread_create using
> attribute. This avoids an explicit step to set it after thread
> creation.
As David called out, please do this in a separate patch (one logical change per
patch).
> Signed-off-by: Wei Wang <[email protected]>
> ---
> .../selftests/kvm/hardware_disable_test.c | 56 +++++--------------
> 1 file changed, 15 insertions(+), 41 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
> index f5d59b9934f1..c212d34a6714 100644
> --- a/tools/testing/selftests/kvm/hardware_disable_test.c
> +++ b/tools/testing/selftests/kvm/hardware_disable_test.c
> @@ -8,7 +8,6 @@
> #define _GNU_SOURCE
>
> #include <fcntl.h>
> -#include <pthread.h>
> #include <semaphore.h>
> #include <stdint.h>
> #include <stdlib.h>
> @@ -59,64 +58,39 @@ static void *sleeping_thread(void *arg)
> pthread_exit(NULL);
> }
>
> -static inline void check_create_thread(pthread_t *thread, pthread_attr_t *attr,
> - void *(*f)(void *), void *arg)
> -{
> - int r;
> -
> - r = pthread_create(thread, attr, f, arg);
> - TEST_ASSERT(r == 0, "%s: failed to create thread", __func__);
> -}
> -
> -static inline void check_set_affinity(pthread_t thread, cpu_set_t *cpu_set)
> -{
> - int r;
> -
> - r = pthread_setaffinity_np(thread, sizeof(cpu_set_t), cpu_set);
> - TEST_ASSERT(r == 0, "%s: failed set affinity", __func__);
> -}
> -
> -static inline void check_join(pthread_t thread, void **retval)
> -{
> - int r;
> -
> - r = pthread_join(thread, retval);
> - TEST_ASSERT(r == 0, "%s: failed to join thread", __func__);
> -}
> -
> static void run_test(uint32_t run)
> {
> struct kvm_vcpu *vcpu;
> struct kvm_vm *vm;
> cpu_set_t cpu_set;
> - pthread_t threads[VCPU_NUM];
> pthread_t throw_away;
> - void *b;
> + pthread_attr_t attr;
> uint32_t i, j;
> + int r;
>
> CPU_ZERO(&cpu_set);
> for (i = 0; i < VCPU_NUM; i++)
> CPU_SET(i, &cpu_set);
Uh, what is this test doing? I assume the intent is to avoid spamming all pCPUs
in the system, but I don't get the benefit of doing so.
On Thursday, October 27, 2022 8:16 AM, Sean Christopherson wrote:
> > diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c
> > static void run_test(uint32_t run)
> > {
> > struct kvm_vcpu *vcpu;
> > struct kvm_vm *vm;
> > cpu_set_t cpu_set;
> > - pthread_t threads[VCPU_NUM];
> > pthread_t throw_away;
> > - void *b;
> > + pthread_attr_t attr;
> > uint32_t i, j;
> > + int r;
> >
> > CPU_ZERO(&cpu_set);
> > for (i = 0; i < VCPU_NUM; i++)
> > CPU_SET(i, &cpu_set);
>
> Uh, what is this test doing? I assume the intent is to avoid spamming all
> pCPUs in the system, but I don't get the benefit of doing so.
IIUIC, it is to test whether the race condition between the 2 paths:
#1 kvm_arch_hardware_disable->drop_user_return_notifiers() and
#2 fire_user_return_notifiers->kvm_on_user_return
has been solved by disabling interrupts in kvm_on_user_return.
To stress the test, it creates a bunch of threads (continuously making syscalls
to trigger #2 above) to be scheduled on the same pCPU that runs a vCPU, and
then VM is killed, which triggers #1 above.
They fork to run the test 512 times, hoping there is a chance that #1 and #2
above happen at the same time without an issue.
+ Ignacio to confirm if possible.
On Thu, Oct 27, 2022, Wang, Wei W wrote:
> On Thursday, October 27, 2022 8:16 AM, Sean Christopherson wrote:
> > > diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c
> > > static void run_test(uint32_t run)
> > > {
> > > struct kvm_vcpu *vcpu;
> > > struct kvm_vm *vm;
> > > cpu_set_t cpu_set;
> > > - pthread_t threads[VCPU_NUM];
> > > pthread_t throw_away;
> > > - void *b;
> > > + pthread_attr_t attr;
> > > uint32_t i, j;
> > > + int r;
> > >
> > > CPU_ZERO(&cpu_set);
> > > for (i = 0; i < VCPU_NUM; i++)
> > > CPU_SET(i, &cpu_set);
> >
> > Uh, what is this test doing? I assume the intent is to avoid spamming all
> > pCPUs in the system, but I don't get the benefit of doing so.
>
> IIUIC, it is to test if the condition race between the 2 paths:
> #1 kvm_arch_hardware_disable->drop_user_return_notifiers() and
> #2 fire_user_return_notifiers->kvm_on_user_return
> has been solved by disabling interrupts in kvm_on_user_return.
>
> To stress the tests, it creates a bunch of threads (continuously making syscalls
> to trigger #2 above) to be scheduled on the same pCPU that runs a vCPU, and
> then VM is killed, which triggers #1 above.
> They fork to test 512 times hoping there is chance #1 and #2 above can happen
> at the same time without an issue.
But why does it matter what pCPU a vCPU is running on? Wouldn't the probability
of triggering a race between kvm_on_user_return() and hardware_disable() be
_higher_ if there are more pCPUs returning to userspace?
On Friday, October 28, 2022 2:03 AM, Sean Christopherson wrote:
> But why does it matter what pCPU a vCPU is running on? Wouldn't the
> probability of triggering a race between kvm_on_user_return() and
> hardware_disable() be _higher_ if there are more pCPUs returning to userspace?
I think the point there is that the vcpus and those syscall threads need to be on the
same pCPUs. Linux by default has its own load balancing for threads to run on. If the
vcpus and syscall threads are scattered on different pCPUs, kvm_on_user_return
would be less likely to be triggered when the syscall threads return to userspace.