2024-01-11 15:49:51

by Mathieu Desnoyers

Subject: [PATCH] selftests/rseq: Do not skip !allowed_cpus for mm_cid

Indexing with mm_cid is incompatible with skipping CPUs that are not in
the allowed cpumask, because concurrency IDs are based on a virtual ID
allocation which is unrelated to the physical CPU mask.

This issue can be reproduced by running the rseq selftests under a
taskset which excludes CPU 0, e.g.:

	taskset -c 10-20 ./run_param_test.sh
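
For illustration (a hypothetical standalone sketch, not part of the fix
itself): with the affinity mask above, mm_cid values are allocated
densely starting at 0, so filtering a mm_cid-style index through
CPU_ISSET() on the allowed mask rejects exactly the indexes that are in
use:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		cpu_set_t allowed_cpus;

		/* Mimic "taskset -c 10-20": only CPUs 10..20 allowed. */
		CPU_ZERO(&allowed_cpus);
		for (int cpu = 10; cpu <= 20; cpu++)
			CPU_SET(cpu, &allowed_cpus);

		/* mm_cid indexes are dense: 0 .. nr_allowed_cpus - 1. */
		for (int cid = 0; cid < CPU_COUNT(&allowed_cpus); cid++) {
			if (!CPU_ISSET(cid, &allowed_cpus)) {
				/* cids 0..9 are all wrongly skipped here. */
				printf("mm_cid %d skipped by cpumask check\n", cid);
				continue;
			}
			printf("mm_cid %d passes cpumask check\n", cid);
		}
		return 0;
	}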

Signed-off-by: Mathieu Desnoyers <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Boqun Feng <[email protected]>
---
.../selftests/rseq/basic_percpu_ops_test.c | 14 ++++++++++--
tools/testing/selftests/rseq/param_test.c | 22 ++++++++++++++-----
2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
index 887542961968..2348d2c20d0a 100644
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return false;	/* Use mm_cid */
+}
 #else
 # define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
 static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return true;	/* Use cpu_id as index. */
+}
 #endif
 
 struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		for (j = 1; j <= 100; j++) {
 			struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_list_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while ((node = __percpu_list_pop(&list, i))) {
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 20403d58345c..2f37961240ca 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return false;	/* Use mm_cid */
+}
 # ifdef TEST_MEMBARRIER
 /*
  * Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
 {
 	return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+	return true;	/* Use cpu_id as index. */
+}
 # ifdef TEST_MEMBARRIER
 static
 int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		for (j = 1; j <= 100; j++) {
 			struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_list_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		/* Worse-case is every item in same CPU. */
 		buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_buffer_node *node;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
 	/* Generate list entries for every usable cpu. */
 	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
 	for (i = 0; i < CPU_SETSIZE; i++) {
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 		/* Worse-case is every item in same CPU. */
 		buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
 	for (i = 0; i < CPU_SETSIZE; i++) {
 		struct percpu_memcpy_buffer_node item;
 
-		if (!CPU_ISSET(i, &allowed_cpus))
+		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
 			continue;
 
 		while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
--
2.25.1



2024-01-22 19:03:32

by Shuah Khan

Subject: Re: [PATCH] selftests/rseq: Do not skip !allowed_cpus for mm_cid

On 1/11/24 08:49, Mathieu Desnoyers wrote:
> Indexing with mm_cid is incompatible with skipping CPUs that are not in
> the allowed cpumask, because concurrency IDs are based on a virtual ID
> allocation which is unrelated to the physical CPU mask.
>
> This issue can be reproduced by running the rseq selftests under a
> taskset which excludes CPU 0, e.g.:
>
> 	taskset -c 10-20 ./run_param_test.sh
>
> Signed-off-by: Mathieu Desnoyers <[email protected]>
> Cc: Shuah Khan <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: "Paul E. McKenney" <[email protected]>
> Cc: Boqun Feng <[email protected]>
> ---

Hi Mathieu,

I applied this to linux-kselftest fixes for the next rc.

Please cc the linux-kselftest mailing list on your future patches. This
makes my workflow easier, as it relies on patches reaching the
linux-kselftest patchwork project.

thanks,
-- Shuah


2024-01-22 19:13:59

by Mathieu Desnoyers

Subject: Re: [PATCH] selftests/rseq: Do not skip !allowed_cpus for mm_cid

On 2024-01-22 13:43, Shuah Khan wrote:
> On 1/11/24 08:49, Mathieu Desnoyers wrote:
>> Indexing with mm_cid is incompatible with skipping CPUs that are not in
>> the allowed cpumask, because concurrency IDs are based on a virtual ID
>> allocation which is unrelated to the physical CPU mask.
>>
>> This issue can be reproduced by running the rseq selftests under a
>> taskset which excludes CPU 0, e.g.:
>>
>>    taskset -c 10-20 ./run_param_test.sh
>>
>> Signed-off-by: Mathieu Desnoyers <[email protected]>
>> Cc: Shuah Khan <[email protected]>
>> Cc: Peter Zijlstra <[email protected]>
>> Cc: "Paul E. McKenney" <[email protected]>
>> Cc: Boqun Feng <[email protected]>
>> ---
>
> Hi Mathieu,
>
> I applied this to linux-kselftest fixes for the next rc.
>
> Please cc the linux-kselftest mailing list on your future patches. This
> makes my workflow easier, as it relies on patches reaching the
> linux-kselftest patchwork project.

Will do, thanks!

Mathieu

>
> thanks,
> -- Shuah
>

--
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com