Skip to content

Commit d53271c

Browse files
compudj authored and shuahkh committed
selftests/rseq: Do not skip !allowed_cpus for mm_cid
Indexing with mm_cid is incompatible with skipping the disallowed cpumask, because concurrency IDs are based on a virtual ID allocation which is unrelated to the physical CPU mask. These issues can be reproduced by running the rseq selftests under a taskset which excludes CPU 0, e.g.:

    taskset -c 10-20 ./run_param_test.sh

Signed-off-by: Mathieu Desnoyers <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Boqun Feng <[email protected]>
Signed-off-by: Shuah Khan <[email protected]>
1 parent 6613476 commit d53271c

File tree

2 files changed

+28
-8
lines changed

2 files changed

+28
-8
lines changed

tools/testing/selftests/rseq/basic_percpu_ops_test.c

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
2424
{
2525
return rseq_mm_cid_available();
2626
}
27+
static
28+
bool rseq_use_cpu_index(void)
29+
{
30+
return false; /* Use mm_cid */
31+
}
2732
#else
2833
# define RSEQ_PERCPU RSEQ_PERCPU_CPU_ID
2934
static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
3641
{
3742
return rseq_current_cpu_raw() >= 0;
3843
}
44+
static
45+
bool rseq_use_cpu_index(void)
46+
{
47+
return true; /* Use cpu_id as index. */
48+
}
3949
#endif
4050

4151
struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
274284
/* Generate list entries for every usable cpu. */
275285
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
276286
for (i = 0; i < CPU_SETSIZE; i++) {
277-
if (!CPU_ISSET(i, &allowed_cpus))
287+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
278288
continue;
279289
for (j = 1; j <= 100; j++) {
280290
struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
299309
for (i = 0; i < CPU_SETSIZE; i++) {
300310
struct percpu_list_node *node;
301311

302-
if (!CPU_ISSET(i, &allowed_cpus))
312+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
303313
continue;
304314

305315
while ((node = __percpu_list_pop(&list, i))) {

tools/testing/selftests/rseq/param_test.c

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
288288
{
289289
return rseq_mm_cid_available();
290290
}
291+
static
292+
bool rseq_use_cpu_index(void)
293+
{
294+
return false; /* Use mm_cid */
295+
}
291296
# ifdef TEST_MEMBARRIER
292297
/*
293298
* Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
312317
{
313318
return rseq_current_cpu_raw() >= 0;
314319
}
320+
static
321+
bool rseq_use_cpu_index(void)
322+
{
323+
return true; /* Use cpu_id as index. */
324+
}
315325
# ifdef TEST_MEMBARRIER
316326
static
317327
int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
715725
/* Generate list entries for every usable cpu. */
716726
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
717727
for (i = 0; i < CPU_SETSIZE; i++) {
718-
if (!CPU_ISSET(i, &allowed_cpus))
728+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
719729
continue;
720730
for (j = 1; j <= 100; j++) {
721731
struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
752762
for (i = 0; i < CPU_SETSIZE; i++) {
753763
struct percpu_list_node *node;
754764

755-
if (!CPU_ISSET(i, &allowed_cpus))
765+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
756766
continue;
757767

758768
while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
902912
/* Generate list entries for every usable cpu. */
903913
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
904914
for (i = 0; i < CPU_SETSIZE; i++) {
905-
if (!CPU_ISSET(i, &allowed_cpus))
915+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
906916
continue;
907917
/* Worse-case is every item in same CPU. */
908918
buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
952962
for (i = 0; i < CPU_SETSIZE; i++) {
953963
struct percpu_buffer_node *node;
954964

955-
if (!CPU_ISSET(i, &allowed_cpus))
965+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
956966
continue;
957967

958968
while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
11131123
/* Generate list entries for every usable cpu. */
11141124
sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
11151125
for (i = 0; i < CPU_SETSIZE; i++) {
1116-
if (!CPU_ISSET(i, &allowed_cpus))
1126+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
11171127
continue;
11181128
/* Worse-case is every item in same CPU. */
11191129
buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
11601170
for (i = 0; i < CPU_SETSIZE; i++) {
11611171
struct percpu_memcpy_buffer_node item;
11621172

1163-
if (!CPU_ISSET(i, &allowed_cpus))
1173+
if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
11641174
continue;
11651175

11661176
while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {

0 commit comments

Comments (0)