Skip to content

Commit 81af89e

Browse files
melver authored and Ingo Molnar committed
kcsan: Add kcsan_set_access_mask() support
When setting up an access mask with kcsan_set_access_mask(), KCSAN will only report races if concurrent changes to bits set in access_mask are observed. Conveying access_mask via a separate call avoids introducing overhead in the common-case fast-path. Acked-by: John Hubbard <[email protected]> Signed-off-by: Marco Elver <[email protected]> Signed-off-by: Paul E. McKenney <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
1 parent b738f61 commit 81af89e

File tree

6 files changed

+73
-5
lines changed

6 files changed

+73
-5
lines changed

include/linux/kcsan-checks.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void);
6868
*/
6969
void kcsan_atomic_next(int n);
7070

71+
/**
72+
* kcsan_set_access_mask - set access mask
73+
*
74+
* Set the access mask for all accesses for the current context if non-zero.
75+
* Only value changes to bits set in the mask will be reported.
76+
*
77+
* @mask: bitmask
78+
*/
79+
void kcsan_set_access_mask(unsigned long mask);
80+
7181
#else /* CONFIG_KCSAN */
7282

7383
static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void) { }
7888
static inline void kcsan_flat_atomic_begin(void) { }
7989
static inline void kcsan_flat_atomic_end(void) { }
8090
static inline void kcsan_atomic_next(int n) { }
91+
static inline void kcsan_set_access_mask(unsigned long mask) { }
8192

8293
#endif /* CONFIG_KCSAN */
8394

include/linux/kcsan.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,11 @@ struct kcsan_ctx {
3535
*/
3636
int atomic_nest_count;
3737
bool in_flat_atomic;
38+
39+
/*
40+
* Access mask for all accesses if non-zero.
41+
*/
42+
unsigned long access_mask;
3843
};
3944

4045
/**

init/init_task.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,7 @@ struct task_struct init_task
167167
.atomic_next = 0,
168168
.atomic_nest_count = 0,
169169
.in_flat_atomic = false,
170+
.access_mask = 0,
170171
},
171172
#endif
172173
#ifdef CONFIG_TRACE_IRQFLAGS

kernel/kcsan/core.c

Lines changed: 39 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
3939
.atomic_next = 0,
4040
.atomic_nest_count = 0,
4141
.in_flat_atomic = false,
42+
.access_mask = 0,
4243
};
4344

4445
/*
@@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
298299

299300
if (!kcsan_is_enabled())
300301
return;
302+
303+
/*
304+
* The access_mask check relies on value-change comparison. To avoid
305+
* reporting a race where e.g. the writer set up the watchpoint, but the
306+
* reader has access_mask!=0, we have to ignore the found watchpoint.
307+
*/
308+
if (get_ctx()->access_mask != 0)
309+
return;
310+
301311
/*
302312
* Consume the watchpoint as soon as possible, to minimize the chances
303313
* of !consumed. Consuming the watchpoint must always be guarded by
@@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
341351
u32 _4;
342352
u64 _8;
343353
} expect_value;
354+
unsigned long access_mask;
344355
enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
345356
unsigned long ua_flags = user_access_save();
346357
unsigned long irq_flags;
@@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
435446
* Re-read value, and check if it is as expected; if not, we infer a
436447
* racy access.
437448
*/
449+
access_mask = get_ctx()->access_mask;
438450
switch (size) {
439451
case 1:
440452
expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
453+
if (access_mask)
454+
expect_value._1 &= (u8)access_mask;
441455
break;
442456
case 2:
443457
expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
458+
if (access_mask)
459+
expect_value._2 &= (u16)access_mask;
444460
break;
445461
case 4:
446462
expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
463+
if (access_mask)
464+
expect_value._4 &= (u32)access_mask;
447465
break;
448466
case 8:
449467
expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
468+
if (access_mask)
469+
expect_value._8 &= (u64)access_mask;
450470
break;
451471
default:
452472
break; /* ignore; we do not diff the values */
@@ -460,11 +480,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
460480
if (!remove_watchpoint(watchpoint)) {
461481
/*
462482
* Depending on the access type, map a value_change of MAYBE to
463-
* TRUE (require reporting).
483+
* TRUE (always report) or FALSE (never report).
464484
*/
465-
if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
466-
/* Always assume a value-change. */
467-
value_change = KCSAN_VALUE_CHANGE_TRUE;
485+
if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
486+
if (access_mask != 0) {
487+
/*
488+
* For access with access_mask, we require a
489+
* value-change, as it is likely that races on
490+
* ~access_mask bits are expected.
491+
*/
492+
value_change = KCSAN_VALUE_CHANGE_FALSE;
493+
} else if (size > 8 || is_assert) {
494+
/* Always assume a value-change. */
495+
value_change = KCSAN_VALUE_CHANGE_TRUE;
496+
}
468497
}
469498

470499
/*
@@ -622,6 +651,12 @@ void kcsan_atomic_next(int n)
622651
}
623652
EXPORT_SYMBOL(kcsan_atomic_next);
624653

654+
void kcsan_set_access_mask(unsigned long mask)
655+
{
656+
get_ctx()->access_mask = mask;
657+
}
658+
EXPORT_SYMBOL(kcsan_set_access_mask);
659+
625660
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
626661
{
627662
check_access(ptr, size, type);

kernel/kcsan/kcsan.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,11 @@ enum kcsan_value_change {
9898
*/
9999
KCSAN_VALUE_CHANGE_MAYBE,
100100

101+
/*
102+
* Did not observe a value-change, and it is invalid to report the race.
103+
*/
104+
KCSAN_VALUE_CHANGE_FALSE,
105+
101106
/*
102107
* The value was observed to change, and the race should be reported.
103108
*/

kernel/kcsan/report.c

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,9 @@ static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
132132
static bool
133133
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
134134
{
135+
/* Should never get here if value_change==FALSE. */
136+
WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
137+
135138
/*
136139
* The first call to skip_report always has value_change==TRUE, since we
137140
* cannot know the value written of an instrumented access. For the 2nd
@@ -493,7 +496,15 @@ void kcsan_report(const volatile void *ptr, size_t size, int access_type,
493496

494497
kcsan_disable_current();
495498
if (prepare_report(&flags, ptr, size, access_type, cpu_id, type)) {
496-
if (print_report(ptr, size, access_type, value_change, cpu_id, type) && panic_on_warn)
499+
/*
500+
* Never report if value_change is FALSE, only if it is
501+
* either TRUE or MAYBE. In case of MAYBE, further filtering may
502+
* be done once we know the full stack trace in print_report().
503+
*/
504+
bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
505+
print_report(ptr, size, access_type, value_change, cpu_id, type);
506+
507+
if (reported && panic_on_warn)
497508
panic("panic_on_warn set ...\n");
498509

499510
release_report(&flags, type);

0 commit comments

Comments
 (0)