
Commit c9c2606

iii-i authored and Vasily Gorbik committed
s390/preempt: mark all functions __always_inline
preempt_count-related functions are quite ubiquitous and may be called
by noinstr ones, introducing unwanted instrumentation. Here is one
example call chain:

  irqentry_nmi_enter()  # noinstr
    lockdep_hardirqs_enabled()
      this_cpu_read()
        __pcpu_size_call_return()
          this_cpu_read_*()
            this_cpu_generic_read()
              __this_cpu_generic_read_nopreempt()
                preempt_disable_notrace()
                  __preempt_count_inc()
                    __preempt_count_add()

They are very small, so there are no significant downsides to
force-inlining them.

Signed-off-by: Ilya Leoshkevich <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Heiko Carstens <[email protected]>
Signed-off-by: Vasily Gorbik <[email protected]>
1 parent 01cac82 commit c9c2606
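For context, here is a minimal userspace sketch of the problem the commit fixes. This is an illustration, not kernel code: the attribute macros are simplified stand-ins for the kernel's definitions in include/linux/compiler_types.h, preempt_count_var stands in for S390_lowcore.preempt_count, and nmi_enter_sketch() is a hypothetical noinstr-style entry function.

/* Simplified stand-ins for the kernel's attribute macros; the real
 * definitions in include/linux/compiler_types.h differ. */
#define __always_inline	inline __attribute__((__always_inline__))
#define noinstr		__attribute__((no_instrument_function))

static int preempt_count_var;	/* stand-in for S390_lowcore.preempt_count */

/*
 * With plain "static inline" the compiler is free to emit an
 * out-of-line copy of this helper; that copy would be instrumented and
 * then called from noinstr code. __always_inline forces the body to be
 * folded into every caller instead.
 */
static __always_inline void __preempt_count_add(int val)
{
	preempt_count_var += val;
}

/* Models a noinstr entry path such as irqentry_nmi_enter(): the
 * preempt_count update must compile to inline code, not a traced call. */
noinstr void nmi_enter_sketch(void)
{
	__preempt_count_add(1);
	__preempt_count_add(-1);
}

int main(void)
{
	nmi_enter_sketch();
	return preempt_count_var;	/* exits 0: the adds balanced out */
}

The point mirrors the commit message: once the helpers are force-inlined, the noinstr caller contains no call into instrumentable text.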

File tree

1 file changed (+18, −18 lines)

arch/s390/include/asm/preempt.h

Lines changed: 18 additions & 18 deletions
@@ -12,12 +12,12 @@
 #define PREEMPT_NEED_RESCHED	0x80000000
 #define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
 	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
 	int old, new;
 
@@ -29,22 +29,22 @@ static inline void preempt_count_set(int pc)
 			  old, new) != old);
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
 	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
 	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
 	/*
 	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -59,17 +59,17 @@ static inline void __preempt_count_add(int val)
 		__atomic_add(val, &S390_lowcore.preempt_count);
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
 	__preempt_count_add(-val);
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
 	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
 			preempt_offset);
@@ -79,45 +79,45 @@ static inline bool should_resched(int preempt_offset)
 
 #define PREEMPT_ENABLED	(0)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
 	return READ_ONCE(S390_lowcore.preempt_count);
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
 	S390_lowcore.preempt_count = pc;
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
 	return false;
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
 	S390_lowcore.preempt_count += val;
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
 	S390_lowcore.preempt_count -= val;
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	return !--S390_lowcore.preempt_count && tif_need_resched();
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
 	return unlikely(preempt_count() == preempt_offset &&
 			tif_need_resched());
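The second half of the diff touches the non-atomic fallback variants, whose semantics are easy to model in isolation. Below is a minimal, compilable sketch of the fallback __preempt_count_dec_and_test(): the counter is again a plain global rather than S390_lowcore.preempt_count, the __always_inline macro is a simplified stand-in, and tif_need_resched() is stubbed (the real one tests TIF_NEED_RESCHED in the thread flags).

#include <stdbool.h>
#include <stdio.h>

#define __always_inline	inline __attribute__((__always_inline__))

static int preempt_count_var = 1;	/* stand-in; 1 == preemption disabled once */

static bool tif_need_resched(void)	/* stub: pretend a reschedule is pending */
{
	return true;
}

/*
 * Mirrors the fallback in the diff: report true only when this
 * decrement re-enables preemption (count reaches zero) AND a
 * reschedule is pending, i.e. the caller should enter the scheduler.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return !--preempt_count_var && tif_need_resched();
}

int main(void)
{
	printf("%d\n", __preempt_count_dec_and_test());	/* prints 1 */
	return 0;
}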
