
Commit 5009447

Merge tag 's390-6.9-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Alexander Gordeev:

 - Fix missing NULL pointer check when determining guest/host fault

 - Mark all functions in asm/atomic_ops.h, asm/atomic.h and
   asm/preempt.h as __always_inline to avoid unwanted instrumentation

 - Fix removal of a Processor Activity Instrumentation (PAI) sampling
   event in PMU device driver

 - Align system call table on 8 bytes

* tag 's390-6.9-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/entry: align system call table on 8 bytes
  s390/pai: fix sampling event removal for PMU device driver
  s390/preempt: mark all functions __always_inline
  s390/atomic: mark all functions __always_inline
  s390/mm: fix NULL pointer dereference
2 parents 2f9fd9e + 378ca2d commit 5009447
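
The __always_inline changes pulled in above address the fact that a plain "inline" is only a hint: the compiler may still emit an out-of-line copy of a helper, and that copy can then be instrumented (tracing, sanitizers) and end up called from code that must stay uninstrumented. The sketch below is illustrative only and not part of this merge; my_always_inline and the two read helpers are made-up names, standing in for the kernel's own __always_inline macro, which expands to roughly the same attribute.

/* Illustrative sketch only (not from this merge): contrast a plain
 * "inline" hint with a forced-inline attribute.
 */
#define my_always_inline inline __attribute__((__always_inline__))

/* The compiler may still emit an out-of-line copy of this helper,
 * and that copy can be instrumented and called like any function. */
static inline int plain_inline_read(const int *p)
{
        return *p;
}

/* The compiler must inline this at every call site, so no separate
 * function body is left for instrumentation to attach to. */
static my_always_inline int forced_inline_read(const int *p)
{
        return *p;
}

Marking the atomic and preempt helpers this way keeps them safe to call from code that must remain uninstrumented, independent of config-dependent inlining heuristics.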

File tree: 7 files changed (+67, -58 lines)

arch/s390/include/asm/atomic.h

Lines changed: 22 additions & 22 deletions
@@ -15,31 +15,31 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
         return __atomic_read(v);
 }
 #define arch_atomic_read arch_atomic_read
 
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
         __atomic_set(v, i);
 }
 #define arch_atomic_set arch_atomic_set
 
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
         return __atomic_add_barrier(i, &v->counter) + i;
 }
 #define arch_atomic_add_return arch_atomic_add_return
 
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
         return __atomic_add_barrier(i, &v->counter);
 }
 #define arch_atomic_fetch_add arch_atomic_fetch_add
 
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
         __atomic_add(i, &v->counter);
 }
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
 #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
 
 #define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
         __atomic_##op(i, &v->counter); \
 } \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
         return __atomic_##op##_barrier(i, &v->counter); \
 }
@@ -74,60 +74,60 @@ ATOMIC_OPS(xor)
 
 #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
         return __atomic_cmpxchg(&v->counter, old, new);
 }
 #define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
         return __atomic64_read(v);
 }
 #define arch_atomic64_read arch_atomic64_read
 
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
         __atomic64_set(v, i);
 }
 #define arch_atomic64_set arch_atomic64_set
 
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
         return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
         return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
         __atomic64_add(i, (long *)&v->counter);
 }
 #define arch_atomic64_add arch_atomic64_add
 
 #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
         return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-#define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
-        __atomic64_##op(i, (long *)&v->counter); \
-} \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
-{ \
-        return __atomic64_##op##_barrier(i, (long *)&v->counter); \
+#define ATOMIC64_OPS(op) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+        __atomic64_##op(i, (long *)&v->counter); \
+} \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+        return __atomic64_##op##_barrier(i, (long *)&v->counter); \
 }
 
 ATOMIC64_OPS(and)

arch/s390/include/asm/atomic_ops.h

Lines changed: 11 additions & 11 deletions
@@ -8,7 +8,7 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__
 
-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
 {
         int c;
 
@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
         return c;
 }
 
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
 {
         asm volatile(
                 " st %1,%0\n"
                 : "=R" (v->counter) : "d" (i));
 }
 
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
 {
         s64 c;
 
@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
         return c;
 }
 
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
 {
         asm volatile(
                 " stg %1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
 { \
         op_type old; \
 \
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
 #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
 { \
         int old, new; \
 \
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
 #undef __ATOMIC_OPS
 
 #define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
 { \
         long old, new; \
 \
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
         asm volatile(
                 " cs %[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
         return old;
 }
 
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
         int old_expected = old;
 
@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
         return old == old_expected;
 }
 
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
         asm volatile(
                 " csg %[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
         return old;
 }
 
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
         long old_expected = old;

arch/s390/include/asm/preempt.h

Lines changed: 18 additions & 18 deletions
@@ -12,12 +12,12 @@
 #define PREEMPT_NEED_RESCHED 0x80000000
 #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
         return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
         int old, new;
 
@@ -29,22 +29,22 @@ static inline void preempt_count_set(int pc)
                         old, new) != old);
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
         __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
         __atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
         return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
         /*
          * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -59,17 +59,17 @@ static inline void __preempt_count_add(int val)
         __atomic_add(val, &S390_lowcore.preempt_count);
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
         __preempt_count_add(-val);
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
         return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
         return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
                         preempt_offset);
@@ -79,45 +79,45 @@ static inline bool should_resched(int preempt_offset)
 
 #define PREEMPT_ENABLED (0)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
         return READ_ONCE(S390_lowcore.preempt_count);
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
         S390_lowcore.preempt_count = pc;
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
         return false;
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
         S390_lowcore.preempt_count += val;
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
         S390_lowcore.preempt_count -= val;
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
         return !--S390_lowcore.preempt_count && tif_need_resched();
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
         return unlikely(preempt_count() == preempt_offset &&
                         tif_need_resched());

arch/s390/kernel/entry.S

Lines changed: 1 addition & 0 deletions
@@ -635,6 +635,7 @@ SYM_DATA_START_LOCAL(daton_psw)
 SYM_DATA_END(daton_psw)
 
         .section .rodata, "a"
+        .balign 8
 #define SYSCALL(esame,emu) .quad __s390x_ ## esame
 SYM_DATA_START(sys_call_table)
 #include "asm/syscall_table.h"

arch/s390/kernel/perf_pai_crypto.c

Lines changed: 7 additions & 3 deletions
@@ -90,7 +90,6 @@ static void paicrypt_event_destroy(struct perf_event *event)
                                          event->cpu);
         struct paicrypt_map *cpump = mp->mapptr;
 
-        cpump->event = NULL;
         static_branch_dec(&pai_key);
         mutex_lock(&pai_reserve_mutex);
         debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
@@ -356,10 +355,15 @@ static int paicrypt_add(struct perf_event *event, int flags)
 
 static void paicrypt_stop(struct perf_event *event, int flags)
 {
-        if (!event->attr.sample_period) /* Counting */
+        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+        struct paicrypt_map *cpump = mp->mapptr;
+
+        if (!event->attr.sample_period) { /* Counting */
                 paicrypt_read(event);
-        else /* Sampling */
+        } else { /* Sampling */
                 perf_sched_cb_dec(event->pmu);
+                cpump->event = NULL;
+        }
         event->hw.state = PERF_HES_STOPPED;
 }
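
Taken together, the two hunks above stop clearing cpump->event unconditionally in paicrypt_event_destroy() and instead clear it only when a sampling event is stopped. For readability, the post-patch paicrypt_stop() reconstructed from the hunk reads as follows; surrounding driver definitions (paicrypt_root, struct paicrypt_map, the PMU callbacks) are not shown here.

/* paicrypt_stop() after this merge, reconstructed from the hunk above. */
static void paicrypt_stop(struct perf_event *event, int flags)
{
        struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
        struct paicrypt_map *cpump = mp->mapptr;

        if (!event->attr.sample_period) {       /* Counting */
                paicrypt_read(event);
        } else {                                /* Sampling */
                perf_sched_cb_dec(event->pmu);
                /* Cleared here in the sampling path now, instead of
                 * unconditionally in paicrypt_event_destroy(). */
                cpump->event = NULL;
        }
        event->hw.state = PERF_HES_STOPPED;
}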
