@@ -205,21 +205,21 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 }
 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 
-static inline int arch_atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
 {
 	return arch_xchg(&v->counter, new);
 }
 #define arch_atomic_xchg arch_atomic_xchg
 
-static inline void arch_atomic_and(int i, atomic_t *v)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "andl %1,%0"
 			: "+m" (v->counter)
 			: "ir" (i)
 			: "memory");
 }
 
-static inline int arch_atomic_fetch_and(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
 	int val = arch_atomic_read(v);
 
@@ -229,15 +229,15 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 }
 #define arch_atomic_fetch_and arch_atomic_fetch_and
 
-static inline void arch_atomic_or(int i, atomic_t *v)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "orl %1,%0"
 			: "+m" (v->counter)
 			: "ir" (i)
 			: "memory");
 }
 
-static inline int arch_atomic_fetch_or(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
 	int val = arch_atomic_read(v);
 
@@ -247,15 +247,15 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
 }
 #define arch_atomic_fetch_or arch_atomic_fetch_or
 
-static inline void arch_atomic_xor(int i, atomic_t *v)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorl %1,%0"
 			: "+m" (v->counter)
 			: "ir" (i)
 			: "memory");
 }
 
-static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 {
 	int val = arch_atomic_read(v);
 
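The loop bodies of the fetch_* helpers are cut off by the hunk context above; only their opening arch_atomic_read() is visible. A minimal sketch of how such a routine is typically completed on top of arch_atomic_try_cmpxchg() (an assumption based on the visible context, not part of this diff) could look like the following:

/* Hedged sketch, not taken from the hunks above: read the current value,
 * then retry the CAS until it succeeds, i.e. until no other CPU has
 * modified v->counter between the read and the compare-and-exchange. */
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}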