
Commit b465254

Heiko Carstens authored and Vasily Gorbik committed
s390/spinlock: Implement SPINLOCK_LOCKVAL with inline assembly
Implement SPINLOCK_LOCKVAL with inline assembly, which makes use of the
ALTERNATIVE macro, to read spinlock_lockval from lowcore. Provide an
alternative instruction with a different offset in case lowcore is
relocated.

This replaces sequences of two instructions with one instruction.

Before:
  10602a:  a7 78 00 00        lhi    %r7,0
  10602e:  a5 8e 00 00        llilh  %r8,0
  106032:  58 d0 83 ac        l      %r13,940(%r8)
  106036:  ba 7d b5 80        cs     %r7,%r13,1408(%r11)

After:
  10602a:  a7 88 00 00        lhi    %r8,0
  10602e:  e3 70 03 ac 00 58  ly     %r7,940
  106034:  ba 87 b5 80        cs     %r8,%r7,1408(%r11)

Kernel image size change:
add/remove: 756/750 grow/shrink: 646/3435 up/down: 30778/-46326 (-15548)

Acked-by: Vasily Gorbik <[email protected]>
Signed-off-by: Heiko Carstens <[email protected]>
Signed-off-by: Vasily Gorbik <[email protected]>
1 parent 4797e9b commit b465254
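As a reading aid, here is a minimal user-space model of the change described above; it is not the kernel code. struct toy_lowcore, toy_get_lowcore() and the lockval_*() helpers are made up for illustration, and only the field offset (940 / 0x3ac) and the "cpu + 1" meaning of the value are taken from the commit. It models the source-level pattern only: the old macro first materialized the lowcore base and then loaded the field through it (the llilh + l pair), while the new helper loads from a fixed address in one step (the ly), with ALTERNATIVE patching in the LOWCORE_ALT_ADDRESS-based offset at boot when lowcore is relocated. A C compiler may well emit the same code for both toy helpers; the point is the shape of the access, not the instruction count.

#include <stdint.h>

/* Toy stand-in for struct lowcore; only the one field matters here. */
struct toy_lowcore {
        char pad[940];                  /* spinlock_lockval sits at offset 0x3ac */
        uint32_t spinlock_lockval;      /* holds "cpu + 1" for this CPU */
};

static struct toy_lowcore toy_lc;

/* Old scheme: fetch a pointer to the lowcore, then load the field through
 * it -- two dependent steps, matching the llilh + l pair in "Before". */
static struct toy_lowcore *toy_get_lowcore(void)
{
        return &toy_lc;
}

static uint32_t lockval_old_way(void)
{
        return toy_get_lowcore()->spinlock_lockval;
}

/* New scheme: the field's address is known up front, so one load suffices,
 * matching the ly in "After".  In the kernel, ALTERNATIVE chooses between
 * the offset into lowcore at address 0 and the same offset plus
 * LOWCORE_ALT_ADDRESS when lowcore has been relocated. */
static uint32_t *toy_lockval_addr = &toy_lc.spinlock_lockval;

static uint32_t lockval_new_way(void)
{
        return *toy_lockval_addr;
}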

2 files changed: 21 additions & 5 deletions

arch/s390/include/asm/spinlock.h

Lines changed: 18 additions & 2 deletions
@@ -16,7 +16,23 @@
 #include <asm/processor.h>
 #include <asm/alternative.h>
 
-#define SPINLOCK_LOCKVAL	(get_lowcore()->spinlock_lockval)
+static __always_inline unsigned int spinlock_lockval(void)
+{
+	unsigned long lc_lockval;
+	unsigned int lockval;
+
+	BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
+	lc_lockval = offsetof(struct lowcore, spinlock_lockval);
+	asm_inline(
+		ALTERNATIVE(" ly %[lockval],%[offzero](%%r0)\n",
+			    " ly %[lockval],%[offalt](%%r0)\n",
+			    ALT_FEATURE(MFEATURE_LOWCORE))
+		: [lockval] "=d" (lockval)
+		: [offzero] "i" (lc_lockval),
+		  [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
+		  "m" (((struct lowcore *)0)->spinlock_lockval));
+	return lockval;
+}
 
 extern int spin_retry;
 
@@ -60,7 +76,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 	int old = 0;
 
 	barrier();
-	return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
+	return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval()));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)

arch/s390/lib/spinlock.c

Lines changed: 3 additions & 3 deletions
@@ -160,7 +160,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 
 	ix = get_lowcore()->spinlock_index++;
 	barrier();
-	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
+	lockval = spinlock_lockval();	/* cpu + 1 */
 	node = this_cpu_ptr(&spin_wait[ix]);
 	node->prev = node->next = NULL;
 	node_id = node->node_id;
@@ -251,7 +251,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
 {
 	int lockval, old, new, owner, count;
 
-	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
+	lockval = spinlock_lockval();	/* cpu + 1 */
 
 	/* Pass the virtual CPU to the lock holder if it is not running */
 	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	int cpu = SPINLOCK_LOCKVAL;
+	int cpu = spinlock_lockval();
 	int owner, count;
 
 	for (count = spin_retry; count > 0; count--) {
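The /* cpu + 1 */ comments above describe the lock protocol that spinlock_lockval() feeds: the lock word holds the owning CPU number plus one, so zero always means "unlocked". Below is a minimal user-space sketch of that protocol, using the GCC/Clang __atomic builtins in place of the kernel's arch_try_cmpxchg(); struct toy_spinlock, toy_trylock() and toy_unlock() are hypothetical names used only for illustration.

#include <stdbool.h>

struct toy_spinlock {
        int lock;       /* 0: free, n: held by CPU n - 1 */
};

/* Mirrors arch_spin_trylock_once(): try to move the lock word from 0
 * (free) to cpu + 1 with a single compare-and-swap. */
static bool toy_trylock(struct toy_spinlock *lp, int cpu)
{
        int old = 0;
        int lockval = cpu + 1;  /* never 0, so it cannot be mistaken for "free" */

        return __atomic_compare_exchange_n(&lp->lock, &old, lockval,
                                           false, __ATOMIC_ACQUIRE,
                                           __ATOMIC_RELAXED);
}

static void toy_unlock(struct toy_spinlock *lp)
{
        __atomic_store_n(&lp->lock, 0, __ATOMIC_RELEASE);
}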
