
Commit 247dbcd

Matthew Wilcox (Oracle) authored and akpm00 committed
bitops: add xor_unlock_is_negative_byte()
Replace clear_bit_unlock_is_negative_byte() with xor_unlock_is_negative_byte().  We have a few places that like to lock a folio, set a flag and unlock it again.  Allow for the possibility of combining the latter two operations for efficiency.  We are guaranteed that the caller holds the lock, so it is safe to unlock it with the xor.  The caller must guarantee that nobody else will set the flag without holding the lock; it is not safe to do this with the PG_dirty flag, for example.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Cc: Albert Ou <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Andreas Dilger <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Nicholas Piggin <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: Paul Walmsley <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: "Theodore Ts'o" <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
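Purely as an illustration (not part of this commit): a minimal sketch of the calling pattern the message describes, assuming a caller in mm/filemap.c that holds the folio lock (PG_locked), wants to set PG_uptodate on its way out, and has folio_flags() and folio_wake_bit() available. Because PG_locked is known to be set, XORing it clears it, and the return value reports bit 7 (PG_waiters).

/* Hypothetical sketch, not taken from this patch. */
static void example_end_read(struct folio *folio, bool uptodate)
{
        unsigned long mask = 1UL << PG_locked;          /* unlock: bit is known to be set */

        if (uptodate)
                mask |= 1UL << PG_uptodate;             /* set flag: known clear, only ever set under the lock */
        if (xor_unlock_is_negative_byte(mask, folio_flags(folio, 0)))
                folio_wake_bit(folio, PG_locked);       /* bit 7 (PG_waiters) was set */
}
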
1 parent 7a4847e commit 247dbcd

File tree

8 files changed (+47 −57 lines)


arch/powerpc/include/asm/bitops.h

Lines changed: 5 additions & 12 deletions
@@ -234,32 +234,25 @@ static inline int arch_test_and_change_bit(unsigned long nr,
 }

 #ifdef CONFIG_PPC64
-static inline unsigned long
-clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *p)
 {
 	unsigned long old, t;
-	unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
-	unsigned long mask = BIT_MASK(nr);

 	__asm__ __volatile__ (
 	PPC_RELEASE_BARRIER
 "1:"	PPC_LLARX "%0,0,%3,0\n"
-	"andc %1,%0,%2\n"
+	"xor %1,%0,%2\n"
 	PPC_STLCX "%1,0,%3\n"
 	"bne- 1b\n"
 	: "=&r" (old), "=&r" (t)
 	: "r" (mask), "r" (p)
 	: "cc", "memory");

-	return old;
+	return (old & BIT_MASK(7)) != 0;
 }

-/*
- * This is a special function for mm/filemap.c
- * Bit 7 corresponds to PG_waiters.
- */
-#define arch_clear_bit_unlock_is_negative_byte(nr, addr)	\
-	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
+#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte

 #endif /* CONFIG_PPC64 */

arch/x86/include/asm/bitops.h

Lines changed: 5 additions & 6 deletions
@@ -94,18 +94,17 @@ arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }

-static __always_inline bool
-arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
 {
 	bool negative;
-	asm volatile(LOCK_PREFIX "andb %2,%1"
+	asm volatile(LOCK_PREFIX "xorb %2,%1"
 		CC_SET(s)
 		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
-		: "ir" ((char) ~(1 << nr)) : "memory");
+		: "iq" ((char)mask) : "memory");
 	return negative;
 }
-#define arch_clear_bit_unlock_is_negative_byte \
-	arch_clear_bit_unlock_is_negative_byte
+#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte

 static __always_inline void
 arch___clear_bit_unlock(long nr, volatile unsigned long *addr)

include/asm-generic/bitops/instrumented-lock.h

Lines changed: 15 additions & 12 deletions
@@ -58,27 +58,30 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 	return arch_test_and_set_bit_lock(nr, addr);
 }

-#if defined(arch_clear_bit_unlock_is_negative_byte)
+#if defined(arch_xor_unlock_is_negative_byte)
 /**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- *                                     byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
+ * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
+ * it is negative, for unlock.
+ * @mask: Change the bits which are set in this mask.
+ * @addr: The address of the word containing the byte to change.
  *
+ * Changes some of bits 0-6 in the word pointed to by @addr.
  * This operation is atomic and provides release barrier semantics.
+ * Used to optimise some folio operations which are commonly paired
+ * with an unlock or end of writeback.  Bit 7 is used as PG_waiters to
+ * indicate whether anybody is waiting for the unlock.
  *
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
+ * Return: Whether the top bit of the byte is set.
  */
-static inline bool
-clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
 {
 	kcsan_release();
-	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
-	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+	instrument_atomic_write(addr, sizeof(long));
+	return arch_xor_unlock_is_negative_byte(mask, addr);
 }
 /* Let everybody know we have it. */
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
 #endif

 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
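A quick worked example of the semantics documented above (illustrative, not part of the patch): the mask may only touch bits 0-6 of the byte, and the return value reports whether bit 7 is set.

	unsigned long word = BIT(0) | BIT(7);	/* e.g. "locked" with a waiter queued */
	bool waiters = xor_unlock_is_negative_byte(BIT(0), &word);
	/* word now has only BIT(7) set; waiters == true */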

include/asm-generic/bitops/lock.h

Lines changed: 5 additions & 16 deletions
@@ -66,27 +66,16 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 	raw_atomic_long_set_release((atomic_long_t *)p, old);
 }

-/**
- * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- *                                          byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
- */
-#ifndef arch_clear_bit_unlock_is_negative_byte
-static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
-		volatile unsigned long *p)
+#ifndef arch_xor_unlock_is_negative_byte
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *p)
 {
 	long old;
-	unsigned long mask = BIT_MASK(nr);

-	p += BIT_WORD(nr);
-	old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+	old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
 	return !!(old & BIT(7));
 }
-#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
+#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
 #endif

 #include <asm-generic/bitops/instrumented-lock.h>

kernel/kcsan/kcsan_test.c

Lines changed: 4 additions & 4 deletions
@@ -700,10 +700,10 @@ static void test_barrier_nothreads(struct kunit *test)
 	KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
 	KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);

-#ifdef clear_bit_unlock_is_negative_byte
-	KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
-	KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
-	KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
+#ifdef xor_unlock_is_negative_byte
+	KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
+	KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
+	KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true);
 #endif
 	kcsan_nestable_atomic_end();
 }

kernel/kcsan/selftest.c

Lines changed: 4 additions & 4 deletions
@@ -228,10 +228,10 @@ static bool __init test_barrier(void)
 	spin_lock(&test_spinlock);
 	KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));

-#ifdef clear_bit_unlock_is_negative_byte
-	KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
-	KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
-	KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
+#ifdef xor_unlock_is_negative_byte
+	KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
+	KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
+	KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var));
 #endif
 	kcsan_nestable_atomic_end();

mm/filemap.c

Lines changed: 5 additions & 0 deletions
@@ -1482,6 +1482,11 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
 }
 EXPORT_SYMBOL_GPL(folio_add_wait_queue);

+#ifdef xor_unlock_is_negative_byte
+#define clear_bit_unlock_is_negative_byte(nr, p)	\
+	xor_unlock_is_negative_byte(1 << nr, p)
+#endif
+
 #ifndef clear_bit_unlock_is_negative_byte

 /*
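The compatibility #define above relies on the guarantee restated in the commit message: the caller holds the lock, so bit nr is already set and XORing it behaves exactly like clearing it. A hypothetical worked value (not from the patch):

	unsigned long flags = BIT(PG_locked);	/* lock held, so the bit is set */
	bool waiters = xor_unlock_is_negative_byte(BIT(PG_locked), &flags);
	/* flags is now 0 and waiters reflects bit 7 (PG_waiters), matching
	 * clear_bit_unlock_is_negative_byte(PG_locked, &flags) */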

mm/kasan/kasan_test.c

Lines changed: 4 additions & 3 deletions
@@ -1099,9 +1099,10 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
 	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

-#if defined(clear_bit_unlock_is_negative_byte)
-	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
-			clear_bit_unlock_is_negative_byte(nr, addr));
+#if defined(xor_unlock_is_negative_byte)
+	if (nr < 7)
+		KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
+				xor_unlock_is_negative_byte(1 << nr, addr));
 #endif
 }
