Skip to content

Commit d4a72e7

Browse files
Peter Zijlstra authored and hansendc committed
x86/mm/pae: Get rid of set_64bit()
Recognise that set_64bit() is a special case of our previously introduced pxx_xchg64(), so use that and get rid of set_64bit().

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/20221022114425.233481884%40infradead.org
1 parent 9ee850a commit d4a72e7

File tree

2 files changed

+12
-39
lines changed

2 files changed

+12
-39
lines changed

arch/x86/include/asm/cmpxchg_32.h

Lines changed: 0 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -7,34 +7,6 @@
77
* you need to test for the feature in boot_cpu_data.
88
*/
99

10-
/*
11-
* CMPXCHG8B only writes to the target if we had the previous
12-
* value in registers, otherwise it acts as a read and gives us the
13-
* "new previous" value. That is why there is a loop. Preloading
14-
* EDX:EAX is a performance optimization: in the common case it means
15-
* we need only one locked operation.
16-
*
17-
* A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
18-
* least an FPU save and/or %cr0.ts manipulation.
19-
*
20-
* cmpxchg8b must be used with the lock prefix here to allow the
21-
* instruction to be executed atomically. We need to have the reader
22-
* side to see the coherent 64bit value.
23-
*/
24-
static inline void set_64bit(volatile u64 *ptr, u64 value)
25-
{
26-
u32 low = value;
27-
u32 high = value >> 32;
28-
u64 prev = *ptr;
29-
30-
asm volatile("\n1:\t"
31-
LOCK_PREFIX "cmpxchg8b %0\n\t"
32-
"jnz 1b"
33-
: "=m" (*ptr), "+A" (prev)
34-
: "b" (low), "c" (high)
35-
: "memory");
36-
}
37-
3810
#ifdef CONFIG_X86_CMPXCHG64
3911
#define arch_cmpxchg64(ptr, o, n) \
4012
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \

arch/x86/include/asm/pgtable-3level.h

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,15 @@
1919
pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
2020
__FILE__, __LINE__, &(e), pgd_val(e))
2121

22-
/* Rules for using set_pte: the pte being assigned *must* be
22+
#define pxx_xchg64(_pxx, _ptr, _val) ({ \
23+
_pxx##val_t *_p = (_pxx##val_t *)_ptr; \
24+
_pxx##val_t _o = *_p; \
25+
do { } while (!try_cmpxchg64(_p, &_o, (_val))); \
26+
native_make_##_pxx(_o); \
27+
})
28+
29+
/*
30+
* Rules for using set_pte: the pte being assigned *must* be
2331
* either not present or in a state where the hardware will
2432
* not attempt to update the pte. In places where this is
2533
* not possible, use pte_get_and_clear to obtain the old pte
@@ -34,20 +42,20 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
3442

3543
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
3644
{
37-
set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
45+
pxx_xchg64(pte, ptep, native_pte_val(pte));
3846
}
3947

4048
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
4149
{
42-
set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
50+
pxx_xchg64(pmd, pmdp, native_pmd_val(pmd));
4351
}
4452

4553
static inline void native_set_pud(pud_t *pudp, pud_t pud)
4654
{
4755
#ifdef CONFIG_PAGE_TABLE_ISOLATION
4856
pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
4957
#endif
50-
set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
58+
pxx_xchg64(pud, pudp, native_pud_val(pud));
5159
}
5260

5361
/*
@@ -91,13 +99,6 @@ static inline void pud_clear(pud_t *pudp)
9199
}
92100

93101

94-
#define pxx_xchg64(_pxx, _ptr, _val) ({ \
95-
_pxx##val_t *_p = (_pxx##val_t *)_ptr; \
96-
_pxx##val_t _o = *_p; \
97-
do { } while (!try_cmpxchg64(_p, &_o, (_val))); \
98-
native_make_##_pxx(_o); \
99-
})
100-
101102
#ifdef CONFIG_SMP
102103
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
103104
{

0 commit comments

Comments (0)