Skip to content

Commit 4f292c4

Browse files
committed
Merge tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Dave Hansen:

 New Feature:
  - Randomize the per-cpu entry areas

 Cleanups:
  - Have CR3_ADDR_MASK use PHYSICAL_PAGE_MASK instead of open coding it
  - Move to "native" set_memory_rox() helper
  - Clean up pmd_get_atomic() and i386-PAE
  - Remove some unused page table size macros

* tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  x86/mm: Ensure forced page table splitting
  x86/kasan: Populate shadow for shared chunk of the CPU entry area
  x86/kasan: Add helpers to align shadow addresses up and down
  x86/kasan: Rename local CPU_ENTRY_AREA variables to shorten names
  x86/mm: Populate KASAN shadow for entire per-CPU range of CPU entry area
  x86/mm: Recompute physical address for every page of per-CPU CEA mapping
  x86/mm: Rename __change_page_attr_set_clr(.checkalias)
  x86/mm: Inhibit _PAGE_NX changes from cpa_process_alias()
  x86/mm: Untangle __change_page_attr_set_clr(.checkalias)
  x86/mm: Add a few comments
  x86/mm: Fix CR3_ADDR_MASK
  x86/mm: Remove P*D_PAGE_MASK and P*D_PAGE_SIZE macros
  mm: Convert __HAVE_ARCH_P..P_GET to the new style
  mm: Remove pointless barrier() after pmdp_get_lockless()
  x86/mm/pae: Get rid of set_64bit()
  x86_64: Remove pointless set_64bit() usage
  x86/mm/pae: Be consistent with pXXp_get_and_clear()
  x86/mm/pae: Use WRITE_ONCE()
  x86/mm/pae: Don't (ab)use atomic64
  mm/gup: Fix the lockless PMD access
  ...
2 parents 03d84bd + 3e844d8 commit 4f292c4

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

55 files changed

+356
-395
lines changed

arch/arm/mach-omap1/sram-init.c

Lines changed: 3 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -10,11 +10,11 @@
1010
#include <linux/kernel.h>
1111
#include <linux/init.h>
1212
#include <linux/io.h>
13+
#include <linux/set_memory.h>
1314

1415
#include <asm/fncpy.h>
1516
#include <asm/tlb.h>
1617
#include <asm/cacheflush.h>
17-
#include <asm/set_memory.h>
1818

1919
#include <asm/mach/map.h>
2020

@@ -74,8 +74,7 @@ void *omap_sram_push(void *funcp, unsigned long size)
7474

7575
dst = fncpy(sram, funcp, size);
7676

77-
set_memory_ro(base, pages);
78-
set_memory_x(base, pages);
77+
set_memory_rox(base, pages);
7978

8079
return dst;
8180
}
@@ -126,8 +125,7 @@ static void __init omap_detect_and_map_sram(void)
126125
base = (unsigned long)omap_sram_base;
127126
pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
128127

129-
set_memory_ro(base, pages);
130-
set_memory_x(base, pages);
128+
set_memory_rox(base, pages);
131129
}
132130

133131
static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);

arch/arm/mach-omap2/sram.c

Lines changed: 3 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -14,11 +14,11 @@
1414
#include <linux/kernel.h>
1515
#include <linux/init.h>
1616
#include <linux/io.h>
17+
#include <linux/set_memory.h>
1718

1819
#include <asm/fncpy.h>
1920
#include <asm/tlb.h>
2021
#include <asm/cacheflush.h>
21-
#include <asm/set_memory.h>
2222

2323
#include <asm/mach/map.h>
2424

@@ -96,8 +96,7 @@ void *omap_sram_push(void *funcp, unsigned long size)
9696

9797
dst = fncpy(sram, funcp, size);
9898

99-
set_memory_ro(base, pages);
100-
set_memory_x(base, pages);
99+
set_memory_rox(base, pages);
101100

102101
return dst;
103102
}
@@ -217,8 +216,7 @@ static void __init omap2_map_sram(void)
217216
base = (unsigned long)omap_sram_base;
218217
pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
219218

220-
set_memory_ro(base, pages);
221-
set_memory_x(base, pages);
219+
set_memory_rox(base, pages);
222220
}
223221

224222
static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,

arch/mips/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -46,7 +46,7 @@ config MIPS
4646
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
4747
select GENERIC_SMP_IDLE_THREAD
4848
select GENERIC_TIME_VSYSCALL
49-
select GUP_GET_PTE_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
49+
select GUP_GET_PXX_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
5050
select HAVE_ARCH_COMPILER_H
5151
select HAVE_ARCH_JUMP_LABEL
5252
select HAVE_ARCH_KGDB if MIPS_FP_SUPPORT

arch/powerpc/include/asm/nohash/32/pgtable.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -263,7 +263,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
263263
}
264264

265265
#ifdef CONFIG_PPC_16K_PAGES
266-
#define __HAVE_ARCH_PTEP_GET
266+
#define ptep_get ptep_get
267267
static inline pte_t ptep_get(pte_t *ptep)
268268
{
269269
pte_basic_t val = READ_ONCE(ptep->pte);

arch/powerpc/kernel/kprobes.c

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -20,12 +20,12 @@
2020
#include <linux/kdebug.h>
2121
#include <linux/slab.h>
2222
#include <linux/moduleloader.h>
23+
#include <linux/set_memory.h>
2324
#include <asm/code-patching.h>
2425
#include <asm/cacheflush.h>
2526
#include <asm/sstep.h>
2627
#include <asm/sections.h>
2728
#include <asm/inst.h>
28-
#include <asm/set_memory.h>
2929
#include <linux/uaccess.h>
3030

3131
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -134,10 +134,9 @@ void *alloc_insn_page(void)
134134
if (!page)
135135
return NULL;
136136

137-
if (strict_module_rwx_enabled()) {
138-
set_memory_ro((unsigned long)page, 1);
139-
set_memory_x((unsigned long)page, 1);
140-
}
137+
if (strict_module_rwx_enabled())
138+
set_memory_rox((unsigned long)page, 1);
139+
141140
return page;
142141
}
143142

arch/sh/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -24,7 +24,7 @@ config SUPERH
2424
select GENERIC_PCI_IOMAP if PCI
2525
select GENERIC_SCHED_CLOCK
2626
select GENERIC_SMP_IDLE_THREAD
27-
select GUP_GET_PTE_LOW_HIGH if X2TLB
27+
select GUP_GET_PXX_LOW_HIGH if X2TLB
2828
select HAVE_ARCH_AUDITSYSCALL
2929
select HAVE_ARCH_KGDB
3030
select HAVE_ARCH_SECCOMP_FILTER

arch/sh/include/asm/pgtable-3level.h

Lines changed: 8 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -28,9 +28,15 @@
2828
#define pmd_ERROR(e) \
2929
printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
3030

31-
typedef struct { unsigned long long pmd; } pmd_t;
31+
typedef struct {
32+
struct {
33+
unsigned long pmd_low;
34+
unsigned long pmd_high;
35+
};
36+
unsigned long long pmd;
37+
} pmd_t;
3238
#define pmd_val(x) ((x).pmd)
33-
#define __pmd(x) ((pmd_t) { (x) } )
39+
#define __pmd(x) ((pmd_t) { .pmd = (x) } )
3440

3541
static inline pmd_t *pud_pgtable(pud_t pud)
3642
{

arch/um/include/asm/pgtable-3level.h

Lines changed: 0 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -58,11 +58,7 @@
5858
#define pud_populate(mm, pud, pmd) \
5959
set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
6060

61-
#ifdef CONFIG_64BIT
62-
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
63-
#else
6461
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
65-
#endif
6662

6763
static inline int pgd_newpage(pgd_t pgd)
6864
{
@@ -71,11 +67,7 @@ static inline int pgd_newpage(pgd_t pgd)
7167

7268
static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
7369

74-
#ifdef CONFIG_64BIT
75-
#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
76-
#else
7770
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
78-
#endif
7971

8072
static inline void pud_clear (pud_t *pud)
8173
{

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -159,7 +159,7 @@ config X86
159159
select GENERIC_TIME_VSYSCALL
160160
select GENERIC_GETTIMEOFDAY
161161
select GENERIC_VDSO_TIME_NS
162-
select GUP_GET_PTE_LOW_HIGH if X86_PAE
162+
select GUP_GET_PXX_LOW_HIGH if X86_PAE
163163
select HARDIRQS_SW_RESEND
164164
select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
165165
select HAVE_ACPI_APEI if ACPI

arch/x86/include/asm/cmpxchg_32.h

Lines changed: 0 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -7,34 +7,6 @@
77
* you need to test for the feature in boot_cpu_data.
88
*/
99

10-
/*
11-
* CMPXCHG8B only writes to the target if we had the previous
12-
* value in registers, otherwise it acts as a read and gives us the
13-
* "new previous" value. That is why there is a loop. Preloading
14-
* EDX:EAX is a performance optimization: in the common case it means
15-
* we need only one locked operation.
16-
*
17-
* A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
18-
* least an FPU save and/or %cr0.ts manipulation.
19-
*
20-
* cmpxchg8b must be used with the lock prefix here to allow the
21-
* instruction to be executed atomically. We need to have the reader
22-
* side to see the coherent 64bit value.
23-
*/
24-
static inline void set_64bit(volatile u64 *ptr, u64 value)
25-
{
26-
u32 low = value;
27-
u32 high = value >> 32;
28-
u64 prev = *ptr;
29-
30-
asm volatile("\n1:\t"
31-
LOCK_PREFIX "cmpxchg8b %0\n\t"
32-
"jnz 1b"
33-
: "=m" (*ptr), "+A" (prev)
34-
: "b" (low), "c" (high)
35-
: "memory");
36-
}
37-
3810
#ifdef CONFIG_X86_CMPXCHG64
3911
#define arch_cmpxchg64(ptr, o, n) \
4012
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \

0 commit comments

Comments (0)