Skip to content

Commit 9d84ad4

Browse files
committed
Merge branch 'for-next/trivial' into for-next/core
* for-next/trivial: arm64: alternatives: add __init/__initconst to some functions/variables arm64/asm: Remove unused assembler DAIF save/restore macros arm64/kpti: Move DAIF masking to C code Revert "arm64/mm: Drop redundant BUG_ON(!pgtable_alloc)" arm64/mm: Drop unused restore_ttbr1 arm64: alternatives: make apply_alternatives_vdso() static arm64/mm: Drop idmap_pg_end[] declaration arm64/mm: Drop redundant BUG_ON(!pgtable_alloc) arm64: make is_ttbrX_addr() noinstr-safe arm64/signal: Document our convention for choosing magic numbers arm64: atomics: lse: remove stale dependency on JUMP_LABEL arm64: paravirt: remove conduit check in has_pv_steal_clock arm64: entry: Fix typo arm64/booting: Add missing colon to FA64 entry arm64/mm: Drop ARM64_KERNEL_USES_PMD_MAPS arm64/asm: Remove unused enable_da macro
2 parents 70b1c62 + 67bc5b2 commit 9d84ad4

File tree

14 files changed

+32
-59
lines changed

14 files changed

+32
-59
lines changed

Documentation/arm64/booting.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -349,7 +349,7 @@ Before jumping into the kernel, the following conditions must be met:
349349

350350
- HWFGWTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
351351

352 -    For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64)
352 +    For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64):
353353

354354
- If EL3 is present:
355355

arch/arm64/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1737,7 +1737,6 @@ config ARM64_LSE_ATOMICS
17371737

17381738
config ARM64_USE_LSE_ATOMICS
17391739
bool "Atomic instructions"
1740 -    depends on JUMP_LABEL
17411740
default y
17421741
help
17431742
As part of the Large System Extensions, ARMv8.1 introduces new

arch/arm64/include/asm/assembler.h

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -34,11 +34,6 @@
3434
wx\n .req w\n
3535
.endr
3636

37-
.macro save_and_disable_daif, flags
38-
mrs \flags, daif
39-
msr daifset, #0xf
40-
.endm
41-
4237
.macro disable_daif
4338
msr daifset, #0xf
4439
.endm
@@ -47,15 +42,6 @@
4742
msr daifclr, #0xf
4843
.endm
4944

50-
.macro restore_daif, flags:req
51-
msr daif, \flags
52-
.endm
53-
54-
/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
55-
.macro enable_da
56-
msr daifclr, #(8 | 4)
57-
.endm
58-
5945
/*
6046
* Save/restore interrupts.
6147
*/
@@ -619,17 +605,6 @@ alternative_endif
619605
#endif
620606
.endm
621607

622-
/*
623-
* Perform the reverse of offset_ttbr1.
624-
* bic is used as it can cover the immediate value and, in future, won't need
625-
* to be nop'ed out when dealing with 52-bit kernel VAs.
626-
*/
627-
.macro restore_ttbr1, ttbr
628-
#ifdef CONFIG_ARM64_VA_BITS_52
629-
bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
630-
#endif
631-
.endm
632-
633608
/*
634609
* Arrange a physical address in a TTBR register, taking care of 52-bit
635610
* addresses.

arch/arm64/include/asm/kernel-pgtable.h

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,6 @@
1818
* with 4K (section size = 2M) but not with 16K (section size = 32M) or
1919
* 64K (section size = 512M).
2020
*/
21-
#ifdef CONFIG_ARM64_4K_PAGES
22-
#define ARM64_KERNEL_USES_PMD_MAPS 1
23-
#else
24-
#define ARM64_KERNEL_USES_PMD_MAPS 0
25-
#endif
2621

2722
/*
2823
* The idmap and swapper page tables need some space reserved in the kernel
@@ -34,7 +29,7 @@
3429
* VA range, so pages required to map highest possible PA are reserved in all
3530
* cases.
3631
*/
37-
#if ARM64_KERNEL_USES_PMD_MAPS
32+
#ifdef CONFIG_ARM64_4K_PAGES
3833
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
3934
#else
4035
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
@@ -96,7 +91,7 @@
9691
#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
9792

9893
/* Initial memory map size */
99-
#if ARM64_KERNEL_USES_PMD_MAPS
94+
#ifdef CONFIG_ARM64_4K_PAGES
10095
#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
10196
#define SWAPPER_BLOCK_SIZE PMD_SIZE
10297
#define SWAPPER_TABLE_SHIFT PUD_SHIFT
@@ -112,7 +107,7 @@
112107
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
113108
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
114109

115-
#if ARM64_KERNEL_USES_PMD_MAPS
110+
#ifdef CONFIG_ARM64_4K_PAGES
116111
#define SWAPPER_RW_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
117112
#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
118113
#else

arch/arm64/include/asm/lse.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010

1111
#include <linux/compiler_types.h>
1212
#include <linux/export.h>
13-
#include <linux/jump_label.h>
1413
#include <linux/stringify.h>
1514
#include <asm/alternative.h>
1615
#include <asm/alternative-macros.h>

arch/arm64/include/asm/mmu_context.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818

1919
#include <asm/cacheflush.h>
2020
#include <asm/cpufeature.h>
21+
#include <asm/daifflags.h>
2122
#include <asm/proc-fns.h>
2223
#include <asm-generic/mm_hooks.h>
2324
#include <asm/cputype.h>
@@ -152,6 +153,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
152153
typedef void (ttbr_replace_func)(phys_addr_t);
153154
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
154155
ttbr_replace_func *replace_phys;
156+
unsigned long daif;
155157

156158
/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
157159
phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -171,7 +173,15 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
171173
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
172174

173175
__cpu_install_idmap(idmap);
176+
177+
/*
178+
* We really don't want to take *any* exceptions while TTBR1 is
179+
* in the process of being replaced so mask everything.
180+
*/
181+
daif = local_daif_save();
174182
replace_phys(ttbr1);
183+
local_daif_restore(daif);
184+
175185
cpu_uninstall_idmap();
176186
}
177187

arch/arm64/include/asm/pgtable.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -609,7 +609,6 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD];
609609
extern pgd_t init_pg_end[];
610610
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
611611
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
612-
extern pgd_t idmap_pg_end[];
613612
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
614613
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
615614

arch/arm64/include/asm/processor.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -315,13 +315,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
315315
}
316316
#endif
317317

318-
static inline bool is_ttbr0_addr(unsigned long addr)
318+
static __always_inline bool is_ttbr0_addr(unsigned long addr)
319319
{
320320
/* entry assembly clears tags for TTBR0 addrs */
321321
return addr < TASK_SIZE;
322322
}
323323

324-
static inline bool is_ttbr1_addr(unsigned long addr)
324+
static __always_inline bool is_ttbr1_addr(unsigned long addr)
325325
{
326326
/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
327327
return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;

arch/arm64/include/uapi/asm/sigcontext.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,10 @@ struct sigcontext {
6262
* context. Such structures must be placed after the rt_sigframe on the stack
6363
* and be 16-byte aligned. The last structure must be a dummy one with the
6464
* magic and size set to 0.
65+
*
66+
* Note that the values allocated for use as magic should be chosen to
67+
* be meaningful in ASCII to aid manual parsing, ZA doesn't follow this
68+
* convention due to oversight but it should be observed for future additions.
6569
*/
6670
struct _aarch64_ctx {
6771
__u32 magic;

arch/arm64/kernel/alternative.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ static void __apply_alternatives(const struct alt_region *region,
196196
}
197197
}
198198

199-
void apply_alternatives_vdso(void)
199+
static void __init apply_alternatives_vdso(void)
200200
{
201201
struct alt_region region;
202202
const struct elf64_hdr *hdr;
@@ -220,7 +220,7 @@ void apply_alternatives_vdso(void)
220220
__apply_alternatives(&region, false, &all_capabilities[0]);
221221
}
222222

223-
static const struct alt_region kernel_alternatives = {
223+
static const struct alt_region kernel_alternatives __initconst = {
224224
.begin = (struct alt_instr *)__alt_instructions,
225225
.end = (struct alt_instr *)__alt_instructions_end,
226226
};
@@ -229,7 +229,7 @@ static const struct alt_region kernel_alternatives = {
229229
* We might be patching the stop_machine state machine, so implement a
230230
* really simple polling protocol here.
231231
*/
232-
static int __apply_alternatives_multi_stop(void *unused)
232+
static int __init __apply_alternatives_multi_stop(void *unused)
233233
{
234234
/* We always have a CPU 0 at this point (__init) */
235235
if (smp_processor_id()) {

0 commit comments

Comments (0)