Commit 9888428

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:

 - ZONE_DMA32 initialisation fix when memblocks fall entirely within the
   first GB (used by ZONE_DMA in 5.5 for Raspberry Pi 4).

 - A couple of ftrace fixes following the FTRACE_WITH_REGS patchset.

 - access_ok() fix for the Tagged Address ABI when called from a kernel
   thread (asynchronous I/O): the kthread does not have the TIF flags of
   the mm owner, so untag the user address unconditionally.

 - KVM compute_layout() called before the alternatives code patching.

 - Minor clean-ups.

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: entry: refine comment of stack overflow check
  arm64: ftrace: fix ifdeffery
  arm64: KVM: Invoke compute_layout() before alternatives are applied
  arm64: Validate tagged addresses in access_ok() called from kernel threads
  arm64: mm: Fix column alignment for UXN in kernel_page_tables
  arm64: insn: consistently handle exit text
  arm64: mm: Fix initialisation of DMA zones on non-NUMA systems
2 parents 76f6777 + de85804 commit 9888428

11 files changed: +49, -29 lines


arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 0 deletions
@@ -91,6 +91,7 @@ alternative_cb_end
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_compute_layout(void);
 
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {

arch/arm64/include/asm/sections.h

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
+extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];

arch/arm64/include/asm/uaccess.h

Lines changed: 6 additions & 1 deletion
@@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 {
 	unsigned long ret, limit = current_thread_info()->addr_limit;
 
+	/*
+	 * Asynchronous I/O running in a kernel thread does not have the
+	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
+	 * the user address before checking.
+	 */
 	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
-	    test_thread_flag(TIF_TAGGED_ADDR))
+	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
 		addr = untagged_addr(addr);
 
 	__chk_user_ptr(addr);
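
For readers unfamiliar with the Tagged Address ABI: on arm64, untagged_addr() boils down to a sign extension from bit 55, which wipes a top-byte tag from a user pointer. A minimal user-space sketch of that arithmetic (the helper name is hypothetical, not kernel code):

#include <stdint.h>

/* Sketch of sign_extend64(addr, 55), the operation behind untagged_addr(). */
static inline uint64_t untag_sketch(uint64_t addr)
{
	/* replicate bit 55 into bits 63:56; for user VAs this clears the tag byte */
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

With PF_KTHREAD added to the condition, an AIO completion running in a kernel worker now strips any tag left by the submitting task before the addr_limit comparison, instead of failing the check.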

arch/arm64/kernel/entry-ftrace.S

Lines changed: 1 addition & 2 deletions
@@ -133,7 +133,6 @@ ENTRY(ftrace_graph_caller)
 	bl	prepare_ftrace_return
 	b	ftrace_common_return
 ENDPROC(ftrace_graph_caller)
-#else
 #endif
 
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -287,6 +286,7 @@ GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
 
 	mcount_exit
 ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -307,7 +307,6 @@ ENTRY(ftrace_graph_caller)
 	mcount_exit
 ENDPROC(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
 ENTRY(ftrace_stub)
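
As the last two hunks suggest, the net effect of the ifdeffery fix is easier to see stripped of the surrounding assembly: in the !CONFIG_DYNAMIC_FTRACE_WITH_REGS branch, the CONFIG_DYNAMIC_FTRACE conditional now closes right after ftrace_caller, and the graph-tracer caller sits in its own conditional next to it rather than nested inside it. A skeleton of that structure, with plain C stubs standing in for the assembly entry points (illustration only):

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_caller(void) { /* patchable entry used by dynamic ftrace */ }
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void ftrace_graph_caller(void) { /* hooks the return address for the graph tracer */ }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */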

arch/arm64/kernel/entry.S

Lines changed: 2 additions & 1 deletion
@@ -76,7 +76,8 @@ alternative_else_nop_endif
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
+	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+	 * should always be zero.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
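
The rewritten comment states an invariant rather than just an alignment. Assuming the VMAP_STACK layout where each task/IRQ stack is THREAD_SIZE bytes placed on a 2 * THREAD_SIZE boundary, every valid SP inside the stack has bit THREAD_SHIFT clear, so the check that follows only has to test that one bit of the recovered SP. A small sketch of the invariant (user-space C with example constants, not kernel code):

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SHIFT	14U			/* example value: 16 KiB stacks */
#define THREAD_SIZE	(1UL << THREAD_SHIFT)

/* assumes a THREAD_SIZE stack starting on a 2 * THREAD_SIZE boundary */
static bool sp_overflowed(uint64_t sp)
{
	/* clear while SP stays on the stack; set once SP runs off the bottom */
	return sp & (1UL << THREAD_SHIFT);
}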

arch/arm64/kernel/insn.c

Lines changed: 18 additions & 4 deletions
@@ -21,6 +21,7 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
 #define AARCH64_INSN_N_BIT	BIT(22)
@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
+static bool is_exit_text(unsigned long addr)
+{
+	/* discarded with init text/data */
+	return system_state < SYSTEM_RUNNING &&
+		addr >= (unsigned long)__exittext_begin &&
+		addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+	return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
 	unsigned long uintaddr = (uintptr_t) addr;
-	bool module = !core_kernel_text(uintaddr);
+	bool image = is_image_text(uintaddr);
 	struct page *page;
 
-	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		page = vmalloc_to_page(addr);
-	else if (!module)
+	if (image)
 		page = phys_to_page(__pa_symbol(addr));
+	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		page = vmalloc_to_page(addr);
 	else
 		return addr;
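In short, the patch widens the "patch through the kernel image mapping" case to cover .exit.text while it still exists (it is freed along with init memory once the system is running), instead of letting it fall through to the module path. A condensed restatement of the resulting decision (illustrative user-space C; the enum and helper names are hypothetical):

#include <stdbool.h>

enum patch_target {
	PATCH_VIA_PHYS_PAGE,	/* kernel image text, including early .exit.text */
	PATCH_VIA_VMALLOC_PAGE,	/* read-only module text, remap its vmalloc page */
	PATCH_IN_PLACE		/* writable module text, write directly */
};

static enum patch_target classify(bool image_text, bool strict_module_rwx)
{
	if (image_text)
		return PATCH_VIA_PHYS_PAGE;
	if (strict_module_rwx)
		return PATCH_VIA_VMALLOC_PAGE;
	return PATCH_IN_PLACE;
}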

arch/arm64/kernel/smp.c

Lines changed: 4 additions & 0 deletions
@@ -31,6 +31,7 @@
 #include <linux/of.h>
 #include <linux/irq_work.h>
 #include <linux/kexec.h>
+#include <linux/kvm_host.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -39,6 +40,7 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -407,6 +409,8 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+		kvm_compute_layout();
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
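
The new call is guarded with IS_ENABLED() rather than #ifdef, so it is always parsed and type-checked (hence the added includes) and is simply folded away by the compiler when CONFIG_KVM_ARM_HOST is off. A minimal sketch of that idiom with hypothetical names (it relies on the constant condition being eliminated, as it is in an optimised kernel build):

#define CONFIG_EXAMPLE_FEATURE 0	/* pretend the option is disabled */

void example_feature_init(void);	/* declared, but never defined in this config */

static void example_boot_hook(void)
{
	if (CONFIG_EXAMPLE_FEATURE)	/* constant 0: the call below is dropped */
		example_feature_init();
}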

arch/arm64/kernel/vmlinux.lds.S

Lines changed: 3 additions & 0 deletions
@@ -158,9 +158,12 @@ SECTIONS
 	__inittext_begin = .;
 
 	INIT_TEXT_SECTION(8)
+
+	__exittext_begin = .;
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
+	__exittext_end = .;
 
 	. = ALIGN(4);
 	.altinstructions : {

arch/arm64/kvm/va_layout.c

Lines changed: 1 addition & 7 deletions
@@ -22,7 +22,7 @@ static u8 tag_lsb;
 static u64 tag_val;
 static u64 va_mask;
 
-static void compute_layout(void)
+__init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 
 	BUG_ON(nr_inst != 5);
 
-	if (!has_vhe() && !va_mask)
-		compute_layout();
-
 	for (i = 0; i < nr_inst; i++) {
 		u32 rd, rn, insn, oinsn;
 
@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 		return;
 	}
 
-	if (!va_mask)
-		compute_layout();
-
 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
 	 */
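
Together with the smp.c hunk above, this is a lazy-to-eager initialisation move: the "compute on first use" guards inside the patching callbacks disappear, and a single __init call earlier in boot populates the values those callbacks rely on. A generic before/after sketch, with illustrative names only:

static unsigned long layout_mask;	/* filled in exactly once during boot */

void compute_layout_once(void)		/* eager: invoked from the boot path */
{
	layout_mask = ~0UL;		/* placeholder for the real computation */
}

static unsigned long patching_callback(unsigned long va)
{
	/* previously: if (!layout_mask) compute_layout_once(); -- now assumed done */
	return va & layout_mask;
}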

arch/arm64/mm/dump.c

Lines changed: 1 addition & 0 deletions
@@ -142,6 +142,7 @@ static const struct prot_bits pte_bits[] = {
 		.mask	= PTE_UXN,
 		.val	= PTE_UXN,
 		.set	= "UXN",
+		.clear	= "   ",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
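Why a blank .clear string fixes the alignment: the page-table dumper walks the prot_bits table and prints .set when the bits match and .clear otherwise, and a NULL .clear prints nothing at all, which is what let the UXN column drift. A condensed sketch of that loop (user-space C with hypothetical names):

#include <stdio.h>

struct prot_bit_sketch {
	unsigned long	mask;
	unsigned long	val;
	const char	*set;
	const char	*clear;
};

static void print_prot(unsigned long prot,
		       const struct prot_bit_sketch *bits, int num)
{
	for (int i = 0; i < num; i++, bits++) {
		const char *s = ((prot & bits->mask) == bits->val) ? bits->set
								   : bits->clear;
		if (s)
			printf(" %s", s);	/* a NULL .clear drops the column entirely */
	}
}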
