Skip to content

Commit a8356cd

Browse files
committed
Merge tag 'loongarch-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Pull LoongArch updates from Huacai Chen:

 - Make -mstrict-align configurable

 - Add kernel relocation and KASLR support

 - Add single kernel image implementation for kdump

 - Add hardware breakpoints/watchpoints support

 - Add kprobes/kretprobes/kprobes_on_ftrace support

 - Add LoongArch support for some selftests

* tag 'loongarch-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson: (23 commits)
  selftests/ftrace: Add LoongArch kprobe args string tests support
  selftests/seccomp: Add LoongArch selftesting support
  tools: Add LoongArch build infrastructure
  samples/kprobes: Add LoongArch support
  LoongArch: Mark some assembler symbols as non-kprobe-able
  LoongArch: Add kprobes on ftrace support
  LoongArch: Add kretprobes support
  LoongArch: Add kprobes support
  LoongArch: Simulate branch and PC* instructions
  LoongArch: ptrace: Add hardware single step support
  LoongArch: ptrace: Add function argument access API
  LoongArch: ptrace: Expose hardware breakpoints to debuggers
  LoongArch: Add hardware breakpoints/watchpoints support
  LoongArch: kdump: Add crashkernel=YM handling
  LoongArch: kdump: Add single kernel image implementation
  LoongArch: Add support for kernel address space layout randomization (KASLR)
  LoongArch: Add support for kernel relocation
  LoongArch: Add la_abs macro implementation
  LoongArch: Add JUMP_VIRT_ADDR macro implementation to avoid using la.abs
  LoongArch: Use la.pcrel instead of la.abs when it's trivially possible
  ...
2 parents 64e8516 + 8883bf8 commit a8356cd

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

47 files changed

+2665
-130
lines changed

arch/loongarch/Kconfig

Lines changed: 56 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -94,15 +94,21 @@ config LOONGARCH
9494
select HAVE_DYNAMIC_FTRACE_WITH_ARGS
9595
select HAVE_DYNAMIC_FTRACE_WITH_REGS
9696
select HAVE_EBPF_JIT
97+
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
9798
select HAVE_EXIT_THREAD
9899
select HAVE_FAST_GUP
99100
select HAVE_FTRACE_MCOUNT_RECORD
101+
select HAVE_FUNCTION_ARG_ACCESS_API
100102
select HAVE_FUNCTION_GRAPH_TRACER
101103
select HAVE_FUNCTION_TRACER
102104
select HAVE_GENERIC_VDSO
105+
select HAVE_HW_BREAKPOINT if PERF_EVENTS
103106
select HAVE_IOREMAP_PROT
104107
select HAVE_IRQ_EXIT_ON_IRQ_STACK
105108
select HAVE_IRQ_TIME_ACCOUNTING
109+
select HAVE_KPROBES
110+
select HAVE_KPROBES_ON_FTRACE
111+
select HAVE_KRETPROBES
106112
select HAVE_MOD_ARCH_SPECIFIC
107113
select HAVE_NMI
108114
select HAVE_PCI
@@ -441,6 +447,24 @@ config ARCH_IOREMAP
441447
protection support. However, you can enable LoongArch DMW-based
442448
ioremap() for better performance.
443449

450+
config ARCH_STRICT_ALIGN
451+
bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
452+
default y
453+
help
454+
Not all LoongArch cores support h/w unaligned access, we can use
455+
-mstrict-align build parameter to prevent unaligned accesses.
456+
457+
CPUs with h/w unaligned access support:
458+
Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
459+
460+
CPUs without h/w unaligned access support:
461+
Loongson-2K500/2K1000.
462+
463+
This option is enabled by default to make the kernel be able to run
464+
on all LoongArch systems. But you can disable it manually if you want
465+
to run kernel only on systems with h/w unaligned access support in
466+
order to optimise for performance.
467+
444468
config KEXEC
445469
bool "Kexec system call"
446470
select KEXEC_CORE
@@ -454,6 +478,7 @@ config KEXEC
454478

455479
config CRASH_DUMP
456480
bool "Build kdump crash kernel"
481+
select RELOCATABLE
457482
help
458483
Generate crash dump after being started by kexec. This should
459484
be normally only set in special crash dump kernels which are
@@ -463,16 +488,38 @@ config CRASH_DUMP
463488

464489
For more details see Documentation/admin-guide/kdump/kdump.rst
465490

466-
config PHYSICAL_START
467-
hex "Physical address where the kernel is loaded"
468-
default "0x90000000a0000000"
469-
depends on CRASH_DUMP
491+
config RELOCATABLE
492+
bool "Relocatable kernel"
470493
help
471-
This gives the XKPRANGE address where the kernel is loaded.
472-
If you plan to use kernel for capturing the crash dump change
473-
this value to start of the reserved region (the "X" value as
474-
specified in the "crashkernel=YM@XM" command line boot parameter
475-
passed to the panic-ed kernel).
494+
This builds the kernel as a Position Independent Executable (PIE),
495+
which retains all relocation metadata required, so as to relocate
496+
the kernel binary at runtime to a different virtual address from
497+
its link address.
498+
499+
config RANDOMIZE_BASE
500+
bool "Randomize the address of the kernel (KASLR)"
501+
depends on RELOCATABLE
502+
help
503+
Randomizes the physical and virtual address at which the
504+
kernel image is loaded, as a security feature that
505+
deters exploit attempts relying on knowledge of the location
506+
of kernel internals.
507+
508+
The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
509+
510+
If unsure, say N.
511+
512+
config RANDOMIZE_BASE_MAX_OFFSET
513+
hex "Maximum KASLR offset" if EXPERT
514+
depends on RANDOMIZE_BASE
515+
range 0x0 0x10000000
516+
default "0x01000000"
517+
help
518+
When KASLR is active, this provides the maximum offset that will
519+
be applied to the kernel image. It should be set according to the
520+
amount of physical RAM available in the target system.
521+
522+
This is limited by the size of the lower address memory, 256MB.
476523

477524
config SECCOMP
478525
bool "Enable seccomp to safely compute untrusted bytecode"

arch/loongarch/Makefile

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -71,14 +71,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs
7171
KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs
7272
endif
7373

74+
ifeq ($(CONFIG_RELOCATABLE),y)
75+
KBUILD_CFLAGS_KERNEL += -fPIE
76+
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
77+
endif
78+
7479
cflags-y += -ffreestanding
7580
cflags-y += $(call cc-option, -mno-check-zero-division)
7681

77-
ifndef CONFIG_PHYSICAL_START
7882
load-y = 0x9000000000200000
79-
else
80-
load-y = $(CONFIG_PHYSICAL_START)
81-
endif
8283
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
8384

8485
drivers-$(CONFIG_PCI) += arch/loongarch/pci/
@@ -91,10 +92,15 @@ KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
9192
# instead of .eh_frame so we don't discard them.
9293
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
9394

95+
ifdef CONFIG_ARCH_STRICT_ALIGN
9496
# Don't emit unaligned accesses.
9597
# Not all LoongArch cores support unaligned access, and as kernel we can't
9698
# rely on others to provide emulation for these accesses.
9799
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
100+
else
101+
# Optimise for performance on hardware supports unaligned access.
102+
KBUILD_CFLAGS += $(call cc-option,-mno-strict-align)
103+
endif
98104

99105
KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
100106

arch/loongarch/configs/loongson3_defconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ CONFIG_HOTPLUG_CPU=y
4848
CONFIG_NR_CPUS=64
4949
CONFIG_NUMA=y
5050
CONFIG_KEXEC=y
51+
CONFIG_CRASH_DUMP=y
5152
CONFIG_SUSPEND=y
5253
CONFIG_HIBERNATION=y
5354
CONFIG_ACPI=y

arch/loongarch/include/asm/addrspace.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,4 +125,6 @@ extern unsigned long vm_map_base;
125125
#define ISA_IOSIZE SZ_16K
126126
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
127127

128+
#define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS)
129+
128130
#endif /* _ASM_ADDRSPACE_H */

arch/loongarch/include/asm/asm.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,4 +188,14 @@
188188
#define PTRLOG 3
189189
#endif
190190

191+
/* Annotate a function as being unsuitable for kprobes. */
192+
#ifdef CONFIG_KPROBES
193+
#define _ASM_NOKPROBE(name) \
194+
.pushsection "_kprobe_blacklist", "aw"; \
195+
.quad name; \
196+
.popsection
197+
#else
198+
#define _ASM_NOKPROBE(name)
199+
#endif
200+
191201
#endif /* __ASM_ASM_H */

arch/loongarch/include/asm/asmmacro.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -274,4 +274,21 @@
274274
nor \dst, \src, zero
275275
.endm
276276

277+
.macro la_abs reg, sym
278+
#ifndef CONFIG_RELOCATABLE
279+
la.abs \reg, \sym
280+
#else
281+
766:
282+
lu12i.w \reg, 0
283+
ori \reg, \reg, 0
284+
lu32i.d \reg, 0
285+
lu52i.d \reg, \reg, 0
286+
.pushsection ".la_abs", "aw", %progbits
287+
768:
288+
.dword 768b-766b
289+
.dword \sym
290+
.popsection
291+
#endif
292+
.endm
293+
277294
#endif /* _ASM_ASMMACRO_H */

arch/loongarch/include/asm/cpu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636

3737
#define PRID_SERIES_LA132 0x8000 /* Loongson 32bit */
3838
#define PRID_SERIES_LA264 0xa000 /* Loongson 64bit, 2-issue */
39-
#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit3-issue */
39+
#define PRID_SERIES_LA364 0xb000 /* Loongson 64bit, 3-issue */
4040
#define PRID_SERIES_LA464 0xc000 /* Loongson 64bit, 4-issue */
4141
#define PRID_SERIES_LA664 0xd000 /* Loongson 64bit, 6-issue */
4242

arch/loongarch/include/asm/hw_breakpoint.h (new file; filename header lost in extraction — inferred from the __ASM_HW_BREAKPOINT_H guard below)

Lines changed: 145 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,145 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright (C) 2022-2023 Loongson Technology Corporation Limited
4+
*/
5+
#ifndef __ASM_HW_BREAKPOINT_H
6+
#define __ASM_HW_BREAKPOINT_H
7+
8+
#include <asm/loongarch.h>
9+
10+
#ifdef __KERNEL__
11+
12+
/* Breakpoint */
13+
#define LOONGARCH_BREAKPOINT_EXECUTE (0 << 0)
14+
15+
/* Watchpoints */
16+
#define LOONGARCH_BREAKPOINT_LOAD (1 << 0)
17+
#define LOONGARCH_BREAKPOINT_STORE (1 << 1)
18+
19+
struct arch_hw_breakpoint_ctrl {
20+
u32 __reserved : 28,
21+
len : 2,
22+
type : 2;
23+
};
24+
25+
struct arch_hw_breakpoint {
26+
u64 address;
27+
u64 mask;
28+
struct arch_hw_breakpoint_ctrl ctrl;
29+
};
30+
31+
/* Lengths */
32+
#define LOONGARCH_BREAKPOINT_LEN_1 0b11
33+
#define LOONGARCH_BREAKPOINT_LEN_2 0b10
34+
#define LOONGARCH_BREAKPOINT_LEN_4 0b01
35+
#define LOONGARCH_BREAKPOINT_LEN_8 0b00
36+
37+
/*
38+
* Limits.
39+
* Changing these will require modifications to the register accessors.
40+
*/
41+
#define LOONGARCH_MAX_BRP 8
42+
#define LOONGARCH_MAX_WRP 8
43+
44+
/* Virtual debug register bases. */
45+
#define CSR_CFG_ADDR 0
46+
#define CSR_CFG_MASK (CSR_CFG_ADDR + LOONGARCH_MAX_BRP)
47+
#define CSR_CFG_CTRL (CSR_CFG_MASK + LOONGARCH_MAX_BRP)
48+
#define CSR_CFG_ASID (CSR_CFG_CTRL + LOONGARCH_MAX_WRP)
49+
50+
/* Debug register names. */
51+
#define LOONGARCH_CSR_NAME_ADDR ADDR
52+
#define LOONGARCH_CSR_NAME_MASK MASK
53+
#define LOONGARCH_CSR_NAME_CTRL CTRL
54+
#define LOONGARCH_CSR_NAME_ASID ASID
55+
56+
/* Accessor macros for the debug registers. */
57+
#define LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL) \
58+
do { \
59+
if (T == 0) \
60+
VAL = csr_read64(LOONGARCH_CSR_##IB##N##REG); \
61+
else \
62+
VAL = csr_read64(LOONGARCH_CSR_##DB##N##REG); \
63+
} while (0)
64+
65+
#define LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL) \
66+
do { \
67+
if (T == 0) \
68+
csr_write64(VAL, LOONGARCH_CSR_##IB##N##REG); \
69+
else \
70+
csr_write64(VAL, LOONGARCH_CSR_##DB##N##REG); \
71+
} while (0)
72+
73+
/* Exact number */
74+
#define CSR_FWPC_NUM 0x3f
75+
#define CSR_MWPC_NUM 0x3f
76+
77+
#define CTRL_PLV_ENABLE 0x1e
78+
79+
#define MWPnCFG3_LoadEn 8
80+
#define MWPnCFG3_StoreEn 9
81+
82+
#define MWPnCFG3_Type_mask 0x3
83+
#define MWPnCFG3_Size_mask 0x3
84+
85+
static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
86+
{
87+
return (ctrl.len << 10) | (ctrl.type << 8);
88+
}
89+
90+
static inline void decode_ctrl_reg(u32 reg, struct arch_hw_breakpoint_ctrl *ctrl)
91+
{
92+
reg >>= 8;
93+
ctrl->type = reg & MWPnCFG3_Type_mask;
94+
reg >>= 2;
95+
ctrl->len = reg & MWPnCFG3_Size_mask;
96+
}
97+
98+
struct task_struct;
99+
struct notifier_block;
100+
struct perf_event;
101+
struct perf_event_attr;
102+
103+
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
104+
int *gen_len, int *gen_type, int *offset);
105+
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
106+
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
107+
const struct perf_event_attr *attr,
108+
struct arch_hw_breakpoint *hw);
109+
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
110+
unsigned long val, void *data);
111+
112+
extern int arch_install_hw_breakpoint(struct perf_event *bp);
113+
extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
114+
extern int hw_breakpoint_slots(int type);
115+
extern void hw_breakpoint_pmu_read(struct perf_event *bp);
116+
117+
void breakpoint_handler(struct pt_regs *regs);
118+
void watchpoint_handler(struct pt_regs *regs);
119+
120+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
121+
extern void ptrace_hw_copy_thread(struct task_struct *task);
122+
extern void hw_breakpoint_thread_switch(struct task_struct *next);
123+
#else
124+
static inline void ptrace_hw_copy_thread(struct task_struct *task)
125+
{
126+
}
127+
static inline void hw_breakpoint_thread_switch(struct task_struct *next)
128+
{
129+
}
130+
#endif
131+
132+
/* Determine number of BRP registers available. */
133+
static inline int get_num_brps(void)
134+
{
135+
return csr_read64(LOONGARCH_CSR_FWPC) & CSR_FWPC_NUM;
136+
}
137+
138+
/* Determine number of WRP registers available. */
139+
static inline int get_num_wrps(void)
140+
{
141+
return csr_read64(LOONGARCH_CSR_MWPC) & CSR_MWPC_NUM;
142+
}
143+
144+
#endif /* __KERNEL__ */
145+
#endif /* __ASM_BREAKPOINT_H */

0 commit comments

Comments
 (0)