Skip to content

Commit c5f4045

Browse files
committed
Merge tag 'loongarch-fixes-6.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Pull LoongArch fixes from Huacai Chen:

 - For all possible CPUs, set up the logical-physical CPU mapping in
   order to avoid CPU hotplug issues

 - Fix some KASAN bugs

 - Fix AP booting issue in VM mode

 - Some trivial cleanups

* tag 'loongarch-fixes-6.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: Fix AP booting issue in VM mode
  LoongArch: Add WriteCombine shadow mapping in KASAN
  LoongArch: Disable KASAN if PGDIR_SIZE is too large for cpu_vabits
  LoongArch: Make KASAN work with 5-level page-tables
  LoongArch: Define a default value for VM_DATA_DEFAULT_FLAGS
  LoongArch: Fix early_numa_add_cpu() usage for FDT systems
  LoongArch: For all possible CPUs setup logical-physical CPU mapping
2 parents 4b49c0b + 6ce031e commit c5f4045

File tree

6 files changed

+124
-41
lines changed

6 files changed

+124
-41
lines changed

arch/loongarch/include/asm/kasan.h

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
/* 64-bit segment value. */
2626
#define XKPRANGE_UC_SEG (0x8000)
2727
#define XKPRANGE_CC_SEG (0x9000)
28+
#define XKPRANGE_WC_SEG (0xa000)
2829
#define XKVRANGE_VC_SEG (0xffff)
2930

3031
/* Cached */
@@ -41,20 +42,28 @@
4142
#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
4243
#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
4344

45+
/* WriteCombine */
46+
#define XKPRANGE_WC_START WRITECOMBINE_BASE
47+
#define XKPRANGE_WC_SIZE XRANGE_SIZE
48+
#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
49+
#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
50+
#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)
51+
4452
/* VMALLOC (Cached or UnCached) */
4553
#define XKVRANGE_VC_START MODULES_VADDR
4654
#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
47-
#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
55+
#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END
4856
#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
4957
#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
5058

5159
/* KAsan shadow memory start right after vmalloc. */
5260
#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
5361
#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
54-
#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
62+
#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
5563

5664
#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
5765
#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
66+
#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
5867
#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
5968

6069
extern bool kasan_early_stage;

arch/loongarch/include/asm/page.h

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -113,10 +113,7 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
113113
extern int __virt_addr_valid(volatile void *kaddr);
114114
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
115115

116-
#define VM_DATA_DEFAULT_FLAGS \
117-
(VM_READ | VM_WRITE | \
118-
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
119-
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
116+
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
120117

121118
#include <asm-generic/memory_model.h>
122119
#include <asm-generic/getorder.h>

arch/loongarch/kernel/acpi.c

Lines changed: 53 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -58,48 +58,48 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
5858
return ioremap_cache(phys, size);
5959
}
6060

61-
static int cpu_enumerated = 0;
62-
6361
#ifdef CONFIG_SMP
64-
static int set_processor_mask(u32 id, u32 flags)
62+
static int set_processor_mask(u32 id, u32 pass)
6563
{
66-
int nr_cpus;
67-
int cpu, cpuid = id;
68-
69-
if (!cpu_enumerated)
70-
nr_cpus = NR_CPUS;
71-
else
72-
nr_cpus = nr_cpu_ids;
64+
int cpu = -1, cpuid = id;
7365

74-
if (num_processors >= nr_cpus) {
66+
if (num_processors >= NR_CPUS) {
7567
pr_warn(PREFIX "nr_cpus limit of %i reached."
76-
" processor 0x%x ignored.\n", nr_cpus, cpuid);
68+
" processor 0x%x ignored.\n", NR_CPUS, cpuid);
7769

7870
return -ENODEV;
7971

8072
}
73+
8174
if (cpuid == loongson_sysconf.boot_cpu_id)
8275
cpu = 0;
83-
else
84-
cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
85-
86-
if (!cpu_enumerated)
87-
set_cpu_possible(cpu, true);
8876

89-
if (flags & ACPI_MADT_ENABLED) {
77+
switch (pass) {
78+
case 1: /* Pass 1 handle enabled processors */
79+
if (cpu < 0)
80+
cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
9081
num_processors++;
9182
set_cpu_present(cpu, true);
92-
__cpu_number_map[cpuid] = cpu;
93-
__cpu_logical_map[cpu] = cpuid;
94-
} else
83+
break;
84+
case 2: /* Pass 2 handle disabled processors */
85+
if (cpu < 0)
86+
cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
9587
disabled_cpus++;
88+
break;
89+
default:
90+
return cpu;
91+
}
92+
93+
set_cpu_possible(cpu, true);
94+
__cpu_number_map[cpuid] = cpu;
95+
__cpu_logical_map[cpu] = cpuid;
9696

9797
return cpu;
9898
}
9999
#endif
100100

101101
static int __init
102-
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
102+
acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
103103
{
104104
struct acpi_madt_core_pic *processor = NULL;
105105

@@ -110,12 +110,29 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en
110110
acpi_table_print_madt_entry(&header->common);
111111
#ifdef CONFIG_SMP
112112
acpi_core_pic[processor->core_id] = *processor;
113-
set_processor_mask(processor->core_id, processor->flags);
113+
if (processor->flags & ACPI_MADT_ENABLED)
114+
set_processor_mask(processor->core_id, 1);
114115
#endif
115116

116117
return 0;
117118
}
118119

120+
static int __init
121+
acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
122+
{
123+
struct acpi_madt_core_pic *processor = NULL;
124+
125+
processor = (struct acpi_madt_core_pic *)header;
126+
if (BAD_MADT_ENTRY(processor, end))
127+
return -EINVAL;
128+
129+
#ifdef CONFIG_SMP
130+
if (!(processor->flags & ACPI_MADT_ENABLED))
131+
set_processor_mask(processor->core_id, 2);
132+
#endif
133+
134+
return 0;
135+
}
119136
static int __init
120137
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
121138
{
@@ -143,12 +160,14 @@ static void __init acpi_process_madt(void)
143160
}
144161
#endif
145162
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
146-
acpi_parse_processor, MAX_CORE_PIC);
163+
acpi_parse_p1_processor, MAX_CORE_PIC);
164+
165+
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
166+
acpi_parse_p2_processor, MAX_CORE_PIC);
147167

148168
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
149169
acpi_parse_eio_master, MAX_IO_PICS);
150170

151-
cpu_enumerated = 1;
152171
loongson_sysconf.nr_cpus = num_processors;
153172
}
154173

@@ -310,6 +329,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
310329
int nid;
311330

312331
nid = acpi_get_node(handle);
332+
333+
if (nid != NUMA_NO_NODE)
334+
nid = early_cpu_to_node(cpu);
335+
313336
if (nid != NUMA_NO_NODE) {
314337
set_cpuid_to_node(physid, nid);
315338
node_set(nid, numa_nodes_parsed);
@@ -324,12 +347,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
324347
{
325348
int cpu;
326349

327-
cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
328-
if (cpu < 0) {
350+
cpu = cpu_number_map(physid);
351+
if (cpu < 0 || cpu >= nr_cpu_ids) {
329352
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
330-
return cpu;
353+
return -ERANGE;
331354
}
332355

356+
num_processors++;
357+
set_cpu_present(cpu, true);
333358
acpi_map_cpu2node(handle, cpu, physid);
334359

335360
*pcpu = cpu;

arch/loongarch/kernel/paravirt.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,11 +51,18 @@ static u64 paravt_steal_clock(int cpu)
5151
}
5252

5353
#ifdef CONFIG_SMP
54+
static struct smp_ops native_ops;
55+
5456
static void pv_send_ipi_single(int cpu, unsigned int action)
5557
{
5658
int min, old;
5759
irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
5860

61+
if (unlikely(action == ACTION_BOOT_CPU)) {
62+
native_ops.send_ipi_single(cpu, action);
63+
return;
64+
}
65+
5966
old = atomic_fetch_or(BIT(action), &info->message);
6067
if (old)
6168
return;
@@ -75,6 +82,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
7582
if (cpumask_empty(mask))
7683
return;
7784

85+
if (unlikely(action == ACTION_BOOT_CPU)) {
86+
native_ops.send_ipi_mask(mask, action);
87+
return;
88+
}
89+
7890
action = BIT(action);
7991
for_each_cpu(i, mask) {
8092
info = &per_cpu(irq_stat, i);
@@ -147,6 +159,8 @@ static void pv_init_ipi(void)
147159
{
148160
int r, swi;
149161

162+
/* Init native ipi irq for ACTION_BOOT_CPU */
163+
native_ops.init_ipi();
150164
swi = get_percpu_irq(INT_SWI0);
151165
if (swi < 0)
152166
panic("SWI0 IRQ mapping failed\n");
@@ -193,6 +207,7 @@ int __init pv_ipi_init(void)
193207
return 0;
194208

195209
#ifdef CONFIG_SMP
210+
native_ops = mp_ops;
196211
mp_ops.init_ipi = pv_init_ipi;
197212
mp_ops.send_ipi_single = pv_send_ipi_single;
198213
mp_ops.send_ipi_mask = pv_send_ipi_mask;

arch/loongarch/kernel/smp.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -302,7 +302,7 @@ static void __init fdt_smp_setup(void)
302302
__cpu_number_map[cpuid] = cpu;
303303
__cpu_logical_map[cpu] = cpuid;
304304

305-
early_numa_add_cpu(cpu, 0);
305+
early_numa_add_cpu(cpuid, 0);
306306
set_cpuid_to_node(cpuid, 0);
307307
}
308308

@@ -331,11 +331,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus)
331331
int i = 0;
332332

333333
parse_acpi_topology();
334+
cpu_data[0].global_id = cpu_logical_map(0);
334335

335336
for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
336337
set_cpu_present(i, true);
337338
csr_mail_send(0, __cpu_logical_map[i], 0);
338-
cpu_data[i].global_id = __cpu_logical_map[i];
339339
}
340340

341341
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -380,6 +380,7 @@ void loongson_init_secondary(void)
380380
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
381381
cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
382382
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
383+
cpu_data[cpu].global_id = cpu_logical_map(cpu);
383384
}
384385

385386
void loongson_smp_finish(void)

arch/loongarch/mm/kasan_init.c

Lines changed: 41 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,13 @@
1313

1414
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
1515

16+
#ifdef __PAGETABLE_P4D_FOLDED
17+
#define __pgd_none(early, pgd) (0)
18+
#else
19+
#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
20+
(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
21+
#endif
22+
1623
#ifdef __PAGETABLE_PUD_FOLDED
1724
#define __p4d_none(early, p4d) (0)
1825
#else
@@ -55,6 +62,9 @@ void *kasan_mem_to_shadow(const void *addr)
5562
case XKPRANGE_UC_SEG:
5663
offset = XKPRANGE_UC_SHADOW_OFFSET;
5764
break;
65+
case XKPRANGE_WC_SEG:
66+
offset = XKPRANGE_WC_SHADOW_OFFSET;
67+
break;
5868
case XKVRANGE_VC_SEG:
5969
offset = XKVRANGE_VC_SHADOW_OFFSET;
6070
break;
@@ -79,6 +89,8 @@ const void *kasan_shadow_to_mem(const void *shadow_addr)
7989

8090
if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
8191
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
92+
else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
93+
return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
8294
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
8395
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
8496
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
@@ -142,6 +154,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
142154
return pud_offset(p4dp, addr);
143155
}
144156

157+
static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
158+
{
159+
if (__pgd_none(early, pgdp_get(pgdp))) {
160+
phys_addr_t p4d_phys = early ?
161+
__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
162+
if (!early)
163+
memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
164+
pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
165+
}
166+
167+
return p4d_offset(pgdp, addr);
168+
}
169+
145170
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
146171
unsigned long end, int node, bool early)
147172
{
@@ -178,19 +203,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
178203
do {
179204
next = pud_addr_end(addr, end);
180205
kasan_pmd_populate(pudp, addr, next, node, early);
181-
} while (pudp++, addr = next, addr != end);
206+
} while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
182207
}
183208

184209
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
185210
unsigned long end, int node, bool early)
186211
{
187212
unsigned long next;
188-
p4d_t *p4dp = p4d_offset(pgdp, addr);
213+
p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
189214

190215
do {
191216
next = p4d_addr_end(addr, end);
192217
kasan_pud_populate(p4dp, addr, next, node, early);
193-
} while (p4dp++, addr = next, addr != end);
218+
} while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
194219
}
195220

196221
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -218,7 +243,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
218243
asmlinkage void __init kasan_early_init(void)
219244
{
220245
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
221-
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
246+
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
222247
}
223248

224249
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -233,7 +258,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end)
233258
* swapper_pg_dir. pgd_clear() can't be used
234259
* here because it's nop on 2,3-level pagetable setups
235260
*/
236-
for (; start < end; start += PGDIR_SIZE)
261+
for (; start < end; start = pgd_addr_end(start, end))
237262
kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
238263
}
239264

@@ -242,6 +267,17 @@ void __init kasan_init(void)
242267
u64 i;
243268
phys_addr_t pa_start, pa_end;
244269

270+
/*
271+
* If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
272+
* overflow UINTPTR_MAX and then looks like a user space address.
273+
* For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
274+
* large for Loongson-2K series whose cpu_vabits = 39.
275+
*/
276+
if (KASAN_SHADOW_END < vm_map_base) {
277+
pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
278+
return;
279+
}
280+
245281
/*
246282
* PGD was populated as invalid_pmd_table or invalid_pud_table
247283
* in pagetable_init() which depends on how many levels of page

0 commit comments

Comments
 (0)