
Commit 6498f61

Merge tag 'riscv-for-linus-5.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

 - properly set the memory size, which fixes 32-bit systems

 - allow initrd to load anywhere in memory, rather than restricting it
   to the first 256MiB

 - fix the 'mem=' parameter on 64-bit systems to properly account for
   the maximum supported memory now that the kernel is outside the
   linear map

 - avoid installing mappings into the last 4KiB of memory, which
   conflicts with error values

 - prevent the stack from being freed while it is being walked

 - a handful of fixes to the new copy to/from user routines

* tag 'riscv-for-linus-5.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: __asm_copy_to-from_user: Fix: Typos in comments
  riscv: __asm_copy_to-from_user: Remove unnecessary size check
  riscv: __asm_copy_to-from_user: Fix: fail on RV32
  riscv: __asm_copy_to-from_user: Fix: overrun copy
  riscv: stacktrace: pin the task's stack in get_wchan
  riscv: Make sure the kernel mapping does not overlap with IS_ERR_VALUE
  riscv: Make sure the linear mapping does not use the kernel mapping
  riscv: Fix memory_limit for 64-bit kernel
  RISC-V: load initrd wherever it fits into memory
  riscv: Fix 32-bit RISC-V boot failure
2 parents fc68f42 + ea196c5 commit 6498f61

File tree: 4 files changed, +48 −21 lines


arch/riscv/include/asm/efi.h

Lines changed: 2 additions & 2 deletions
@@ -27,10 +27,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
 #define ARCH_EFI_IRQ_FLAGS_MASK	(SR_IE | SR_SPIE)
 
-/* Load initrd at enough distance from DRAM start */
+/* Load initrd anywhere in system RAM */
 static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
 {
-	return image_addr + SZ_256M;
+	return ULONG_MAX;
 }
 
 #define alloc_screen_info(x...)	(&screen_info)
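
Returning ULONG_MAX removes the placement cap entirely: efi_get_max_initrd_addr() tells the EFI stub the highest physical address at which it may place the initrd. A minimal sketch of how such a cap is consumed, with a hypothetical allocate_pages_below() helper standing in for the real EFI allocation path (this is not the actual libstub code):

/*
 * Hypothetical sketch, not the real libstub code path: an EFI-stub
 * style loader honoring the per-arch initrd cap. allocate_pages_below()
 * stands in for an EFI AllocatePages call with a max-address constraint.
 */
static unsigned long load_initrd_sketch(unsigned long image_addr,
					unsigned long initrd_size)
{
	/* Previously image_addr + SZ_256M; now ULONG_MAX, i.e. no cap. */
	unsigned long max_addr = efi_get_max_initrd_addr(image_addr);

	return allocate_pages_below(max_addr, initrd_size); /* 0 on failure */
}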

arch/riscv/kernel/stacktrace.c

Lines changed: 5 additions & 1 deletion
@@ -132,8 +132,12 @@ unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;
 
-	if (likely(task && task != current && !task_is_running(task)))
+	if (likely(task && task != current && !task_is_running(task))) {
+		if (!try_get_task_stack(task))
+			return 0;
 		walk_stackframe(task, NULL, save_wchan, &pc);
+		put_task_stack(task);
+	}
 	return pc;
 }
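
The new pair brackets the walk because, once a task has exited, its stack can be freed out from under a concurrent walker. The helpers come from <linux/sched/task_stack.h>; paraphrased (not verbatim) from the kernel headers of this era, the CONFIG_THREAD_INFO_IN_TASK variants reduce to roughly:

/*
 * Paraphrased from include/linux/sched/task_stack.h: the stack carries
 * its own refcount, so a walker must pin it first. try_get_task_stack()
 * returns NULL if the stack has already been freed; put_task_stack()
 * drops the reference, allowing the stack to be freed again.
 */
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return refcount_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);

Without CONFIG_THREAD_INFO_IN_TASK the stack shares the task_struct's lifetime and both helpers are effectively no-ops, which is presumably why the missing pin went unnoticed on such configurations.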

arch/riscv/lib/uaccess.S

Lines changed: 13 additions & 14 deletions
@@ -30,23 +30,23 @@ ENTRY(__asm_copy_from_user)
 	 * t0 - end of uncopied dst
 	 */
 	add	t0, a0, a2
-	bgtu	a0, t0, 5f
 
 	/*
 	 * Use byte copy only if too small.
+	 * SZREG holds 4 for RV32 and 8 for RV64
 	 */
-	li	a3, 8*SZREG /* size must be larger than size in word_copy */
+	li	a3, 9*SZREG /* size must be larger than size in word_copy */
 	bltu	a2, a3, .Lbyte_copy_tail
 
 	/*
-	 * Copy first bytes until dst is align to word boundary.
+	 * Copy first bytes until dst is aligned to word boundary.
 	 * a0 - start of dst
 	 * t1 - start of aligned dst
 	 */
 	addi	t1, a0, SZREG-1
 	andi	t1, t1, ~(SZREG-1)
 	/* dst is already aligned, skip */
-	beq	a0, t1, .Lskip_first_bytes
+	beq	a0, t1, .Lskip_align_dst
 1:
 	/* a5 - one byte for copying data */
 	fixup lb	a5, 0(a1), 10f
@@ -55,7 +55,7 @@ ENTRY(__asm_copy_from_user)
 	addi	a0, a0, 1	/* dst */
 	bltu	a0, t1, 1b	/* t1 - start of aligned dst */
 
-.Lskip_first_bytes:
+.Lskip_align_dst:
 	/*
 	 * Now dst is aligned.
 	 * Use shift-copy if src is misaligned.
@@ -72,10 +72,9 @@ ENTRY(__asm_copy_from_user)
 	 *
 	 * a0 - start of aligned dst
 	 * a1 - start of aligned src
-	 * a3 - a1 & mask:(SZREG-1)
 	 * t0 - end of aligned dst
 	 */
-	addi	t0, t0, -(8*SZREG-1) /* not to over run */
+	addi	t0, t0, -(8*SZREG) /* not to over run */
 2:
 	fixup REG_L	a4,        0(a1), 10f
 	fixup REG_L	a5,    SZREG(a1), 10f
@@ -97,7 +96,7 @@ ENTRY(__asm_copy_from_user)
 	addi	a1, a1, 8*SZREG
 	bltu	a0, t0, 2b
 
-	addi	t0, t0, 8*SZREG-1 /* revert to original value */
+	addi	t0, t0, 8*SZREG /* revert to original value */
 	j	.Lbyte_copy_tail
 
 .Lshift_copy:
@@ -107,7 +106,7 @@ ENTRY(__asm_copy_from_user)
 	 * For misaligned copy we still perform aligned word copy, but
 	 * we need to use the value fetched from the previous iteration and
 	 * do some shifts.
-	 * This is safe because reading less than a word size.
+	 * This is safe because reading is less than a word size.
 	 *
 	 * a0 - start of aligned dst
 	 * a1 - start of src
@@ -117,19 +116,19 @@ ENTRY(__asm_copy_from_user)
 	 */
 	/* calculating aligned word boundary for dst */
 	andi	t1, t0, ~(SZREG-1)
-	/* Converting unaligned src to aligned arc */
+	/* Converting unaligned src to aligned src */
 	andi	a1, a1, ~(SZREG-1)
 
 	/*
 	 * Calculate shifts
 	 * t3 - prev shift
 	 * t4 - current shift
 	 */
-	slli	t3, a3, LGREG
+	slli	t3, a3, 3 /* converting bytes in a3 to bits */
 	li	a5, SZREG*8
 	sub	t4, a5, t3
 
-	/* Load the first word to combine with seceond word */
+	/* Load the first word to combine with second word */
 	fixup REG_L	a5, 0(a1), 10f
 
 3:
@@ -161,15 +160,15 @@ ENTRY(__asm_copy_from_user)
 	 * a1 - start of remaining src
 	 * t0 - end of remaining dst
 	 */
-	bgeu	a0, t0, 5f
+	bgeu	a0, t0, .Lout_copy_user	/* check if end of copy */
 4:
 	fixup lb	a5, 0(a1), 10f
 	addi	a1, a1, 1	/* src */
 	fixup sb	a5, 0(a0), 10f
 	addi	a0, a0, 1	/* dst */
 	bltu	a0, t0, 4b	/* t0 - end of dst */
 
-5:
+.Lout_copy_user:
 	/* Disable access to user memory */
 	csrc	CSR_STATUS, t6
 	li	a0, 0
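
Two of these fixes are arithmetic rather than cosmetic. The unrolled word-copy loop consumes 8*SZREG bytes per iteration, and aligning dst can consume up to SZREG-1 bytes first, so 9*SZREG is the smallest threshold that guarantees a full first iteration stays in bounds; likewise, biasing t0 by a full 8*SZREG means the loop only runs while a whole chunk remains, leaving any exact remainder to the byte tail. An illustrative C model of the overall strategy (a sketch only; the real routine is the assembly above, with a fault fixup on every user access):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SZREG sizeof(unsigned long)	/* 4 on RV32, 8 on RV64 */

/* Illustrative model, not the kernel code: byte-copy head to align
 * dst, unrolled word copy while a full chunk remains, byte-copy tail. */
static void copy_model(unsigned char *dst, const unsigned char *src, size_t n)
{
	unsigned char *end = dst + n;

	if (n < 9 * SZREG)		/* too small for the word loop */
		goto byte_tail;

	while ((uintptr_t)dst & (SZREG - 1))	/* align dst */
		*dst++ = *src++;

	while (dst < end - 8 * SZREG) {		/* full chunks only */
		memcpy(dst, src, 8 * SZREG);	/* models 8 REG_L/REG_S pairs */
		dst += 8 * SZREG;
		src += 8 * SZREG;
	}

byte_tail:
	while (dst < end)
		*dst++ = *src++;
}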

arch/riscv/mm/init.c

Lines changed: 28 additions & 4 deletions
@@ -127,10 +127,17 @@ void __init mem_init(void)
 }
 
 /*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
+ * whereas for 64-bit kernel, the end of the virtual address space is occupied
+ * by the modules/BPF/kernel mappings which reduces the available size of the
+ * linear mapping.
+ * Limit the memory size via mem.
 */
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
 static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
 
 static int __init early_mem(char *p)
 {
@@ -152,7 +159,7 @@
 {
 	phys_addr_t vmlinux_end = __pa_symbol(&_end);
 	phys_addr_t vmlinux_start = __pa_symbol(&_start);
-	phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+	phys_addr_t __maybe_unused max_mapped_addr;
 	phys_addr_t dram_end;
 
 #ifdef CONFIG_XIP_KERNEL
@@ -175,14 +182,21 @@
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
 	dram_end = memblock_end_of_DRAM();
+
+#ifndef CONFIG_64BIT
 	/*
 	 * memblock allocator is not aware of the fact that last 4K bytes of
 	 * the addressable memory can not be mapped because of IS_ERR_VALUE
 	 * macro. Make sure that last 4k bytes are not usable by memblock
-	 * if end of dram is equal to maximum addressable memory.
+	 * if end of dram is equal to maximum addressable memory. For 64-bit
+	 * kernel, this problem can't happen here as the end of the virtual
+	 * address space is occupied by the kernel mapping then this check must
+	 * be done in create_kernel_page_table.
 	 */
+	max_mapped_addr = __pa(~(ulong)0);
 	if (max_mapped_addr == (dram_end - 1))
 		memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
 
 	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
 	max_low_pfn = max_pfn = PFN_DOWN(dram_end);
@@ -570,6 +584,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
 	BUG_ON((kernel_map.phys_addr % map_size) != 0);
 
+#ifdef CONFIG_64BIT
+	/*
+	 * The last 4K bytes of the addressable memory can not be mapped because
+	 * of IS_ERR_VALUE macro.
+	 */
+	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
+
 	pt_ops.alloc_pte = alloc_pte_early;
 	pt_ops.get_pte_virt = get_pte_virt_early;
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -709,6 +731,8 @@ static void __init setup_vm_final(void)
 		if (start <= __pa(PAGE_OFFSET) &&
 		    __pa(PAGE_OFFSET) < end)
 			start = __pa(PAGE_OFFSET);
+		if (end >= __pa(PAGE_OFFSET) + memory_limit)
+			end = __pa(PAGE_OFFSET) + memory_limit;
 
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
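
The "last 4K" rule that both the setup_bootmem() and setup_vm() hunks enforce comes from the kernel's error-pointer convention: the top MAX_ERRNO (4095) values of the address space double as encoded error codes, so nothing real may be mapped there. The test is essentially this macro from include/linux/err.h (pre-existing kernel code, not part of this commit):

#define MAX_ERRNO	4095

/* Any value in the last 4 KiB of the address space is treated as an
 * encoded -errno, so a mapping there would alias an error return. */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

As for the new 64-bit memory_limit default, -PAGE_OFFSET measures the span from the base of the linear map to the top of the virtual address space, and SZ_4G is carved out for the kernel/modules/BPF mappings that now sit above the linear map. Worked numbers, assuming the sv39 layout of this era with PAGE_OFFSET = 0xffffffe000000000 (an illustration, not spelled out in this diff):

/*
 *   -PAGE_OFFSET         = 0x0000002000000000 = 128 GiB (linear map span)
 *   -PAGE_OFFSET - SZ_4G = 124 GiB usable for RAM by default
 *
 * early_mem() clamps any larger "mem=" request to this value, and the
 * setup_vm_final() hunk above stops the linear mapping at
 * __pa(PAGE_OFFSET) + memory_limit accordingly.
 */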
