Skip to content

Commit c6cc979

Browse files
committed
Merge tag 'riscv-for-linus-6.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:

 - A fix to avoid pt_regs aliasing with idle thread stacks on secondary harts.

 - HAVE_ARCH_HUGE_VMAP is enabled on XIP kernels, which fixes boot issues on XIP systems with huge pages.

 - An update to the uABI documentation clarifying that only scalar misaligned accesses were grandfathered in as supported, as the vector extension did not exist at the time the uABI was frozen.

 - A fix for the recently-added byte/half atomics to avoid losing the fully ordered decorations.

* tag 'riscv-for-linus-6.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Fix fully ordered LR/SC xchg[8|16]() implementations
  Documentation: RISC-V: uabi: Only scalar misaligned loads are supported
  riscv: enable HAVE_ARCH_HUGE_VMAP for XIP kernel
  riscv: prevent pt_regs corruption for secondary idle threads
2 parents ff9bce3 + 1d84afa commit c6cc979

File tree

5 files changed

+18
-15
lines changed

5 files changed

+18
-15
lines changed

Documentation/arch/riscv/uabi.rst

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -65,4 +65,6 @@ the extension, or may have deliberately removed it from the listing.
6565
Misaligned accesses
6666
-------------------
6767

68-
Misaligned accesses are supported in userspace, but they may perform poorly.
68+
Misaligned scalar accesses are supported in userspace, but they may perform
69+
poorly. Misaligned vector accesses are only supported if the Zicclsm extension
70+
is supported.

arch/riscv/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -106,7 +106,7 @@ config RISCV
106106
select HAS_IOPORT if MMU
107107
select HAVE_ARCH_AUDITSYSCALL
108108
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
109-
select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
109+
select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
110110
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
111111
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
112112
select HAVE_ARCH_KASAN if MMU && 64BIT

arch/riscv/include/asm/cmpxchg.h

Lines changed: 12 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@
1010

1111
#include <asm/fence.h>
1212

13-
#define __arch_xchg_masked(prepend, append, r, p, n) \
13+
#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
1414
({ \
1515
u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
1616
ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
@@ -25,7 +25,7 @@
2525
"0: lr.w %0, %2\n" \
2626
" and %1, %0, %z4\n" \
2727
" or %1, %1, %z3\n" \
28-
" sc.w %1, %1, %2\n" \
28+
" sc.w" sc_sfx " %1, %1, %2\n" \
2929
" bnez %1, 0b\n" \
3030
append \
3131
: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
@@ -46,7 +46,8 @@
4646
: "memory"); \
4747
})
4848

49-
#define _arch_xchg(ptr, new, sfx, prepend, append) \
49+
#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \
50+
sc_append, swap_append) \
5051
({ \
5152
__typeof__(ptr) __ptr = (ptr); \
5253
__typeof__(*(__ptr)) __new = (new); \
@@ -55,15 +56,15 @@
5556
switch (sizeof(*__ptr)) { \
5657
case 1: \
5758
case 2: \
58-
__arch_xchg_masked(prepend, append, \
59+
__arch_xchg_masked(sc_sfx, prepend, sc_append, \
5960
__ret, __ptr, __new); \
6061
break; \
6162
case 4: \
62-
__arch_xchg(".w" sfx, prepend, append, \
63+
__arch_xchg(".w" swap_sfx, prepend, swap_append, \
6364
__ret, __ptr, __new); \
6465
break; \
6566
case 8: \
66-
__arch_xchg(".d" sfx, prepend, append, \
67+
__arch_xchg(".d" swap_sfx, prepend, swap_append, \
6768
__ret, __ptr, __new); \
6869
break; \
6970
default: \
@@ -73,16 +74,17 @@
7374
})
7475

7576
#define arch_xchg_relaxed(ptr, x) \
76-
_arch_xchg(ptr, x, "", "", "")
77+
_arch_xchg(ptr, x, "", "", "", "", "")
7778

7879
#define arch_xchg_acquire(ptr, x) \
79-
_arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)
80+
_arch_xchg(ptr, x, "", "", "", \
81+
RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)
8082

8183
#define arch_xchg_release(ptr, x) \
82-
_arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")
84+
_arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")
8385

8486
#define arch_xchg(ptr, x) \
85-
_arch_xchg(ptr, x, ".aqrl", "", "")
87+
_arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")
8688

8789
#define xchg32(ptr, x) \
8890
({ \

arch/riscv/kernel/cpu_ops_sbi.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
7272
/* Make sure tidle is updated */
7373
smp_mb();
7474
bdata->task_ptr = tidle;
75-
bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
75+
bdata->stack_ptr = task_pt_regs(tidle);
7676
/* Make sure boot data is updated */
7777
smp_mb();
7878
hsm_data = __pa(bdata);

arch/riscv/kernel/cpu_ops_spinwait.c

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
3434

3535
/* Make sure tidle is updated */
3636
smp_mb();
37-
WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
38-
task_stack_page(tidle) + THREAD_SIZE);
37+
WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
3938
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
4039
}
4140

0 commit comments

Comments (0)