Skip to content

Commit 676f98f

Browse files
committed
[libcpu-riscv]: [support SMP]: Fix issues
Fix some potential concurrency issues and code-standardization problems. The .percpu section is only used when both ARCH_MM_MMU and RT_USING_SMP are enabled; however, because no macro guard currently surrounds it in the linker script, some space is wasted when those options are disabled. The physical memory of the QEMU instance started in CI is 128MB, so RT_HW_PAGE_END is changed from the original +256MB to +128MB. The SConscript file under the common64 directory is modified to include common/atomic_riscv.c in the compilation process. Signed-off-by: Mengchen Teng <[email protected]>
1 parent 8a9066f commit 676f98f

File tree

11 files changed

+62
-218
lines changed

11 files changed

+62
-218
lines changed

bsp/qemu-virt64-riscv/driver/board.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,17 +15,17 @@
1515

1616
extern unsigned int __bss_start;
1717
extern unsigned int __bss_end;
18-
extern unsigned int _end;
18+
1919
#ifndef RT_USING_SMART
2020
#define KERNEL_VADDR_START 0x0
2121
#endif
2222

2323
#define VIRT64_SBI_MEMSZ (0x200000)
2424

25-
#define RT_HW_HEAP_BEGIN ((void *)&_end)
25+
#define RT_HW_HEAP_BEGIN ((void *)&__bss_end)
2626
#define RT_HW_HEAP_END ((void *)(RT_HW_HEAP_BEGIN + 64 * 1024 * 1024))
2727
#define RT_HW_PAGE_START RT_HW_HEAP_END
28-
#define RT_HW_PAGE_END ((void *)(KERNEL_VADDR_START + (256 * 1024 * 1024 - VIRT64_SBI_MEMSZ)))
28+
#define RT_HW_PAGE_END ((void *)(KERNEL_VADDR_START + (128 * 1024 * 1024 - VIRT64_SBI_MEMSZ)))
2929

3030
void rt_hw_board_init(void);
3131
void rt_init_user_mem(struct rt_thread *thread, const char *name,

bsp/qemu-virt64-riscv/link.lds

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -136,32 +136,32 @@ SECTIONS
136136
*(.scommon)
137137
} > SRAM
138138

139-
.bss :
140-
{
141-
*(.bss)
142-
*(.bss.*)
143-
*(.dynbss)
144-
*(COMMON)
145-
__bss_end = .;
146-
} > SRAM
147-
148139
.percpu (NOLOAD) :
149140
{
150-
/* Align for MMU early map */
151-
. = ALIGN(1<<(12+9));
141+
/* 2MB Align for MMU early map */
142+
. = ALIGN(0x200000);
152143
PROVIDE(__percpu_start = .);
153144

154145
*(.percpu)
155146

156-
/* Align for MMU early map */
157-
. = ALIGN(1<<(12+9));
147+
/* 2MB Align for MMU early map */
148+
. = ALIGN(0x200000);
158149

159150
PROVIDE(__percpu_end = .);
160151

161152
/* Clone the area */
162153
. = __percpu_end + (__percpu_end - __percpu_start) * (RT_CPUS_NR - 1);
163154
PROVIDE(__percpu_real_end = .);
164155
} > SRAM
156+
157+
.bss :
158+
{
159+
*(.bss)
160+
*(.bss.*)
161+
*(.dynbss)
162+
*(COMMON)
163+
__bss_end = .;
164+
} > SRAM
165165

166166
_end = .;
167167

bsp/qemu-virt64-riscv/link_smart.lds

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -137,25 +137,16 @@ SECTIONS
137137
*(.scommon)
138138
} > SRAM
139139

140-
.bss :
140+
.percpu (NOLOAD) :
141141
{
142-
*(.bss)
143-
*(.bss.*)
144-
*(.dynbss)
145-
*(COMMON)
146-
__bss_end = .;
147-
} > SRAM
148-
149-
.percpu (NOLOAD) :
150-
{
151-
/* Align for MMU early map */
152-
. = ALIGN(1<<(12+9));
142+
/* 2MB Align for MMU early map */
143+
. = ALIGN(0x200000);
153144
PROVIDE(__percpu_start = .);
154145

155146
*(.percpu)
156147

157-
/* Align for MMU early map */
158-
. = ALIGN(1<<(12+9));
148+
/* 2MB Align for MMU early map */
149+
. = ALIGN(0x200000);
159150

160151
PROVIDE(__percpu_end = .);
161152

@@ -164,6 +155,15 @@ SECTIONS
164155
PROVIDE(__percpu_real_end = .);
165156
} > SRAM
166157

158+
.bss :
159+
{
160+
*(.bss)
161+
*(.bss.*)
162+
*(.dynbss)
163+
*(COMMON)
164+
__bss_end = .;
165+
} > SRAM
166+
167167
_end = .;
168168

169169
/* Stabs debugging sections. */

bsp/qemu-virt64-riscv/run.sh

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,9 @@ fi
2727
QEMU_CMD="qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin"
2828

2929
if grep -q "#define RT_USING_SMP" ./rtconfig.h 2>/dev/null; then
30-
hart_num=$(grep "RT_CPUS_NR = [0-9]*;" ./link_cpus.lds | awk -F'[=;]' '{gsub(/ /, "", $2); print $2}')
31-
if [ -z "$hart_num" ]; then
30+
hart_num=$(grep "RT_CPUS_NR = [0-9]*;" ./link_cpus.lds 2>/dev/null | awk -F'[=;]' '{gsub(/ /, "", $2); print $2}')
31+
if [ -z "$hart_num" ] || [ "$hart_num" -lt 1 ]; then
32+
echo "Warning: Invalid or missing RT_CPUS_NR, defaulting to 1"
3233
hart_num=1
3334
fi
3435
QEMU_CMD="$QEMU_CMD -smp $hart_num"

libcpu/risc-v/common64/SConscript

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@ CPPPATH = [cwd]
77
if not GetDepend('ARCH_USING_ASID'):
88
SrcRemove(src, ['asid.c'])
99

10-
group = DefineGroup('libcpu', src, depend = [''], CPPPATH = CPPPATH)
10+
src.append('../common/atomic_riscv.c')
11+
12+
group = DefineGroup('CPU', src, depend = [''], CPPPATH = CPPPATH)
1113

1214
Return('group')

libcpu/risc-v/common64/atomic_riscv.c

Lines changed: 0 additions & 159 deletions
This file was deleted.

libcpu/risc-v/common64/context_gcc.S

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -82,10 +82,7 @@ rt_hw_context_switch_to:
8282
LOAD sp, (a0)
8383

8484
#ifdef RT_USING_SMP
85-
/*
86-
* Pass the previous CPU lock status to
87-
* rt_cpus_lock_status_restore for restoration
88-
*/
85+
/* Pass the previous CPU lock status to rt_cpus_lock_status_restore for restoration */
8986
mv a0, a1
9087
call rt_cpus_lock_status_restore
9188
#endif
@@ -125,10 +122,7 @@ rt_hw_context_switch:
125122
LOAD sp, (a1)
126123

127124
#ifdef RT_USING_SMP
128-
/*
129-
* Pass the previous CPU lock status to
130-
* rt_cpus_lock_status_restore for restoration
131-
*/
125+
/* Pass the previous CPU lock status to rt_cpus_lock_status_restore for restoration */
132126
mv a0, a2
133127
call rt_cpus_lock_status_restore
134128
#endif /*RT_USING_SMP*/

libcpu/risc-v/common64/cpuport.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ int rt_hw_cpu_id(void)
8888
// if not enable MMU or pvoff==0, read hartid from satp register
8989
rt_ubase_t hartid;
9090
asm volatile("csrr %0, satp" : "=r"(hartid));
91-
return hartid;
91+
return hartid & 0xFFFF; // Assuming hartid fits in lower 16 bits
9292
}
9393
#endif /* RT_USING_SMP */
9494
}
@@ -198,7 +198,8 @@ void rt_hw_secondary_cpu_up(void)
198198
#else
199199
entry_pa = (rt_uint64_t)&_start;
200200
#endif /* ARCH_MM_MMU */
201-
201+
/* Assumes hart IDs are in range [0, RT_CPUS_NR) */
202+
RT_ASSERT(boot_hartid < RT_CPUS_NR);
202203
for (hart = 0; hart < RT_CPUS_NR; hart++)
203204
{
204205
if (hart == boot_hartid)
@@ -217,6 +218,7 @@ void rt_hw_secondary_cpu_up(void)
217218
#ifdef ARCH_MM_MMU
218219
void rt_hw_percpu_hartid_init(rt_ubase_t *percpu_ptr, rt_ubase_t hartid)
219220
{
221+
RT_ASSERT(hartid < RT_CPUS_NR);
220222
rt_ubase_t *percpu_hartid_paddr;
221223
rt_size_t percpu_size = (rt_size_t)((rt_ubase_t)&__percpu_end - (rt_ubase_t)&__percpu_start);
222224

libcpu/risc-v/common64/mmu.c

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838

3939
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr);
4040

41-
// Define the structure of early page table
41+
/* Define the structure of early page table */
4242
struct page_table
4343
{
4444
unsigned long page[ARCH_PAGE_SIZE / sizeof(unsigned long)];
@@ -120,7 +120,7 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
120120
l1_off = GET_L1((size_t)va);
121121
l2_off = GET_L2((size_t)va);
122122
l3_off = GET_L3((size_t)va);
123-
// create map for each hart
123+
/* Create a separate page table for each hart to facilitate access to the .percpu section. */
124124
for (int hartid = 0; hartid < RT_CPUS_NR; hartid++)
125125
{
126126
mmu_l1 = (rt_ubase_t *)((rt_ubase_t)aspace->page_table + (rt_ubase_t)(hartid * ARCH_PAGE_SIZE)) + l1_off;
@@ -345,11 +345,12 @@ void set_free_page(void *page_array)
345345
// Early-stage page allocator
346346
unsigned long get_free_page(void)
347347
{
348-
static unsigned long page_off = 0UL;
348+
static rt_atomic_t page_off = 0;
349349

350-
if (page_off < ARCH_PAGE_SIZE / sizeof(unsigned long))
350+
rt_atomic_t old_off = rt_hw_atomic_add(&page_off, 1);
351+
if (old_off < ARCH_PAGE_SIZE / sizeof(unsigned long))
351352
{
352-
return (unsigned long)(__init_page_array[page_off++].page);
353+
return (unsigned long)(__init_page_array[old_off].page);
353354
}
354355

355356
return 0;
@@ -860,10 +861,6 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
860861

861862
#define SATP_BASE ((rt_ubase_t)SATP_MODE << SATP_MODE_OFFSET)
862863

863-
extern unsigned int __bss_end;
864-
#if defined(RT_USING_SMP) && defined(ARCH_MM_MMU)
865-
extern unsigned int __percpu_real_end;
866-
#endif
867864
/**
868865
* @brief Early memory setup function for hardware initialization.
869866
*

0 commit comments

Comments
 (0)