Commit 737dcce

carlocaione authored and stephanosio committed
riscv: Move syscall parameter from a7 to t0
To prepare for RV32E support.

Signed-off-by: Carlo Caione <[email protected]>
1 parent e05a1e3 commit 737dcce
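
The motivation behind the register move: RV32E provides only registers x0-x15, so a7 (x17) does not exist there, while t0 (x5) does. A minimal sketch of the constraint (hypothetical snippet, not part of this commit, and assuming the standard __riscv_32e predefine) using the same GCC named-register extension the syscall stubs rely on:

    /* Hypothetical illustration of the RV32E constraint. A toolchain
     * targeting rv32e rejects a binding to "a7" because x17 does not
     * exist in the embedded register set; "t0" (x5) works on both.
     */
    unsigned long pick_syscall_reg(unsigned long call_id)
    {
    #if defined(__riscv_32e)
        register unsigned long id __asm__("t0") = call_id; /* x5: present on RV32E */
    #else
        register unsigned long id __asm__("a7") = call_id; /* x17: absent on RV32E */
    #endif
        __asm__ volatile("" : "+r"(id)); /* keep the register binding observable */
        return id;
    }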

File tree

arch/riscv/core/isr.S
include/zephyr/arch/riscv/syscall.h

2 files changed: +29 -29 lines changed

arch/riscv/core/isr.S

Lines changed: 14 additions & 14 deletions
@@ -303,16 +303,16 @@ is_kernel_syscall:
     csrs mstatus, t1
 #endif
 
-    /* Determine what to do. Operation code is in a7. */
-    lr a7, __z_arch_esf_t_a7_OFFSET(sp)
+    /* Determine what to do. Operation code is in t0. */
+    lr t0, __z_arch_esf_t_t0_OFFSET(sp)
 
     ASSUME_EQUAL(RV_ECALL_RUNTIME_EXCEPT, 0)
-    beqz a7, do_fault
+    beqz t0, do_fault
 
 #if defined(CONFIG_IRQ_OFFLOAD)
-    addi a7, a7, -1
+    addi t0, t0, -1
     ASSUME_EQUAL(RV_ECALL_IRQ_OFFLOAD, 1)
-    beqz a7, do_irq_offload
+    beqz t0, do_irq_offload
 #endif
 
     /* default fault code is K_ERR_KERNEL_OOPS */
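
For orientation, the two kernel-side ecall codes tested above are consecutive, which is why the hunk can test with beqz, decrement, and test again. A sketch of the values (the ASSUME_EQUAL asserts in the diff pin them at build time; the enum itself is illustrative, the real definitions live in Zephyr's RISC-V headers):

    /* Illustrative only: the values asserted by ASSUME_EQUAL above. */
    enum rv_ecall_code {
        RV_ECALL_RUNTIME_EXCEPT = 0, /* beqz t0, do_fault */
        RV_ECALL_IRQ_OFFLOAD    = 1, /* addi t0, t0, -1; beqz t0, do_irq_offload */
    };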
@@ -386,24 +386,24 @@ is_user_syscall:
     lr a3, __z_arch_esf_t_a3_OFFSET(sp)
     lr a4, __z_arch_esf_t_a4_OFFSET(sp)
     lr a5, __z_arch_esf_t_a5_OFFSET(sp)
+    lr t0, __z_arch_esf_t_t0_OFFSET(sp)
     mv a6, sp
-    lr a7, __z_arch_esf_t_a7_OFFSET(sp)
 
     /* validate syscall limit */
-    li t0, K_SYSCALL_LIMIT
-    bltu a7, t0, valid_syscall_id
+    li t1, K_SYSCALL_LIMIT
+    bltu t0, t1, valid_syscall_id
 
     /* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
-    mv a0, a7
-    li a7, K_SYSCALL_BAD
+    mv a0, t0
+    li t0, K_SYSCALL_BAD
 
 valid_syscall_id:
 
-    la t0, _k_syscall_table
+    la t2, _k_syscall_table
 
-    slli t1, a7, RV_REGSHIFT    # Determine offset from indice value
-    add t0, t0, t1              # Table addr + offset = function addr
-    lr t2, 0(t0)                # Load function address
+    slli t1, t0, RV_REGSHIFT    # Determine offset from indice value
+    add t2, t2, t1              # Table addr + offset = function addr
+    lr t2, 0(t2)                # Load function address
 
     /* Execute syscall function */
     jalr ra, t2, 0
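
Read as C, the user-syscall path above does roughly the following (a hedged sketch: _k_syscall_table, K_SYSCALL_LIMIT, and K_SYSCALL_BAD are real Zephyr symbols, but the stand-in values, the wrapper, and its types are illustrative):

    #include <stdint.h>

    /* Illustrative stand-ins; the real values and table come from
     * Zephyr's generated syscall list.
     */
    #ifndef K_SYSCALL_LIMIT
    #define K_SYSCALL_LIMIT 128
    #define K_SYSCALL_BAD   (K_SYSCALL_LIMIT - 1)
    #endif

    typedef uintptr_t (*syscall_fn_t)(uintptr_t, uintptr_t, uintptr_t,
                                      uintptr_t, uintptr_t, uintptr_t, void *);
    extern const syscall_fn_t _k_syscall_table[];

    static uintptr_t dispatch(uintptr_t id, uintptr_t a[6], void *esf)
    {
        if (id >= K_SYSCALL_LIMIT) { /* li t1, K_SYSCALL_LIMIT; bltu t0, t1, ... */
            a[0] = id;               /* mv a0, t0: arg1 <- the bad id */
            id = K_SYSCALL_BAD;      /* li t0, K_SYSCALL_BAD */
        }
        /* la/slli/add/lr compute _k_syscall_table[id]; jalr ra, t2, 0 calls it */
        return _k_syscall_table[id](a[0], a[1], a[2], a[3], a[4], a[5], esf);
    }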

include/zephyr/arch/riscv/syscall.h

Lines changed: 15 additions & 15 deletions
@@ -46,12 +46,12 @@ static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
     register ulong_t a3 __asm__ ("a3") = arg4;
     register ulong_t a4 __asm__ ("a4") = arg5;
     register ulong_t a5 __asm__ ("a5") = arg6;
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "+r" (a0)
               : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5),
-                "r" (a7)
+                "r" (t0)
               : "memory");
     return a0;
 }
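
A note on the constraint pattern, which this commit leaves untouched: a0 is tied with "+r" because it carries arg1 into the ecall and the return value back out, the remaining argument and id registers are plain "r" inputs, and the "memory" clobber stops the compiler from caching memory contents across the trap. Only the register backing call_id moves from a7 to t0.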
@@ -66,11 +66,11 @@ static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
     register ulong_t a2 __asm__ ("a2") = arg3;
     register ulong_t a3 __asm__ ("a3") = arg4;
     register ulong_t a4 __asm__ ("a4") = arg5;
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "+r" (a0)
-              : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a7)
+              : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (t0)
               : "memory");
     return a0;
 }
@@ -83,11 +83,11 @@ static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
     register ulong_t a1 __asm__ ("a1") = arg2;
     register ulong_t a2 __asm__ ("a2") = arg3;
     register ulong_t a3 __asm__ ("a3") = arg4;
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "+r" (a0)
-              : "r" (a1), "r" (a2), "r" (a3), "r" (a7)
+              : "r" (a1), "r" (a2), "r" (a3), "r" (t0)
               : "memory");
     return a0;
 }
@@ -99,11 +99,11 @@ static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
     register ulong_t a0 __asm__ ("a0") = arg1;
     register ulong_t a1 __asm__ ("a1") = arg2;
     register ulong_t a2 __asm__ ("a2") = arg3;
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "+r" (a0)
-              : "r" (a1), "r" (a2), "r" (a7)
+              : "r" (a1), "r" (a2), "r" (t0)
               : "memory");
     return a0;
 }
@@ -113,35 +113,35 @@ static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
 {
     register ulong_t a0 __asm__ ("a0") = arg1;
     register ulong_t a1 __asm__ ("a1") = arg2;
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "+r" (a0)
-              : "r" (a1), "r" (a7)
+              : "r" (a1), "r" (t0)
               : "memory");
     return a0;
 }
 
 static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
 {
     register ulong_t a0 __asm__ ("a0") = arg1;
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "+r" (a0)
-              : "r" (a7)
+              : "r" (t0)
               : "memory");
     return a0;
 }
 
 static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
 {
     register ulong_t a0 __asm__ ("a0");
-    register ulong_t a7 __asm__ ("a7") = call_id;
+    register ulong_t t0 __asm__ ("t0") = call_id;
 
     __asm__ volatile ("ecall"
               : "=r" (a0)
-              : "r" (a7)
+              : "r" (t0)
               : "memory");
     return a0;
 }
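
As a usage sketch (the syscall id and wrapper below are hypothetical; real ids come from Zephyr's generated syscall list, and this header is assumed to be included), a one-argument syscall now marshals its id through t0 without the caller noticing any difference:

    #include <stdint.h>

    #define K_SYSCALL_HYPOTHETICAL_SLEEP 42 /* hypothetical id, illustration only */

    static inline int32_t my_sleep_ms(int32_t ms)
    {
        /* arg1 travels in a0; call_id now travels in t0; result returns in a0 */
        return (int32_t)arch_syscall_invoke1((uintptr_t)ms,
                                             K_SYSCALL_HYPOTHETICAL_SLEEP);
    }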
@@ -168,7 +168,7 @@ static inline bool arch_is_user_context(void)
      * and omit the volatile to give the compiler a chance to cache
      * the result.
      */
-    register ulong_t is_user __asm__ ("t0");
+    register ulong_t is_user __asm__ ("t1");
     __asm__ ("csrr %0, mscratch" : "=r" (is_user));
     return is_user != 0;
 #else
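
(Presumably is_user moves from t0 to t1 here so the non-volatile, cacheable mscratch read can never alias t0, which after this commit carries the live syscall id around the ecall sites.)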
