
Commit 8bf90f3

Christoph Hellwig authored and paul-walmsley-sifive committed
riscv: implement remote sfence.i using IPIs
The RISC-V ISA only supports flushing the instruction cache for the local CPU core. Currently we always offload the remote instruction-cache flush to the SBI, which then issues an IPI under the hood. But with M-mode we do not have an SBI, so we have to do it ourselves. If we have native CLINT support and can thus IPI directly from the kernel, IPI the other harts using the existing kernel helpers instead.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Anup Patel <[email protected]>
[[email protected]: cleaned up code comment]
Signed-off-by: Paul Walmsley <[email protected]>
1 parent 3320648 commit 8bf90f3
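
In rough outline, the no-SBI fallback boils down to running fence.i on every hart via the kernel's existing cross-call machinery. The condensed sketch below shows the idea; the wrapper name flush_icache_all_by_ipi() is illustrative only, and the real change lives in flush_icache_all() in arch/riscv/mm/cacheflush.c further down.

#include <linux/smp.h>          /* on_each_cpu() */
#include <asm/cacheflush.h>     /* local_flush_icache_all(), i.e. fence.i */

/* fence.i only affects the hart that executes it, so every CPU has to
 * run the flush itself; the IPI machinery delivers the call there. */
static void ipi_remote_fence_i(void *info)
{
        local_flush_icache_all();
}

static void flush_icache_all_by_ipi(void)
{
        /* wait == 1: return only once every CPU has executed fence.i */
        on_each_cpu(ipi_remote_fence_i, NULL, 1);
}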

2 files changed, +21 -6 lines


arch/riscv/include/asm/sbi.h

Lines changed: 3 additions & 0 deletions
@@ -94,5 +94,8 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
 {
         SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
+#else /* CONFIG_RISCV_SBI */
+/* stub for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_remote_fence_i(const unsigned long *hart_mask);
 #endif /* CONFIG_RISCV_SBI */
 #endif /* _ASM_RISCV_SBI_H */
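
The !CONFIG_RISCV_SBI stub above is only a declaration with no definition. That is sufficient because, as the comment notes, every caller is guarded by IS_ENABLED(CONFIG_RISCV_SBI): with the option disabled that macro is the compile-time constant 0, the compiler drops the dead branch, and no reference to the undefined symbol survives to link time. A minimal sketch of the idiom, with the hypothetical names CONFIG_FOO, foo_backend() and do_work() standing in for the real ones:

#include <linux/kconfig.h>      /* IS_ENABLED() */

void foo_backend(void);         /* declared only; no definition when !CONFIG_FOO */

static void do_work(void)
{
        if (IS_ENABLED(CONFIG_FOO))
                foo_backend();  /* branch eliminated when CONFIG_FOO is off */
}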

arch/riscv/mm/cacheflush.c

Lines changed: 18 additions & 6 deletions
@@ -10,9 +10,17 @@
 
 #include <asm/sbi.h>
 
+static void ipi_remote_fence_i(void *info)
+{
+        return local_flush_icache_all();
+}
+
 void flush_icache_all(void)
 {
-        sbi_remote_fence_i(NULL);
+        if (IS_ENABLED(CONFIG_RISCV_SBI))
+                sbi_remote_fence_i(NULL);
+        else
+                on_each_cpu(ipi_remote_fence_i, NULL, 1);
 }
 
 /*
@@ -28,7 +36,7 @@ void flush_icache_all(void)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
         unsigned int cpu;
-        cpumask_t others, hmask, *mask;
+        cpumask_t others, *mask;
 
         preempt_disable();
 
@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
          */
         cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
         local |= cpumask_empty(&others);
-        if (mm != current->active_mm || !local) {
-                riscv_cpuid_to_hartid_mask(&others, &hmask);
-                sbi_remote_fence_i(hmask.bits);
-        } else {
+        if (mm == current->active_mm && local) {
                 /*
                  * It's assumed that at least one strongly ordered operation is
                  * performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
                  * with flush_icache_deferred().
                  */
                 smp_mb();
+        } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+                cpumask_t hartid_mask;
+
+                riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+                sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+        } else {
+                on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
         }
 
         preempt_enable();
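
A note on the two fallback call sites: on_each_cpu() runs ipi_remote_fence_i() on every online CPU, including the calling one, so flush_icache_all() still covers the local hart; on_each_cpu_mask() restricts the cross-call to the CPUs in others, which already excludes the current CPU via cpumask_andnot(). In both cases the final argument (wait = 1) makes the cross-call synchronous: the function returns only after every targeted CPU has executed fence.i.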
