
Commit f58e5dc

SiFiveHolland authored and palmer-dabbelt committed
riscv: mm: Use a fixed layout for the MM context ID
Currently, the size of the ASID field in the MM context ID dynamically depends on the number of hardware-supported ASID bits. This requires reading a global variable to extract either field from the context ID. Instead, allocate the maximum possible number of bits to the ASID field, so the layout of the context ID is known at compile-time. Reviewed-by: Alexandre Ghiti <[email protected]> Signed-off-by: Samuel Holland <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 74cd177 commit f58e5dc
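
For context, a minimal user-space sketch of the fixed-layout split this commit introduces: the low SATP_ASID_BITS bits of the context ID hold the ASID and the remaining upper bits hold the version, so both fields can be extracted with compile-time constants. The constant values below are assumptions for illustration (16 ASID bits, the RV64 satp layout); the kernel takes SATP_ASID_BITS and SATP_ASID_MASK from arch/riscv/include/asm/csr.h.

/*
 * Illustrative only: local stand-ins for the kernel's SATP_* constants and
 * the reworked cntx2asid()/cntx2version() macros from asm/mmu.h.
 */
#include <stdio.h>

#define SATP_ASID_BITS  16                              /* assumed RV64 value */
#define SATP_ASID_MASK  ((1UL << SATP_ASID_BITS) - 1)

#define cntx2asid(cntx)         ((cntx) & SATP_ASID_MASK)
#define cntx2version(cntx)      ((cntx) & ~SATP_ASID_MASK)

int main(void)
{
        /* Version (generation) above the ASID field, ASID in the low bits. */
        unsigned long cntx = (3UL << SATP_ASID_BITS) | 0x2a;

        printf("asid    = %#lx\n", cntx2asid(cntx));    /* 0x2a */
        printf("version = %#lx\n", cntx2version(cntx)); /* 0x30000 */
        return 0;
}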

File tree

3 files changed: +4, -8 lines changed

arch/riscv/include/asm/mmu.h

Lines changed: 2 additions & 2 deletions

@@ -26,8 +26,8 @@ typedef struct {
 #endif
 } mm_context_t;
 
-#define cntx2asid(cntx)         ((cntx) & asid_mask)
-#define cntx2version(cntx)      ((cntx) & ~asid_mask)
+#define cntx2asid(cntx)         ((cntx) & SATP_ASID_MASK)
+#define cntx2version(cntx)      ((cntx) & ~SATP_ASID_MASK)
 
 void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
                                phys_addr_t sz, pgprot_t prot);

arch/riscv/include/asm/tlbflush.h

Lines changed: 0 additions & 2 deletions

@@ -15,8 +15,6 @@
 #define FLUSH_TLB_NO_ASID       ((unsigned long)-1)
 
 #ifdef CONFIG_MMU
-extern unsigned long asid_mask;
-
 static inline void local_flush_tlb_all(void)
 {
         __asm__ __volatile__ ("sfence.vma" : : : "memory");

arch/riscv/mm/context.c

Lines changed: 2 additions & 4 deletions

@@ -22,7 +22,6 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-unsigned long asid_mask;
 
 static atomic_long_t current_version;
 
@@ -128,7 +127,7 @@ static unsigned long __new_context(struct mm_struct *mm)
                 goto set_asid;
 
         /* We're out of ASIDs, so increment current_version */
-        ver = atomic_long_add_return_relaxed(num_asids, &current_version);
+        ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS), &current_version);
 
         /* Flush everything */
         __flush_context();
@@ -247,15 +246,14 @@ static int __init asids_init(void)
         /* Pre-compute ASID details */
         if (asid_bits) {
                 num_asids = 1 << asid_bits;
-                asid_mask = num_asids - 1;
         }
 
         /*
          * Use ASID allocator only if number of HW ASIDs are
          * at-least twice more than CPUs
          */
         if (num_asids > (2 * num_possible_cpus())) {
-                atomic_long_set(&current_version, num_asids);
+                atomic_long_set(&current_version, BIT(SATP_ASID_BITS));
 
                 context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
                 if (!context_asid_map)
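
As a side note on the context.c hunks above, the generation bump works because the version now lives in the bits above a fixed-width ASID field: adding BIT(SATP_ASID_BITS) advances only the version bits and leaves the ASID bits untouched. A small stand-alone sketch, again assuming the 16-bit RV64 ASID width:

/*
 * Sketch of the __new_context() generation bump with a fixed layout.
 * BIT() and the SATP_* values are local stand-ins for <linux/bits.h>
 * and asm/csr.h; 16 ASID bits are assumed for illustration.
 */
#include <stdio.h>

#define BIT(n)          (1UL << (n))
#define SATP_ASID_BITS  16
#define SATP_ASID_MASK  (BIT(SATP_ASID_BITS) - 1)

int main(void)
{
        /* asids_init() starts the counter at generation 1. */
        unsigned long current_version = BIT(SATP_ASID_BITS);

        /* Out of ASIDs: move to the next generation; ASID bits stay zero. */
        current_version += BIT(SATP_ASID_BITS);

        printf("version field = %#lx\n", current_version & ~SATP_ASID_MASK); /* 0x20000 */
        printf("asid field    = %#lx\n", current_version & SATP_ASID_MASK);  /* 0 */
        return 0;
}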
