Commit 1355ea2

ARC: mm: tlb flush optim: elide repeated uTLB invalidate in loop
The unconditional full TLB flush (say on ASID rollover) iterates over each entry and uses TLBWrite to zero it out. TLBWrite by design also invalidates the uTLBs, so we end up invalidating them as many times as there are entries (512 or 1K).

Optimize this by using the weaker TLBWriteNI command in the loop, which doesn't touch the uTLBs, and an explicit one-time IVUTLB outside the loop to invalidate them all at once.

Given this optimization, the IVUTLB is now needed on MMUv4 too, where the uTLBs and JTLBs are otherwise kept coherent by the TLBInsertEntry / TLBDeleteEntry commands.

Signed-off-by: Vineet Gupta <[email protected]>
1 parent ad4c40e commit 1355ea2
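
For illustration, here is a minimal sketch of the pattern the commit message describes, not the verbatim kernel code (the actual change is in the diff below). Register and command names follow arch/arc/mm/tlb.c; locking, the huge-page loop and the rest of local_flush_tlb_all() are omitted.

        /* Before: TLBWrite zeroes the JTLB entry AND invalidates the uTLBs,
         * so the uTLBs end up flushed once per entry (512 or 1K times).
         */
        for (entry = 0; entry < num_tlb; entry++) {
                write_aux_reg(ARC_REG_TLBINDEX, entry);
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
        }

        /* After: TLBWriteNI leaves the uTLBs alone; a single explicit IVUTLB
         * (issued via utlb_invalidate()) then flushes them once, now on MMUv4 too.
         */
        for (entry = 0; entry < num_tlb; entry++) {
                write_aux_reg(ARC_REG_TLBINDEX, entry);
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
        }
        utlb_invalidate();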

File tree

1 file changed: 29 additions, 45 deletions

arch/arc/mm/tlb.c

Lines changed: 29 additions & 45 deletions
@@ -118,6 +118,33 @@ static inline void __tlb_entry_erase(void)
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
 
+static void utlb_invalidate(void)
+{
+#if (CONFIG_ARC_MMU_VER >= 2)
+
+#if (CONFIG_ARC_MMU_VER == 2)
+	/* MMU v2 introduced the uTLB Flush command.
+	 * There was however an obscure hardware bug, where uTLB flush would
+	 * fail when a prior probe for J-TLB (both totally unrelated) would
+	 * return lkup err - because the entry didn't exist in MMU.
+	 * The Workround was to set Index reg with some valid value, prior to
+	 * flush. This was fixed in MMU v3
+	 */
+	unsigned int idx;
+
+	/* make sure INDEX Reg is valid */
+	idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+	/* If not write some dummy val */
+	if (unlikely(idx & TLB_LKUP_ERR))
+		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
+#endif
+
+	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+#endif
+
+}
+
 #if (CONFIG_ARC_MMU_VER < 4)
 
 static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
@@ -149,44 +176,6 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
 	}
 }
 
-/****************************************************************************
- * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
- *
- * New IVUTLB cmd in MMU v2 explictly invalidates the uTLB
- *
- * utlb_invalidate ( )
- *  -For v2 MMU calls Flush uTLB Cmd
- *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
- *     This is because in v1 TLBWrite itself invalidate uTLBs
- ***************************************************************************/
-
-static void utlb_invalidate(void)
-{
-#if (CONFIG_ARC_MMU_VER >= 2)
-
-#if (CONFIG_ARC_MMU_VER == 2)
-	/* MMU v2 introduced the uTLB Flush command.
-	 * There was however an obscure hardware bug, where uTLB flush would
-	 * fail when a prior probe for J-TLB (both totally unrelated) would
-	 * return lkup err - because the entry didn't exist in MMU.
-	 * The Workround was to set Index reg with some valid value, prior to
-	 * flush. This was fixed in MMU v3 hence not needed any more
-	 */
-	unsigned int idx;
-
-	/* make sure INDEX Reg is valid */
-	idx = read_aux_reg(ARC_REG_TLBINDEX);
-
-	/* If not write some dummy val */
-	if (unlikely(idx & TLB_LKUP_ERR))
-		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
-#endif
-
-	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
-#endif
-
-}
-
 static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 {
 	unsigned int idx;
@@ -219,11 +208,6 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 
 #else	/* CONFIG_ARC_MMU_VER >= 4) */
 
-static void utlb_invalidate(void)
-{
-	/* No need since uTLB is always in sync with JTLB */
-}
-
 static void tlb_entry_erase(unsigned int vaddr_n_asid)
 {
 	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
@@ -267,7 +251,7 @@ noinline void local_flush_tlb_all(void)
 	for (entry = 0; entry < num_tlb; entry++) {
 		/* write this entry to the TLB */
 		write_aux_reg(ARC_REG_TLBINDEX, entry);
-		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
 	}
 
 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
@@ -278,7 +262,7 @@ noinline void local_flush_tlb_all(void)
 
 		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
 			write_aux_reg(ARC_REG_TLBINDEX, entry);
-			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
 		}
 	}
 