Skip to content

Commit 3608390

Browse files
rananta468 authored and Marc Zyngier committed
arm64: tlb: Refactor the core flush algorithm of __flush_tlb_range
Currently, the core TLB flush functionality of __flush_tlb_range() hardcodes vae1is (and variants) for the flush operation. In the upcoming patches, the KVM code reuses this core algorithm with ipas2e1is for range based TLB invalidations based on the IPA. Hence, extract the core flush functionality of __flush_tlb_range() into its own macro that accepts an 'op' argument to pass any TLBI operation, such that other callers (KVM) can benefit. No functional changes intended. Signed-off-by: Raghavendra Rao Ananta <[email protected]> Reviewed-by: Catalin Marinas <[email protected]> Reviewed-by: Gavin Shan <[email protected]> Reviewed-by: Shaoqin Huang <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 619b507 commit 3608390

File tree

1 file changed

+68
-53
lines changed

1 file changed

+68
-53
lines changed

arch/arm64/include/asm/tlbflush.h

Lines changed: 68 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -278,14 +278,74 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
278278
*/
279279
#define MAX_TLBI_OPS PTRS_PER_PTE
280280

281+
/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *		(typically for user ASIDs). 'false' for IPA instructions
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. If 'pages' is odd, flush the first page through non-range
 *    operations;
 *
 * 2. For remaining pages: the minimum range granularity is decided
 *    by 'scale', so multiple range TLBI operations may be required.
 *    Start from scale = 0, flush the corresponding number of pages
 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
 *    until no pages left.
 *
 * Note that certain ranges can be represented by either num = 31 and
 * scale or num = 0 and scale + 1. The loop below favours the latter
 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user)		\
do {									\
	int num = 0;							\
	int scale = 0;							\
	unsigned long addr;						\
									\
	while (pages > 0) {						\
		/* Fall back to single-page TLBI when range ops are	\
		 * unavailable, or to peel off an odd leading page.	\
		 */							\
		if (!system_supports_tlb_range() ||			\
		    pages % 2 == 1) {					\
			addr = __TLBI_VADDR(start, asid);		\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			start += stride;				\
			pages -= stride >> PAGE_SHIFT;			\
			continue;					\
		}							\
									\
		num = __TLBI_RANGE_NUM(pages, scale);			\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
						  num, tlb_level);	\
			/* Range variants are the 'op' name with an	\
			 * 'r' prefix (e.g. vae1is -> rvae1is).		\
			 */						\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			pages -= __TLBI_RANGE_PAGES(num, scale);	\
		}							\
		scale++;						\
	}								\
} while (0)
342+
281343
static inline void __flush_tlb_range(struct vm_area_struct *vma,
282344
unsigned long start, unsigned long end,
283345
unsigned long stride, bool last_level,
284346
int tlb_level)
285347
{
286-
int num = 0;
287-
int scale = 0;
288-
unsigned long asid, addr, pages;
348+
unsigned long asid, pages;
289349

290350
start = round_down(start, stride);
291351
end = round_up(end, stride);
@@ -307,56 +367,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
307367
dsb(ishst);
308368
asid = ASID(vma->vm_mm);
309369

310-
/*
311-
* When the CPU does not support TLB range operations, flush the TLB
312-
* entries one by one at the granularity of 'stride'. If the TLB
313-
* range ops are supported, then:
314-
*
315-
* 1. If 'pages' is odd, flush the first page through non-range
316-
* operations;
317-
*
318-
* 2. For remaining pages: the minimum range granularity is decided
319-
* by 'scale', so multiple range TLBI operations may be required.
320-
* Start from scale = 0, flush the corresponding number of pages
321-
* ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
322-
* until no pages left.
323-
*
324-
* Note that certain ranges can be represented by either num = 31 and
325-
* scale or num = 0 and scale + 1. The loop below favours the latter
326-
* since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
327-
*/
328-
while (pages > 0) {
329-
if (!system_supports_tlb_range() ||
330-
pages % 2 == 1) {
331-
addr = __TLBI_VADDR(start, asid);
332-
if (last_level) {
333-
__tlbi_level(vale1is, addr, tlb_level);
334-
__tlbi_user_level(vale1is, addr, tlb_level);
335-
} else {
336-
__tlbi_level(vae1is, addr, tlb_level);
337-
__tlbi_user_level(vae1is, addr, tlb_level);
338-
}
339-
start += stride;
340-
pages -= stride >> PAGE_SHIFT;
341-
continue;
342-
}
343-
344-
num = __TLBI_RANGE_NUM(pages, scale);
345-
if (num >= 0) {
346-
addr = __TLBI_VADDR_RANGE(start, asid, scale,
347-
num, tlb_level);
348-
if (last_level) {
349-
__tlbi(rvale1is, addr);
350-
__tlbi_user(rvale1is, addr);
351-
} else {
352-
__tlbi(rvae1is, addr);
353-
__tlbi_user(rvae1is, addr);
354-
}
355-
start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
356-
pages -= __TLBI_RANGE_PAGES(num, scale);
357-
}
358-
scale++;
359-
}
370+
if (last_level)
371+
__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
372+
else
373+
__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
374+
360375
dsb(ish);
361376
}
362377

0 commit comments

Comments
 (0)