@@ -431,6 +431,23 @@ do { \
431
431
/*
 * Stage-2 variant of __flush_tlb_range_op(): issues the range-based TLB
 * invalidation @op over @pages pages starting at @start with the given
 * @stride and @tlb_level. ASID is forced to 0 and tlbi_user to false
 * (stage-2 entries are not tagged with an ASID); LPA2 formatting follows
 * kvm_lpa2_is_enabled().
 *
 * NOTE: the macro name must be immediately followed by '(' — a space
 * there would make this an object-like macro and break every caller.
 */
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
+ static inline bool __flush_tlb_range_limit_excess (unsigned long start ,
435
+ unsigned long end , unsigned long pages , unsigned long stride )
436
+ {
437
+ /*
438
+ * When the system does not support TLB range based flush
439
+ * operation, (MAX_DVM_OPS - 1) pages can be handled. But
440
+ * with TLB range based operation, MAX_TLBI_RANGE_PAGES
441
+ * pages can be handled.
442
+ */
443
+ if ((!system_supports_tlb_range () &&
444
+ (end - start ) >= (MAX_DVM_OPS * stride )) ||
445
+ pages > MAX_TLBI_RANGE_PAGES )
446
+ return true;
447
+
448
+ return false;
449
+ }
450
+
434
451
static inline void __flush_tlb_range_nosync (struct vm_area_struct * vma ,
435
452
unsigned long start , unsigned long end ,
436
453
unsigned long stride , bool last_level ,
@@ -442,15 +459,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
442
459
end = round_up (end , stride );
443
460
pages = (end - start ) >> PAGE_SHIFT ;
444
461
445
- /*
446
- * When not uses TLB range ops, we can handle up to
447
- * (MAX_DVM_OPS - 1) pages;
448
- * When uses TLB range ops, we can handle up to
449
- * MAX_TLBI_RANGE_PAGES pages.
450
- */
451
- if ((!system_supports_tlb_range () &&
452
- (end - start ) >= (MAX_DVM_OPS * stride )) ||
453
- pages > MAX_TLBI_RANGE_PAGES ) {
462
+ if (__flush_tlb_range_limit_excess (start , end , pages , stride )) {
454
463
flush_tlb_mm (vma -> vm_mm );
455
464
return ;
456
465
}
0 commit comments