@@ -431,6 +431,23 @@ do { \
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
 	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
 
+static inline bool __flush_tlb_range_limit_excess(unsigned long start,
+		unsigned long end, unsigned long pages, unsigned long stride)
+{
+	/*
+	 * When the system does not support TLB range based flush
+	 * operation, (MAX_DVM_OPS - 1) pages can be handled. But
+	 * with TLB range based operation, MAX_TLBI_RANGE_PAGES
+	 * pages can be handled.
+	 */
+	if ((!system_supports_tlb_range() &&
+	     (end - start) >= (MAX_DVM_OPS * stride)) ||
+	    pages > MAX_TLBI_RANGE_PAGES)
+		return true;
+
+	return false;
+}
+
 static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
@@ -442,15 +459,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 	end = round_up(end, stride);
 	pages = (end - start) >> PAGE_SHIFT;
 
-	/*
-	 * When not uses TLB range ops, we can handle up to
-	 * (MAX_DVM_OPS - 1) pages;
-	 * When uses TLB range ops, we can handle up to
-	 * MAX_TLBI_RANGE_PAGES pages.
-	 */
-	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_DVM_OPS * stride)) ||
-	    pages > MAX_TLBI_RANGE_PAGES) {
+	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
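
The hunks above lift the size check out of __flush_tlb_range_nosync() into a new helper, __flush_tlb_range_limit_excess(), and call it from the original site. As a rough illustration of the decision the helper encapsulates, here is a minimal standalone C sketch; the constant values, the stubbed system_supports_tlb_range(), and the 8MiB test range are made-up stand-ins for illustration, not the kernel's definitions.

/*
 * Standalone sketch of the __flush_tlb_range_limit_excess() decision.
 * PAGE_SHIFT, MAX_DVM_OPS, MAX_TLBI_RANGE_PAGES and the capability
 * probe below are hypothetical stand-ins; the real definitions live
 * in the arm64 headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT		12		/* stand-in: 4K pages */
#define MAX_DVM_OPS		512		/* stand-in value */
#define MAX_TLBI_RANGE_PAGES	(1UL << 21)	/* stand-in value */

static bool system_supports_tlb_range(void)
{
	return false;	/* assume no range-based TLBI support */
}

/* Mirrors the shape of the new helper: too large a range -> full-mm flush. */
static bool flush_tlb_range_limit_excess(unsigned long start,
		unsigned long end, unsigned long pages, unsigned long stride)
{
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages > MAX_TLBI_RANGE_PAGES)
		return true;

	return false;
}

int main(void)
{
	unsigned long stride = 1UL << PAGE_SHIFT;
	unsigned long start = 0, end = 8UL << 20;	/* 8MiB range */
	unsigned long pages = (end - start) >> PAGE_SHIFT;

	printf("fall back to flush_tlb_mm(): %s\n",
	       flush_tlb_range_limit_excess(start, end, pages, stride) ?
	       "yes" : "no");
	return 0;
}

With these stand-in numbers, the 8MiB range exceeds MAX_DVM_OPS * stride on a system without range-based TLBI, so the sketch reports a fall-back, which corresponds to the flush_tlb_mm(vma->vm_mm) path taken in the second hunk above.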