@@ -497,6 +497,38 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
 	iommu_iotlb_gather_init(iotlb_gather);
 }
 
+/**
+ * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
+ * where only the address range matters, and simply minimising intermediate
+ * syncs is preferred.
+ */
+static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
+						unsigned long iova, size_t size)
+{
+	unsigned long end = iova + size - 1;
+
+	if (gather->start > iova)
+		gather->start = iova;
+	if (gather->end < end)
+		gather->end = end;
+}
+
+/**
+ * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * @domain: IOMMU domain to be invalidated
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build invalidation commands based on individual
+ * pages, or with page size/table level hints which cannot be gathered if they
+ * differ.
+ */
 static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
 					       struct iommu_iotlb_gather *gather,
 					       unsigned long iova, size_t size)
@@ -515,11 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
 		gather->pgsize = size;
 	}
 
-	if (gather->end < end)
-		gather->end = end;
-
-	if (gather->start > start)
-		gather->start = start;
+	iommu_iotlb_gather_add_range(gather, iova, size);
 }
 
 /* PCI device grouping function */
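For illustration only (not part of the patch), here is a minimal driver-side sketch of how the gather helpers combine ranges. The function name example_invalidate() and the addresses are hypothetical; iommu_iotlb_gather_init(), iommu_iotlb_gather_add_range() and iommu_iotlb_sync() are the existing helpers from this header.

#include <linux/iommu.h>

/*
 * Illustrative sketch, not part of the patch: example_invalidate() and the
 * addresses are made up.  In real drivers the IOMMU core allocates the
 * gather, the driver's unmap path feeds it via the helpers above, and the
 * core issues the final iommu_iotlb_sync().
 */
static void example_invalidate(struct iommu_domain *domain)
{
	struct iommu_iotlb_gather gather;

	/* Start with an empty range. */
	iommu_iotlb_gather_init(&gather);

	/* Address-only gathering: two disjoint 4K ranges merge into one span. */
	iommu_iotlb_gather_add_range(&gather, 0x10000, 0x1000);
	iommu_iotlb_gather_add_range(&gather, 0x30000, 0x1000);
	/* gather.start == 0x10000, gather.end == 0x30fff */

	/* One sync now covers the whole accumulated range. */
	iommu_iotlb_sync(domain, &gather);
}

iommu_iotlb_gather_add_page() now reuses the same range logic through the new helper, but additionally tracks the gathered page size, since, per its kernel-doc, page size or table level hints cannot be gathered together when they differ.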