@@ -660,19 +660,16 @@ static void iommu_dma_init_options(struct iommu_dma_options *options,
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
- * @base: IOVA at which the mappable address space starts
- * @limit: Last address of the IOVA space
  * @dev: Device the domain is being initialised for
  *
- * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
- * avoid rounding surprises. If necessary, we reserve the page at address 0
+ * If the geometry and dma_range_map include address 0, we reserve that page
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
-static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-		dma_addr_t limit, struct device *dev)
+static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	const struct bus_dma_region *map = dev->dma_range_map;
 	unsigned long order, base_pfn;
 	struct iova_domain *iovad;
 	int ret;
@@ -684,18 +681,18 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 
 	/* Use the smallest supported page size for IOVA granularity */
 	order = __ffs(domain->pgsize_bitmap);
-	base_pfn = max_t(unsigned long, 1, base >> order);
+	base_pfn = 1;
 
 	/* Check the domain allows at least some access to the device... */
-	if (domain->geometry.force_aperture) {
+	if (map) {
+		dma_addr_t base = dma_range_map_min(map);
 		if (base > domain->geometry.aperture_end ||
-		    limit < domain->geometry.aperture_start) {
+		    dma_range_map_max(map) < domain->geometry.aperture_start) {
 			pr_warn("specified DMA range outside IOMMU capability\n");
 			return -EFAULT;
 		}
 		/* ...then finally give it a kicking to make sure it fits */
-		base_pfn = max_t(unsigned long, base_pfn,
-				 domain->geometry.aperture_start >> order);
+		base_pfn = max(base, domain->geometry.aperture_start) >> order;
 	}
 
 	/* start_pfn is always nonzero for an already-initialised domain */
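
The new bounds check in the hunk above relies on a pair of helpers, dma_range_map_min() and dma_range_map_max(), to derive the lowest and highest bus addresses the device can reach from the bus_dma_region array at dev->dma_range_map. Their definitions are not part of this hunk; the following is only a sketch of how such helpers can be implemented in kernel context (min()/max() from <linux/minmax.h>, DMA_MAPPING_ERROR from <linux/dma-mapping.h>), not necessarily the exact code this commit depends on:

/* Sketch only: scan the zero-size-terminated bus_dma_region array. */
static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
{
	dma_addr_t ret = DMA_MAPPING_ERROR;	/* ~0, so any entry lowers it */

	for (; map->size; map++)
		ret = min(ret, map->dma_start);
	return ret;
}

static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
{
	dma_addr_t ret = 0;

	for (; map->size; map++)
		ret = max(ret, map->dma_start + map->size - 1);
	return ret;
}
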
@@ -1760,7 +1757,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
 	 */
 	if (iommu_is_dma_domain(domain)) {
-		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
+		if (iommu_dma_init_domain(domain, dev))
 			goto out_err;
 		dev->dma_ops = &iommu_dma_ops;
 	}
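
To illustrate the reworked base_pfn calculation: with 4KiB IOVA granularity (order = 12), a dma_range_map whose lowest bus address is 0x80000000 and an aperture starting at 0 yields base_pfn = 0x80000, so IOVAs the device cannot reach are never allocated; with no dma_range_map at all, base_pfn stays 1 and only page 0 is reserved. A hypothetical, self-contained illustration of that arithmetic (names invented for the example, not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Mirrors the base_pfn logic in the hunk above, for illustration only. */
static unsigned long calc_base_pfn(int have_map, dma_addr_t map_min,
				   dma_addr_t aperture_start, unsigned int order)
{
	unsigned long base_pfn = 1;	/* default: reserve only IOVA page 0 */

	if (have_map)
		base_pfn = (map_min > aperture_start ? map_min : aperture_start) >> order;
	return base_pfn;
}

int main(void)
{
	printf("%#lx\n", calc_base_pfn(1, 0x80000000ULL, 0, 12));	/* 0x80000 */
	printf("%#lx\n", calc_base_pfn(0, 0, 0, 12));			/* 0x1 */
	return 0;
}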