@@ -244,8 +244,10 @@ static int bcm2712_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	u32 entry = MMMU_PTE_VALID | (pa >> MMU_PAGE_SHIFT);
 	u32 align = (u32)(iova | pa | bytes);
 	unsigned int p;
+	unsigned long flags;
 
-	/* Reject if at least the first page is not within our aperture */
+	/* Reject if not entirely within our aperture (should never happen) */
+	bytes *= count;
 	if (iova < mmu->dma_iova_offset + APERTURE_BASE ||
 	    iova + bytes > mmu->dma_iova_offset + APERTURE_TOP) {
 		dev_warn(mmu->dev, "%s: iova=0x%lx pa=0x%llx bytes=0x%lx OUT OF RANGE\n",
@@ -267,14 +269,14 @@ static int bcm2712_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		entry |= MMMU_PTE_WRITEABLE;
 
 	/* Ensure tables are cache-coherent with CPU */
+	spin_lock_irqsave(&mmu->hw_lock, flags);
 	if (!mmu->dirty) {
 		dma_sync_sgtable_for_cpu(mmu->dev, mmu->sgt, DMA_TO_DEVICE);
 		mmu->dirty = true;
 	}
 
-	/* Make iova relative to table base; amalgamate count pages */
+	/* Make iova relative to table base */
 	iova -= (mmu->dma_iova_offset + APERTURE_BASE);
-	bytes = min(APERTURE_SIZE - iova, count * bytes);
 
 	/* Iterate over table by smallest native IOMMU page size */
 	for (p = iova >> MMU_PAGE_SHIFT;
@@ -283,6 +285,7 @@ static int bcm2712_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		mmu->tables[p] = entry++;
 	}
 
+	spin_unlock_irqrestore(&mmu->hw_lock, flags);
 	*mapped = bytes;
 
 	return 0;
@@ -293,31 +296,27 @@ static size_t bcm2712_iommu_unmap(struct iommu_domain *domain, unsigned long iov
 			    struct iommu_iotlb_gather *gather)
 {
 	struct bcm2712_iommu *mmu = domain_to_mmu(domain);
+	unsigned long flags;
 	unsigned int p;
 
+	/* Reject if not entirely within our aperture (should never happen) */
+	bytes *= count;
 	if (iova < mmu->dma_iova_offset + APERTURE_BASE ||
 	    iova + bytes > mmu->dma_iova_offset + APERTURE_TOP)
 		return 0;
 
 	/* Record just the lower and upper bounds in "gather" */
-	if (gather) {
-		bool empty = (gather->end <= gather->start);
-
-		if (empty || gather->start < iova)
-			gather->start = iova;
-		if (empty || gather->end < iova + bytes)
-			gather->end = iova + bytes;
-	}
+	spin_lock_irqsave(&mmu->hw_lock, flags);
+	iommu_iotlb_gather_add_range(gather, iova, bytes);
 
 	/* Ensure tables are cache-coherent with CPU */
 	if (!mmu->dirty) {
 		dma_sync_sgtable_for_cpu(mmu->dev, mmu->sgt, DMA_TO_DEVICE);
 		mmu->dirty = true;
 	}
 
-	/* Make iova relative to table base; amalgamate count pages */
+	/* Make iova relative to table base */
 	iova -= (mmu->dma_iova_offset + APERTURE_BASE);
-	bytes = min(APERTURE_SIZE - iova, count * bytes);
 
 	/* Clear table entries, this marks the addresses as illegal */
 	for (p = iova >> MMU_PAGE_SHIFT;
@@ -327,20 +326,22 @@ static size_t bcm2712_iommu_unmap(struct iommu_domain *domain, unsigned long iov
 		mmu->tables[p] = 0;
 	}
 
+	spin_unlock_irqrestore(&mmu->hw_lock, flags);
 	return bytes;
 }
 
 static int bcm2712_iommu_sync_range(struct iommu_domain *domain,
 				    unsigned long iova, size_t size)
 {
 	struct bcm2712_iommu *mmu = domain_to_mmu(domain);
-	unsigned long iova_end;
+	unsigned long flags, iova_end;
 	unsigned int i, p4;
 
 	if (!mmu || !mmu->dirty)
 		return 0;
 
 	/* Ensure tables are cleaned from CPU cache or write-buffer */
+	spin_lock_irqsave(&mmu->hw_lock, flags);
 	dma_sync_sgtable_for_device(mmu->dev, mmu->sgt, DMA_TO_DEVICE);
 	mmu->dirty = false;
 
@@ -384,19 +385,26 @@ static int bcm2712_iommu_sync_range(struct iommu_domain *domain,
 		}
 	}
 
+	spin_unlock_irqrestore(&mmu->hw_lock, flags);
 	return 0;
 }
 
 static void bcm2712_iommu_sync(struct iommu_domain *domain,
 			       struct iommu_iotlb_gather *gather)
 {
-	bcm2712_iommu_sync_range(domain, gather->start,
-				 gather->end - gather->start);
+	if (gather->end)
+		bcm2712_iommu_sync_range(domain, gather->start,
+					 gather->end - gather->start + 1);
 }
 
 static void bcm2712_iommu_sync_all(struct iommu_domain *domain)
 {
-	bcm2712_iommu_sync_range(domain, APERTURE_BASE, APERTURE_SIZE);
+	struct bcm2712_iommu *mmu = domain_to_mmu(domain);
+
+	if (mmu)
+		bcm2712_iommu_sync_range(domain,
+					 mmu->dma_iova_offset + APERTURE_BASE,
+					 APERTURE_SIZE);
 }
 
 static phys_addr_t bcm2712_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
@@ -406,9 +414,17 @@ static phys_addr_t bcm2712_iommu_iova_to_phys(struct iommu_domain *domain, dma_a
 
 	iova -= mmu->dma_iova_offset;
 	if (iova >= APERTURE_BASE && iova < APERTURE_TOP) {
+		unsigned long flags;
+		phys_addr_t addr;
+
+		spin_lock_irqsave(&mmu->hw_lock, flags);
 		p = (iova - APERTURE_BASE) >> MMU_PAGE_SHIFT;
 		p = mmu->tables[p] & 0x0FFFFFFFu;
-		return (((phys_addr_t)p) << MMU_PAGE_SHIFT) + (iova & (MMU_PAGE_SIZE - 1u));
+		addr = (((phys_addr_t)p) << MMU_PAGE_SHIFT) +
+		       (iova & (MMU_PAGE_SIZE - 1u));
+
+		spin_unlock_irqrestore(&mmu->hw_lock, flags);
+		return addr;
 	} else if (iova < APERTURE_BASE) {
 		return (phys_addr_t)iova;
 	} else {
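
For context on the new sync path above: the core helper iommu_iotlb_gather_add_range() records the gathered range with an inclusive end address, and a gather initialised by iommu_iotlb_gather_init() starts with end == 0, so a zero end means no pages were queued for invalidation. Below is a paraphrased sketch of those core helpers (based on include/linux/iommu.h; struct layout simplified here, and none of this is the driver's own code):

/* Sketch of the core IOTLB-gather helpers this patch relies on.
 * Paraphrased and simplified; see include/linux/iommu.h for the real thing.
 */
struct iommu_iotlb_gather {
	unsigned long start;	/* lowest iova gathered so far */
	unsigned long end;	/* highest iova gathered so far, INCLUSIVE */
};

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	gather->start = ULONG_MAX;
	gather->end = 0;	/* end == 0 means nothing gathered yet */
}

static inline void
iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;	/* inclusive upper bound */

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

This is why bcm2712_iommu_sync() now flushes gather->end - gather->start + 1 bytes and skips the flush entirely while gather->end is still zero, whereas the old open-coded bookkeeping it replaces treated end as exclusive.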