@@ -1334,8 +1334,12 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 				  dma_addr_t iova, size_t size)
 {
 	if (unlikely(amd_iommu_np_cache)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&domain->lock, flags);
 		domain_flush_pages(domain, iova, size);
 		domain_flush_complete(domain);
+		spin_unlock_irqrestore(&domain->lock, flags);
 	}
 }
 
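Read as a whole, the hunk above leaves domain_flush_np_cache() looking roughly as follows (reconstructed from the context and added lines; the inline comment is an annotation, not part of the patch). The flush of the non-present cache and the wait for its completion now run with domain->lock held, and the irqsave/irqrestore variants keep the helper usable from callers that may already have interrupts disabled:

static void domain_flush_np_cache(struct protection_domain *domain,
				  dma_addr_t iova, size_t size)
{
	if (unlikely(amd_iommu_np_cache)) {
		unsigned long flags;

		/* flush and completion wait are serialized on domain->lock */
		spin_lock_irqsave(&domain->lock, flags);
		domain_flush_pages(domain, iova, size);
		domain_flush_complete(domain);
		spin_unlock_irqrestore(&domain->lock, flags);
	}
}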
@@ -1700,8 +1704,13 @@ static int iommu_map_page(struct protection_domain *dom,
 	ret = 0;
 
 out:
-	if (updated)
+	if (updated) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dom->lock, flags);
 		update_domain(dom);
+		spin_unlock_irqrestore(&dom->lock, flags);
+	}
 
 	/* Everything flushed out, free pages now */
 	free_page_list(freelist);
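With this hunk applied, the epilogue of iommu_map_page() takes dom->lock around update_domain() whenever `updated` was set earlier in the function, while freeing the replaced page-table pages stays outside the critical section. A sketch of the resulting code path, pieced together from the hunk (annotation comment added here):

out:
	if (updated) {
		unsigned long flags;

		/* update_domain() now runs under dom->lock */
		spin_lock_irqsave(&dom->lock, flags);
		update_domain(dom);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	free_page_list(freelist);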
@@ -1857,8 +1866,12 @@ static void free_gcr3_table(struct protection_domain *domain)
 
 static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->domain.lock, flags);
 	domain_flush_tlb(&dom->domain);
 	domain_flush_complete(&dom->domain);
+	spin_unlock_irqrestore(&dom->domain.lock, flags);
 }
 
 static void iova_domain_flush_tlb(struct iova_domain *iovad)
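The same pattern is applied to the DMA-ops flush helper. Note that the lock taken is the embedded protection domain's lock (dom->domain.lock), so these flushes serialize on the same lock as the ones in the earlier hunks. Reconstructed result of the hunk:

static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
{
	unsigned long flags;

	spin_lock_irqsave(&dom->domain.lock, flags);
	domain_flush_tlb(&dom->domain);
	domain_flush_complete(&dom->domain);
	spin_unlock_irqrestore(&dom->domain.lock, flags);
}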
@@ -2414,6 +2427,7 @@ static dma_addr_t __map_single(struct device *dev,
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
 	dma_addr_t address, start, ret;
+	unsigned long flags;
 	unsigned int pages;
 	int prot = 0;
 	int i;
@@ -2451,8 +2465,10 @@ static dma_addr_t __map_single(struct device *dev,
 		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
 	}
 
+	spin_lock_irqsave(&dma_dom->domain.lock, flags);
 	domain_flush_tlb(&dma_dom->domain);
 	domain_flush_complete(&dma_dom->domain);
+	spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 
 	dma_ops_free_iova(dma_dom, address, pages);
 
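The `unsigned long flags` declared at the top of __map_single() in the previous hunk exists for this error-path flush. After the hunk, the unwind sequence flushes the TLB and waits for completion under the domain lock, then returns the IOVA range to the allocator after the lock is dropped (annotation comments added here):

	/* flush the partially unmapped range under the domain lock */
	spin_lock_irqsave(&dma_dom->domain.lock, flags);
	domain_flush_tlb(&dma_dom->domain);
	domain_flush_complete(&dma_dom->domain);
	spin_unlock_irqrestore(&dma_dom->domain.lock, flags);

	/* the IOVA range is freed outside the critical section */
	dma_ops_free_iova(dma_dom, address, pages);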
@@ -2481,8 +2497,12 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	}
 
 	if (amd_iommu_unmap_flush) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dma_dom->domain.lock, flags);
 		domain_flush_tlb(&dma_dom->domain);
 		domain_flush_complete(&dma_dom->domain);
+		spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 		dma_ops_free_iova(dma_dom, dma_addr, pages);
 	} else {
 		pages = __roundup_pow_of_two(pages);
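In __unmap_single() only the amd_iommu_unmap_flush branch flushes immediately, so only that branch grows a lock/unlock pair; the else branch is left untouched by this patch. Reconstructed from the hunk, the synchronous branch now reads:

	if (amd_iommu_unmap_flush) {
		unsigned long flags;

		spin_lock_irqsave(&dma_dom->domain.lock, flags);
		domain_flush_tlb(&dma_dom->domain);
		domain_flush_complete(&dma_dom->domain);
		spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
		dma_ops_free_iova(dma_dom, dma_addr, pages);
	}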
@@ -3246,9 +3266,12 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dom->lock, flags);
 	domain_flush_tlb_pde(dom);
 	domain_flush_complete(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
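Finally, amd_iommu_flush_iotlb_all(), presumably the driver's iommu_ops flush_iotlb_all callback, follows the same scheme. Taken together, every hunk in this patch brackets a domain_flush_*() / domain_flush_complete() pair with spin_lock_irqsave()/spin_unlock_irqrestore() on the protection domain's lock. The reconstructed callback:

static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct protection_domain *dom = to_pdomain(domain);
	unsigned long flags;

	spin_lock_irqsave(&dom->lock, flags);
	domain_flush_tlb_pde(dom);
	domain_flush_complete(dom);
	spin_unlock_irqrestore(&dom->lock, flags);
}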