Skip to content

Commit 2a78f99

Browse files
committed
iommu/amd: Lock code paths traversing protection_domain->dev_list
The traversing of this list requires protection_domain->lock to be taken to avoid nasty races with attach/detach code. Make sure the lock is held on all code-paths traversing this list. Reported-by: Filippo Sironi <[email protected]> Fixes: 92d420e ("iommu/amd: Relax locking in dma_ops path") Reviewed-by: Filippo Sironi <[email protected]> Reviewed-by: Jerry Snitselaar <[email protected]> Signed-off-by: Joerg Roedel <[email protected]>
1 parent ab7b257 commit 2a78f99

File tree

1 file changed

+24
-1
lines changed

1 file changed

+24
-1
lines changed

drivers/iommu/amd_iommu.c

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
/*
 * Flush the IOTLB entries covering [iova, iova + size) and wait for
 * completion, but only when the non-present-cache workaround
 * (amd_iommu_np_cache) is active.
 *
 * domain->lock must be held around the flush because the flush routines
 * traverse protection_domain->dev_list, which races with the
 * attach/detach paths (see this commit's log message).
 */
@@ -1334,8 +1334,12 @@ static void domain_flush_np_cache(struct protection_domain *domain,
13341334
dma_addr_t iova, size_t size)
13351335
{
13361336
if (unlikely(amd_iommu_np_cache)) {
1337+
unsigned long flags;
1338+
1339+
/* Serialize against attach/detach modifying domain->dev_list */
spin_lock_irqsave(&domain->lock, flags);
13371340
domain_flush_pages(domain, iova, size);
13381341
/* Wait until the flush commands have been processed by the IOMMU */
domain_flush_complete(domain);
1342+
spin_unlock_irqrestore(&domain->lock, flags);
13391343
}
13401344
}
13411345

@@ -1700,8 +1704,13 @@ static int iommu_map_page(struct protection_domain *dom,
17001704
ret = 0;
17011705

17021706
out:
1703-
if (updated)
1707+
if (updated) {
1708+
unsigned long flags;
1709+
1710+
spin_lock_irqsave(&dom->lock, flags);
17041711
update_domain(dom);
1712+
spin_unlock_irqrestore(&dom->lock, flags);
1713+
}
17051714

17061715
/* Everything flushed out, free pages now */
17071716
free_page_list(freelist);
@@ -1857,8 +1866,12 @@ static void free_gcr3_table(struct protection_domain *domain)
18571866

18581867
/*
 * Flush the full TLB for a dma_ops domain and wait for completion.
 *
 * Takes dom->domain.lock because domain_flush_tlb()/domain_flush_complete()
 * walk the domain's dev_list, which must not change concurrently with the
 * attach/detach code (the race this commit fixes).
 */
static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
18591868
{
1869+
unsigned long flags;
1870+
1871+
/* Protect the dev_list traversal inside the flush helpers */
spin_lock_irqsave(&dom->domain.lock, flags);
18601872
domain_flush_tlb(&dom->domain);
18611873
/* Block until the IOMMU has processed the flush commands */
domain_flush_complete(&dom->domain);
1874+
spin_unlock_irqrestore(&dom->domain.lock, flags);
18621875
}
18631876

18641877
static void iova_domain_flush_tlb(struct iova_domain *iovad)
@@ -2414,6 +2427,7 @@ static dma_addr_t __map_single(struct device *dev,
24142427
{
24152428
dma_addr_t offset = paddr & ~PAGE_MASK;
24162429
dma_addr_t address, start, ret;
2430+
unsigned long flags;
24172431
unsigned int pages;
24182432
int prot = 0;
24192433
int i;
@@ -2451,8 +2465,10 @@ static dma_addr_t __map_single(struct device *dev,
24512465
iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
24522466
}
24532467

2468+
spin_lock_irqsave(&dma_dom->domain.lock, flags);
24542469
domain_flush_tlb(&dma_dom->domain);
24552470
domain_flush_complete(&dma_dom->domain);
2471+
spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
24562472

24572473
dma_ops_free_iova(dma_dom, address, pages);
24582474

@@ -2481,8 +2497,12 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
24812497
}
24822498

24832499
if (amd_iommu_unmap_flush) {
2500+
unsigned long flags;
2501+
2502+
spin_lock_irqsave(&dma_dom->domain.lock, flags);
24842503
domain_flush_tlb(&dma_dom->domain);
24852504
domain_flush_complete(&dma_dom->domain);
2505+
spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
24862506
dma_ops_free_iova(dma_dom, dma_addr, pages);
24872507
} else {
24882508
pages = __roundup_pow_of_two(pages);
@@ -3246,9 +3266,12 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
32463266
/*
 * iommu_ops callback: flush the entire IOTLB (including page-directory
 * entries, via domain_flush_tlb_pde) for the given IOMMU-API domain and
 * wait for the flush to complete.
 *
 * dom->lock is held across the flush because the flush helpers traverse
 * protection_domain->dev_list, racing with attach/detach otherwise.
 */
static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
32473267
{
32483268
struct protection_domain *dom = to_pdomain(domain);
3269+
unsigned long flags;
32493270

3271+
/* Serialize against attach/detach modifying dom->dev_list */
spin_lock_irqsave(&dom->lock, flags);
32503272
domain_flush_tlb_pde(dom);
32513273
/* Wait for the IOMMU to finish processing the flush commands */
domain_flush_complete(dom);
3274+
spin_unlock_irqrestore(&dom->lock, flags);
32523275
}
32533276

32543277
static void amd_iommu_iotlb_sync(struct iommu_domain *domain,

0 commit comments

Comments
 (0)