Skip to content

Commit 0ea9ee4

Browse files
Max Gurtovoy authored and mstsirkin (Michael S. Tsirkin) committed
vdpasim: protect concurrent access to iommu iotlb
The iommu iotlb can be accessed by different cores performing IO using multiple virtqueues. Add a spinlock to synchronize iotlb accesses.

This could be easily reproduced when using more than 1 pktgen thread to inject traffic to the vdpa simulator.

Fixes: 2c53d0f ("vdpasim: vDPA device simulator")
Cc: [email protected]
Signed-off-by: Max Gurtovoy <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Michael S. Tsirkin <[email protected]>
1 parent 6234f80 commit 0ea9ee4

File tree

1 file changed

+27
-4
lines changed

1 file changed

+27
-4
lines changed

drivers/vdpa/vdpa_sim/vdpa_sim.c

Lines changed: 27 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,8 @@ struct vdpasim {
7171
u32 status;
7272
u32 generation;
7373
u64 features;
74+
/* spinlock to synchronize iommu table */
75+
spinlock_t iommu_lock;
7476
};
7577

7678
/* TODO: cross-endian support */
@@ -136,7 +138,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
136138
for (i = 0; i < VDPASIM_VQ_NUM; i++)
137139
vdpasim_vq_reset(&vdpasim->vqs[i]);
138140

141+
spin_lock(&vdpasim->iommu_lock);
139142
vhost_iotlb_reset(vdpasim->iommu);
143+
spin_unlock(&vdpasim->iommu_lock);
140144

141145
vdpasim->features = 0;
142146
vdpasim->status = 0;
@@ -254,8 +258,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
254258
/* For simplicity, use identical mapping to avoid e.g iova
255259
* allocator.
256260
*/
261+
spin_lock(&vdpasim->iommu_lock);
257262
ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
258263
pa, dir_to_perm(dir));
264+
spin_unlock(&vdpasim->iommu_lock);
259265
if (ret)
260266
return DMA_MAPPING_ERROR;
261267

@@ -269,8 +275,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
269275
struct vdpasim *vdpasim = dev_to_sim(dev);
270276
struct vhost_iotlb *iommu = vdpasim->iommu;
271277

278+
spin_lock(&vdpasim->iommu_lock);
272279
vhost_iotlb_del_range(iommu, (u64)dma_addr,
273280
(u64)dma_addr + size - 1);
281+
spin_unlock(&vdpasim->iommu_lock);
274282
}
275283

276284
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -282,9 +290,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
282290
void *addr = kmalloc(size, flag);
283291
int ret;
284292

285-
if (!addr)
293+
spin_lock(&vdpasim->iommu_lock);
294+
if (!addr) {
286295
*dma_addr = DMA_MAPPING_ERROR;
287-
else {
296+
} else {
288297
u64 pa = virt_to_phys(addr);
289298

290299
ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -297,6 +306,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
297306
} else
298307
*dma_addr = (dma_addr_t)pa;
299308
}
309+
spin_unlock(&vdpasim->iommu_lock);
300310

301311
return addr;
302312
}
@@ -308,8 +318,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
308318
struct vdpasim *vdpasim = dev_to_sim(dev);
309319
struct vhost_iotlb *iommu = vdpasim->iommu;
310320

321+
spin_lock(&vdpasim->iommu_lock);
311322
vhost_iotlb_del_range(iommu, (u64)dma_addr,
312323
(u64)dma_addr + size - 1);
324+
spin_unlock(&vdpasim->iommu_lock);
325+
313326
kfree(phys_to_virt((uintptr_t)dma_addr));
314327
}
315328

@@ -555,6 +568,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
555568
u64 start = 0ULL, last = 0ULL - 1;
556569
int ret;
557570

571+
spin_lock(&vdpasim->iommu_lock);
558572
vhost_iotlb_reset(vdpasim->iommu);
559573

560574
for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -564,27 +578,36 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
564578
if (ret)
565579
goto err;
566580
}
581+
spin_unlock(&vdpasim->iommu_lock);
567582
return 0;
568583

569584
err:
570585
vhost_iotlb_reset(vdpasim->iommu);
586+
spin_unlock(&vdpasim->iommu_lock);
571587
return ret;
572588
}
573589

574590
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
575591
u64 pa, u32 perm)
576592
{
577593
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
594+
int ret;
578595

579-
return vhost_iotlb_add_range(vdpasim->iommu, iova,
580-
iova + size - 1, pa, perm);
596+
spin_lock(&vdpasim->iommu_lock);
597+
ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
598+
perm);
599+
spin_unlock(&vdpasim->iommu_lock);
600+
601+
return ret;
581602
}
582603

583604
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
584605
{
585606
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
586607

608+
spin_lock(&vdpasim->iommu_lock);
587609
vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
610+
spin_unlock(&vdpasim->iommu_lock);
588611

589612
return 0;
590613
}

0 commit comments

Comments (0)