Skip to content

Commit b03cbca

Browse files
jpbrucker authored and joergroedel committed
iommu/virtio: Support identity-mapped domains
Support identity domains for devices that do not offer the VIRTIO_IOMMU_F_BYPASS_CONFIG feature, by creating 1:1 mappings between the virtual and physical address space. Identity domains created this way still perform noticeably better than DMA domains, because they don't have the overhead of setting up and tearing down mappings at runtime. The performance difference between this and bypass is minimal in comparison. It does not matter that the physical addresses in the identity mappings do not all correspond to memory. By enabling passthrough we are trusting the device driver and the device itself to only perform DMA to suitable locations. In some cases it may even be desirable to perform DMA to MMIO regions. Reviewed-by: Eric Auger <[email protected]> Reviewed-by: Kevin Tian <[email protected]> Signed-off-by: Jean-Philippe Brucker <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent c0c7635 commit b03cbca

File tree

1 file changed

+57
-4
lines changed

1 file changed

+57
-4
lines changed

drivers/iommu/virtio-iommu.c

Lines changed: 57 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -375,6 +375,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
375375
return unmapped;
376376
}
377377

378+
/*
379+
* Fill the domain with identity mappings, skipping the device's reserved
380+
* regions.
381+
*/
382+
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
383+
struct viommu_domain *vdomain)
384+
{
385+
int ret;
386+
struct iommu_resv_region *resv;
387+
u64 iova = vdomain->domain.geometry.aperture_start;
388+
u64 limit = vdomain->domain.geometry.aperture_end;
389+
u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
390+
unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);
391+
392+
iova = ALIGN(iova, granule);
393+
limit = ALIGN_DOWN(limit + 1, granule) - 1;
394+
395+
list_for_each_entry(resv, &vdev->resv_regions, list) {
396+
u64 resv_start = ALIGN_DOWN(resv->start, granule);
397+
u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;
398+
399+
if (resv_end < iova || resv_start > limit)
400+
/* No overlap */
401+
continue;
402+
403+
if (resv_start > iova) {
404+
ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
405+
(phys_addr_t)iova, flags);
406+
if (ret)
407+
goto err_unmap;
408+
}
409+
410+
if (resv_end >= limit)
411+
return 0;
412+
413+
iova = resv_end + 1;
414+
}
415+
416+
ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
417+
flags);
418+
if (ret)
419+
goto err_unmap;
420+
return 0;
421+
422+
err_unmap:
423+
viommu_del_mappings(vdomain, 0, iova);
424+
return ret;
425+
}
426+
378427
/*
379428
* viommu_replay_mappings - re-send MAP requests
380429
*
@@ -637,14 +686,18 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
637686
vdomain->viommu = viommu;
638687

639688
if (domain->type == IOMMU_DOMAIN_IDENTITY) {
640-
if (!virtio_has_feature(viommu->vdev,
641-
VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
689+
if (virtio_has_feature(viommu->vdev,
690+
VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
691+
vdomain->bypass = true;
692+
return 0;
693+
}
694+
695+
ret = viommu_domain_map_identity(vdev, vdomain);
696+
if (ret) {
642697
ida_free(&viommu->domain_ids, vdomain->id);
643698
vdomain->viommu = NULL;
644699
return -EOPNOTSUPP;
645700
}
646-
647-
vdomain->bypass = true;
648701
}
649702

650703
return 0;

0 commit comments

Comments
 (0)