Commit 452e69b

rmurphy-arm authored and joergroedel committed
iommu: Allow enabling non-strict mode dynamically
Allocating and enabling a flush queue is in fact something we can reasonably do while a DMA domain is active, without having to rebuild it from scratch. Thus we can allow a strict -> non-strict transition from sysfs without requiring the device's driver to be unbound, which is of particular interest to users who want to make selective relaxations for critical devices like the one serving their root filesystem.

Disabling and draining a queue also seems technically possible to achieve without rebuilding the whole domain, but would certainly be more involved. Furthermore, there is not such a clear use-case for tightening up security *after* the device may already have done whatever it is that you don't trust it not to do, so we only consider the relaxation case.

Signed-off-by: Robin Murphy <[email protected]>
Link: https://lore.kernel.org/r/d652966348c78457c38bf18daf369272a4ebc2c9.1628682049.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <[email protected]>
1 parent e96763e commit 452e69b
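In practice the relaxation this commit permits is driven through the IOMMU group's sysfs type attribute. The sketch below is illustrative only: the group number, the file handling, and the "DMA-FQ" token (provided by the companion sysfs patches in the same series) are assumptions, not part of this commit. It is equivalent to echoing DMA-FQ into /sys/kernel/iommu_groups/<N>/type while the device's driver is still bound.

/*
 * Hypothetical usage sketch, not part of this commit: request a
 * DMA -> DMA-FQ (strict -> non-strict) transition for IOMMU group 7
 * via sysfs. With this patch applied the write can succeed even while
 * a driver is bound, provided the group's current default domain is of
 * type DMA; other transitions still fail with -EBUSY in that case.
 */
#include <stdio.h>

int main(void)
{
	/* group number 7 is a placeholder for the device's actual group */
	FILE *f = fopen("/sys/kernel/iommu_groups/7/type", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fputs("DMA-FQ\n", f) == EOF || fclose(f) != 0) {
		perror("writing DMA-FQ");
		return 1;
	}
	return 0;
}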

File tree

4 files changed (+57, -24 lines)

drivers/iommu/dma-iommu.c

Lines changed: 32 additions & 15 deletions
@@ -317,6 +317,30 @@ static bool dev_is_untrusted(struct device *dev)
 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	int ret;
+
+	if (cookie->fq_domain)
+		return 0;
+
+	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
+				    iommu_dma_entry_dtor);
+	if (ret) {
+		pr_warn("iova flush queue initialization failed\n");
+		return ret;
+	}
+	/*
+	 * Prevent incomplete iovad->fq being observable. Pairs with path from
+	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+	 */
+	smp_wmb();
+	WRITE_ONCE(cookie->fq_domain, domain);
+	return 0;
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -371,15 +395,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	init_iova_domain(iovad, 1UL << order, base_pfn);
 
 	/* If the FQ fails we can simply fall back to strict mode */
-	if (domain->type == IOMMU_DOMAIN_DMA_FQ && !cookie->fq_domain) {
-		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
-					  iommu_dma_entry_dtor)) {
-			pr_warn("iova flush queue initialization failed\n");
-			domain->type = IOMMU_DOMAIN_DMA;
-		} else {
-			cookie->fq_domain = domain;
-		}
-	}
+	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
+		domain->type = IOMMU_DOMAIN_DMA;
 
 	return iova_reserve_iommu_regions(dev, domain);
 }
@@ -454,17 +471,17 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 }
 
 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
-		dma_addr_t iova, size_t size, struct page *freelist)
+		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct iova_domain *iovad = &cookie->iovad;
 
 	/* The MSI case is only ever cleaning up its most recent allocation */
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
-	else if (cookie->fq_domain)	/* non-strict mode */
+	else if (gather && gather->queued)
 		queue_iova(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad),
-				(unsigned long)freelist);
+				(unsigned long)gather->freelist);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
@@ -483,14 +500,14 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	dma_addr -= iova_off;
 	size = iova_align(iovad, size + iova_off);
 	iommu_iotlb_gather_init(&iotlb_gather);
-	iotlb_gather.queued = cookie->fq_domain;
+	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
 
 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
 	WARN_ON(unmapped != size);
 
-	if (!cookie->fq_domain)
+	if (!iotlb_gather.queued)
 		iommu_iotlb_sync(domain, &iotlb_gather);
-	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
 }
 
 static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,

drivers/iommu/iommu.c

Lines changed: 13 additions & 4 deletions
@@ -3204,6 +3204,14 @@ static int iommu_change_dev_def_domain(struct iommu_group *group,
 		goto out;
 	}
 
+	/* We can bring up a flush queue without tearing down the domain */
+	if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
+		ret = iommu_dma_init_fq(prev_dom);
+		if (!ret)
+			prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
+		goto out;
+	}
+
 	/* Sets group->default_domain to the newly allocated domain */
 	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
 	if (ret)
@@ -3244,9 +3252,9 @@ static int iommu_change_dev_def_domain(struct iommu_group *group,
 }
 
 /*
- * Changing the default domain through sysfs requires the users to ubind the
- * drivers from the devices in the iommu group. Return failure if this doesn't
- * meet.
+ * Changing the default domain through sysfs requires the users to unbind the
+ * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
+ * transition. Return failure if this isn't met.
  *
  * We need to consider the race between this and the device release path.
  * device_lock(dev) is used here to guarantee that the device release path
@@ -3322,7 +3330,8 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
 
 	/* Check if the device in the group still has a driver bound to it */
 	device_lock(dev);
-	if (device_is_bound(dev)) {
+	if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
+	    group->default_domain->type == IOMMU_DOMAIN_DMA)) {
 		pr_err_ratelimited("Device is still bound to driver\n");
 		ret = -EBUSY;
 		goto out;

drivers/iommu/iova.c

Lines changed: 6 additions & 5 deletions
@@ -121,8 +121,6 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 		spin_lock_init(&fq->lock);
 	}
 
-	smp_wmb();
-
 	iovad->fq = queue;
 
 	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
@@ -633,17 +631,20 @@ void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
 		unsigned long data)
 {
-	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
+	struct iova_fq *fq;
 	unsigned long flags;
 	unsigned idx;
 
 	/*
 	 * Order against the IOMMU driver's pagetable update from unmapping
 	 * @pte, to guarantee that iova_domain_flush() observes that if called
-	 * from a different CPU before we release the lock below.
+	 * from a different CPU before we release the lock below. Full barrier
+	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
+	 * written fq state here.
 	 */
-	smp_wmb();
+	smp_mb();
 
+	fq = raw_cpu_ptr(iovad->fq);
 	spin_lock_irqsave(&fq->lock, flags);
 
 	/*
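The iova.c change above is the other half of the ordering story: iommu_dma_init_fq() fully initialises the queue, issues smp_wmb(), and only then publishes cookie->fq_domain with WRITE_ONCE(); the unmap path reads it back with READ_ONCE(), and queue_iova()'s barrier (now a full smp_mb()) guarantees that a CPU which observed the publication also observes the completely written queue state. As a rough illustration only, the stand-alone sketch below reproduces the same publish/consume pattern with C11 acquire/release atomics and made-up names; it is not kernel code, and its barriers merely stand in for the ones used in the patch.

/*
 * Stand-alone illustration of the publish/consume pattern used above.
 * All names are hypothetical; C11 release/acquire ordering stands in
 * for smp_wmb()/smp_mb() with WRITE_ONCE()/READ_ONCE(), and the
 * per-CPU locking of the real flush queue is omitted.
 */
#include <stdatomic.h>

struct flush_queue {
	int entries[64];
	int tail;
};

static struct flush_queue fq_storage;
/* plays the role of cookie->fq_domain: NULL means "still strict" */
static _Atomic(struct flush_queue *) fq_published;

/* Writer, cf. iommu_dma_init_fq(): initialise fully, then publish. */
void publish_fq(void)
{
	fq_storage.tail = 0;	/* complete all initialisation first */
	atomic_store_explicit(&fq_published, &fq_storage,
			      memory_order_release);
}

/* Reader, cf. __iommu_dma_unmap()/queue_iova(): use the queue only if
 * its publication was observed; acquire pairs with the release above. */
int try_queue(int entry)
{
	struct flush_queue *fq =
		atomic_load_explicit(&fq_published, memory_order_acquire);

	if (!fq)
		return 0;	/* no queue yet: caller frees immediately */
	fq->entries[fq->tail] = entry;
	fq->tail = (fq->tail + 1) % 64;
	return 1;
}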

include/linux/dma-iommu.h

Lines changed: 6 additions & 0 deletions
@@ -20,6 +20,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
 void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
+int iommu_dma_init_fq(struct iommu_domain *domain);
 
 /* The DMA API isn't _quite_ the whole story, though... */
 /*
@@ -54,6 +55,11 @@ static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
 {
 }
 
+static inline int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+	return -EINVAL;
+}
+
 static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
 	return -ENODEV;
