Skip to content

Commit 59b6c34

Browse files
committed
Merge tag 'for-joerg' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd into core
iommu shared branch with iommufd

The three dependent series on a shared branch:

- Change the iommufd fault handle into an always present hwpt handle in the domain

- Give iommufd its own SW_MSI implementation along with some IRQ layer rework

- Improvements to the handle attach API
2 parents 0ad2507 + 5e9f822 commit 59b6c34

File tree

17 files changed

+501
-357
lines changed

17 files changed

+501
-357
lines changed

drivers/iommu/Kconfig

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -154,7 +154,6 @@ config IOMMU_DMA
154154
select DMA_OPS_HELPERS
155155
select IOMMU_API
156156
select IOMMU_IOVA
157-
select IRQ_MSI_IOMMU
158157
select NEED_SG_DMA_LENGTH
159158
select NEED_SG_DMA_FLAGS if SWIOTLB
160159

drivers/iommu/dma-iommu.c

Lines changed: 21 additions & 44 deletions
Original file line number | Diff line number | Diff line change
@@ -24,6 +24,7 @@
2424
#include <linux/memremap.h>
2525
#include <linux/mm.h>
2626
#include <linux/mutex.h>
27+
#include <linux/msi.h>
2728
#include <linux/of_iommu.h>
2829
#include <linux/pci.h>
2930
#include <linux/scatterlist.h>
@@ -102,6 +103,9 @@ static int __init iommu_dma_forcedac_setup(char *str)
102103
}
103104
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
104105

106+
static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
107+
phys_addr_t msi_addr);
108+
105109
/* Number of entries per flush queue */
106110
#define IOVA_DEFAULT_FQ_SIZE 256
107111
#define IOVA_SINGLE_FQ_SIZE 32768
@@ -398,6 +402,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
398402
return -ENOMEM;
399403

400404
mutex_init(&domain->iova_cookie->mutex);
405+
iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
401406
return 0;
402407
}
403408

@@ -429,6 +434,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
429434

430435
cookie->msi_iova = base;
431436
domain->iova_cookie = cookie;
437+
iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
432438
return 0;
433439
}
434440
EXPORT_SYMBOL(iommu_get_msi_cookie);
@@ -443,6 +449,11 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
443449
struct iommu_dma_cookie *cookie = domain->iova_cookie;
444450
struct iommu_dma_msi_page *msi, *tmp;
445451

452+
#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
453+
if (domain->sw_msi != iommu_dma_sw_msi)
454+
return;
455+
#endif
456+
446457
if (!cookie)
447458
return;
448459

@@ -1800,60 +1811,26 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
18001811
return NULL;
18011812
}
18021813

1803-
/**
1804-
* iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1805-
* @desc: MSI descriptor, will store the MSI page
1806-
* @msi_addr: MSI target address to be mapped
1807-
*
1808-
* Return: 0 on success or negative error code if the mapping failed.
1809-
*/
1810-
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
1814+
static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
1815+
phys_addr_t msi_addr)
18111816
{
18121817
struct device *dev = msi_desc_to_dev(desc);
1813-
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1814-
struct iommu_dma_msi_page *msi_page;
1815-
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
1818+
const struct iommu_dma_msi_page *msi_page;
18161819

1817-
if (!domain || !domain->iova_cookie) {
1818-
desc->iommu_cookie = NULL;
1820+
if (!domain->iova_cookie) {
1821+
msi_desc_set_iommu_msi_iova(desc, 0, 0);
18191822
return 0;
18201823
}
18211824

1822-
/*
1823-
* In fact the whole prepare operation should already be serialised by
1824-
* irq_domain_mutex further up the callchain, but that's pretty subtle
1825-
* on its own, so consider this locking as failsafe documentation...
1826-
*/
1827-
mutex_lock(&msi_prepare_lock);
1825+
iommu_group_mutex_assert(dev);
18281826
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1829-
mutex_unlock(&msi_prepare_lock);
1830-
1831-
msi_desc_set_iommu_cookie(desc, msi_page);
1832-
18331827
if (!msi_page)
18341828
return -ENOMEM;
1835-
return 0;
1836-
}
1837-
1838-
/**
1839-
* iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1840-
* @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
1841-
* @msg: MSI message containing target physical address
1842-
*/
1843-
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1844-
{
1845-
struct device *dev = msi_desc_to_dev(desc);
1846-
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1847-
const struct iommu_dma_msi_page *msi_page;
1848-
1849-
msi_page = msi_desc_get_iommu_cookie(desc);
18501829

1851-
if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1852-
return;
1853-
1854-
msg->address_hi = upper_32_bits(msi_page->iova);
1855-
msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1856-
msg->address_lo += lower_32_bits(msi_page->iova);
1830+
msi_desc_set_iommu_msi_iova(
1831+
desc, msi_page->iova,
1832+
ilog2(cookie_msi_granule(domain->iova_cookie)));
1833+
return 0;
18571834
}
18581835

18591836
static int iommu_dma_init(void)

drivers/iommu/iommu-priv.h

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -24,9 +24,6 @@ static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwsp
2424
return iommu_ops_from_fwnode(fwspec ? fwspec->iommu_fwnode : NULL);
2525
}
2626

27-
int iommu_group_replace_domain(struct iommu_group *group,
28-
struct iommu_domain *new_domain);
29-
3027
int iommu_device_register_bus(struct iommu_device *iommu,
3128
const struct iommu_ops *ops,
3229
const struct bus_type *bus,

drivers/iommu/iommu.c

Lines changed: 106 additions & 58 deletions
Original file line number | Diff line number | Diff line change
@@ -45,6 +45,9 @@ static unsigned int iommu_def_domain_type __read_mostly;
4545
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
4646
static u32 iommu_cmd_line __read_mostly;
4747

48+
/* Tags used with xa_tag_pointer() in group->pasid_array */
49+
enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };
50+
4851
struct iommu_group {
4952
struct kobject kobj;
5053
struct kobject *devices_kobj;
@@ -2147,6 +2150,17 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
21472150
return dev->iommu_group->default_domain;
21482151
}
21492152

2153+
static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
2154+
struct iommu_attach_handle *handle)
2155+
{
2156+
if (handle) {
2157+
handle->domain = domain;
2158+
return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
2159+
}
2160+
2161+
return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
2162+
}
2163+
21502164
static int __iommu_attach_group(struct iommu_domain *domain,
21512165
struct iommu_group *group)
21522166
{
@@ -2187,32 +2201,6 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
21872201
}
21882202
EXPORT_SYMBOL_GPL(iommu_attach_group);
21892203

2190-
/**
2191-
* iommu_group_replace_domain - replace the domain that a group is attached to
2192-
* @group: IOMMU group that will be attached to the new domain
2193-
* @new_domain: new IOMMU domain to replace with
2194-
*
2195-
* This API allows the group to switch domains without being forced to go to
2196-
* the blocking domain in-between.
2197-
*
2198-
* If the currently attached domain is a core domain (e.g. a default_domain),
2199-
* it will act just like the iommu_attach_group().
2200-
*/
2201-
int iommu_group_replace_domain(struct iommu_group *group,
2202-
struct iommu_domain *new_domain)
2203-
{
2204-
int ret;
2205-
2206-
if (!new_domain)
2207-
return -EINVAL;
2208-
2209-
mutex_lock(&group->mutex);
2210-
ret = __iommu_group_set_domain(group, new_domain);
2211-
mutex_unlock(&group->mutex);
2212-
return ret;
2213-
}
2214-
EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL");
2215-
22162204
static int __iommu_device_set_domain(struct iommu_group *group,
22172205
struct device *dev,
22182206
struct iommu_domain *new_domain,
@@ -3374,6 +3362,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
33743362
struct iommu_group *group = dev->iommu_group;
33753363
struct group_device *device;
33763364
const struct iommu_ops *ops;
3365+
void *entry;
33773366
int ret;
33783367

33793368
if (!group)
@@ -3397,16 +3386,31 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
33973386
}
33983387
}
33993388

3400-
if (handle)
3401-
handle->domain = domain;
3389+
entry = iommu_make_pasid_array_entry(domain, handle);
34023390

3403-
ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
3391+
/*
3392+
* Entry present is a failure case. Use xa_insert() instead of
3393+
* xa_reserve().
3394+
*/
3395+
ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
34043396
if (ret)
34053397
goto out_unlock;
34063398

34073399
ret = __iommu_set_group_pasid(domain, group, pasid);
3408-
if (ret)
3409-
xa_erase(&group->pasid_array, pasid);
3400+
if (ret) {
3401+
xa_release(&group->pasid_array, pasid);
3402+
goto out_unlock;
3403+
}
3404+
3405+
/*
3406+
* The xa_insert() above reserved the memory, and the group->mutex is
3407+
* held, this cannot fail. The new domain cannot be visible until the
3408+
* operation succeeds as we cannot tolerate PRIs becoming concurrently
3409+
* queued and then failing attach.
3410+
*/
3411+
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
3412+
pasid, entry, GFP_KERNEL)));
3413+
34103414
out_unlock:
34113415
mutex_unlock(&group->mutex);
34123416
return ret;
@@ -3480,13 +3484,17 @@ struct iommu_attach_handle *
34803484
iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
34813485
{
34823486
struct iommu_attach_handle *handle;
3487+
void *entry;
34833488

34843489
xa_lock(&group->pasid_array);
3485-
handle = xa_load(&group->pasid_array, pasid);
3486-
if (!handle)
3490+
entry = xa_load(&group->pasid_array, pasid);
3491+
if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
34873492
handle = ERR_PTR(-ENOENT);
3488-
else if (type && handle->domain->type != type)
3489-
handle = ERR_PTR(-EBUSY);
3493+
} else {
3494+
handle = xa_untag_pointer(entry);
3495+
if (type && handle->domain->type != type)
3496+
handle = ERR_PTR(-EBUSY);
3497+
}
34903498
xa_unlock(&group->pasid_array);
34913499

34923500
return handle;
@@ -3509,25 +3517,35 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
35093517
struct iommu_group *group,
35103518
struct iommu_attach_handle *handle)
35113519
{
3520+
void *entry;
35123521
int ret;
35133522

3514-
if (handle)
3515-
handle->domain = domain;
3523+
if (!handle)
3524+
return -EINVAL;
35163525

35173526
mutex_lock(&group->mutex);
3518-
ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
3527+
entry = iommu_make_pasid_array_entry(domain, handle);
3528+
ret = xa_insert(&group->pasid_array,
3529+
IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
35193530
if (ret)
3520-
goto err_unlock;
3531+
goto out_unlock;
35213532

35223533
ret = __iommu_attach_group(domain, group);
3523-
if (ret)
3524-
goto err_erase;
3525-
mutex_unlock(&group->mutex);
3534+
if (ret) {
3535+
xa_release(&group->pasid_array, IOMMU_NO_PASID);
3536+
goto out_unlock;
3537+
}
35263538

3527-
return 0;
3528-
err_erase:
3529-
xa_erase(&group->pasid_array, IOMMU_NO_PASID);
3530-
err_unlock:
3539+
/*
3540+
* The xa_insert() above reserved the memory, and the group->mutex is
3541+
* held, this cannot fail. The new domain cannot be visible until the
3542+
* operation succeeds as we cannot tolerate PRIs becoming concurrently
3543+
* queued and then failing attach.
3544+
*/
3545+
WARN_ON(xa_is_err(xa_store(&group->pasid_array,
3546+
IOMMU_NO_PASID, entry, GFP_KERNEL)));
3547+
3548+
out_unlock:
35313549
mutex_unlock(&group->mutex);
35323550
return ret;
35333551
}
@@ -3557,33 +3575,34 @@ EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
35573575
* @new_domain: new IOMMU domain to replace with
35583576
* @handle: attach handle
35593577
*
3560-
* This is a variant of iommu_group_replace_domain(). It allows the caller to
3561-
* provide an attach handle for the new domain and use it when the domain is
3562-
* attached.
3578+
* This API allows the group to switch domains without being forced to go to
3579+
* the blocking domain in-between. It allows the caller to provide an attach
3580+
* handle for the new domain and use it when the domain is attached.
3581+
*
3582+
* If the currently attached domain is a core domain (e.g. a default_domain),
3583+
* it will act just like the iommu_attach_group_handle().
35633584
*/
35643585
int iommu_replace_group_handle(struct iommu_group *group,
35653586
struct iommu_domain *new_domain,
35663587
struct iommu_attach_handle *handle)
35673588
{
3568-
void *curr;
3589+
void *curr, *entry;
35693590
int ret;
35703591

3571-
if (!new_domain)
3592+
if (!new_domain || !handle)
35723593
return -EINVAL;
35733594

35743595
mutex_lock(&group->mutex);
3575-
if (handle) {
3576-
ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
3577-
if (ret)
3578-
goto err_unlock;
3579-
handle->domain = new_domain;
3580-
}
3596+
entry = iommu_make_pasid_array_entry(new_domain, handle);
3597+
ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
3598+
if (ret)
3599+
goto err_unlock;
35813600

35823601
ret = __iommu_group_set_domain(group, new_domain);
35833602
if (ret)
35843603
goto err_release;
35853604

3586-
curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
3605+
curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
35873606
WARN_ON(xa_is_err(curr));
35883607

35893608
mutex_unlock(&group->mutex);
@@ -3596,3 +3615,32 @@ int iommu_replace_group_handle(struct iommu_group *group,
35963615
return ret;
35973616
}
35983617
EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
3618+
3619+
#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
3620+
/**
3621+
* iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
3622+
* @desc: MSI descriptor, will store the MSI page
3623+
* @msi_addr: MSI target address to be mapped
3624+
*
3625+
* The implementation of sw_msi() should take msi_addr and map it to
3626+
* an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
3627+
* mapping information.
3628+
*
3629+
* Return: 0 on success or negative error code if the mapping failed.
3630+
*/
3631+
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
3632+
{
3633+
struct device *dev = msi_desc_to_dev(desc);
3634+
struct iommu_group *group = dev->iommu_group;
3635+
int ret = 0;
3636+
3637+
if (!group)
3638+
return 0;
3639+
3640+
mutex_lock(&group->mutex);
3641+
if (group->domain && group->domain->sw_msi)
3642+
ret = group->domain->sw_msi(group->domain, desc, msi_addr);
3643+
mutex_unlock(&group->mutex);
3644+
return ret;
3645+
}
3646+
#endif /* CONFIG_IRQ_MSI_IOMMU */

0 commit comments

Comments (0)