Skip to content

Commit 18dcca2

Browse files
committed
Merge branch 'iommufd_pri' into iommufd for-next
Lu Baolu says: ==================== This series implements the functionality of delivering IO page faults to user space through the IOMMUFD framework. One feasible use case is the nested translation. Nested translation is a hardware feature that supports two-stage translation tables for IOMMU. The second-stage translation table is managed by the host VMM, while the first-stage translation table is owned by user space. This allows user space to control the IOMMU mappings for its devices. When an IO page fault occurs on the first-stage translation table, the IOMMU hardware can deliver the page fault to user space through the IOMMUFD framework. User space can then handle the page fault and respond to the device top-down through the IOMMUFD. This allows user space to implement its own IO page fault handling policies. A user space application that is capable of handling IO page faults should allocate a fault object, and bind the fault object to any domain that it is willing to handle the fault generated for them. On a successful return of fault object allocation, the user can retrieve and respond to page faults by reading or writing to the file descriptor (FD) returned. The iommu selftest framework has been updated to test the IO page fault delivery and response functionality. ==================== * iommufd_pri: iommufd/selftest: Add coverage for IOPF test iommufd/selftest: Add IOPF support for mock device iommufd: Associate fault object with iommufd_hw_pgtable iommufd: Fault-capable hwpt attach/detach/replace iommufd: Add iommufd fault object iommufd: Add fault and response message definitions iommu: Extend domain attach group with handle support iommu: Add attach handle to struct iopf_group iommu: Remove sva handle list iommu: Introduce domain attachment handle Link: https://lore.kernel.org/all/[email protected] Signed-off-by: Jason Gunthorpe <[email protected]>
2 parents 53e6b65 + d121176 commit 18dcca2

File tree

18 files changed

+1083
-117
lines changed

18 files changed

+1083
-117
lines changed

drivers/dma/idxd/init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -584,7 +584,7 @@ static int idxd_enable_system_pasid(struct idxd_device *idxd)
584584
* DMA domain is owned by the driver, it should support all valid
585585
* types such as DMA-FQ, identity, etc.
586586
*/
587-
ret = iommu_attach_device_pasid(domain, dev, pasid);
587+
ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
588588
if (ret) {
589589
dev_err(dev, "failed to attach device pasid %d, domain type %d",
590590
pasid, domain->type);

drivers/iommu/io-pgfault.c

Lines changed: 36 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -59,30 +59,6 @@ void iopf_free_group(struct iopf_group *group)
5959
}
6060
EXPORT_SYMBOL_GPL(iopf_free_group);
6161

62-
static struct iommu_domain *get_domain_for_iopf(struct device *dev,
63-
struct iommu_fault *fault)
64-
{
65-
struct iommu_domain *domain;
66-
67-
if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
68-
domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
69-
if (IS_ERR(domain))
70-
domain = NULL;
71-
} else {
72-
domain = iommu_get_domain_for_dev(dev);
73-
}
74-
75-
if (!domain || !domain->iopf_handler) {
76-
dev_warn_ratelimited(dev,
77-
"iopf (pasid %d) without domain attached or handler installed\n",
78-
fault->prm.pasid);
79-
80-
return NULL;
81-
}
82-
83-
return domain;
84-
}
85-
8662
/* Non-last request of a group. Postpone until the last one. */
8763
static int report_partial_fault(struct iommu_fault_param *fault_param,
8864
struct iommu_fault *fault)
@@ -134,6 +110,8 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
134110
list_add(&group->pending_node, &iopf_param->faults);
135111
mutex_unlock(&iopf_param->lock);
136112

113+
group->fault_count = list_count_nodes(&group->faults);
114+
137115
return group;
138116
}
139117

@@ -206,20 +184,51 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
206184
if (group == &abort_group)
207185
goto err_abort;
208186

209-
group->domain = get_domain_for_iopf(dev, fault);
210-
if (!group->domain)
187+
if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
188+
group->attach_handle = iommu_attach_handle_get(dev->iommu_group,
189+
fault->prm.pasid,
190+
0);
191+
if (IS_ERR(group->attach_handle)) {
192+
const struct iommu_ops *ops = dev_iommu_ops(dev);
193+
194+
if (!ops->user_pasid_table)
195+
goto err_abort;
196+
197+
/*
198+
* The iommu driver for this device supports user-
199+
* managed PASID table. Therefore page faults for
200+
* any PASID should go through the NESTING domain
201+
* attached to the device RID.
202+
*/
203+
group->attach_handle =
204+
iommu_attach_handle_get(dev->iommu_group,
205+
IOMMU_NO_PASID,
206+
IOMMU_DOMAIN_NESTED);
207+
if (IS_ERR(group->attach_handle))
208+
goto err_abort;
209+
}
210+
} else {
211+
group->attach_handle =
212+
iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
213+
if (IS_ERR(group->attach_handle))
214+
goto err_abort;
215+
}
216+
217+
if (!group->attach_handle->domain->iopf_handler)
211218
goto err_abort;
212219

213220
/*
214221
* On success iopf_handler must call iopf_group_response() and
215222
* iopf_free_group()
216223
*/
217-
if (group->domain->iopf_handler(group))
224+
if (group->attach_handle->domain->iopf_handler(group))
218225
goto err_abort;
219226

220227
return;
221228

222229
err_abort:
230+
dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
231+
fault->prm.pasid);
223232
iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
224233
if (group == &abort_group)
225234
__iopf_free_group(group);

drivers/iommu/iommu-priv.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,4 +28,15 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
2828
const struct bus_type *bus,
2929
struct notifier_block *nb);
3030

31+
struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group,
32+
ioasid_t pasid,
33+
unsigned int type);
34+
int iommu_attach_group_handle(struct iommu_domain *domain,
35+
struct iommu_group *group,
36+
struct iommu_attach_handle *handle);
37+
void iommu_detach_group_handle(struct iommu_domain *domain,
38+
struct iommu_group *group);
39+
int iommu_replace_group_handle(struct iommu_group *group,
40+
struct iommu_domain *new_domain,
41+
struct iommu_attach_handle *handle);
3142
#endif /* __LINUX_IOMMU_PRIV_H */

drivers/iommu/iommu-sva.c

Lines changed: 28 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
4141
}
4242
iommu_mm->pasid = pasid;
4343
INIT_LIST_HEAD(&iommu_mm->sva_domains);
44-
INIT_LIST_HEAD(&iommu_mm->sva_handles);
4544
/*
4645
* Make sure the write to mm->iommu_mm is not reordered in front of
4746
* initialization to iommu_mm fields. If it does, readers may see a
@@ -69,11 +68,16 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
6968
*/
7069
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
7170
{
71+
struct iommu_group *group = dev->iommu_group;
72+
struct iommu_attach_handle *attach_handle;
7273
struct iommu_mm_data *iommu_mm;
7374
struct iommu_domain *domain;
7475
struct iommu_sva *handle;
7576
int ret;
7677

78+
if (!group)
79+
return ERR_PTR(-ENODEV);
80+
7781
mutex_lock(&iommu_sva_lock);
7882

7983
/* Allocate mm->pasid if necessary. */
@@ -83,12 +87,22 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
8387
goto out_unlock;
8488
}
8589

86-
list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
87-
if (handle->dev == dev) {
88-
refcount_inc(&handle->users);
89-
mutex_unlock(&iommu_sva_lock);
90-
return handle;
90+
/* A bond already exists, just take a reference`. */
91+
attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
92+
if (!IS_ERR(attach_handle)) {
93+
handle = container_of(attach_handle, struct iommu_sva, handle);
94+
if (attach_handle->domain->mm != mm) {
95+
ret = -EBUSY;
96+
goto out_unlock;
9197
}
98+
refcount_inc(&handle->users);
99+
mutex_unlock(&iommu_sva_lock);
100+
return handle;
101+
}
102+
103+
if (PTR_ERR(attach_handle) != -ENOENT) {
104+
ret = PTR_ERR(attach_handle);
105+
goto out_unlock;
92106
}
93107

94108
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
@@ -99,7 +113,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
99113

100114
/* Search for an existing domain. */
101115
list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
102-
ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
116+
ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
117+
&handle->handle);
103118
if (!ret) {
104119
domain->users++;
105120
goto out;
@@ -113,18 +128,17 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
113128
goto out_free_handle;
114129
}
115130

116-
ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
131+
ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
132+
&handle->handle);
117133
if (ret)
118134
goto out_free_domain;
119135
domain->users = 1;
120136
list_add(&domain->next, &mm->iommu_mm->sva_domains);
121137

122138
out:
123139
refcount_set(&handle->users, 1);
124-
list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
125140
mutex_unlock(&iommu_sva_lock);
126141
handle->dev = dev;
127-
handle->domain = domain;
128142
return handle;
129143

130144
out_free_domain:
@@ -147,7 +161,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
147161
*/
148162
void iommu_sva_unbind_device(struct iommu_sva *handle)
149163
{
150-
struct iommu_domain *domain = handle->domain;
164+
struct iommu_domain *domain = handle->handle.domain;
151165
struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
152166
struct device *dev = handle->dev;
153167

@@ -156,7 +170,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
156170
mutex_unlock(&iommu_sva_lock);
157171
return;
158172
}
159-
list_del(&handle->handle_item);
160173

161174
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
162175
if (--domain->users == 0) {
@@ -170,7 +183,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
170183

171184
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
172185
{
173-
struct iommu_domain *domain = handle->domain;
186+
struct iommu_domain *domain = handle->handle.domain;
174187

175188
return mm_get_enqcmd_pasid(domain->mm);
176189
}
@@ -259,7 +272,8 @@ static void iommu_sva_handle_iopf(struct work_struct *work)
259272
if (status != IOMMU_PAGE_RESP_SUCCESS)
260273
break;
261274

262-
status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
275+
status = iommu_sva_handle_mm(&iopf->fault,
276+
group->attach_handle->domain->mm);
263277
}
264278

265279
iopf_group_response(group, status);

0 commit comments

Comments
 (0)