Skip to content

Commit 17c51a0

Browse files
Lu Baolu authored and Joerg Roedel committed
iommu: Separate SVA and IOPF
Add CONFIG_IOMMU_IOPF for page fault handling framework and select it from its real consumer. Move iopf function declaration from iommu-sva.h to iommu.h and remove iommu-sva.h as it's empty now. Consolidate all SVA related code into iommu-sva.c: - Move iommu_sva_domain_alloc() from iommu.c to iommu-sva.c. - Move sva iopf handling code from io-pgfault.c to iommu-sva.c. Consolidate iommu_report_device_fault() and iommu_page_response() into io-pgfault.c. Export iopf_free_group() and iopf_group_response() for iopf handlers implemented in modules. Some functions are renamed with more meaningful names. No other intentional functionality changes. Signed-off-by: Lu Baolu <[email protected]> Reviewed-by: Jason Gunthorpe <[email protected]> Reviewed-by: Kevin Tian <[email protected]> Tested-by: Yan Zhao <[email protected]> Tested-by: Longfang Liu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 351ffcb commit 17c51a0

File tree

12 files changed

+277
-291
lines changed

12 files changed

+277
-291
lines changed

drivers/iommu/Kconfig

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,9 @@ config IOMMU_SVA
163163
select IOMMU_MM_DATA
164164
bool
165165

166+
config IOMMU_IOPF
167+
bool
168+
166169
config FSL_PAMU
167170
bool "Freescale IOMMU support"
168171
depends on PCI
@@ -398,6 +401,7 @@ config ARM_SMMU_V3_SVA
398401
bool "Shared Virtual Addressing support for the ARM SMMUv3"
399402
depends on ARM_SMMU_V3
400403
select IOMMU_SVA
404+
select IOMMU_IOPF
401405
select MMU_NOTIFIER
402406
help
403407
Support for sharing process address spaces with devices using the

drivers/iommu/Makefile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
2626
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
2727
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
2828
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
29-
obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
29+
obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o
30+
obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o
3031
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
3132
obj-$(CONFIG_APPLE_DART) += apple-dart.o

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
#include <linux/slab.h>
1111

1212
#include "arm-smmu-v3.h"
13-
#include "../../iommu-sva.h"
1413
#include "../../io-pgtable-arm.h"
1514

1615
struct arm_smmu_mmu_notifier {

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929

3030
#include "arm-smmu-v3.h"
3131
#include "../../dma-iommu.h"
32-
#include "../../iommu-sva.h"
3332

3433
static bool disable_bypass = true;
3534
module_param(disable_bypass, bool, 0444);

drivers/iommu/intel/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ config INTEL_IOMMU_SVM
5151
depends on X86_64
5252
select MMU_NOTIFIER
5353
select IOMMU_SVA
54+
select IOMMU_IOPF
5455
help
5556
Shared Virtual Memory (SVM) provides a facility for devices
5657
to access DMA resources through process address space by

drivers/iommu/intel/iommu.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@
2727
#include "iommu.h"
2828
#include "../dma-iommu.h"
2929
#include "../irq_remapping.h"
30-
#include "../iommu-sva.h"
3130
#include "pasid.h"
3231
#include "cap_audit.h"
3332
#include "perfmon.h"

drivers/iommu/intel/svm.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
#include "iommu.h"
2323
#include "pasid.h"
2424
#include "perf.h"
25-
#include "../iommu-sva.h"
2625
#include "trace.h"
2726

2827
static irqreturn_t prq_event_thread(int irq, void *d);

drivers/iommu/io-pgfault.c

Lines changed: 136 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,9 @@
1111
#include <linux/slab.h>
1212
#include <linux/workqueue.h>
1313

14-
#include "iommu-sva.h"
14+
#include "iommu-priv.h"
1515

16-
enum iommu_page_response_code
17-
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm);
18-
19-
static void iopf_free_group(struct iopf_group *group)
16+
void iopf_free_group(struct iopf_group *group)
2017
{
2118
struct iopf_fault *iopf, *next;
2219

@@ -27,44 +24,7 @@ static void iopf_free_group(struct iopf_group *group)
2724

2825
kfree(group);
2926
}
30-
31-
static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
32-
enum iommu_page_response_code status)
33-
{
34-
struct iommu_page_response resp = {
35-
.pasid = iopf->fault.prm.pasid,
36-
.grpid = iopf->fault.prm.grpid,
37-
.code = status,
38-
};
39-
40-
if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
41-
(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
42-
resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
43-
44-
return iommu_page_response(dev, &resp);
45-
}
46-
47-
static void iopf_handler(struct work_struct *work)
48-
{
49-
struct iopf_fault *iopf;
50-
struct iopf_group *group;
51-
enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
52-
53-
group = container_of(work, struct iopf_group, work);
54-
list_for_each_entry(iopf, &group->faults, list) {
55-
/*
56-
* For the moment, errors are sticky: don't handle subsequent
57-
* faults in the group if there is an error.
58-
*/
59-
if (status != IOMMU_PAGE_RESP_SUCCESS)
60-
break;
61-
62-
status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
63-
}
64-
65-
iopf_complete_group(group->dev, &group->last_fault, status);
66-
iopf_free_group(group);
67-
}
27+
EXPORT_SYMBOL_GPL(iopf_free_group);
6828

6929
static struct iommu_domain *get_domain_for_iopf(struct device *dev,
7030
struct iommu_fault *fault)
@@ -91,7 +51,7 @@ static struct iommu_domain *get_domain_for_iopf(struct device *dev,
9151
}
9252

9353
/**
94-
* iommu_queue_iopf - IO Page Fault handler
54+
* iommu_handle_iopf - IO Page Fault handler
9555
* @fault: fault event
9656
* @dev: struct device.
9757
*
@@ -130,7 +90,7 @@ static struct iommu_domain *get_domain_for_iopf(struct device *dev,
13090
*
13191
* Return: 0 on success and <0 on error.
13292
*/
133-
int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
93+
static int iommu_handle_iopf(struct iommu_fault *fault, struct device *dev)
13494
{
13595
int ret;
13696
struct iopf_group *group;
@@ -212,18 +172,117 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
212172
}
213173
return ret;
214174
}
215-
EXPORT_SYMBOL_GPL(iommu_queue_iopf);
216175

217-
int iommu_sva_handle_iopf(struct iopf_group *group)
176+
/**
177+
* iommu_report_device_fault() - Report fault event to device driver
178+
* @dev: the device
179+
* @evt: fault event data
180+
*
181+
* Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
182+
* handler. When this function fails and the fault is recoverable, it is the
183+
* caller's responsibility to complete the fault.
184+
*
185+
* Return 0 on success, or an error.
186+
*/
187+
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
218188
{
219-
struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
189+
struct dev_iommu *param = dev->iommu;
190+
struct iopf_fault *evt_pending = NULL;
191+
struct iommu_fault_param *fparam;
192+
int ret = 0;
220193

221-
INIT_WORK(&group->work, iopf_handler);
222-
if (!queue_work(fault_param->queue->wq, &group->work))
223-
return -EBUSY;
194+
if (!param || !evt)
195+
return -EINVAL;
224196

225-
return 0;
197+
/* we only report device fault if there is a handler registered */
198+
mutex_lock(&param->lock);
199+
fparam = param->fault_param;
200+
201+
if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
202+
(evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
203+
evt_pending = kmemdup(evt, sizeof(struct iopf_fault),
204+
GFP_KERNEL);
205+
if (!evt_pending) {
206+
ret = -ENOMEM;
207+
goto done_unlock;
208+
}
209+
mutex_lock(&fparam->lock);
210+
list_add_tail(&evt_pending->list, &fparam->faults);
211+
mutex_unlock(&fparam->lock);
212+
}
213+
214+
ret = iommu_handle_iopf(&evt->fault, dev);
215+
if (ret && evt_pending) {
216+
mutex_lock(&fparam->lock);
217+
list_del(&evt_pending->list);
218+
mutex_unlock(&fparam->lock);
219+
kfree(evt_pending);
220+
}
221+
done_unlock:
222+
mutex_unlock(&param->lock);
223+
return ret;
224+
}
225+
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
226+
227+
int iommu_page_response(struct device *dev,
228+
struct iommu_page_response *msg)
229+
{
230+
bool needs_pasid;
231+
int ret = -EINVAL;
232+
struct iopf_fault *evt;
233+
struct iommu_fault_page_request *prm;
234+
struct dev_iommu *param = dev->iommu;
235+
const struct iommu_ops *ops = dev_iommu_ops(dev);
236+
bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
237+
238+
if (!ops->page_response)
239+
return -ENODEV;
240+
241+
if (!param || !param->fault_param)
242+
return -EINVAL;
243+
244+
/* Only send response if there is a fault report pending */
245+
mutex_lock(&param->fault_param->lock);
246+
if (list_empty(&param->fault_param->faults)) {
247+
dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
248+
goto done_unlock;
249+
}
250+
/*
251+
* Check if we have a matching page request pending to respond,
252+
* otherwise return -EINVAL
253+
*/
254+
list_for_each_entry(evt, &param->fault_param->faults, list) {
255+
prm = &evt->fault.prm;
256+
if (prm->grpid != msg->grpid)
257+
continue;
258+
259+
/*
260+
* If the PASID is required, the corresponding request is
261+
* matched using the group ID, the PASID valid bit and the PASID
262+
* value. Otherwise only the group ID matches request and
263+
* response.
264+
*/
265+
needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
266+
if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
267+
continue;
268+
269+
if (!needs_pasid && has_pasid) {
270+
/* No big deal, just clear it. */
271+
msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
272+
msg->pasid = 0;
273+
}
274+
275+
ret = ops->page_response(dev, evt, msg);
276+
list_del(&evt->list);
277+
kfree(evt);
278+
break;
279+
}
280+
281+
done_unlock:
282+
mutex_unlock(&param->fault_param->lock);
283+
return ret;
226284
}
285+
EXPORT_SYMBOL_GPL(iommu_page_response);
227286

228287
/**
229288
* iopf_queue_flush_dev - Ensure that all queued faults have been processed
@@ -258,6 +317,31 @@ int iopf_queue_flush_dev(struct device *dev)
258317
}
259318
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
260319

320+
/**
321+
* iopf_group_response - Respond a group of page faults
322+
* @group: the group of faults with the same group id
323+
* @status: the response code
324+
*
325+
* Return 0 on success and <0 on error.
326+
*/
327+
int iopf_group_response(struct iopf_group *group,
328+
enum iommu_page_response_code status)
329+
{
330+
struct iopf_fault *iopf = &group->last_fault;
331+
struct iommu_page_response resp = {
332+
.pasid = iopf->fault.prm.pasid,
333+
.grpid = iopf->fault.prm.grpid,
334+
.code = status,
335+
};
336+
337+
if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
338+
(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
339+
resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
340+
341+
return iommu_page_response(group->dev, &resp);
342+
}
343+
EXPORT_SYMBOL_GPL(iopf_group_response);
344+
261345
/**
262346
* iopf_queue_discard_partial - Remove all pending partial fault
263347
* @queue: the queue whose partial faults need to be discarded

drivers/iommu/iommu-sva.c

Lines changed: 60 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
#include <linux/sched/mm.h>
88
#include <linux/iommu.h>
99

10-
#include "iommu-sva.h"
10+
#include "iommu-priv.h"
1111

1212
static DEFINE_MUTEX(iommu_sva_lock);
1313

@@ -159,10 +159,21 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
159159
}
160160
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
161161

162+
void mm_pasid_drop(struct mm_struct *mm)
163+
{
164+
struct iommu_mm_data *iommu_mm = mm->iommu_mm;
165+
166+
if (!iommu_mm)
167+
return;
168+
169+
iommu_free_global_pasid(iommu_mm->pasid);
170+
kfree(iommu_mm);
171+
}
172+
162173
/*
163174
* I/O page fault handler for SVA
164175
*/
165-
enum iommu_page_response_code
176+
static enum iommu_page_response_code
166177
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
167178
{
168179
vm_fault_t ret;
@@ -216,13 +227,54 @@ iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
216227
return status;
217228
}
218229

219-
void mm_pasid_drop(struct mm_struct *mm)
230+
static void iommu_sva_handle_iopf(struct work_struct *work)
220231
{
221-
struct iommu_mm_data *iommu_mm = mm->iommu_mm;
232+
struct iopf_fault *iopf;
233+
struct iopf_group *group;
234+
enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
235+
236+
group = container_of(work, struct iopf_group, work);
237+
list_for_each_entry(iopf, &group->faults, list) {
238+
/*
239+
* For the moment, errors are sticky: don't handle subsequent
240+
* faults in the group if there is an error.
241+
*/
242+
if (status != IOMMU_PAGE_RESP_SUCCESS)
243+
break;
244+
245+
status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
246+
}
222247

223-
if (!iommu_mm)
224-
return;
248+
iopf_group_response(group, status);
249+
iopf_free_group(group);
250+
}
225251

226-
iommu_free_global_pasid(iommu_mm->pasid);
227-
kfree(iommu_mm);
252+
static int iommu_sva_iopf_handler(struct iopf_group *group)
253+
{
254+
struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
255+
256+
INIT_WORK(&group->work, iommu_sva_handle_iopf);
257+
if (!queue_work(fault_param->queue->wq, &group->work))
258+
return -EBUSY;
259+
260+
return 0;
261+
}
262+
263+
struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
264+
struct mm_struct *mm)
265+
{
266+
const struct iommu_ops *ops = dev_iommu_ops(dev);
267+
struct iommu_domain *domain;
268+
269+
domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
270+
if (!domain)
271+
return NULL;
272+
273+
domain->type = IOMMU_DOMAIN_SVA;
274+
mmgrab(mm);
275+
domain->mm = mm;
276+
domain->owner = ops;
277+
domain->iopf_handler = iommu_sva_iopf_handler;
278+
279+
return domain;
228280
}

0 commit comments

Comments
 (0)