
Commit 4bb4211

LuBaolu authored and joergroedel committed
iommu: Per-domain I/O page fault handling
Tweak the I/O page fault handling framework to route page faults to the domain and call the page fault handler retrieved from the domain. This makes it possible for the I/O page fault handling framework to serve more usage scenarios, as long as they have an IOMMU domain and install a page fault handler in it. Some unused functions are also removed to avoid dead code.

iommu_get_domain_for_dev_pasid(), which retrieves the attached domain for a {device, PASID} pair, is used by the page fault handling framework, since the framework knows the {device, PASID} reported by the iommu driver. There is a guarantee that the SVA domain doesn't go away during IOPF handling, because unbind() won't free the domain until all pending page requests have been flushed from the pipeline: drivers either call iopf_queue_flush_dev() explicitly, or, in the stall case, the device driver is required to flush all DMA, including stalled transactions, before calling unbind().

This also renames iopf_handle_group() to iopf_handler() to avoid confusion.

Signed-off-by: Lu Baolu <[email protected]>
Reviewed-by: Jean-Philippe Brucker <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Tested-by: Zhangfei Gao <[email protected]>
Tested-by: Tony Zhu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
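For orientation, here is a minimal sketch of what a consumer of the reworked framework could look like. Only the domain->iopf_handler / domain->fault_data fields and the call shape visible in the diff below come from this commit; example_iopf_handler, example_install_handler, and the private-data pointer are hypothetical names, not part of the patch:

#include <linux/iommu.h>

/*
 * Hypothetical per-domain handler matching the call the reworked
 * framework makes: domain->iopf_handler(&iopf->fault, domain->fault_data).
 */
static enum iommu_page_response_code
example_iopf_handler(struct iommu_fault *fault, void *data)
{
	struct iommu_fault_page_request *prm = &fault->prm;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return IOMMU_PAGE_RESP_INVALID;

	/* Resolve prm->addr against the private state carried in 'data'. */
	return IOMMU_PAGE_RESP_SUCCESS;
}

/* Hypothetical installation, done once when the domain is set up: */
static void example_install_handler(struct iommu_domain *domain, void *priv)
{
	domain->iopf_handler = example_iopf_handler;
	domain->fault_data = priv;
}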
1 parent 8cc9315 commit 4bb4211

1 file changed: +9 −59 lines changed


drivers/iommu/io-pgfault.c

Lines changed: 9 additions & 59 deletions
@@ -69,77 +69,27 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
 	return iommu_page_response(dev, &resp);
 }
 
-static enum iommu_page_response_code
-iopf_handle_single(struct iopf_fault *iopf)
-{
-	vm_fault_t ret;
-	struct mm_struct *mm;
-	struct vm_area_struct *vma;
-	unsigned int access_flags = 0;
-	unsigned int fault_flags = FAULT_FLAG_REMOTE;
-	struct iommu_fault_page_request *prm = &iopf->fault.prm;
-	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
-
-	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
-		return status;
-
-	mm = iommu_sva_find(prm->pasid);
-	if (IS_ERR_OR_NULL(mm))
-		return status;
-
-	mmap_read_lock(mm);
-
-	vma = find_extend_vma(mm, prm->addr);
-	if (!vma)
-		/* Unmapped area */
-		goto out_put_mm;
-
-	if (prm->perm & IOMMU_FAULT_PERM_READ)
-		access_flags |= VM_READ;
-
-	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
-		access_flags |= VM_WRITE;
-		fault_flags |= FAULT_FLAG_WRITE;
-	}
-
-	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
-		access_flags |= VM_EXEC;
-		fault_flags |= FAULT_FLAG_INSTRUCTION;
-	}
-
-	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
-		fault_flags |= FAULT_FLAG_USER;
-
-	if (access_flags & ~vma->vm_flags)
-		/* Access fault */
-		goto out_put_mm;
-
-	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
-	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
-		IOMMU_PAGE_RESP_SUCCESS;
-
-out_put_mm:
-	mmap_read_unlock(mm);
-	mmput(mm);
-
-	return status;
-}
-
-static void iopf_handle_group(struct work_struct *work)
+static void iopf_handler(struct work_struct *work)
 {
 	struct iopf_group *group;
+	struct iommu_domain *domain;
 	struct iopf_fault *iopf, *next;
 	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 
 	group = container_of(work, struct iopf_group, work);
+	domain = iommu_get_domain_for_dev_pasid(group->dev,
+				group->last_fault.fault.prm.pasid, 0);
+	if (!domain || !domain->iopf_handler)
+		status = IOMMU_PAGE_RESP_INVALID;
 
 	list_for_each_entry_safe(iopf, next, &group->faults, list) {
 		/*
 		 * For the moment, errors are sticky: don't handle subsequent
 		 * faults in the group if there is an error.
 		 */
 		if (status == IOMMU_PAGE_RESP_SUCCESS)
-			status = iopf_handle_single(iopf);
+			status = domain->iopf_handler(&iopf->fault,
+						      domain->fault_data);
 
 		if (!(iopf->fault.prm.flags &
		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
@@ -242,7 +192,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
 	group->last_fault.fault = *fault;
 	INIT_LIST_HEAD(&group->faults);
 	list_add(&group->last_fault.list, &group->faults);
-	INIT_WORK(&group->work, iopf_handle_group);
+	INIT_WORK(&group->work, iopf_handler);
 
 	/* See if we have partial faults for this group */
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
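A usage note on the lifetime guarantee: the workqueue handler in this diff dereferences the domain without taking a reference, which is safe only because unbind() drains the fault pipeline first. Below is a hedged sketch of that ordering; iopf_queue_flush_dev() is the flush call named in the commit message, while example_sva_unbind() and the exact detach/free calls are assumptions standing in for whatever a real driver's unbind path does:

#include <linux/iommu.h>

/*
 * Hypothetical unbind path illustrating the ordering the commit message
 * relies on: flush pending page requests before freeing the SVA domain,
 * so that iopf_handler() never sees a stale domain pointer.
 */
static void example_sva_unbind(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid)
{
	/* 1. Quiesce the device so it issues no new requests for this PASID. */

	/* 2. Drain page requests already queued for this device. */
	iopf_queue_flush_dev(dev);

	/* 3. Only now is it safe to detach and free the domain. */
	iommu_detach_device_pasid(domain, dev, pasid);
	iommu_domain_free(domain);
}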
