 
 #include "iommu-sva.h"
 
+enum iommu_page_response_code
+iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm);
+
 static void iopf_free_group(struct iopf_group *group)
 {
 	struct iopf_fault *iopf, *next;
@@ -45,29 +48,48 @@ static void iopf_handler(struct work_struct *work)
 {
 	struct iopf_fault *iopf;
 	struct iopf_group *group;
-	struct iommu_domain *domain;
 	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 
 	group = container_of(work, struct iopf_group, work);
-	domain = iommu_get_domain_for_dev_pasid(group->dev,
-				group->last_fault.fault.prm.pasid, 0);
-	if (!domain || !domain->iopf_handler)
-		status = IOMMU_PAGE_RESP_INVALID;
-
 	list_for_each_entry(iopf, &group->faults, list) {
 		/*
 		 * For the moment, errors are sticky: don't handle subsequent
 		 * faults in the group if there is an error.
 		 */
-		if (status == IOMMU_PAGE_RESP_SUCCESS)
-			status = domain->iopf_handler(&iopf->fault,
-						      domain->fault_data);
+		if (status != IOMMU_PAGE_RESP_SUCCESS)
+			break;
+
+		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
 	}
 
 	iopf_complete_group(group->dev, &group->last_fault, status);
 	iopf_free_group(group);
 }
 
+static struct iommu_domain *get_domain_for_iopf(struct device *dev,
+						struct iommu_fault *fault)
+{
+	struct iommu_domain *domain;
+
+	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+		domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
+		if (IS_ERR(domain))
+			domain = NULL;
+	} else {
+		domain = iommu_get_domain_for_dev(dev);
+	}
+
+	if (!domain || !domain->iopf_handler) {
+		dev_warn_ratelimited(dev,
+				     "iopf (pasid %d) without domain attached or handler installed\n",
+				     fault->prm.pasid);
+
+		return NULL;
+	}
+
+	return domain;
+}
+
 /**
  * iommu_queue_iopf - IO Page Fault handler
  * @fault: fault event
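Note (not part of this diff): iommu_sva_handle_mm() is only forward-declared above; the helper itself lives in iommu-sva.c. As a rough, hypothetical sketch of the role it is assumed to play — resolving the faulting address against the bound mm with handle_mm_fault() and mapping the result onto a page response code — the simplified version below deliberately omits the extra permission and lifetime checks a real helper would need:

/*
 * Simplified sketch (assumption, not the in-tree implementation):
 * fault in the page on behalf of the device using the mm bound to
 * the SVA domain, then report success or failure to the caller.
 */
enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_REMOTE | FAULT_FLAG_USER;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!mmget_not_zero(mm))	/* the mm is already exiting */
		return status;

	if (fault->prm.perm & IOMMU_FAULT_PERM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, fault->prm.addr);
	if (vma && !(handle_mm_fault(vma, fault->prm.addr, flags, NULL) &
		     VM_FAULT_ERROR))
		status = IOMMU_PAGE_RESP_SUCCESS;
	mmap_read_unlock(mm);

	mmput(mm);
	return status;
}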
@@ -112,6 +134,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 {
 	int ret;
 	struct iopf_group *group;
+	struct iommu_domain *domain;
 	struct iopf_fault *iopf, *next;
 	struct iommu_fault_param *iopf_param;
 	struct dev_iommu *param = dev->iommu;
@@ -143,6 +166,12 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 		return 0;
 	}
 
+	domain = get_domain_for_iopf(dev, fault);
+	if (!domain) {
+		ret = -EINVAL;
+		goto cleanup_partial;
+	}
+
 	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group) {
 		/*
@@ -157,8 +186,8 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 	group->dev = dev;
 	group->last_fault.fault = *fault;
 	INIT_LIST_HEAD(&group->faults);
+	group->domain = domain;
 	list_add(&group->last_fault.list, &group->faults);
-	INIT_WORK(&group->work, iopf_handler);
 
 	/* See if we have partial faults for this group */
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
@@ -167,9 +196,13 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 			list_move(&iopf->list, &group->faults);
 	}
 
-	queue_work(iopf_param->queue->wq, &group->work);
-	return 0;
+	mutex_unlock(&iopf_param->lock);
+	ret = domain->iopf_handler(group);
+	mutex_lock(&iopf_param->lock);
+	if (ret)
+		iopf_free_group(group);
 
+	return ret;
 cleanup_partial:
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
 		if (iopf->fault.prm.grpid == fault->prm.grpid) {
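Note (not part of this diff): the hunk above establishes a new contract for domain->iopf_handler(): it now receives the whole iopf_group, a zero return transfers ownership of the group to the handler, and a non-zero return makes iommu_queue_iopf() free the group itself. The handler below (example_iopf_handler is purely illustrative, not code from this series) shows how a consumer would honor that contract by deferring to the workqueue path added further down:

/* Hypothetical per-group handler honoring the ownership contract. */
static int example_iopf_handler(struct iopf_group *group)
{
	if (!group->domain->mm)		/* nothing to fault in */
		return -EINVAL;		/* caller frees the group */

	/* On success, the workqueue path responds to and frees the group. */
	return iommu_sva_handle_iopf(group);
}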
@@ -181,6 +214,17 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_queue_iopf);
 
+int iommu_sva_handle_iopf(struct iopf_group *group)
+{
+	struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
+
+	INIT_WORK(&group->work, iopf_handler);
+	if (!queue_work(fault_param->queue->wq, &group->work))
+		return -EBUSY;
+
+	return 0;
+}
+
 /**
  * iopf_queue_flush_dev - Ensure that all queued faults have been processed
  * @dev: the endpoint whose faults need to be flushed.
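Note (not part of this diff): the new iommu_sva_handle_iopf() only takes effect once it is installed as an SVA domain's iopf_handler. The sketch below shows how the matching iommu-sva.c side is assumed to wire it up when the SVA domain is allocated; the exact body of iommu_sva_domain_alloc() in the series may differ:

/* Sketch of the assumed wiring in iommu-sva.c, not part of this hunk. */
struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
	if (!domain)
		return NULL;

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);				/* keep the mm_struct around */
	domain->mm = mm;
	domain->iopf_handler = iommu_sva_handle_iopf;	/* group-based handler */

	return domain;
}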