@@ -25,21 +25,6 @@ struct iopf_queue {
 	struct mutex			lock;
 };
 
-/**
- * struct iopf_device_param - IO Page Fault data attached to a device
- * @dev: the device that owns this param
- * @queue: IOPF queue
- * @queue_list: index into queue->devices
- * @partial: faults that are part of a Page Request Group for which the last
- *           request hasn't been submitted yet.
- */
-struct iopf_device_param {
-	struct device		*dev;
-	struct iopf_queue	*queue;
-	struct list_head	queue_list;
-	struct list_head	partial;
-};
-
 struct iopf_fault {
 	struct iommu_fault		fault;
 	struct list_head		list;
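Note: the role of the removed struct iopf_device_param is taken over by struct iommu_fault_param. The sketch below is inferred only from the members this patch initializes in iopf_queue_add_device() further down; it is not copied from include/linux/iommu.h, and the real definition may carry additional fields (for example the registered fault handler and its data).

struct iommu_fault_param {
	struct mutex		lock;		/* protects the fault lists */
	struct list_head	faults;		/* reported faults awaiting a response */
	struct list_head	partial;	/* page request groups whose last request
						 * hasn't been submitted yet */
	struct device		*dev;		/* the device that owns this param */
	struct iopf_queue	*queue;		/* IOPF queue the device is attached to */
	struct list_head	queue_list;	/* entry in queue->devices */
};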
@@ -144,7 +129,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
 	int ret;
 	struct iopf_group *group;
 	struct iopf_fault *iopf, *next;
-	struct iopf_device_param *iopf_param;
+	struct iommu_fault_param *iopf_param;
 
 	struct device *dev = cookie;
 	struct dev_iommu *param = dev->iommu;
@@ -159,7 +144,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
 	 * As long as we're holding param->lock, the queue can't be unlinked
 	 * from the device and therefore cannot disappear.
 	 */
-	iopf_param = param->iopf_param;
+	iopf_param = param->fault_param;
 	if (!iopf_param)
 		return -ENODEV;
 
@@ -229,14 +214,14 @@ EXPORT_SYMBOL_GPL(iommu_queue_iopf);
 int iopf_queue_flush_dev(struct device *dev)
 {
 	int ret = 0;
-	struct iopf_device_param *iopf_param;
+	struct iommu_fault_param *iopf_param;
 	struct dev_iommu *param = dev->iommu;
 
 	if (!param)
 		return -ENODEV;
 
 	mutex_lock(&param->lock);
-	iopf_param = param->iopf_param;
+	iopf_param = param->fault_param;
 	if (iopf_param)
 		flush_workqueue(iopf_param->queue->wq);
 	else
@@ -260,7 +245,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
 int iopf_queue_discard_partial(struct iopf_queue *queue)
 {
 	struct iopf_fault *iopf, *next;
-	struct iopf_device_param *iopf_param;
+	struct iommu_fault_param *iopf_param;
 
 	if (!queue)
 		return -EINVAL;
@@ -287,34 +272,36 @@ EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
  */
 int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
 {
-	int ret = -EBUSY;
-	struct iopf_device_param *iopf_param;
+	int ret = 0;
 	struct dev_iommu *param = dev->iommu;
-
-	if (!param)
-		return -ENODEV;
-
-	iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
-	if (!iopf_param)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&iopf_param->partial);
-	iopf_param->queue = queue;
-	iopf_param->dev = dev;
+	struct iommu_fault_param *fault_param;
 
 	mutex_lock(&queue->lock);
 	mutex_lock(&param->lock);
-	if (!param->iopf_param) {
-		list_add(&iopf_param->queue_list, &queue->devices);
-		param->iopf_param = iopf_param;
-		ret = 0;
+	if (param->fault_param) {
+		ret = -EBUSY;
+		goto done_unlock;
 	}
+
+	fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
+	if (!fault_param) {
+		ret = -ENOMEM;
+		goto done_unlock;
+	}
+
+	mutex_init(&fault_param->lock);
+	INIT_LIST_HEAD(&fault_param->faults);
+	INIT_LIST_HEAD(&fault_param->partial);
+	fault_param->dev = dev;
+	list_add(&fault_param->queue_list, &queue->devices);
+	fault_param->queue = queue;
+
+	param->fault_param = fault_param;
+
+done_unlock:
 	mutex_unlock(&param->lock);
 	mutex_unlock(&queue->lock);
 
-	if (ret)
-		kfree(iopf_param);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(iopf_queue_add_device);
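With this rework, iopf_queue_add_device() no longer returns -ENODEV for a missing dev->iommu (that check is gone and param is dereferenced directly); it returns 0 on success, -EBUSY if the device already has fault data attached, or -ENOMEM if the kzalloc() fails. A hedged caller sketch, not taken from this patch: the helper name is made up, and the iopf_queue_*() declarations are assumed to come from <linux/iommu.h>.

#include <linux/device.h>
#include <linux/iommu.h>	/* assumed home of the iopf_queue_*() declarations */

static int example_enable_dev_iopf(struct iopf_queue *queue, struct device *dev)
{
	int ret = iopf_queue_add_device(queue, dev);

	if (ret == -EBUSY)	/* fault_param already allocated for this device */
		dev_warn(dev, "device already attached to an IOPF queue\n");
	else if (ret)		/* -ENOMEM from the allocation above */
		dev_err(dev, "failed to attach device to IOPF queue: %d\n", ret);

	return ret;
}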
@@ -330,34 +317,41 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device);
  */
 int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
 {
-	int ret = -EINVAL;
+	int ret = 0;
 	struct iopf_fault *iopf, *next;
-	struct iopf_device_param *iopf_param;
 	struct dev_iommu *param = dev->iommu;
-
-	if (!param || !queue)
-		return -EINVAL;
+	struct iommu_fault_param *fault_param = param->fault_param;
 
 	mutex_lock(&queue->lock);
 	mutex_lock(&param->lock);
-	iopf_param = param->iopf_param;
-	if (iopf_param && iopf_param->queue == queue) {
-		list_del(&iopf_param->queue_list);
-		param->iopf_param = NULL;
-		ret = 0;
+	if (!fault_param) {
+		ret = -ENODEV;
+		goto unlock;
 	}
-	mutex_unlock(&param->lock);
-	mutex_unlock(&queue->lock);
-	if (ret)
-		return ret;
+
+	if (fault_param->queue != queue) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (!list_empty(&fault_param->faults)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	list_del(&fault_param->queue_list);
 
 	/* Just in case some faults are still stuck */
-	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
+	list_for_each_entry_safe(iopf, next, &fault_param->partial, list)
 		kfree(iopf);
 
-	kfree(iopf_param);
+	param->fault_param = NULL;
+	kfree(fault_param);
+unlock:
+	mutex_unlock(&param->lock);
+	mutex_unlock(&queue->lock);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
 
@@ -403,7 +397,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_alloc);
  */
 void iopf_queue_free(struct iopf_queue *queue)
 {
-	struct iopf_device_param *iopf_param, *next;
+	struct iommu_fault_param *iopf_param, *next;
 
 	if (!queue)
 		return;
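Taken together, the touched functions cover the lifetime of a per-device IOPF attachment. A minimal end-to-end sketch follows; it is not part of this patch: the demo_* names are invented, iopf_queue_alloc() is assumed to take a queue name and return NULL on failure, and error handling is deliberately thin.

#include <linux/device.h>
#include <linux/iommu.h>

static struct iopf_queue *demo_queue;

static int demo_iopf_setup(struct device *dev)
{
	int ret;

	demo_queue = iopf_queue_alloc(dev_name(dev));	/* backing fault workqueue */
	if (!demo_queue)
		return -ENOMEM;

	ret = iopf_queue_add_device(demo_queue, dev);	/* allocates dev->iommu->fault_param */
	if (ret)
		iopf_queue_free(demo_queue);
	return ret;
}

static void demo_iopf_teardown(struct device *dev)
{
	iopf_queue_flush_dev(dev);			/* drain fault work queued so far */
	if (iopf_queue_remove_device(demo_queue, dev))	/* may now fail with -EBUSY */
		return;					/* keep the queue if faults are still pending */
	iopf_queue_free(demo_queue);
}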