
Commit 3d49020

nicolinc authored and jgunthorpe committed
iommufd/fault: Use a separate spinlock to protect fault->deliver list
The fault->mutex serializes the fault read()/write() fops and iommufd_auto_response_faults(), mainly for fault->response. It was also conveniently used to fence fault->deliver in the poll() fop and in iommufd_fault_iopf_handler().

However, copy_from/to_user() may sleep if pagefaults are enabled, so they can wait a long time for user pages to swap in. That blocks iommufd_fault_iopf_handler() and its caller, which is typically a shared IRQ handler of an IOMMU driver, resulting in a potential global DOS.

Instead of reusing the mutex to protect the fault->deliver list, add a separate spinlock, nested under the mutex, to do the job, so that iommufd_fault_iopf_handler() is no longer blocked by copy_from/to_user().

Add a free_list in iommufd_auto_response_faults(), so the spinlock can simply fence a fast list_for_each_entry_safe routine.

Provide two deliver-list helpers for iommufd_fault_fops_read() to use:
 - Fetch the first iopf_group off the fault->deliver list
 - Restore an iopf_group back to the head of the fault->deliver list

Lastly, move the mutex closer to the response in the fault structure, and update its kdoc accordingly.

Fixes: 07838f7 ("iommufd: Add iommufd fault object")
Link: https://patch.msgid.link/r/[email protected]
Cc: [email protected]
Suggested-by: Jason Gunthorpe <[email protected]>
Reviewed-by: Kevin Tian <[email protected]>
Reviewed-by: Lu Baolu <[email protected]>
Reviewed-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Nicolin Chen <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent 3f4818e commit 3d49020
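The core of the fix is a lock split: a spinlock with a short, never-sleeping critical section now guards the deliver list that the IRQ path appends to, while the pre-existing mutex keeps serializing the slow read()/write() paths that may sleep in copy_from/to_user(). The following is a minimal userspace analogue of that split using pthreads, with made-up names (struct fault_node, irq_producer, deliver_fetch, consumer_read) standing in for the kernel structures; it sketches the idea and is not the kernel code itself.

/* Userspace analogue of the lock split (hypothetical types; pthreads
 * stand in for the kernel's spinlock_t and struct mutex). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct fault_node {
	struct fault_node *next;
	int data;
};

static pthread_spinlock_t lock;	/* protects 'deliver' only; never held
				 * across anything that can sleep */
static pthread_mutex_t mutex;	/* serializes slow consumer work */
static struct fault_node *deliver;	/* LIFO here for brevity; the
					 * kernel list is FIFO */

/* Stand-in for iommufd_fault_iopf_handler(): runs in the "IRQ" path,
 * so it must only ever take the short spinlock. */
static void irq_producer(int data)
{
	struct fault_node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->data = data;
	pthread_spin_lock(&lock);
	n->next = deliver;
	deliver = n;
	pthread_spin_unlock(&lock);
}

/* Stand-in for iommufd_fault_deliver_fetch(): pop one node, or NULL. */
static struct fault_node *deliver_fetch(void)
{
	struct fault_node *n;

	pthread_spin_lock(&lock);
	n = deliver;
	if (n)
		deliver = n->next;
	pthread_spin_unlock(&lock);
	return n;
}

/* Stand-in for the read() fop: the slow "copy to user" step runs with
 * only the mutex held, so irq_producer() never waits behind it. */
static void consumer_read(void)
{
	struct fault_node *n;

	pthread_mutex_lock(&mutex);
	while ((n = deliver_fetch())) {
		usleep(1000);	/* models copy_to_user() sleeping */
		printf("fault %d\n", n->data);
		free(n);
	}
	pthread_mutex_unlock(&mutex);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	pthread_mutex_init(&mutex, NULL);

	for (int i = 0; i < 4; i++)
		irq_producer(i);
	consumer_read();
	return 0;
}

Because irq_producer() only ever contends on the spinlock, a reader stalled in its slow copy holds just the mutex, and fault delivery keeps making progress.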

2 files changed, 49 insertions(+), 14 deletions(-)

drivers/iommu/iommufd/fault.c

Lines changed: 22 additions & 12 deletions
@@ -103,15 +103,23 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
 {
 	struct iommufd_fault *fault = hwpt->fault;
 	struct iopf_group *group, *next;
+	struct list_head free_list;
 	unsigned long index;
 
 	if (!fault)
 		return;
+	INIT_LIST_HEAD(&free_list);
 
 	mutex_lock(&fault->mutex);
+	spin_lock(&fault->lock);
 	list_for_each_entry_safe(group, next, &fault->deliver, node) {
 		if (group->attach_handle != &handle->handle)
 			continue;
+		list_move(&group->node, &free_list);
+	}
+	spin_unlock(&fault->lock);
+
+	list_for_each_entry_safe(group, next, &free_list, node) {
 		list_del(&group->node);
 		iopf_group_response(group, IOMMU_PAGE_RESP_INVALID);
 		iopf_free_group(group);
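This hunk is the free_list pattern named in the commit message: matching groups are unlinked under the spinlock and parked on a private list, and the potentially slow iopf_group_response()/iopf_free_group() calls then run after the spinlock is dropped. A generic userspace rendering of the same drain pattern, with hypothetical node/owner types and a singly-linked list:

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int owner;
};

static pthread_spinlock_t list_lock;
static struct node *pending;		/* stands in for fault->deliver */

static void respond_and_free(struct node *n)	/* may block; hypothetical */
{
	free(n);
}

/* Phase 1: unlink matches under the lock. Phase 2: slow work unlocked. */
static void auto_respond(int owner)
{
	struct node **pp, *n, *free_list = NULL;

	pthread_spin_lock(&list_lock);
	for (pp = &pending; (n = *pp); ) {
		if (n->owner == owner) {
			*pp = n->next;		/* unlink */
			n->next = free_list;	/* park on private list */
			free_list = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_spin_unlock(&list_lock);

	while ((n = free_list)) {	/* lock dropped: blocking is fine */
		free_list = n->next;
		respond_and_free(n);
	}
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	auto_respond(0);	/* no-op on an empty list */
	return 0;
}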
@@ -266,17 +274,19 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
 		return -ESPIPE;
 
 	mutex_lock(&fault->mutex);
-	while (!list_empty(&fault->deliver) && count > done) {
-		group = list_first_entry(&fault->deliver,
-					 struct iopf_group, node);
-
-		if (group->fault_count * fault_size > count - done)
+	while ((group = iommufd_fault_deliver_fetch(fault))) {
+		if (done >= count ||
+		    group->fault_count * fault_size > count - done) {
+			iommufd_fault_deliver_restore(fault, group);
 			break;
+		}
 
 		rc = xa_alloc(&fault->response, &group->cookie, group,
 			      xa_limit_32b, GFP_KERNEL);
-		if (rc)
+		if (rc) {
+			iommufd_fault_deliver_restore(fault, group);
 			break;
+		}
 
 		idev = to_iommufd_handle(group->attach_handle)->idev;
 		list_for_each_entry(iopf, &group->faults, list) {
@@ -285,13 +295,12 @@ static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf,
 					      group->cookie);
 			if (copy_to_user(buf + done, &data, fault_size)) {
 				xa_erase(&fault->response, group->cookie);
+				iommufd_fault_deliver_restore(fault, group);
 				rc = -EFAULT;
 				break;
 			}
 			done += fault_size;
 		}
-
-		list_del(&group->node);
 	}
 	mutex_unlock(&fault->mutex);
 

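Note the reshuffled control flow in read(): since the deliver list is now guarded only by the spinlock, the reader can no longer peek at the list head under the mutex and unlink it later. Instead it pops the group up front with iommufd_fault_deliver_fetch() and pushes it back with iommufd_fault_deliver_restore() on every bail-out path (buffer exhausted, xa_alloc() failure, faulting copy_to_user()), so no fault is ever dropped. The trailing list_del() disappears because fetching already unlinked the group.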
@@ -349,10 +358,10 @@ static __poll_t iommufd_fault_fops_poll(struct file *filep,
 	__poll_t pollflags = EPOLLOUT;
 
 	poll_wait(filep, &fault->wait_queue, wait);
-	mutex_lock(&fault->mutex);
+	spin_lock(&fault->lock);
 	if (!list_empty(&fault->deliver))
 		pollflags |= EPOLLIN | EPOLLRDNORM;
-	mutex_unlock(&fault->mutex);
+	spin_unlock(&fault->lock);
 
 	return pollflags;
 }
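Switching poll() to the spinlock is a natural fit: it only samples list_empty(&fault->deliver), a constant-time check, so there is no reason to take the heavier, sleepable mutex just to test readiness, and doing so would have put poll() behind a reader sleeping in copy_to_user().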
@@ -394,6 +403,7 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
 	INIT_LIST_HEAD(&fault->deliver);
 	xa_init_flags(&fault->response, XA_FLAGS_ALLOC1);
 	mutex_init(&fault->mutex);
+	spin_lock_init(&fault->lock);
 	init_waitqueue_head(&fault->wait_queue);
 
 	filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops,
@@ -442,9 +452,9 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
 	hwpt = group->attach_handle->domain->fault_data;
 	fault = hwpt->fault;
 
-	mutex_lock(&fault->mutex);
+	spin_lock(&fault->lock);
 	list_add_tail(&group->node, &fault->deliver);
-	mutex_unlock(&fault->mutex);
+	spin_unlock(&fault->lock);
 
 	wake_up_interruptible(&fault->wait_queue);
 

drivers/iommu/iommufd/iommufd_private.h

Lines changed: 27 additions & 2 deletions
@@ -443,14 +443,39 @@ struct iommufd_fault {
 	struct iommufd_ctx *ictx;
 	struct file *filep;
 
-	/* The lists of outstanding faults protected by below mutex. */
-	struct mutex mutex;
+	spinlock_t lock; /* protects the deliver list */
 	struct list_head deliver;
+	struct mutex mutex; /* serializes response flows */
 	struct xarray response;
 
 	struct wait_queue_head wait_queue;
 };
 
+/* Fetch the first node out of the fault->deliver list */
+static inline struct iopf_group *
+iommufd_fault_deliver_fetch(struct iommufd_fault *fault)
+{
+	struct list_head *list = &fault->deliver;
+	struct iopf_group *group = NULL;
+
+	spin_lock(&fault->lock);
+	if (!list_empty(list)) {
+		group = list_first_entry(list, struct iopf_group, node);
+		list_del(&group->node);
+	}
+	spin_unlock(&fault->lock);
+	return group;
+}
+
+/* Restore a node back to the head of the fault->deliver list */
+static inline void iommufd_fault_deliver_restore(struct iommufd_fault *fault,
+						 struct iopf_group *group)
+{
+	spin_lock(&fault->lock);
+	list_add(&group->node, &fault->deliver);
+	spin_unlock(&fault->lock);
+}
+
 struct iommufd_attach_handle {
 	struct iommu_attach_handle handle;
 	struct iommufd_device *idev;
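Two details of these helpers are worth noting. They take fault->lock internally, so callers such as iommufd_fault_fops_read() hold only fault->mutex at the call site and the spinlock stays nested under the mutex, the lock order the commit message describes. And restore uses list_add() rather than list_add_tail(): a group that could not be delivered goes back to the head of the deliver list, so the next read() retries it before any newer faults.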
