Skip to content

Commit fb01562

Browse files
jpbrucker authored and joergroedel committed
uacce: Remove mm_exit() op
The mm_exit() op will be removed from the SVA API. When a process dies and its mm goes away, the IOMMU driver won't notify device drivers anymore. Drivers should expect to handle a lot more aborted DMA. On the upside, it does greatly simplify the queue management. The uacce_mm struct, that tracks all queues bound to an mm, was only used by the mm_exit() callback. Remove it. Signed-off-by: Jean-Philippe Brucker <[email protected]> Acked-by: Jacob Pan <[email protected]> Acked-by: Lu Baolu <[email protected]> Acked-by: Zhangfei Gao <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent da656a0 commit fb01562

File tree

2 files changed

+51
-155
lines changed

2 files changed

+51
-155
lines changed

drivers/misc/uacce/uacce.c

Lines changed: 42 additions & 130 deletions
Original file line number | Diff line number | Diff line change
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
9090
}
9191
#endif
9292

93-
static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
94-
void *data)
93+
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
9594
{
96-
struct uacce_mm *uacce_mm = data;
97-
struct uacce_queue *q;
98-
99-
/*
100-
* No new queue can be added concurrently because no caller can have a
101-
* reference to this mm. But there may be concurrent calls to
102-
* uacce_mm_put(), so we need the lock.
103-
*/
104-
mutex_lock(&uacce_mm->lock);
105-
list_for_each_entry(q, &uacce_mm->queues, list)
106-
uacce_put_queue(q);
107-
uacce_mm->mm = NULL;
108-
mutex_unlock(&uacce_mm->lock);
95+
int pasid;
96+
struct iommu_sva *handle;
10997

110-
return 0;
111-
}
112-
113-
static struct iommu_sva_ops uacce_sva_ops = {
114-
.mm_exit = uacce_sva_exit,
115-
};
116-
117-
static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
118-
struct uacce_queue *q,
119-
struct mm_struct *mm)
120-
{
121-
struct uacce_mm *uacce_mm = NULL;
122-
struct iommu_sva *handle = NULL;
123-
int ret;
124-
125-
lockdep_assert_held(&uacce->mm_lock);
126-
127-
list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
128-
if (uacce_mm->mm == mm) {
129-
mutex_lock(&uacce_mm->lock);
130-
list_add(&q->list, &uacce_mm->queues);
131-
mutex_unlock(&uacce_mm->lock);
132-
return uacce_mm;
133-
}
134-
}
135-
136-
uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
137-
if (!uacce_mm)
138-
return NULL;
98+
if (!(uacce->flags & UACCE_DEV_SVA))
99+
return 0;
139100

140-
if (uacce->flags & UACCE_DEV_SVA) {
141-
/*
142-
* Safe to pass an incomplete uacce_mm, since mm_exit cannot
143-
* fire while we hold a reference to the mm.
144-
*/
145-
handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
146-
if (IS_ERR(handle))
147-
goto err_free;
101+
handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
102+
if (IS_ERR(handle))
103+
return PTR_ERR(handle);
148104

149-
ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
150-
if (ret)
151-
goto err_unbind;
152-
153-
uacce_mm->pasid = iommu_sva_get_pasid(handle);
154-
if (uacce_mm->pasid == IOMMU_PASID_INVALID)
155-
goto err_unbind;
105+
pasid = iommu_sva_get_pasid(handle);
106+
if (pasid == IOMMU_PASID_INVALID) {
107+
iommu_sva_unbind_device(handle);
108+
return -ENODEV;
156109
}
157110

158-
uacce_mm->mm = mm;
159-
uacce_mm->handle = handle;
160-
INIT_LIST_HEAD(&uacce_mm->queues);
161-
mutex_init(&uacce_mm->lock);
162-
list_add(&q->list, &uacce_mm->queues);
163-
list_add(&uacce_mm->list, &uacce->mm_list);
164-
165-
return uacce_mm;
166-
167-
err_unbind:
168-
if (handle)
169-
iommu_sva_unbind_device(handle);
170-
err_free:
171-
kfree(uacce_mm);
172-
return NULL;
111+
q->handle = handle;
112+
q->pasid = pasid;
113+
return 0;
173114
}
174115

175-
static void uacce_mm_put(struct uacce_queue *q)
116+
static void uacce_unbind_queue(struct uacce_queue *q)
176117
{
177-
struct uacce_mm *uacce_mm = q->uacce_mm;
178-
179-
lockdep_assert_held(&q->uacce->mm_lock);
180-
181-
mutex_lock(&uacce_mm->lock);
182-
list_del(&q->list);
183-
mutex_unlock(&uacce_mm->lock);
184-
185-
if (list_empty(&uacce_mm->queues)) {
186-
if (uacce_mm->handle)
187-
iommu_sva_unbind_device(uacce_mm->handle);
188-
list_del(&uacce_mm->list);
189-
kfree(uacce_mm);
190-
}
118+
if (!q->handle)
119+
return;
120+
iommu_sva_unbind_device(q->handle);
121+
q->handle = NULL;
191122
}
192123

193124
static int uacce_fops_open(struct inode *inode, struct file *filep)
194125
{
195-
struct uacce_mm *uacce_mm = NULL;
196126
struct uacce_device *uacce;
197127
struct uacce_queue *q;
198128
int ret = 0;
@@ -205,34 +135,31 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
205135
if (!q)
206136
return -ENOMEM;
207137

208-
mutex_lock(&uacce->mm_lock);
209-
uacce_mm = uacce_mm_get(uacce, q, current->mm);
210-
mutex_unlock(&uacce->mm_lock);
211-
if (!uacce_mm) {
212-
ret = -ENOMEM;
138+
ret = uacce_bind_queue(uacce, q);
139+
if (ret)
213140
goto out_with_mem;
214-
}
215141

216142
q->uacce = uacce;
217-
q->uacce_mm = uacce_mm;
218143

219144
if (uacce->ops->get_queue) {
220-
ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
145+
ret = uacce->ops->get_queue(uacce, q->pasid, q);
221146
if (ret < 0)
222-
goto out_with_mm;
147+
goto out_with_bond;
223148
}
224149

225150
init_waitqueue_head(&q->wait);
226151
filep->private_data = q;
227152
uacce->inode = inode;
228153
q->state = UACCE_Q_INIT;
229154

155+
mutex_lock(&uacce->queues_lock);
156+
list_add(&q->list, &uacce->queues);
157+
mutex_unlock(&uacce->queues_lock);
158+
230159
return 0;
231160

232-
out_with_mm:
233-
mutex_lock(&uacce->mm_lock);
234-
uacce_mm_put(q);
235-
mutex_unlock(&uacce->mm_lock);
161+
out_with_bond:
162+
uacce_unbind_queue(q);
236163
out_with_mem:
237164
kfree(q);
238165
return ret;
@@ -241,14 +168,12 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
241168
static int uacce_fops_release(struct inode *inode, struct file *filep)
242169
{
243170
struct uacce_queue *q = filep->private_data;
244-
struct uacce_device *uacce = q->uacce;
245171

172+
mutex_lock(&q->uacce->queues_lock);
173+
list_del(&q->list);
174+
mutex_unlock(&q->uacce->queues_lock);
246175
uacce_put_queue(q);
247-
248-
mutex_lock(&uacce->mm_lock);
249-
uacce_mm_put(q);
250-
mutex_unlock(&uacce->mm_lock);
251-
176+
uacce_unbind_queue(q);
252177
kfree(q);
253178

254179
return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
513438
if (ret < 0)
514439
goto err_with_uacce;
515440

516-
INIT_LIST_HEAD(&uacce->mm_list);
517-
mutex_init(&uacce->mm_lock);
441+
INIT_LIST_HEAD(&uacce->queues);
442+
mutex_init(&uacce->queues_lock);
518443
device_initialize(&uacce->dev);
519444
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
520445
uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
561486
*/
562487
void uacce_remove(struct uacce_device *uacce)
563488
{
564-
struct uacce_mm *uacce_mm;
565-
struct uacce_queue *q;
489+
struct uacce_queue *q, *next_q;
566490

567491
if (!uacce)
568492
return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
574498
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
575499

576500
/* ensure no open queue remains */
577-
mutex_lock(&uacce->mm_lock);
578-
list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
579-
/*
580-
* We don't take the uacce_mm->lock here. Since we hold the
581-
* device's mm_lock, no queue can be added to or removed from
582-
* this uacce_mm. We may run concurrently with mm_exit, but
583-
* uacce_put_queue() is serialized and iommu_sva_unbind_device()
584-
* waits for the lock that mm_exit is holding.
585-
*/
586-
list_for_each_entry(q, &uacce_mm->queues, list)
587-
uacce_put_queue(q);
588-
589-
if (uacce->flags & UACCE_DEV_SVA) {
590-
iommu_sva_unbind_device(uacce_mm->handle);
591-
uacce_mm->handle = NULL;
592-
}
501+
mutex_lock(&uacce->queues_lock);
502+
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
503+
uacce_put_queue(q);
504+
uacce_unbind_queue(q);
593505
}
594-
mutex_unlock(&uacce->mm_lock);
506+
mutex_unlock(&uacce->queues_lock);
595507

596508
/* disable sva now since no opened queues */
597509
if (uacce->flags & UACCE_DEV_SVA)

include/linux/uacce.h

Lines changed: 9 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -68,19 +68,21 @@ enum uacce_q_state {
6868
* @uacce: pointer to uacce
6969
* @priv: private pointer
7070
* @wait: wait queue head
71-
* @list: index into uacce_mm
72-
* @uacce_mm: the corresponding mm
71+
* @list: index into uacce queues list
7372
* @qfrs: pointer of qfr regions
7473
* @state: queue state machine
74+
* @pasid: pasid associated to the mm
75+
* @handle: iommu_sva handle returned by iommu_sva_bind_device()
7576
*/
7677
struct uacce_queue {
7778
struct uacce_device *uacce;
7879
void *priv;
7980
wait_queue_head_t wait;
8081
struct list_head list;
81-
struct uacce_mm *uacce_mm;
8282
struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
8383
enum uacce_q_state state;
84+
int pasid;
85+
struct iommu_sva *handle;
8486
};
8587

8688
/**
@@ -96,8 +98,8 @@ struct uacce_queue {
9698
* @cdev: cdev of the uacce
9799
* @dev: dev of the uacce
98100
* @priv: private pointer of the uacce
99-
* @mm_list: list head of uacce_mm->list
100-
* @mm_lock: lock for mm_list
101+
* @queues: list of queues
102+
* @queues_lock: lock for queues list
101103
* @inode: core vfs
102104
*/
103105
struct uacce_device {
@@ -112,27 +114,9 @@ struct uacce_device {
112114
struct cdev *cdev;
113115
struct device dev;
114116
void *priv;
115-
struct list_head mm_list;
116-
struct mutex mm_lock;
117-
struct inode *inode;
118-
};
119-
120-
/**
121-
* struct uacce_mm - keep track of queues bound to a process
122-
* @list: index into uacce_device
123-
* @queues: list of queues
124-
* @mm: the mm struct
125-
* @lock: protects the list of queues
126-
* @pasid: pasid of the uacce_mm
127-
* @handle: iommu_sva handle return from iommu_sva_bind_device
128-
*/
129-
struct uacce_mm {
130-
struct list_head list;
131117
struct list_head queues;
132-
struct mm_struct *mm;
133-
struct mutex lock;
134-
int pasid;
135-
struct iommu_sva *handle;
118+
struct mutex queues_lock;
119+
struct inode *inode;
136120
};
137121

138122
#if IS_ENABLED(CONFIG_UACCE)

0 commit comments

Comments (0)