@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
 }
 #endif
 
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
-                          void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
 {
-        struct uacce_mm *uacce_mm = data;
-        struct uacce_queue *q;
-
-        /*
-         * No new queue can be added concurrently because no caller can have a
-         * reference to this mm. But there may be concurrent calls to
-         * uacce_mm_put(), so we need the lock.
-         */
-        mutex_lock(&uacce_mm->lock);
-        list_for_each_entry(q, &uacce_mm->queues, list)
-                uacce_put_queue(q);
-        uacce_mm->mm = NULL;
-        mutex_unlock(&uacce_mm->lock);
+        int pasid;
+        struct iommu_sva *handle;
 
-        return 0;
-}
-
-static struct iommu_sva_ops uacce_sva_ops = {
-        .mm_exit = uacce_sva_exit,
-};
-
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
-                                     struct uacce_queue *q,
-                                     struct mm_struct *mm)
-{
-        struct uacce_mm *uacce_mm = NULL;
-        struct iommu_sva *handle = NULL;
-        int ret;
-
-        lockdep_assert_held(&uacce->mm_lock);
-
-        list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-                if (uacce_mm->mm == mm) {
-                        mutex_lock(&uacce_mm->lock);
-                        list_add(&q->list, &uacce_mm->queues);
-                        mutex_unlock(&uacce_mm->lock);
-                        return uacce_mm;
-                }
-        }
-
-        uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
-        if (!uacce_mm)
-                return NULL;
+        if (!(uacce->flags & UACCE_DEV_SVA))
+                return 0;
 
-        if (uacce->flags & UACCE_DEV_SVA) {
-                /*
-                 * Safe to pass an incomplete uacce_mm, since mm_exit cannot
-                 * fire while we hold a reference to the mm.
-                 */
-                handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
-                if (IS_ERR(handle))
-                        goto err_free;
+        handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+        if (IS_ERR(handle))
+                return PTR_ERR(handle);
 
-                ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
-                if (ret)
-                        goto err_unbind;
-
-                uacce_mm->pasid = iommu_sva_get_pasid(handle);
-                if (uacce_mm->pasid == IOMMU_PASID_INVALID)
-                        goto err_unbind;
+        pasid = iommu_sva_get_pasid(handle);
+        if (pasid == IOMMU_PASID_INVALID) {
+                iommu_sva_unbind_device(handle);
+                return -ENODEV;
         }
 
-        uacce_mm->mm = mm;
-        uacce_mm->handle = handle;
-        INIT_LIST_HEAD(&uacce_mm->queues);
-        mutex_init(&uacce_mm->lock);
-        list_add(&q->list, &uacce_mm->queues);
-        list_add(&uacce_mm->list, &uacce->mm_list);
-
-        return uacce_mm;
-
-err_unbind:
-        if (handle)
-                iommu_sva_unbind_device(handle);
-err_free:
-        kfree(uacce_mm);
-        return NULL;
+        q->handle = handle;
+        q->pasid = pasid;
+        return 0;
 }
 
-static void uacce_mm_put(struct uacce_queue *q)
+static void uacce_unbind_queue(struct uacce_queue *q)
 {
-        struct uacce_mm *uacce_mm = q->uacce_mm;
-
-        lockdep_assert_held(&q->uacce->mm_lock);
-
-        mutex_lock(&uacce_mm->lock);
-        list_del(&q->list);
-        mutex_unlock(&uacce_mm->lock);
-
-        if (list_empty(&uacce_mm->queues)) {
-                if (uacce_mm->handle)
-                        iommu_sva_unbind_device(uacce_mm->handle);
-                list_del(&uacce_mm->list);
-                kfree(uacce_mm);
-        }
+        if (!q->handle)
+                return;
+        iommu_sva_unbind_device(q->handle);
+        q->handle = NULL;
 }
 
 static int uacce_fops_open(struct inode *inode, struct file *filep)
 {
-        struct uacce_mm *uacce_mm = NULL;
         struct uacce_device *uacce;
         struct uacce_queue *q;
         int ret = 0;
@@ -205,34 +135,31 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
         if (!q)
                 return -ENOMEM;
 
-        mutex_lock(&uacce->mm_lock);
-        uacce_mm = uacce_mm_get(uacce, q, current->mm);
-        mutex_unlock(&uacce->mm_lock);
-        if (!uacce_mm) {
-                ret = -ENOMEM;
+        ret = uacce_bind_queue(uacce, q);
+        if (ret)
                 goto out_with_mem;
-        }
 
         q->uacce = uacce;
-        q->uacce_mm = uacce_mm;
 
         if (uacce->ops->get_queue) {
-                ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+                ret = uacce->ops->get_queue(uacce, q->pasid, q);
                 if (ret < 0)
-                        goto out_with_mm;
+                        goto out_with_bond;
         }
 
         init_waitqueue_head(&q->wait);
         filep->private_data = q;
         uacce->inode = inode;
         q->state = UACCE_Q_INIT;
 
+        mutex_lock(&uacce->queues_lock);
+        list_add(&q->list, &uacce->queues);
+        mutex_unlock(&uacce->queues_lock);
+
         return 0;
 
-out_with_mm:
-        mutex_lock(&uacce->mm_lock);
-        uacce_mm_put(q);
-        mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+        uacce_unbind_queue(q);
 out_with_mem:
         kfree(q);
         return ret;
@@ -241,14 +168,12 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 static int uacce_fops_release(struct inode *inode, struct file *filep)
 {
         struct uacce_queue *q = filep->private_data;
-        struct uacce_device *uacce = q->uacce;
 
+        mutex_lock(&q->uacce->queues_lock);
+        list_del(&q->list);
+        mutex_unlock(&q->uacce->queues_lock);
         uacce_put_queue(q);
-
-        mutex_lock(&uacce->mm_lock);
-        uacce_mm_put(q);
-        mutex_unlock(&uacce->mm_lock);
-
+        uacce_unbind_queue(q);
         kfree(q);
 
         return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
         if (ret < 0)
                 goto err_with_uacce;
 
-        INIT_LIST_HEAD(&uacce->mm_list);
-        mutex_init(&uacce->mm_lock);
+        INIT_LIST_HEAD(&uacce->queues);
+        mutex_init(&uacce->queues_lock);
         device_initialize(&uacce->dev);
         uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
         uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
  */
 void uacce_remove(struct uacce_device *uacce)
 {
-        struct uacce_mm *uacce_mm;
-        struct uacce_queue *q;
+        struct uacce_queue *q, *next_q;
 
         if (!uacce)
                 return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
                 unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
 
         /* ensure no open queue remains */
-        mutex_lock(&uacce->mm_lock);
-        list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-                /*
-                 * We don't take the uacce_mm->lock here. Since we hold the
-                 * device's mm_lock, no queue can be added to or removed from
-                 * this uacce_mm. We may run concurrently with mm_exit, but
-                 * uacce_put_queue() is serialized and iommu_sva_unbind_device()
-                 * waits for the lock that mm_exit is holding.
-                 */
-                list_for_each_entry(q, &uacce_mm->queues, list)
-                        uacce_put_queue(q);
-
-                if (uacce->flags & UACCE_DEV_SVA) {
-                        iommu_sva_unbind_device(uacce_mm->handle);
-                        uacce_mm->handle = NULL;
-                }
+        mutex_lock(&uacce->queues_lock);
+        list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+                uacce_put_queue(q);
+                uacce_unbind_queue(q);
         }
-        mutex_unlock(&uacce->mm_lock);
+        mutex_unlock(&uacce->queues_lock);
 
         /* disable sva now since no opened queues */
         if (uacce->flags & UACCE_DEV_SVA)
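
For context: the rewritten code stores the SVA bond on each queue (q->handle, q->pasid) and tracks all open queues on the device (uacce->queues under uacce->queues_lock), so the per-mm bookkeeping and the mm_exit() callback can go away. Below is a minimal sketch of the struct fields this diff relies on — the field names are taken from the hunks above, but the surrounding layout belongs to include/linux/uacce.h and is assumed here, not quoted from the commit:

        /* Sketch only: unrelated fields omitted, layout assumed. */
        struct uacce_queue {
                struct uacce_device *uacce;
                struct list_head list;          /* linked on uacce->queues */
                wait_queue_head_t wait;
                enum uacce_q_state state;       /* e.g. UACCE_Q_INIT */
                int pasid;                      /* from iommu_sva_get_pasid() */
                struct iommu_sva *handle;       /* SVA bond; NULL when unbound */
        };

        struct uacce_device {
                struct device *parent;
                unsigned int flags;             /* e.g. UACCE_DEV_SVA */
                struct list_head queues;        /* replaces mm_list */
                struct mutex queues_lock;       /* protects queues; replaces mm_lock */
        };

With mm_exit() gone, queues are only torn down from uacce_fops_release() and uacce_remove(), which is why a single device-level queues_lock now suffices.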