@@ -181,149 +181,171 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
181
181
mutex_unlock (& kv -> lock );
182
182
}
183
183
184
- static int kvm_vfio_set_group (struct kvm_device * dev , long attr , u64 arg )
184
+ static int kvm_vfio_group_add (struct kvm_device * dev , unsigned int fd )
185
185
{
186
186
struct kvm_vfio * kv = dev -> private ;
187
187
struct vfio_group * vfio_group ;
188
188
struct kvm_vfio_group * kvg ;
189
- int32_t __user * argp = (int32_t __user * )(unsigned long )arg ;
190
189
struct fd f ;
191
- int32_t fd ;
192
190
int ret ;
193
191
194
- switch (attr ) {
195
- case KVM_DEV_VFIO_GROUP_ADD :
196
- if (get_user (fd , argp ))
197
- return - EFAULT ;
198
-
199
- f = fdget (fd );
200
- if (!f .file )
201
- return - EBADF ;
202
-
203
- vfio_group = kvm_vfio_group_get_external_user (f .file );
204
- fdput (f );
192
+ f = fdget (fd );
193
+ if (!f .file )
194
+ return - EBADF ;
205
195
206
- if ( IS_ERR ( vfio_group ))
207
- return PTR_ERR ( vfio_group );
196
+ vfio_group = kvm_vfio_group_get_external_user ( f . file );
197
+ fdput ( f );
208
198
209
- mutex_lock (& kv -> lock );
199
+ if (IS_ERR (vfio_group ))
200
+ return PTR_ERR (vfio_group );
210
201
211
- list_for_each_entry (kvg , & kv -> group_list , node ) {
212
- if (kvg -> vfio_group == vfio_group ) {
213
- mutex_unlock (& kv -> lock );
214
- kvm_vfio_group_put_external_user (vfio_group );
215
- return - EEXIST ;
216
- }
217
- }
202
+ mutex_lock (& kv -> lock );
218
203
219
- kvg = kzalloc (sizeof (* kvg ), GFP_KERNEL_ACCOUNT );
220
- if (!kvg ) {
221
- mutex_unlock (& kv -> lock );
222
- kvm_vfio_group_put_external_user (vfio_group );
223
- return - ENOMEM ;
204
+ list_for_each_entry (kvg , & kv -> group_list , node ) {
205
+ if (kvg -> vfio_group == vfio_group ) {
206
+ ret = - EEXIST ;
207
+ goto err_unlock ;
224
208
}
209
+ }
225
210
226
- list_add_tail (& kvg -> node , & kv -> group_list );
227
- kvg -> vfio_group = vfio_group ;
211
+ kvg = kzalloc (sizeof (* kvg ), GFP_KERNEL_ACCOUNT );
212
+ if (!kvg ) {
213
+ ret = - ENOMEM ;
214
+ goto err_unlock ;
215
+ }
228
216
229
- kvm_arch_start_assignment (dev -> kvm );
217
+ list_add_tail (& kvg -> node , & kv -> group_list );
218
+ kvg -> vfio_group = vfio_group ;
230
219
231
- mutex_unlock ( & kv -> lock );
220
+ kvm_arch_start_assignment ( dev -> kvm );
232
221
233
- kvm_vfio_group_set_kvm ( vfio_group , dev -> kvm );
222
+ mutex_unlock ( & kv -> lock );
234
223
235
- kvm_vfio_update_coherency (dev );
224
+ kvm_vfio_group_set_kvm (vfio_group , dev -> kvm );
225
+ kvm_vfio_update_coherency (dev );
236
226
237
- return 0 ;
227
+ return 0 ;
228
+ err_unlock :
229
+ mutex_unlock (& kv -> lock );
230
+ kvm_vfio_group_put_external_user (vfio_group );
231
+ return ret ;
232
+ }
238
233
239
- case KVM_DEV_VFIO_GROUP_DEL :
240
- if (get_user (fd , argp ))
241
- return - EFAULT ;
234
+ static int kvm_vfio_group_del (struct kvm_device * dev , unsigned int fd )
235
+ {
236
+ struct kvm_vfio * kv = dev -> private ;
237
+ struct kvm_vfio_group * kvg ;
238
+ struct fd f ;
239
+ int ret ;
242
240
243
- f = fdget (fd );
244
- if (!f .file )
245
- return - EBADF ;
241
+ f = fdget (fd );
242
+ if (!f .file )
243
+ return - EBADF ;
246
244
247
- ret = - ENOENT ;
245
+ ret = - ENOENT ;
248
246
249
- mutex_lock (& kv -> lock );
247
+ mutex_lock (& kv -> lock );
250
248
251
- list_for_each_entry (kvg , & kv -> group_list , node ) {
252
- if (!kvm_vfio_external_group_match_file (kvg -> vfio_group ,
253
- f .file ))
254
- continue ;
249
+ list_for_each_entry (kvg , & kv -> group_list , node ) {
250
+ if (!kvm_vfio_external_group_match_file (kvg -> vfio_group ,
251
+ f .file ))
252
+ continue ;
255
253
256
- list_del (& kvg -> node );
257
- kvm_arch_end_assignment (dev -> kvm );
254
+ list_del (& kvg -> node );
255
+ kvm_arch_end_assignment (dev -> kvm );
258
256
#ifdef CONFIG_SPAPR_TCE_IOMMU
259
- kvm_spapr_tce_release_vfio_group (dev -> kvm ,
260
- kvg -> vfio_group );
257
+ kvm_spapr_tce_release_vfio_group (dev -> kvm , kvg -> vfio_group );
261
258
#endif
262
- kvm_vfio_group_set_kvm (kvg -> vfio_group , NULL );
263
- kvm_vfio_group_put_external_user (kvg -> vfio_group );
264
- kfree (kvg );
265
- ret = 0 ;
266
- break ;
267
- }
259
+ kvm_vfio_group_set_kvm (kvg -> vfio_group , NULL );
260
+ kvm_vfio_group_put_external_user (kvg -> vfio_group );
261
+ kfree (kvg );
262
+ ret = 0 ;
263
+ break ;
264
+ }
268
265
269
- mutex_unlock (& kv -> lock );
266
+ mutex_unlock (& kv -> lock );
270
267
271
- fdput (f );
268
+ fdput (f );
272
269
273
- kvm_vfio_update_coherency (dev );
270
+ kvm_vfio_update_coherency (dev );
274
271
275
- return ret ;
272
+ return ret ;
273
+ }
276
274
277
275
#ifdef CONFIG_SPAPR_TCE_IOMMU
/*
 * Attach an sPAPR TCE IOMMU table to an already-added VFIO group.
 *
 * @arg points to a userspace struct kvm_vfio_spapr_tce naming the group fd
 * and the TCE table fd. The group must already be on kv->group_list;
 * otherwise -ENOENT is returned. Temporary references on the group and its
 * iommu_group are dropped on all paths via the goto-cleanup tail.
 *
 * Returns 0 on success or a negative errno:
 *   -EFAULT  copy_from_user() failed
 *   -EBADF   param.groupfd is not an open file descriptor
 *   -EIO     the group unexpectedly has no iommu_group (WARNs once)
 *   -ENOENT  the group is not attached to this device
 *   or the error from kvm_vfio_group_get_external_user() /
 *   kvm_spapr_tce_attach_iommu_group().
 */
static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
					void __user *arg)
{
	struct kvm_vfio_spapr_tce param;
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	struct fd f;
	struct iommu_group *grp;
	int ret;

	if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce)))
		return -EFAULT;

	f = fdget(param.groupfd);
	if (!f.file)
		return -EBADF;

	/* The external-user reference outlives the fd reference. */
	vfio_group = kvm_vfio_group_get_external_user(f.file);
	fdput(f);

	if (IS_ERR(vfio_group))
		return PTR_ERR(vfio_group);

	grp = kvm_vfio_group_get_iommu_group(vfio_group);
	if (WARN_ON_ONCE(!grp)) {
		ret = -EIO;
		goto err_put_external;
	}

	ret = -ENOENT;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (kvg->vfio_group != vfio_group)
			continue;

		ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
						       grp);
		break;
	}

	mutex_unlock(&kv->lock);

	iommu_group_put(grp);
err_put_external:
	kvm_vfio_group_put_external_user(vfio_group);
	return ret;
}
#endif
+
328
+ static int kvm_vfio_set_group (struct kvm_device * dev , long attr ,
329
+ void __user * arg )
330
+ {
331
+ int32_t __user * argp = arg ;
332
+ int32_t fd ;
333
+
334
+ switch (attr ) {
335
+ case KVM_DEV_VFIO_GROUP_ADD :
336
+ if (get_user (fd , argp ))
337
+ return - EFAULT ;
338
+ return kvm_vfio_group_add (dev , fd );
339
+
340
+ case KVM_DEV_VFIO_GROUP_DEL :
341
+ if (get_user (fd , argp ))
342
+ return - EFAULT ;
343
+ return kvm_vfio_group_del (dev , fd );
344
+
345
+ #ifdef CONFIG_SPAPR_TCE_IOMMU
346
+ case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE :
347
+ return kvm_vfio_group_set_spapr_tce (dev , arg );
348
+ #endif
327
349
}
328
350
329
351
return - ENXIO ;
@@ -334,7 +356,8 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
334
356
{
335
357
switch (attr -> group ) {
336
358
case KVM_DEV_VFIO_GROUP :
337
- return kvm_vfio_set_group (dev , attr -> attr , attr -> addr );
359
+ return kvm_vfio_set_group (dev , attr -> attr ,
360
+ u64_to_user_ptr (attr -> addr ));
338
361
}
339
362
340
363
return - ENXIO ;
0 commit comments