@@ -259,19 +259,13 @@ static const struct mmu_notifier_ops intel_mmuops = {
 	.invalidate_range = intel_invalidate_range,
 };
 
-static DEFINE_MUTEX(pasid_mutex);
-
 static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
 			     struct intel_svm **rsvm,
 			     struct intel_svm_dev **rsdev)
 {
 	struct intel_svm_dev *sdev = NULL;
 	struct intel_svm *svm;
 
-	/* The caller should hold the pasid_mutex lock */
-	if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
-		return -EINVAL;
-
 	if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
 		return -EINVAL;
 
@@ -371,22 +365,19 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
 	return ret;
 }
 
-/* Caller must hold pasid_mutex */
-static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
+void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
 {
 	struct intel_svm_dev *sdev;
 	struct intel_iommu *iommu;
 	struct intel_svm *svm;
 	struct mm_struct *mm;
-	int ret = -EINVAL;
 
 	iommu = device_to_iommu(dev, NULL, NULL);
 	if (!iommu)
-		goto out;
+		return;
 
-	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
-	if (ret)
-		goto out;
+	if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
+		return;
 	mm = svm->mm;
 
 	if (sdev) {
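
The unbind helper becomes the remove_dev_pasid callback itself, and its return value goes away with it: this runs on release paths where no caller can act on an error, so lookup failures now simply return early. Dropping pasid_mutex here looks safe because the IOMMU core already serializes PASID attach and detach per group. Roughly, the core-side detach path is sketched below; this is an abridged, from-memory model of drivers/iommu/iommu.c, not part of this patch, and details may differ:

/*
 * Abridged model of the core-side caller (assumed, not from this patch).
 * group->mutex, not any driver-private lock, is what keeps the
 * set_dev_pasid()/remove_dev_pasid() callbacks from racing for a device.
 */
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid)
{
	struct iommu_group *group = dev->iommu_group;

	mutex_lock(&group->mutex);
	__iommu_remove_group_pasid(group, pasid);	/* reaches the driver callback */
	WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
	mutex_unlock(&group->mutex);
}
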
@@ -418,8 +409,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
 			kfree(svm);
 		}
 	}
-out:
-	return ret;
 }
 
 /* Page request queue descriptor */
@@ -520,19 +509,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
 		goto prq_retry;
 	}
 
-	/*
-	 * A work in IO page fault workqueue may try to lock pasid_mutex now.
-	 * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
-	 * all works in the workqueue to finish may cause deadlock.
-	 *
-	 * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
-	 * Unlock it to allow the works to be handled while waiting for
-	 * them to finish.
-	 */
-	lockdep_assert_held(&pasid_mutex);
-	mutex_unlock(&pasid_mutex);
 	iopf_queue_flush_dev(dev);
-	mutex_lock(&pasid_mutex);
 
 	/*
 	 * Perform steps described in VT-d spec CH7.10 to drain page
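
With pasid_mutex removed outright, the unlock/relock dance around iopf_queue_flush_dev() disappears rather than needing to be preserved. The hazard the deleted comment described is still worth understanding: it is the classic deadlock of flushing a work queue while holding a lock that its work items take. A minimal user-space model of it, using plain pthreads (purely illustrative, nothing here is kernel API):

/*
 * Deadlocks by design: the main thread holds the lock and waits for the
 * worker, while the worker waits for the lock. This mirrors holding
 * pasid_mutex across iopf_queue_flush_dev() while fault work needs it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pasid_lock = PTHREAD_MUTEX_INITIALIZER;

static void *fault_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pasid_lock);	/* never acquired: main holds it */
	puts("fault handled");
	pthread_mutex_unlock(&pasid_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&pasid_lock);	/* like taking pasid_mutex */
	pthread_create(&worker, NULL, fault_worker, NULL);
	pthread_join(worker, NULL);		/* the "flush": waits forever */
	pthread_mutex_unlock(&pasid_lock);	/* never reached */
	return 0;
}
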
@@ -827,26 +804,14 @@ int intel_svm_page_response(struct device *dev,
 	return ret;
 }
 
-void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
-{
-	mutex_lock(&pasid_mutex);
-	intel_svm_unbind_mm(dev, pasid);
-	mutex_unlock(&pasid_mutex);
-}
-
 static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 				   struct device *dev, ioasid_t pasid)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu = info->iommu;
 	struct mm_struct *mm = domain->mm;
-	int ret;
 
-	mutex_lock(&pasid_mutex);
-	ret = intel_svm_bind_mm(iommu, dev, mm);
-	mutex_unlock(&pasid_mutex);
-
-	return ret;
+	return intel_svm_bind_mm(iommu, dev, mm);
 }
 
 static void intel_svm_domain_free(struct iommu_domain *domain)
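
On the attach side the same reasoning applies, which is why intel_svm_set_dev_pasid() collapses to a direct tail call: the driver callback is presumably already entered under the group mutex. Again an abridged, from-memory sketch of the core attach path (assumed, not part of this patch; error unwinding elided):

/*
 * Abridged model of the attach path: the driver's set_dev_pasid()
 * callback already runs under group->mutex. Assumed code.
 */
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid)
{
	struct iommu_group *group = dev->iommu_group;
	int ret;

	mutex_lock(&group->mutex);
	ret = xa_insert(&group->pasid_array, pasid, domain, GFP_KERNEL);
	if (!ret)
		ret = __iommu_set_group_pasid(group, pasid);	/* -> set_dev_pasid() */
	mutex_unlock(&group->mutex);

	return ret;
}
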