@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 					  struct mm_struct *mm)
 {
 	int ret;
-	unsigned long flags;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_mmu_notifier *smmu_mn;
-	struct arm_smmu_master *master;
 
 	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
 		if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 		goto err_free_cd;
 	}
 
-	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
-					      cd);
-		if (ret) {
-			list_for_each_entry_from_reverse(
-				master, &smmu_domain->devices, domain_head)
-				arm_smmu_write_ctx_desc(
-					master, mm_get_enqcmd_pasid(mm), NULL);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-	if (ret)
-		goto err_put_notifier;
-
 	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
 	return smmu_mn;
 
-err_put_notifier:
-	/* Frees smmu_mn */
-	mmu_notifier_put(&smmu_mn->mn);
 err_free_cd:
 	arm_smmu_free_shared_cd(cd);
 	return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 
 	list_del(&smmu_mn->list);
 
-	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-					 NULL);
-
 	/*
 	 * If we went through clear(), we've already invalidated, and no
 	 * new TLB entry can have been formed.
@@ -381,13 +357,20 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 	arm_smmu_free_shared_cd(cd);
 }
 
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+			       struct mm_struct *mm)
 {
 	int ret;
 	struct arm_smmu_bond *bond;
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_domain *smmu_domain;
+
+	if (!(domain->type & __IOMMU_DOMAIN_PAGING))
+		return -ENODEV;
+	smmu_domain = to_smmu_domain(domain);
+	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+		return -ENODEV;
 
 	if (!master || !master->sva_enabled)
 		return -ENODEV;
@@ -404,9 +387,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 		goto err_free_bond;
 	}
 
+	ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+	if (ret)
+		goto err_put_notifier;
+
 	list_add(&bond->list, &master->bonds);
 	return 0;
 
+err_put_notifier:
+	arm_smmu_mmu_notifier_put(bond->smmu_mn);
 err_free_bond:
 	kfree(bond);
 	return ret;
@@ -568,6 +557,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
 	mutex_lock(&sva_lock);
+
+	arm_smmu_write_ctx_desc(master, id, NULL);
+
 	list_for_each_entry(t, &master->bonds, list) {
 		if (t->mm == mm) {
 			bond = t;
@@ -590,7 +582,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 	struct mm_struct *mm = domain->mm;
 
 	mutex_lock(&sva_lock);
-	ret = __arm_smmu_sva_bind(dev, mm);
+	ret = __arm_smmu_sva_bind(dev, id, mm);
 	mutex_unlock(&sva_lock);
 
 	return ret;