 #include <linux/pci-ats.h>
 #include <linux/platform_device.h>
 #include <kunit/visibility.h>
+#include <uapi/linux/iommufd.h>
 
 #include "arm-smmu-v3.h"
 #include "../../dma-iommu.h"
@@ -37,6 +38,7 @@ MODULE_PARM_DESC(disable_msipolling,
 	"Disable MSI-based polling for CMD_SYNC completion.");
 
 static struct iommu_ops arm_smmu_ops;
+static struct iommu_dirty_ops arm_smmu_dirty_ops;
 
 enum arm_smmu_msi_index {
 	EVTQ_MSI_INDEX,
@@ -82,7 +84,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
 };
 
 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
-				    struct arm_smmu_device *smmu);
+				    struct arm_smmu_device *smmu, u32 flags);
 static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
 
 static void parse_driver_options(struct arm_smmu_device *smmu)
@@ -2282,7 +2284,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 	int ret;
 
-	ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+	ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0);
 	if (ret) {
 		kfree(smmu_domain);
 		return ERR_PTR(ret);
@@ -2346,56 +2348,63 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 }
 
 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
-				    struct arm_smmu_device *smmu)
+				    struct arm_smmu_device *smmu, u32 flags)
 {
 	int ret;
-	unsigned long ias, oas;
 	enum io_pgtable_fmt fmt;
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
 	int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
 				 struct arm_smmu_domain *smmu_domain);
+	bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 
 	/* Restrict the stage to what we can actually support */
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 
+	pgtbl_cfg = (struct io_pgtable_cfg) {
+		.pgsize_bitmap	= smmu->pgsize_bitmap,
+		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
+		.tlb		= &arm_smmu_flush_ops,
+		.iommu_dev	= smmu->dev,
+	};
+
 	switch (smmu_domain->stage) {
-	case ARM_SMMU_DOMAIN_S1:
-		ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
-		ias = min_t(unsigned long, ias, VA_BITS);
-		oas = smmu->ias;
+	case ARM_SMMU_DOMAIN_S1: {
+		unsigned long ias = (smmu->features &
+				     ARM_SMMU_FEAT_VAX) ? 52 : 48;
+
+		pgtbl_cfg.ias = min_t(unsigned long, ias, VA_BITS);
+		pgtbl_cfg.oas = smmu->ias;
+		if (enable_dirty)
+			pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_HD;
 		fmt = ARM_64_LPAE_S1;
 		finalise_stage_fn = arm_smmu_domain_finalise_s1;
 		break;
+	}
 	case ARM_SMMU_DOMAIN_S2:
-		ias = smmu->ias;
-		oas = smmu->oas;
+		if (enable_dirty)
+			return -EOPNOTSUPP;
+		pgtbl_cfg.ias = smmu->ias;
+		pgtbl_cfg.oas = smmu->oas;
 		fmt = ARM_64_LPAE_S2;
 		finalise_stage_fn = arm_smmu_domain_finalise_s2;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= smmu->pgsize_bitmap,
-		.ias		= ias,
-		.oas		= oas,
-		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
-		.tlb		= &arm_smmu_flush_ops,
-		.iommu_dev	= smmu->dev,
-	};
-
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops)
 		return -ENOMEM;
 
 	smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
 	smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
 	smmu_domain->domain.geometry.force_aperture = true;
+	if (enable_dirty && smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+		smmu_domain->domain.dirty_ops = &arm_smmu_dirty_ops;
 
 	ret = finalise_stage_fn(smmu, smmu_domain);
 	if (ret < 0) {
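
Note on IO_PGTABLE_QUIRK_ARM_HD above: the quirk asks the LPAE stage-1 page-table code to install leaf entries with the Dirty Bit Modifier (DBM) set, so the SMMU records a write by clearing the read-only permission bit instead of faulting. A minimal sketch of the descriptor-level test, assuming VMSAv8-64 stage-1 bit positions; the macro and helper names here are illustrative, not the in-tree ones:

#include <linux/types.h>

/* Illustrative VMSAv8-64 stage-1 descriptor bits (sketch only) */
#define PTE_DBM		(1ULL << 51)	/* Dirty Bit Modifier */
#define PTE_AP_RDONLY	(1ULL << 7)	/* AP[2]: 1 = read-only */

/*
 * With DBM set, hardware marks a page dirty by clearing AP[2] on the
 * first write, so a DBM-capable descriptor that is writeable has been
 * written to since the dirty state was last cleared.
 */
static inline bool pte_is_writeable_dirty(u64 pte)
{
	return (pte & (PTE_DBM | PTE_AP_RDONLY)) == PTE_DBM;
}
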
@@ -2745,7 +2754,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	mutex_lock(&smmu_domain->init_mutex);
 
 	if (!smmu_domain->smmu) {
-		ret = arm_smmu_domain_finalise(smmu_domain, smmu);
+		ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
 	} else if (smmu_domain->smmu != smmu)
 		ret = -EINVAL;
 
@@ -2810,7 +2819,7 @@ static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain,
 
 	mutex_lock(&smmu_domain->init_mutex);
 	if (!smmu_domain->smmu)
-		ret = arm_smmu_domain_finalise(smmu_domain, smmu);
+		ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
 	else if (smmu_domain->smmu != smmu)
 		ret = -EINVAL;
 	mutex_unlock(&smmu_domain->init_mutex);
@@ -3028,10 +3037,13 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 			   const struct iommu_user_data *user_data)
 {
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+	const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct arm_smmu_domain *smmu_domain;
 	int ret;
 
-	if (flags || parent || user_data)
+	if (flags & ~PAGING_FLAGS)
+		return ERR_PTR(-EOPNOTSUPP);
+	if (parent || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 
 	smmu_domain = arm_smmu_domain_alloc();
@@ -3040,7 +3052,7 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 
 	smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
 	smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops;
-	ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+	ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags);
 	if (ret)
 		goto err_free;
 	return &smmu_domain->domain;
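
With arm_smmu_domain_alloc_user() now accepting IOMMU_HWPT_ALLOC_DIRTY_TRACKING, userspace can request a dirty-tracking-capable hwpt through iommufd. A rough usage sketch, assuming iommufd_fd, dev_id and ioas_id were obtained from earlier iommufd setup; error handling trimmed:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int alloc_dirty_hwpt(int iommufd_fd, __u32 dev_id, __u32 ioas_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
		.dev_id = dev_id,
		.pt_id = ioas_id,
	};

	/* On success the new hwpt's domain has dirty_ops wired up. */
	if (ioctl(iommufd_fd, IOMMU_HWPT_ALLOC, &cmd))
		return -1;
	return cmd.out_hwpt_id;
}
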
@@ -3295,6 +3307,27 @@ static void arm_smmu_release_device(struct device *dev)
 	kfree(master);
 }
 
+static int arm_smmu_read_and_clear_dirty(struct iommu_domain *domain,
+					 unsigned long iova, size_t size,
+					 unsigned long flags,
+					 struct iommu_dirty_bitmap *dirty)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
+}
+
+static int arm_smmu_set_dirty_tracking(struct iommu_domain *domain,
+				       bool enabled)
+{
+	/*
+	 * Always enabled and the dirty bitmap is cleared prior to
+	 * set_dirty_tracking().
+	 */
+	return 0;
+}
+
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
 	struct iommu_group *group;
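
For context, these two callbacks are driven by the iommufd core: set_dirty_tracking() is invoked when userspace toggles tracking on the hwpt, and read_and_clear_dirty() walks an IOVA range and records dirtied pages into an iommu_dirty_bitmap. A simplified caller sketch, assuming the iommu_dirty_bitmap helpers from include/linux/iommu.h (the real iteration lives in iommufd's io_pagetable code):

#include <linux/iommu.h>

static int harvest_dirty(struct iommu_domain *domain, unsigned long iova,
			 size_t size, struct iova_bitmap *bitmap)
{
	const struct iommu_dirty_ops *ops = domain->dirty_ops;
	struct iommu_iotlb_gather gather;
	struct iommu_dirty_bitmap dirty;
	int ret;

	if (!ops)
		return -EOPNOTSUPP;

	/* Record dirty IOVAs into 'bitmap'; 'gather' collects TLB invalidations. */
	iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
	ret = ops->read_and_clear_dirty(domain, iova, size, 0, &dirty);
	/* Flush so subsequent DMA writes are marked dirty again. */
	iommu_iotlb_sync(domain, &gather);
	return ret;
}
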
@@ -3453,6 +3486,11 @@ static struct iommu_ops arm_smmu_ops = {
 	}
 };
 
+static struct iommu_dirty_ops arm_smmu_dirty_ops = {
+	.read_and_clear_dirty	= arm_smmu_read_and_clear_dirty,
+	.set_dirty_tracking	= arm_smmu_set_dirty_tracking,
+};
+
 /* Probing and initialisation functions */
 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
 				   struct arm_smmu_queue *q,