@@ -37,6 +37,7 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/dma.h>
+#include <uapi/linux/iommufd.h>
 
 #include "amd_iommu.h"
 #include "../dma-iommu.h"
@@ -63,6 +64,7 @@ LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
 
 const struct iommu_ops amd_iommu_ops;
+const struct iommu_dirty_ops amd_dirty_ops;
 
 int amd_iommu_max_glx_val = -1;
 
@@ -1738,6 +1740,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
         if (ppr)
                 pte_root |= 1ULL << DEV_ENTRY_PPR;
 
+        if (domain->dirty_tracking)
+                pte_root |= DTE_FLAG_HAD;
+
         if (domain->flags & PD_IOMMUV2_MASK) {
                 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
                 u64 glx  = domain->glx;
@@ -2195,28 +2200,79 @@ static inline u64 dma_max_address(void)
         return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
 }
 
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 {
+        return iommu && (iommu->features & FEATURE_HDSUP);
+}
+
+static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
+                                                  struct device *dev, u32 flags)
+{
+        bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
         struct protection_domain *domain;
+        struct amd_iommu *iommu = NULL;
+
+        if (dev) {
+                iommu = rlookup_amd_iommu(dev);
+                if (!iommu)
+                        return ERR_PTR(-ENODEV);
+        }
 
         /*
          * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
          * default to use IOMMU_DOMAIN_DMA[_FQ].
          */
         if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
-                return NULL;
+                return ERR_PTR(-EINVAL);
+
+        if (dirty_tracking && !amd_iommu_hd_support(iommu))
+                return ERR_PTR(-EOPNOTSUPP);
 
         domain = protection_domain_alloc(type);
         if (!domain)
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
         domain->domain.geometry.aperture_start = 0;
         domain->domain.geometry.aperture_end   = dma_max_address();
         domain->domain.geometry.force_aperture = true;
 
+        if (iommu) {
+                domain->domain.type = type;
+                domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
+                domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+
+                if (dirty_tracking)
+                        domain->domain.dirty_ops = &amd_dirty_ops;
+        }
+
         return &domain->domain;
 }
 
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
+{
+        struct iommu_domain *domain;
+
+        domain = do_iommu_domain_alloc(type, NULL, 0);
+        if (IS_ERR(domain))
+                return NULL;
+
+        return domain;
+}
+
+static struct iommu_domain *
+amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
+                            struct iommu_domain *parent,
+                            const struct iommu_user_data *user_data)
+
+{
+        unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+
+        if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
+                return ERR_PTR(-EOPNOTSUPP);
+
+        return do_iommu_domain_alloc(type, dev, flags);
+}
+
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
         struct protection_domain *domain;
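Note: the IOMMU_HWPT_ALLOC_DIRTY_TRACKING flag checked in do_iommu_domain_alloc() is expected to arrive from userspace through the iommufd hardware page-table allocation ioctl. The sketch below is illustrative only and is not part of this patch; the struct iommu_hwpt_alloc layout follows the iommufd uAPI from the same kernel series, and iommufd_fd, dev_id and ioas_id are placeholders assumed to have been set up earlier (opened /dev/iommu, device bound, IOAS created).

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Illustrative sketch: allocate a HWPT with dirty tracking enforced. */
static int alloc_dirty_hwpt(int iommufd_fd, __u32 dev_id, __u32 ioas_id,
                            __u32 *out_hwpt_id)
{
        struct iommu_hwpt_alloc cmd = {
                .size = sizeof(cmd),
                .flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
                .dev_id = dev_id,
                .pt_id = ioas_id,
        };

        /* Reaches amd_iommu_domain_alloc_user() -> do_iommu_domain_alloc() */
        if (ioctl(iommufd_fd, IOMMU_HWPT_ALLOC, &cmd))
                return -1;

        *out_hwpt_id = cmd.out_hwpt_id;
        return 0;
}

If the device's IOMMU lacks FEATURE_HDSUP, the allocation above is expected to fail with EOPNOTSUPP, matching the amd_iommu_hd_support() check in the hunk.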
@@ -2253,6 +2309,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 
         dev_data->defer_attach = false;
 
+        /*
+         * Restrict to devices with compatible IOMMU hardware support
+         * when enforcement of dirty tracking is enabled.
+         */
+        if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
+                return -EINVAL;
+
         if (dev_data->domain)
                 detach_device(dev);
 
@@ -2372,13 +2435,85 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
                 return true;
         case IOMMU_CAP_DEFERRED_FLUSH:
                 return true;
+        case IOMMU_CAP_DIRTY_TRACKING: {
+                struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+
+                return amd_iommu_hd_support(iommu);
+        }
         default:
                 break;
         }
 
         return false;
 }
 
+static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
+                                        bool enable)
+{
+        struct protection_domain *pdomain = to_pdomain(domain);
+        struct dev_table_entry *dev_table;
+        struct iommu_dev_data *dev_data;
+        bool domain_flush = false;
+        struct amd_iommu *iommu;
+        unsigned long flags;
+        u64 pte_root;
+
+        spin_lock_irqsave(&pdomain->lock, flags);
+        if (!(pdomain->dirty_tracking ^ enable)) {
+                spin_unlock_irqrestore(&pdomain->lock, flags);
+                return 0;
+        }
+
+        list_for_each_entry(dev_data, &pdomain->dev_list, list) {
+                iommu = rlookup_amd_iommu(dev_data->dev);
+                if (!iommu)
+                        continue;
+
+                dev_table = get_dev_table(iommu);
+                pte_root = dev_table[dev_data->devid].data[0];
+
+                pte_root = (enable ? pte_root | DTE_FLAG_HAD :
+                            pte_root & ~DTE_FLAG_HAD);
+
+                /* Flush device DTE */
+                dev_table[dev_data->devid].data[0] = pte_root;
+                device_flush_dte(dev_data);
+                domain_flush = true;
+        }
+
+        /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
+        if (domain_flush) {
+                amd_iommu_domain_flush_tlb_pde(pdomain);
+                amd_iommu_domain_flush_complete(pdomain);
+        }
+        pdomain->dirty_tracking = enable;
+        spin_unlock_irqrestore(&pdomain->lock, flags);
+
+        return 0;
+}
+
+static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
+                                          unsigned long iova, size_t size,
+                                          unsigned long flags,
+                                          struct iommu_dirty_bitmap *dirty)
+{
+        struct protection_domain *pdomain = to_pdomain(domain);
+        struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
+        unsigned long lflags;
+
+        if (!ops || !ops->read_and_clear_dirty)
+                return -EOPNOTSUPP;
+
+        spin_lock_irqsave(&pdomain->lock, lflags);
+        if (!pdomain->dirty_tracking && dirty->bitmap) {
+                spin_unlock_irqrestore(&pdomain->lock, lflags);
+                return -EINVAL;
+        }
+        spin_unlock_irqrestore(&pdomain->lock, lflags);
+
+        return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
+}
+
 static void amd_iommu_get_resv_regions(struct device *dev,
                                        struct list_head *head)
 {
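For orientation, here is a sketch of how the IOMMU core / iommufd side is expected to drive the two callbacks added above. It is not part of this driver change: struct iova_bitmap, iommu_dirty_bitmap_init() and the gather-based IOTLB sync are assumed from the companion core patches of this series.

#include <linux/iommu.h>

/* Illustrative only: mirrors the expected caller of the dirty ops above. */
static int sketch_harvest_dirty(struct iommu_domain *domain,
                                struct iova_bitmap *bitmap,
                                unsigned long iova, size_t size)
{
        struct iommu_iotlb_gather gather;
        struct iommu_dirty_bitmap dirty;
        int ret;

        if (!domain->dirty_ops)
                return -EOPNOTSUPP;

        /* Sets DTE[HAD] for every device in the domain (see above) */
        ret = domain->dirty_ops->set_dirty_tracking(domain, true);
        if (ret)
                return ret;

        iommu_iotlb_gather_init(&gather);
        iommu_dirty_bitmap_init(&dirty, bitmap, &gather);

        /* Walks the IOPTEs, records dirty IOVAs and clears the dirty bits */
        ret = domain->dirty_ops->read_and_clear_dirty(domain, iova, size, 0, &dirty);

        /* Invalidate cached translations so new writes re-mark pages dirty */
        iommu_iotlb_sync(domain, &gather);
        return ret;
}

Ending with an IOTLB sync mirrors the driver's own comment above: cached translations do not update the IOPTE dirty bit, so the IOTLB has to be invalidated after clearing for subsequent DMA writes to be recorded again.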
@@ -2500,9 +2635,15 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
         return true;
 }
 
+const struct iommu_dirty_ops amd_dirty_ops = {
+        .set_dirty_tracking = amd_iommu_set_dirty_tracking,
+        .read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
+};
+
 const struct iommu_ops amd_iommu_ops = {
         .capable = amd_iommu_capable,
         .domain_alloc = amd_iommu_domain_alloc,
+        .domain_alloc_user = amd_iommu_domain_alloc_user,
         .probe_device = amd_iommu_probe_device,
         .release_device = amd_iommu_release_device,
         .probe_finalize = amd_iommu_probe_finalize,
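With amd_dirty_ops registered, the expected end-to-end flow is that userspace toggles tracking and harvests the per-IOVA dirty state through the iommufd HWPT ioctls. The sketch below is illustrative only; the struct layouts follow the iommufd dirty-tracking uAPI merged alongside this driver work, and the bitmap buffer is assumed to hold one bit per page_size unit of the queried range.

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Illustrative only: hwpt_id comes from the earlier IOMMU_HWPT_ALLOC example. */
static int track_and_fetch_dirty(int iommufd_fd, __u32 hwpt_id, __u64 iova,
                                 __u64 length, __u64 page_size, void *bitmap)
{
        struct iommu_hwpt_set_dirty_tracking set = {
                .size = sizeof(set),
                .flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
                .hwpt_id = hwpt_id,
        };
        struct iommu_hwpt_get_dirty_bitmap get = {
                .size = sizeof(get),
                .hwpt_id = hwpt_id,
                .iova = iova,
                .length = length,
                .page_size = page_size,
                .data = (__u64)(unsigned long)bitmap,
        };

        /* Lands in amd_iommu_set_dirty_tracking(), toggling DTE[HAD] per device */
        if (ioctl(iommufd_fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set))
                return -1;

        /* Lands in amd_iommu_read_and_clear_dirty() via the io-pgtable walker */
        return ioctl(iommufd_fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get);
}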