@@ -74,6 +74,9 @@ struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
 
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+                                   struct device *dev);
+
 static void set_dte_entry(struct amd_iommu *iommu,
                           struct iommu_dev_data *dev_data);
 
@@ -2263,43 +2266,41 @@ void protection_domain_free(struct protection_domain *domain)
         kfree(domain);
 }
 
+static void protection_domain_init(struct protection_domain *domain, int nid)
+{
+        spin_lock_init(&domain->lock);
+        INIT_LIST_HEAD(&domain->dev_list);
+        INIT_LIST_HEAD(&domain->dev_data_list);
+        domain->iop.pgtbl.cfg.amd.nid = nid;
+}
+
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
-        struct io_pgtable_ops *pgtbl_ops;
         struct protection_domain *domain;
-        int pgtable;
 
         domain = kzalloc(sizeof(*domain), GFP_KERNEL);
         if (!domain)
                 return NULL;
 
         domain->id = domain_id_alloc();
-        if (!domain->id)
-                goto err_free;
+        if (!domain->id) {
+                kfree(domain);
+                return NULL;
+        }
 
-        spin_lock_init(&domain->lock);
-        INIT_LIST_HEAD(&domain->dev_list);
-        INIT_LIST_HEAD(&domain->dev_data_list);
-        domain->iop.pgtbl.cfg.amd.nid = nid;
+        protection_domain_init(domain, nid);
+
+        return domain;
+}
+
+static int pdom_setup_pgtable(struct protection_domain *domain,
+                              unsigned int type, int pgtable)
+{
+        struct io_pgtable_ops *pgtbl_ops;
 
-        switch (type) {
         /* No need to allocate io pgtable ops in passthrough mode */
-        case IOMMU_DOMAIN_IDENTITY:
-        case IOMMU_DOMAIN_SVA:
-                return domain;
-        case IOMMU_DOMAIN_DMA:
-                pgtable = amd_iommu_pgtable;
-                break;
-        /*
-         * Force IOMMU v1 page table when allocating
-         * domain for pass-through devices.
-         */
-        case IOMMU_DOMAIN_UNMANAGED:
-                pgtable = AMD_IOMMU_V1;
-                break;
-        default:
-                goto err_id;
-        }
+        if (!(type & __IOMMU_DOMAIN_PAGING))
+                return 0;
 
         switch (pgtable) {
         case AMD_IOMMU_V1:
@@ -2309,25 +2310,20 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
                 domain->pd_mode = PD_MODE_V2;
                 break;
         default:
-                goto err_id;
+                return -EINVAL;
         }
 
         pgtbl_ops =
                 alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
         if (!pgtbl_ops)
-                goto err_id;
+                return -ENOMEM;
 
-        return domain;
-err_id:
-        domain_id_free(domain->id);
-err_free:
-        kfree(domain);
-        return NULL;
+        return 0;
 }
 
-static inline u64 dma_max_address(void)
+static inline u64 dma_max_address(int pgtable)
 {
-        if (amd_iommu_pgtable == AMD_IOMMU_V1)
+        if (pgtable == AMD_IOMMU_V1)
                 return ~0ULL;
 
         /* V2 with 4/5 level page table */
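
For reference, the control flow after this split is: protection_domain_alloc() creates and IDs the domain, protection_domain_init() does the common bookkeeping, and pdom_setup_pgtable() attaches io-pgtable ops only to paging domains, returning an errno the caller must unwind. Below is a minimal userspace model of that flow; all types and constants are stubs standing in for the kernel's, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

/* Stub constants standing in for the kernel's (values illustrative) */
#define __IOMMU_DOMAIN_PAGING  (1U << 0)
#define AMD_IOMMU_V1           1
#define AMD_IOMMU_V2           2

struct protection_domain {
        int id;
        int pd_mode;            /* set once a page table is attached */
};

static int next_id = 1;

/* Steps 1+2: allocate, assign an ID, init bookkeeping (modeled) */
static struct protection_domain *protection_domain_alloc(void)
{
        struct protection_domain *d = calloc(1, sizeof(*d));
        if (!d)
                return NULL;
        d->id = next_id++;      /* kernel: domain_id_alloc() */
        return d;
}

/* Step 3: only paging domains get io-pgtable ops */
static int pdom_setup_pgtable(struct protection_domain *d,
                              unsigned int type, int pgtable)
{
        if (!(type & __IOMMU_DOMAIN_PAGING))
                return 0;       /* passthrough: nothing to do */
        switch (pgtable) {
        case AMD_IOMMU_V1: d->pd_mode = 1; break;
        case AMD_IOMMU_V2: d->pd_mode = 2; break;
        default: return -1;     /* kernel: -EINVAL */
        }
        return 0;
}

int main(void)
{
        struct protection_domain *d = protection_domain_alloc();
        if (!d)
                return 1;
        /* The caller owns the unwind now: free ID + memory on failure */
        if (pdom_setup_pgtable(d, __IOMMU_DOMAIN_PAGING, AMD_IOMMU_V2)) {
                free(d);        /* kernel: domain_id_free() + kfree() */
                return 1;
        }
        printf("domain %d, pd_mode v%d\n", d->id, d->pd_mode);
        free(d);
        return 0;
}

With the unwind moved to the caller, protection_domain_alloc() loses its goto err_id/err_free ladder and is left with a single failure mode.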
@@ -2340,11 +2336,13 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 }
 
 static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
-                                                  struct device *dev, u32 flags)
+                                                  struct device *dev,
+                                                  u32 flags, int pgtable)
 {
         bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
         struct protection_domain *domain;
         struct amd_iommu *iommu = NULL;
+        int ret;
 
         if (dev)
                 iommu = get_amd_iommu_from_dev(dev);
@@ -2356,16 +2354,20 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
         if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
                 return ERR_PTR(-EINVAL);
 
-        if (dirty_tracking && !amd_iommu_hd_support(iommu))
-                return ERR_PTR(-EOPNOTSUPP);
-
         domain = protection_domain_alloc(type,
                                          dev ? dev_to_node(dev) : NUMA_NO_NODE);
         if (!domain)
                 return ERR_PTR(-ENOMEM);
 
+        ret = pdom_setup_pgtable(domain, type, pgtable);
+        if (ret) {
+                domain_id_free(domain->id);
+                kfree(domain);
+                return ERR_PTR(ret);
+        }
+
         domain->domain.geometry.aperture_start = 0;
-        domain->domain.geometry.aperture_end   = dma_max_address();
+        domain->domain.geometry.aperture_end   = dma_max_address(pgtable);
         domain->domain.geometry.force_aperture = true;
         domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
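
Passing the page-table format into dma_max_address() matters because a domain's aperture must now match the table chosen for that particular domain rather than the global amd_iommu_pgtable default: v1 tables can cover the full 64-bit IOVA space (the ~0ULL above), while v2 tables are bounded by their 4- or 5-level height. A rough standalone model of the arithmetic; the 48/57-bit widths are an assumption drawn from the "4/5 level" comment, and the driver derives the real bound from its configured level count:

#include <stdio.h>
#include <stdint.h>

#define AMD_IOMMU_V1 1
#define AMD_IOMMU_V2 2

/* Illustrative: 4-level ~ 48-bit space, 5-level ~ 57-bit (assumed) */
static uint64_t dma_max_address(int pgtable, int levels)
{
        if (pgtable == AMD_IOMMU_V1)
                return ~0ULL;
        return (1ULL << (levels == 5 ? 57 : 48)) - 1;
}

int main(void)
{
        printf("v1:       %#llx\n",
               (unsigned long long)dma_max_address(AMD_IOMMU_V1, 0));
        printf("v2 4-lvl: %#llx\n",
               (unsigned long long)dma_max_address(AMD_IOMMU_V2, 4));
        printf("v2 5-lvl: %#llx\n",
               (unsigned long long)dma_max_address(AMD_IOMMU_V2, 5));
        return 0;
}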
@@ -2383,8 +2385,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 {
         struct iommu_domain *domain;
+        int pgtable = amd_iommu_pgtable;
 
-        domain = do_iommu_domain_alloc(type, NULL, 0);
+        /*
+         * Force IOMMU v1 page table when allocating
+         * domain for pass-through devices.
+         */
+        if (type == IOMMU_DOMAIN_UNMANAGED)
+                pgtable = AMD_IOMMU_V1;
+
+        domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
         if (IS_ERR(domain))
                 return NULL;
 
@@ -2398,11 +2408,36 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 
 {
         unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+        struct amd_iommu *iommu = NULL;
+        const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+                                    IOMMU_HWPT_ALLOC_PASID;
+
+        if (dev)
+                iommu = get_amd_iommu_from_dev(dev);
 
-        if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
+        if ((flags & ~supported_flags) || parent || user_data)
                 return ERR_PTR(-EOPNOTSUPP);
 
-        return do_iommu_domain_alloc(type, dev, flags);
+        /* Allocate domain with v2 page table if IOMMU supports PASID. */
+        if (flags & IOMMU_HWPT_ALLOC_PASID) {
+                if (!amd_iommu_pasid_supported())
+                        return ERR_PTR(-EOPNOTSUPP);
+
+                return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
+        }
+
+        /* Allocate domain with v1 page table for dirty tracking */
+        if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
+                if (iommu && amd_iommu_hd_support(iommu)) {
+                        return do_iommu_domain_alloc(type, dev,
+                                                     flags, AMD_IOMMU_V1);
+                }
+
+                return ERR_PTR(-EOPNOTSUPP);
+        }
+
+        /* If nothing specific is required use the kernel commandline default */
+        return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)
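
The rewritten amd_iommu_domain_alloc_user() now selects the page-table format from the allocation flags: PASID support requires a v2 table, hardware dirty tracking requires v1, and with no flags set the command-line default is used. A compact userspace model of just that dispatch; the flag values and helper results below are stand-ins, not the iommufd uapi:

#include <stdio.h>

/* Stand-in flag bits (illustrative values) */
#define IOMMU_HWPT_ALLOC_DIRTY_TRACKING (1U << 0)
#define IOMMU_HWPT_ALLOC_PASID          (1U << 1)

enum { PGTABLE_DEFAULT, PGTABLE_V1, PGTABLE_V2, UNSUPPORTED };

static int pick_pgtable(unsigned flags, int pasid_ok, int hd_ok)
{
        unsigned supported = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
                             IOMMU_HWPT_ALLOC_PASID;

        if (flags & ~supported)
                return UNSUPPORTED;
        if (flags & IOMMU_HWPT_ALLOC_PASID)          /* v2 carries PASIDs */
                return pasid_ok ? PGTABLE_V2 : UNSUPPORTED;
        if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) /* HD bits live in v1 */
                return hd_ok ? PGTABLE_V1 : UNSUPPORTED;
        return PGTABLE_DEFAULT;                      /* amd_iommu_pgtable */
}

int main(void)
{
        printf("%d\n", pick_pgtable(IOMMU_HWPT_ALLOC_PASID, 1, 0));          /* 2 */
        printf("%d\n", pick_pgtable(IOMMU_HWPT_ALLOC_DIRTY_TRACKING, 0, 1)); /* 1 */
        printf("%d\n", pick_pgtable(0, 0, 0));                               /* 0 */
        return 0;
}

Note the ordering: the PASID branch is checked first, so it decides the format when both flags are requested.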
@@ -2444,6 +2479,25 @@ static struct iommu_domain blocked_domain = {
         }
 };
 
+static struct protection_domain identity_domain;
+
+static const struct iommu_domain_ops identity_domain_ops = {
+        .attach_dev = amd_iommu_attach_device,
+};
+
+void amd_iommu_init_identity_domain(void)
+{
+        struct iommu_domain *domain = &identity_domain.domain;
+
+        domain->type = IOMMU_DOMAIN_IDENTITY;
+        domain->ops = &identity_domain_ops;
+        domain->owner = &amd_iommu_ops;
+
+        identity_domain.id = domain_id_alloc();
+
+        protection_domain_init(&identity_domain, NUMA_NO_NODE);
+}
+
 static int amd_iommu_attach_device(struct iommu_domain *dom,
                                    struct device *dev)
 {
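
The identity domain is now a statically allocated protection_domain published via the ops table (last hunk below), so the core can attach devices in passthrough mode without going through domain_alloc; only .attach_dev is needed, which is why the forward declaration of amd_iommu_attach_device() appears in the first hunk. This relies on embedding the iommu_domain inside the driver's protection_domain. A trimmed sketch of the container pattern, with simplified structs for illustration only:

#include <stdio.h>
#include <stddef.h>

/* Simplified shapes of the two structs (fields trimmed) */
struct iommu_domain {
        int type;
};

struct protection_domain {
        struct iommu_domain domain;     /* embedded, not a pointer */
        int id;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct protection_domain identity_domain;    /* static, never freed */

int main(void)
{
        struct iommu_domain *dom = &identity_domain.domain;

        /* Core code holds the iommu_domain; the driver recovers the
         * containing protection_domain with container_of(). */
        struct protection_domain *pdom =
                container_of(dom, struct protection_domain, domain);

        pdom->id = 1;
        printf("same object: %d\n", pdom == &identity_domain);
        return 0;
}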
@@ -2842,6 +2896,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
 const struct iommu_ops amd_iommu_ops = {
         .capable = amd_iommu_capable,
         .blocked_domain = &blocked_domain,
+        .identity_domain = &identity_domain.domain,
         .domain_alloc = amd_iommu_domain_alloc,
         .domain_alloc_user = amd_iommu_domain_alloc_user,
         .domain_alloc_sva = amd_iommu_domain_alloc_sva,