@@ -3281,10 +3281,15 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
 	spin_lock_init(&domain->lock);
 	spin_lock_init(&domain->cache_lock);
 	xa_init(&domain->iommu_array);
+	INIT_LIST_HEAD(&domain->s1_domains);
+	spin_lock_init(&domain->s1_lock);
 
 	domain->nid = dev_to_node(dev);
 	domain->use_first_level = first_stage;
 
+	domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+	domain->domain.ops = intel_iommu_ops.default_domain_ops;
+
 	/* calculate the address width */
 	addr_width = agaw_to_width(iommu->agaw);
 	if (addr_width > cap_mgaw(iommu->cap))
@@ -3326,62 +3331,73 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
 }
 
 static struct iommu_domain *
-intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
-				      const struct iommu_user_data *user_data)
+intel_iommu_domain_alloc_first_stage(struct device *dev,
+				     struct intel_iommu *iommu, u32 flags)
+{
+	struct dmar_domain *dmar_domain;
+
+	if (flags & ~IOMMU_HWPT_ALLOC_PASID)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* Only SL is available in legacy mode */
+	if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	dmar_domain = paging_domain_alloc(dev, true);
+	if (IS_ERR(dmar_domain))
+		return ERR_CAST(dmar_domain);
+	return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_second_stage(struct device *dev,
+				      struct intel_iommu *iommu, u32 flags)
 {
-	struct device_domain_info *info = dev_iommu_priv_get(dev);
-	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
-	bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
-	struct intel_iommu *iommu = info->iommu;
 	struct dmar_domain *dmar_domain;
-	struct iommu_domain *domain;
-	bool first_stage;
 
 	if (flags &
 	    (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
 	       IOMMU_HWPT_ALLOC_PASID)))
 		return ERR_PTR(-EOPNOTSUPP);
-	if (nested_parent && !nested_supported(iommu))
-		return ERR_PTR(-EOPNOTSUPP);
-	if (user_data || (dirty_tracking && !ssads_supported(iommu)))
+
+	if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
+	     !nested_supported(iommu)) ||
+	    ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
+	     !ssads_supported(iommu)))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	/*
-	 * Always allocate the guest compatible page table unless
-	 * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING
-	 * is specified.
-	 */
-	if (nested_parent || dirty_tracking) {
-		if (!sm_supported(iommu) || !ecap_slts(iommu->ecap))
-			return ERR_PTR(-EOPNOTSUPP);
-		first_stage = false;
-	} else {
-		first_stage = first_level_by_default(iommu);
-	}
+	/* Legacy mode always supports second stage */
+	if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
+		return ERR_PTR(-EOPNOTSUPP);
 
-	dmar_domain = paging_domain_alloc(dev, first_stage);
+	dmar_domain = paging_domain_alloc(dev, false);
 	if (IS_ERR(dmar_domain))
 		return ERR_CAST(dmar_domain);
-	domain = &dmar_domain->domain;
-	domain->type = IOMMU_DOMAIN_UNMANAGED;
-	domain->owner = &intel_iommu_ops;
-	domain->ops = intel_iommu_ops.default_domain_ops;
-
-	if (nested_parent) {
-		dmar_domain->nested_parent = true;
-		INIT_LIST_HEAD(&dmar_domain->s1_domains);
-		spin_lock_init(&dmar_domain->s1_lock);
-	}
 
-	if (dirty_tracking) {
-		if (dmar_domain->use_first_level) {
-			iommu_domain_free(domain);
-			return ERR_PTR(-EOPNOTSUPP);
-		}
-		domain->dirty_ops = &intel_dirty_ops;
-	}
+	dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
 
-	return domain;
+	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+		dmar_domain->domain.dirty_ops = &intel_dirty_ops;
+
+	return &dmar_domain->domain;
+}
+
+static struct iommu_domain *
+intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+				      const struct iommu_user_data *user_data)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct iommu_domain *domain;
+
+	if (user_data)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* Prefer first stage if possible by default. */
+	domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
+	if (domain != ERR_PTR(-EOPNOTSUPP))
+		return domain;
+	return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
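
The behavioural core of the new intel_iommu_domain_alloc_paging_flags() is its dispatch: it tries the first-stage allocator and falls back to the second stage only when the first stage reports -EOPNOTSUPP, so a genuine failure such as -ENOMEM from the first-stage path is propagated to the caller rather than retried. Below is a minimal, self-contained user-space sketch of that pattern; the ERR_PTR()/IS_ERR()/PTR_ERR() helpers are simplified re-implementations for the sketch only, and the two stage allocators and the hw_supports_first_stage toggle are hypothetical placeholders, not the driver's code.

```c
/*
 * Sketch of "prefer first stage, fall back to second stage on -EOPNOTSUPP".
 * All names below are stand-ins for illustration, not kernel APIs.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct domain { const char *stage; };

/* Simplified error-pointer helpers modelled on the kernel convention. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline bool IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }

static struct domain fs_domain = { "first stage" };
static struct domain ss_domain = { "second stage" };

/* Pretend hardware capability: flip to see the fallback change. */
static bool hw_supports_first_stage = false;

static struct domain *alloc_first_stage(void)
{
	if (!hw_supports_first_stage)
		return ERR_PTR(-EOPNOTSUPP);	/* caller may try second stage */
	return &fs_domain;
}

static struct domain *alloc_second_stage(void)
{
	return &ss_domain;
}

/* Only -EOPNOTSUPP from the first stage triggers the second-stage fallback. */
static struct domain *alloc_paging_domain(void)
{
	struct domain *domain = alloc_first_stage();

	if (domain != ERR_PTR(-EOPNOTSUPP))
		return domain;
	return alloc_second_stage();
}

int main(void)
{
	struct domain *domain = alloc_paging_domain();

	if (IS_ERR(domain)) {
		fprintf(stderr, "allocation failed: %ld\n", PTR_ERR(domain));
		return 1;
	}
	printf("allocated a %s domain\n", domain->stage);
	return 0;
}
```

With hw_supports_first_stage left false the sketch prints a second-stage domain; set it true and the first-stage domain is returned without ever reaching the fallback, mirroring the "prefer first stage if possible" comment in the patch.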
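The second-stage allocator screens its flags in two steps: any bit outside the supported mask is rejected outright, and a supported bit is then rejected if the matching hardware capability is absent. The sketch below mirrors that shape under assumed names; the HWPT_ALLOC_* constants and the nested_ok/dirty_ok booleans stand in for the real IOMMU_HWPT_ALLOC_* flags and for nested_supported()/ssads_supported(), so this is an illustration of the pattern, not the driver's code.

```c
/* Sketch of the two-step flag screen; all names here are placeholders. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWPT_ALLOC_NEST_PARENT		(1u << 0)
#define HWPT_ALLOC_DIRTY_TRACKING	(1u << 1)
#define HWPT_ALLOC_PASID		(1u << 2)

static int check_second_stage_flags(uint32_t flags, bool nested_ok, bool dirty_ok)
{
	/* Step 1: unknown flag bits are always an error. */
	if (flags & ~(HWPT_ALLOC_NEST_PARENT | HWPT_ALLOC_DIRTY_TRACKING |
		      HWPT_ALLOC_PASID))
		return -EOPNOTSUPP;

	/* Step 2: known flags still need the matching capability. */
	if (((flags & HWPT_ALLOC_NEST_PARENT) && !nested_ok) ||
	    ((flags & HWPT_ALLOC_DIRTY_TRACKING) && !dirty_ok))
		return -EOPNOTSUPP;

	return 0;
}

int main(void)
{
	/* Dirty tracking requested on hardware without that capability: rejected. */
	printf("%d\n", check_second_stage_flags(HWPT_ALLOC_DIRTY_TRACKING, true, false));
	/* Nest parent on capable hardware: accepted. */
	printf("%d\n", check_second_stage_flags(HWPT_ALLOC_NEST_PARENT, true, true));
	return 0;
}
```

The ordering matters in the same way it does in the patch: an unrecognised flag fails unconditionally, while a recognised flag only fails when its capability check does.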