@@ -383,11 +383,6 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
         return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
 }
 
-static inline bool domain_use_first_level(struct dmar_domain *domain)
-{
-        return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
-}
-
 static inline int domain_pfn_supported(struct dmar_domain *domain,
                                        unsigned long pfn)
 {
@@ -501,7 +496,7 @@ static int domain_update_iommu_superpage(struct dmar_domain *domain,
         rcu_read_lock();
         for_each_active_iommu(iommu, drhd) {
                 if (iommu != skip) {
-                        if (domain && domain_use_first_level(domain)) {
+                        if (domain && domain->use_first_level) {
                                 if (!cap_fl1gp_support(iommu->cap))
                                         mask = 0x1;
                         } else {
@@ -579,7 +574,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
          * paging and 57-bits with 5-level paging). Hence, skip bit
          * [N-1].
          */
-        if (domain_use_first_level(domain))
+        if (domain->use_first_level)
                 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
         else
                 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
@@ -947,7 +942,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
                 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
-                if (domain_use_first_level(domain))
+                if (domain->use_first_level)
                         pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 
                 if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ -1498,7 +1493,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
         if (ih)
                 ih = 1 << 6;
 
-        if (domain_use_first_level(domain)) {
+        if (domain->use_first_level) {
                 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
         } else {
                 unsigned long bitmask = aligned_pages - 1;
@@ -1552,7 +1547,7 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
          * It's a non-present to present mapping. Only flush if caching mode
          * and second level.
          */
-        if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
+        if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
                 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
         else
                 iommu_flush_write_buffer(iommu);
@@ -1568,7 +1563,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
                 struct intel_iommu *iommu = info->iommu;
                 u16 did = domain_id_iommu(dmar_domain, iommu);
 
-                if (domain_use_first_level(dmar_domain))
+                if (dmar_domain->use_first_level)
                         qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
                 else
                         iommu->flush.flush_iotlb(iommu, did, 0, 0,
@@ -1741,7 +1736,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 
         domain->nid = NUMA_NO_NODE;
         if (first_level_by_default(type))
-                domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
+                domain->use_first_level = true;
         domain->has_iotlb_device = false;
         INIT_LIST_HEAD(&domain->devices);
         spin_lock_init(&domain->lock);
@@ -2173,7 +2168,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
         attr |= DMA_FL_PTE_PRESENT;
-        if (domain_use_first_level(domain)) {
+        if (domain->use_first_level) {
                 attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
                 if (prot & DMA_PTE_WRITE)
                         attr |= DMA_FL_PTE_DIRTY;
@@ -2443,7 +2438,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
         if (hw_pass_through && domain_type_is_si(domain))
                 ret = intel_pasid_setup_pass_through(iommu, domain,
                                                      dev, PASID_RID2PASID);
-        else if (domain_use_first_level(domain))
+        else if (domain->use_first_level)
                 ret = domain_setup_first_level(iommu, domain, dev,
                                                PASID_RID2PASID);
         else
@@ -4412,7 +4407,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
          * Second level page table supports per-PTE snoop control. The
          * iommu_map() interface will handle this by setting SNP bit.
          */
-        if (!domain_use_first_level(domain)) {
+        if (!domain->use_first_level) {
                 domain->set_pte_snp = true;
                 return;
         }
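
The hunks above read domain->use_first_level directly, which presumes a companion header change that is not shown in this excerpt: the DOMAIN_FLAG_USE_FIRST_LEVEL flag is dropped in favor of a dedicated bit in struct dmar_domain. Below is a minimal sketch of what that field could look like; the neighboring members and exact placement are assumptions, not part of this diff.

/*
 * Sketch only: assumed companion change in the Intel IOMMU header.
 * The flag bit tested via domain->flags is replaced by a one-bit
 * field so callers can test domain->use_first_level directly.
 */
struct dmar_domain {
        /* ... earlier members elided ... */
        u8 has_iotlb_device:1;
        u8 use_first_level:1;   /* DMA translation for this domain goes
                                 * through the first-level page table;
                                 * otherwise the second-level table is
                                 * used.
                                 */
        /* ... remaining members elided ... */
};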