@@ -383,11 +383,6 @@ static inline int domain_type_is_si(struct dmar_domain *domain)
 	return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
 }
 
-static inline bool domain_use_first_level(struct dmar_domain *domain)
-{
-	return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
-}
-
 static inline int domain_pfn_supported(struct dmar_domain *domain,
 				       unsigned long pfn)
 {
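
This hunk drops the domain_use_first_level() helper together with the DOMAIN_FLAG_USE_FIRST_LEVEL flag it masked out of domain->flags; every caller in the hunks below reads a dedicated bit-field instead. The matching header change is not shown in this diff, so the following is only a compilable sketch of what it implies; the stub typedef and the neighboring members are assumptions, not lines quoted from the patch:

typedef unsigned char u8;	/* kernel type, stubbed so the sketch builds standalone */

struct dmar_domain {
	int nid;			/* node id; other members elided */
	u8 has_iotlb_device:1;
	u8 use_first_level:1;		/* DMA translation for this domain goes
					 * through the first-level page table;
					 * otherwise the second level is used */
};

Turning the flag bit into a one-bit field costs no extra storage and lets callers test domain->use_first_level directly instead of going through a masking helper.
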
@@ -501,7 +496,7 @@ static int domain_update_iommu_superpage(struct dmar_domain *domain,
 	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd) {
 		if (iommu != skip) {
-			if (domain && domain_use_first_level(domain)) {
+			if (domain && domain->use_first_level) {
 				if (!cap_fl1gp_support(iommu->cap))
 					mask = 0x1;
 			} else {
@@ -579,7 +574,7 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
 	 * paging and 57-bits with 5-level paging). Hence, skip bit
	 * [N-1].
	 */
-	if (domain_use_first_level(domain))
+	if (domain->use_first_level)
 		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
 	else
 		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
@@ -947,7 +942,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
-			if (domain_use_first_level(domain))
+			if (domain->use_first_level)
 				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ -1498,7 +1493,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 	if (ih)
 		ih = 1 << 6;
 
-	if (domain_use_first_level(domain)) {
+	if (domain->use_first_level) {
 		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
 	} else {
 		unsigned long bitmask = aligned_pages - 1;
@@ -1552,7 +1547,7 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
 	 * It's a non-present to present mapping. Only flush if caching mode
	 * and second level.
	 */
-	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
+	if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
 		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -1568,7 +1563,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
 		struct intel_iommu *iommu = info->iommu;
 		u16 did = domain_id_iommu(dmar_domain, iommu);
 
-		if (domain_use_first_level(dmar_domain))
+		if (dmar_domain->use_first_level)
 			qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
 		else
 			iommu->flush.flush_iotlb(iommu, did, 0, 0,
@@ -1741,7 +1736,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 
 	domain->nid = NUMA_NO_NODE;
 	if (first_level_by_default(type))
-		domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
+		domain->use_first_level = true;
 	domain->has_iotlb_device = false;
 	INIT_LIST_HEAD(&domain->devices);
 	spin_lock_init(&domain->lock);
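
alloc_domain() is the only place the new bit is set, gated by first_level_by_default(), whose body lives elsewhere in iommu.c and is not part of these hunks. For context, here is a simplified, self-contained paraphrase of that policy; the helper name suffix, the parameters, and the capability checks are assumptions rather than code from the patch:

#include <stdbool.h>

enum { IOMMU_DOMAIN_UNMANAGED = 1 };	/* stand-in for the kernel's domain-type constant */

/*
 * Paraphrased decision: without scalable mode only second-level
 * translation exists; if exactly one level is usable, take it; if both
 * are usable, prefer first level except for unmanaged domains.
 */
static bool first_level_by_default_sketch(unsigned int type, bool scalable_mode,
					  bool fl_supported, bool sl_supported)
{
	if (!scalable_mode)
		return false;
	if (fl_supported != sl_supported)
		return fl_supported;
	return type != IOMMU_DOMAIN_UNMANAGED;
}

On that reading, default DMA domains on scalable-mode hardware come out of alloc_domain() with use_first_level set, which is why the mapping and flush paths later in this diff branch on the bit.
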
@@ -2173,7 +2168,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
 	attr |= DMA_FL_PTE_PRESENT;
-	if (domain_use_first_level(domain)) {
+	if (domain->use_first_level) {
 		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 		if (prot & DMA_PTE_WRITE)
 			attr |= DMA_FL_PTE_DIRTY;
@@ -2443,7 +2438,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
 		if (hw_pass_through && domain_type_is_si(domain))
 			ret = intel_pasid_setup_pass_through(iommu, domain,
					dev, PASID_RID2PASID);
-		else if (domain_use_first_level(domain))
+		else if (domain->use_first_level)
 			ret = domain_setup_first_level(iommu, domain, dev,
					PASID_RID2PASID);
 		else
@@ -4412,7 +4407,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
 	 * Second level page table supports per-PTE snoop control. The
	 * iommu_map() interface will handle this by setting SNP bit.
	 */
-	if (!domain_use_first_level(domain)) {
+	if (!domain->use_first_level) {
 		domain->set_pte_snp = true;
 		return;
 	}