@@ -137,11 +137,13 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct amd_io_pgtable *pgtable,
 				   unsigned long address,
 				   gfp_t gfp)
 {
-	struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+	struct protection_domain *domain =
+		container_of(pgtable, struct protection_domain, iop);
 	unsigned long flags;
 	bool ret = true;
 	u64 *pte;
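
A minimal, self-contained sketch of the container_of() pattern that the new lines above rely on: given a pointer to an embedded member, subtract the member's offset to recover the enclosing structure. The struct names and fields below (fake_io_pgtable, fake_protection_domain) are simplified stand-ins for illustration, not the real amd_io_pgtable/protection_domain layouts.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): step back from the member
 * pointer by the member's offset within the enclosing type. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_io_pgtable {
	int mode;			/* stand-in for amd_io_pgtable::mode */
};

struct fake_protection_domain {
	int id;
	struct fake_io_pgtable iop;	/* embedded, like protection_domain::iop */
};

int main(void)
{
	struct fake_protection_domain dom = { .id = 42, .iop = { .mode = 3 } };
	struct fake_io_pgtable *pgtable = &dom.iop;

	/* Walk back from the embedded page table to its owning domain,
	 * as increase_address_space() now does before taking domain->lock. */
	struct fake_protection_domain *back =
		container_of(pgtable, struct fake_protection_domain, iop);

	printf("id=%d mode=%d\n", back->id, back->mode);	/* id=42 mode=3 */
	return 0;
}
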
@@ -152,17 +154,17 @@ static bool increase_address_space(struct protection_domain *domain,
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
+	if (address <= PM_LEVEL_SIZE(pgtable->mode))
 		goto out;
 
 	ret = false;
-	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
+	if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
 		goto out;
 
-	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
+	*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
 
-	domain->iop.root  = pte;
-	domain->iop.mode += 1;
+	pgtable->root  = pte;
+	pgtable->mode += 1;
 	amd_iommu_update_and_flush_device_table(domain);
 
 	pte = NULL;
@@ -175,31 +177,31 @@ static bool increase_address_space(struct protection_domain *domain,
 	return ret;
 }
 
-static u64 *alloc_pte(struct protection_domain *domain,
+static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
 		      unsigned long address,
 		      unsigned long page_size,
 		      u64 **pte_page,
 		      gfp_t gfp,
 		      bool *updated)
 {
-	struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
 	int level, end_lvl;
 	u64 *pte, *page;
 
 	BUG_ON(!is_power_of_2(page_size));
 
-	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
+	while (address > PM_LEVEL_SIZE(pgtable->mode)) {
 		/*
 		 * Return an error if there is no memory to update the
 		 * page-table.
 		 */
-		if (!increase_address_space(domain, address, gfp))
+		if (!increase_address_space(pgtable, address, gfp))
 			return NULL;
 	}
 
 
-	level   = domain->iop.mode - 1;
-	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+	level   = pgtable->mode - 1;
+	pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
 	address = PAGE_SIZE_ALIGN(address, page_size);
 	end_lvl = PAGE_SIZE_LEVEL(page_size);
 
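
The walk in alloc_pte() matches the comment above increase_address_space(): each level resolves 9 IOVA bits above the 12-bit page offset, so adding a level grows the address space by 9 bits. A small stand-alone sketch of that indexing, using simplified LVL_SHIFT/LVL_INDEX macros rather than the kernel's PM_LEVEL_* definitions, with a hypothetical depth of 4:

#include <stdio.h>

/* Simplified stand-ins: 12-bit page offset, then 9 index bits per level,
 * i.e. 512 entries per page-table page. */
#define LVL_SHIFT(lvl)		(12 + (lvl) * 9)
#define LVL_INDEX(lvl, iova)	(((iova) >> LVL_SHIFT(lvl)) & 0x1ffULL)

int main(void)
{
	unsigned long long iova = 0x123456789000ULL;
	int mode = 4;	/* assumed page-table depth, like pgtable->mode */

	/* Top-down walk, starting at mode - 1 as alloc_pte() does. */
	for (int level = mode - 1; level >= 0; level--)
		printf("level %d: index %llu\n", level, LVL_INDEX(level, iova));

	return 0;
}
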
@@ -348,7 +350,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 				 phys_addr_t paddr, size_t pgsize, size_t pgcount,
 				 int prot, gfp_t gfp, size_t *mapped)
 {
-	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
+	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
 	LIST_HEAD(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
@@ -365,7 +367,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 	while (pgcount > 0) {
 		count = PAGE_SIZE_PTE_COUNT(pgsize);
-		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
+		pte   = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);
 
 		ret = -ENOMEM;
 		if (!pte)
@@ -402,6 +404,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 
 out:
 	if (updated) {
+		struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 		unsigned long flags;
 
 		spin_lock_irqsave(&dom->lock, flags);