Skip to content

Commit 9ac0b33

Browse files
jgunthorpe authored and joergroedel committed
iommu/amd: Narrow the use of struct protection_domain to invalidation
The AMD io_pgtable stuff doesn't implement the tlb ops callbacks, instead it invokes the invalidation ops directly on the struct protection_domain. Narrow the use of struct protection_domain to only those few code paths. Make everything else properly use struct amd_io_pgtable through the call chains, which is the correct modular type for an io-pgtable module. Signed-off-by: Jason Gunthorpe <[email protected]> Reviewed-by: Vasant Hegde <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 47f218d commit 9ac0b33

File tree

2 files changed

+25
-19
lines changed

2 files changed

+25
-19
lines changed

drivers/iommu/amd/io_pgtable.c

Lines changed: 18 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -137,11 +137,13 @@ static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
137137
* another level increases the size of the address space by 9 bits to a size up
138138
* to 64 bits.
139139
*/
140-
static bool increase_address_space(struct protection_domain *domain,
140+
static bool increase_address_space(struct amd_io_pgtable *pgtable,
141141
unsigned long address,
142142
gfp_t gfp)
143143
{
144-
struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
144+
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
145+
struct protection_domain *domain =
146+
container_of(pgtable, struct protection_domain, iop);
145147
unsigned long flags;
146148
bool ret = true;
147149
u64 *pte;
@@ -152,17 +154,17 @@ static bool increase_address_space(struct protection_domain *domain,
152154

153155
spin_lock_irqsave(&domain->lock, flags);
154156

155-
if (address <= PM_LEVEL_SIZE(domain->iop.mode))
157+
if (address <= PM_LEVEL_SIZE(pgtable->mode))
156158
goto out;
157159

158160
ret = false;
159-
if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
161+
if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
160162
goto out;
161163

162-
*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
164+
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
163165

164-
domain->iop.root = pte;
165-
domain->iop.mode += 1;
166+
pgtable->root = pte;
167+
pgtable->mode += 1;
166168
amd_iommu_update_and_flush_device_table(domain);
167169

168170
pte = NULL;
@@ -175,31 +177,31 @@ static bool increase_address_space(struct protection_domain *domain,
175177
return ret;
176178
}
177179

178-
static u64 *alloc_pte(struct protection_domain *domain,
180+
static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
179181
unsigned long address,
180182
unsigned long page_size,
181183
u64 **pte_page,
182184
gfp_t gfp,
183185
bool *updated)
184186
{
185-
struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
187+
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
186188
int level, end_lvl;
187189
u64 *pte, *page;
188190

189191
BUG_ON(!is_power_of_2(page_size));
190192

191-
while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
193+
while (address > PM_LEVEL_SIZE(pgtable->mode)) {
192194
/*
193195
* Return an error if there is no memory to update the
194196
* page-table.
195197
*/
196-
if (!increase_address_space(domain, address, gfp))
198+
if (!increase_address_space(pgtable, address, gfp))
197199
return NULL;
198200
}
199201

200202

201-
level = domain->iop.mode - 1;
202-
pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
203+
level = pgtable->mode - 1;
204+
pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
203205
address = PAGE_SIZE_ALIGN(address, page_size);
204206
end_lvl = PAGE_SIZE_LEVEL(page_size);
205207

@@ -348,7 +350,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
348350
phys_addr_t paddr, size_t pgsize, size_t pgcount,
349351
int prot, gfp_t gfp, size_t *mapped)
350352
{
351-
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
353+
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
352354
LIST_HEAD(freelist);
353355
bool updated = false;
354356
u64 __pte, *pte;
@@ -365,7 +367,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
365367

366368
while (pgcount > 0) {
367369
count = PAGE_SIZE_PTE_COUNT(pgsize);
368-
pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
370+
pte = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);
369371

370372
ret = -ENOMEM;
371373
if (!pte)
@@ -402,6 +404,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
402404

403405
out:
404406
if (updated) {
407+
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
405408
unsigned long flags;
406409

407410
spin_lock_irqsave(&dom->lock, flags);

drivers/iommu/amd/io_pgtable_v2.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -233,8 +233,8 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
233233
phys_addr_t paddr, size_t pgsize, size_t pgcount,
234234
int prot, gfp_t gfp, size_t *mapped)
235235
{
236-
struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
237-
struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
236+
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
237+
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
238238
u64 *pte;
239239
unsigned long map_size;
240240
unsigned long mapped_size = 0;
@@ -251,7 +251,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
251251

252252
while (mapped_size < size) {
253253
map_size = get_alloc_page_size(pgsize);
254-
pte = v2_alloc_pte(cfg->amd.nid, pdom->iop.pgd,
254+
pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
255255
iova, map_size, gfp, &updated);
256256
if (!pte) {
257257
ret = -EINVAL;
@@ -266,8 +266,11 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
266266
}
267267

268268
out:
269-
if (updated)
269+
if (updated) {
270+
struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
271+
270272
amd_iommu_domain_flush_pages(pdom, o_iova, size);
273+
}
271274

272275
if (mapped)
273276
*mapped += mapped_size;

0 commit comments

Comments (0)