Skip to content

Commit 87f60cc

Browse files
Matthew Wilcox (Oracle) authored and joergroedel committed
iommu/vt-d: Use put_pages_list
page->freelist is for the use of slab. We already have the ability to free a list of pages in the core mm, but it requires the use of a list_head and for the pages to be chained together through page->lru. Switch the Intel IOMMU and IOVA code over to using put_pages_list(). Signed-off-by: Matthew Wilcox (Oracle) <[email protected]> [rm: split from original patch, cosmetic tweaks, fix fq entries] Signed-off-by: Robin Murphy <[email protected]> Reviewed-by: Lu Baolu <[email protected]> Link: https://lore.kernel.org/r/2115b560d9a0ce7cd4b948bd51a2b7bde8fdfd59.1639753638.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel <[email protected]>
1 parent ce00eec commit 87f60cc

File tree

5 files changed

+45
-79
lines changed

5 files changed

+45
-79
lines changed

drivers/iommu/dma-iommu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
447447
else if (gather && gather->queued)
448448
queue_iova(iovad, iova_pfn(iovad, iova),
449449
size >> iova_shift(iovad),
450-
gather->freelist);
450+
&gather->freelist);
451451
else
452452
free_iova_fast(iovad, iova_pfn(iovad, iova),
453453
size >> iova_shift(iovad));

drivers/iommu/intel/iommu.c

Lines changed: 31 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -1303,35 +1303,30 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
13031303
know the hardware page-walk will no longer touch them.
13041304
The 'pte' argument is the *parent* PTE, pointing to the page that is to
13051305
be freed. */
1306-
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1307-
int level, struct dma_pte *pte,
1308-
struct page *freelist)
1306+
static void dma_pte_list_pagetables(struct dmar_domain *domain,
1307+
int level, struct dma_pte *pte,
1308+
struct list_head *freelist)
13091309
{
13101310
struct page *pg;
13111311

13121312
pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1313-
pg->freelist = freelist;
1314-
freelist = pg;
1313+
list_add_tail(&pg->lru, freelist);
13151314

13161315
if (level == 1)
1317-
return freelist;
1316+
return;
13181317

13191318
pte = page_address(pg);
13201319
do {
13211320
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1322-
freelist = dma_pte_list_pagetables(domain, level - 1,
1323-
pte, freelist);
1321+
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
13241322
pte++;
13251323
} while (!first_pte_in_page(pte));
1326-
1327-
return freelist;
13281324
}
13291325

1330-
static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1331-
struct dma_pte *pte, unsigned long pfn,
1332-
unsigned long start_pfn,
1333-
unsigned long last_pfn,
1334-
struct page *freelist)
1326+
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
1327+
struct dma_pte *pte, unsigned long pfn,
1328+
unsigned long start_pfn, unsigned long last_pfn,
1329+
struct list_head *freelist)
13351330
{
13361331
struct dma_pte *first_pte = NULL, *last_pte = NULL;
13371332

@@ -1350,18 +1345,18 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
13501345
/* These suborbinate page tables are going away entirely. Don't
13511346
bother to clear them; we're just going to *free* them. */
13521347
if (level > 1 && !dma_pte_superpage(pte))
1353-
freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1348+
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
13541349

13551350
dma_clear_pte(pte);
13561351
if (!first_pte)
13571352
first_pte = pte;
13581353
last_pte = pte;
13591354
} else if (level > 1) {
13601355
/* Recurse down into a level that isn't *entirely* obsolete */
1361-
freelist = dma_pte_clear_level(domain, level - 1,
1362-
phys_to_virt(dma_pte_addr(pte)),
1363-
level_pfn, start_pfn, last_pfn,
1364-
freelist);
1356+
dma_pte_clear_level(domain, level - 1,
1357+
phys_to_virt(dma_pte_addr(pte)),
1358+
level_pfn, start_pfn, last_pfn,
1359+
freelist);
13651360
}
13661361
next:
13671362
pfn = level_pfn + level_size(level);
@@ -1370,47 +1365,28 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
13701365
if (first_pte)
13711366
domain_flush_cache(domain, first_pte,
13721367
(void *)++last_pte - (void *)first_pte);
1373-
1374-
return freelist;
13751368
}
13761369

13771370
/* We can't just free the pages because the IOMMU may still be walking
13781371
the page tables, and may have cached the intermediate levels. The
13791372
pages can only be freed after the IOTLB flush has been done. */
1380-
static struct page *domain_unmap(struct dmar_domain *domain,
1381-
unsigned long start_pfn,
1382-
unsigned long last_pfn,
1383-
struct page *freelist)
1373+
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
1374+
unsigned long last_pfn, struct list_head *freelist)
13841375
{
13851376
BUG_ON(!domain_pfn_supported(domain, start_pfn));
13861377
BUG_ON(!domain_pfn_supported(domain, last_pfn));
13871378
BUG_ON(start_pfn > last_pfn);
13881379

13891380
/* we don't need lock here; nobody else touches the iova range */
1390-
freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1391-
domain->pgd, 0, start_pfn, last_pfn,
1392-
freelist);
1381+
dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1382+
domain->pgd, 0, start_pfn, last_pfn, freelist);
13931383

13941384
/* free pgd */
13951385
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
13961386
struct page *pgd_page = virt_to_page(domain->pgd);
1397-
pgd_page->freelist = freelist;
1398-
freelist = pgd_page;
1399-
1387+
list_add_tail(&pgd_page->lru, freelist);
14001388
domain->pgd = NULL;
14011389
}
1402-
1403-
return freelist;
1404-
}
1405-
1406-
static void dma_free_pagelist(struct page *freelist)
1407-
{
1408-
struct page *pg;
1409-
1410-
while ((pg = freelist)) {
1411-
freelist = pg->freelist;
1412-
free_pgtable_page(page_address(pg));
1413-
}
14141390
}
14151391

14161392
/* iommu handling */
@@ -2095,11 +2071,10 @@ static void domain_exit(struct dmar_domain *domain)
20952071
domain_remove_dev_info(domain);
20962072

20972073
if (domain->pgd) {
2098-
struct page *freelist;
2074+
LIST_HEAD(freelist);
20992075

2100-
freelist = domain_unmap(domain, 0,
2101-
DOMAIN_MAX_PFN(domain->gaw), NULL);
2102-
dma_free_pagelist(freelist);
2076+
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
2077+
put_pages_list(&freelist);
21032078
}
21042079

21052080
free_domain_mem(domain);
@@ -4192,19 +4167,17 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
41924167
{
41934168
struct dmar_drhd_unit *drhd;
41944169
struct intel_iommu *iommu;
4195-
struct page *freelist;
4170+
LIST_HEAD(freelist);
41964171

4197-
freelist = domain_unmap(si_domain,
4198-
start_vpfn, last_vpfn,
4199-
NULL);
4172+
domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
42004173

42014174
rcu_read_lock();
42024175
for_each_active_iommu(iommu, drhd)
42034176
iommu_flush_iotlb_psi(iommu, si_domain,
42044177
start_vpfn, mhp->nr_pages,
4205-
!freelist, 0);
4178+
list_empty(&freelist), 0);
42064179
rcu_read_unlock();
4207-
dma_free_pagelist(freelist);
4180+
put_pages_list(&freelist);
42084181
}
42094182
break;
42104183
}
@@ -5211,8 +5184,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
52115184
start_pfn = iova >> VTD_PAGE_SHIFT;
52125185
last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
52135186

5214-
gather->freelist = domain_unmap(dmar_domain, start_pfn,
5215-
last_pfn, gather->freelist);
5187+
domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
52165188

52175189
if (dmar_domain->max_addr == iova + size)
52185190
dmar_domain->max_addr = iova;
@@ -5248,9 +5220,10 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
52485220

52495221
for_each_domain_iommu(iommu_id, dmar_domain)
52505222
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5251-
start_pfn, nrpages, !gather->freelist, 0);
5223+
start_pfn, nrpages,
5224+
list_empty(&gather->freelist), 0);
52525225

5253-
dma_free_pagelist(gather->freelist);
5226+
put_pages_list(&gather->freelist);
52545227
}
52555228

52565229
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,

drivers/iommu/iova.c

Lines changed: 9 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
9696
int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
9797
{
9898
struct iova_fq __percpu *queue;
99-
int cpu;
99+
int i, cpu;
100100

101101
atomic64_set(&iovad->fq_flush_start_cnt, 0);
102102
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
@@ -113,6 +113,9 @@ int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_dom
113113
fq->tail = 0;
114114

115115
spin_lock_init(&fq->lock);
116+
117+
for (i = 0; i < IOVA_FQ_SIZE; i++)
118+
INIT_LIST_HEAD(&fq->entries[i].freelist);
116119
}
117120

118121
iovad->fq_domain = fq_domain;
@@ -543,16 +546,6 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
543546
}
544547
EXPORT_SYMBOL_GPL(free_iova_fast);
545548

546-
static void fq_entry_dtor(struct page *freelist)
547-
{
548-
while (freelist) {
549-
unsigned long p = (unsigned long)page_address(freelist);
550-
551-
freelist = freelist->freelist;
552-
free_page(p);
553-
}
554-
}
555-
556549
#define fq_ring_for_each(i, fq) \
557550
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
558551

@@ -585,7 +578,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
585578
if (fq->entries[idx].counter >= counter)
586579
break;
587580

588-
fq_entry_dtor(fq->entries[idx].freelist);
581+
put_pages_list(&fq->entries[idx].freelist);
589582
free_iova_fast(iovad,
590583
fq->entries[idx].iova_pfn,
591584
fq->entries[idx].pages);
@@ -607,15 +600,14 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
607600

608601
/*
609602
* This code runs when the iova_domain is being detroyed, so don't
610-
* bother to free iovas, just call the entry_dtor on all remaining
611-
* entries.
603+
* bother to free iovas, just free any remaining pagetable pages.
612604
*/
613605
for_each_possible_cpu(cpu) {
614606
struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
615607
int idx;
616608

617609
fq_ring_for_each(idx, fq)
618-
fq_entry_dtor(fq->entries[idx].freelist);
610+
put_pages_list(&fq->entries[idx].freelist);
619611
}
620612
}
621613

@@ -640,7 +632,7 @@ static void fq_flush_timeout(struct timer_list *t)
640632

641633
void queue_iova(struct iova_domain *iovad,
642634
unsigned long pfn, unsigned long pages,
643-
struct page *freelist)
635+
struct list_head *freelist)
644636
{
645637
struct iova_fq *fq;
646638
unsigned long flags;
@@ -674,8 +666,8 @@ void queue_iova(struct iova_domain *iovad,
674666

675667
fq->entries[idx].iova_pfn = pfn;
676668
fq->entries[idx].pages = pages;
677-
fq->entries[idx].freelist = freelist;
678669
fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
670+
list_splice(freelist, &fq->entries[idx].freelist);
679671

680672
spin_unlock_irqrestore(&fq->lock, flags);
681673

include/linux/iommu.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ struct iommu_iotlb_gather {
186186
unsigned long start;
187187
unsigned long end;
188188
size_t pgsize;
189-
struct page *freelist;
189+
struct list_head freelist;
190190
bool queued;
191191
};
192192

@@ -399,6 +399,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
399399
{
400400
*gather = (struct iommu_iotlb_gather) {
401401
.start = ULONG_MAX,
402+
.freelist = LIST_HEAD_INIT(gather->freelist),
402403
};
403404
}
404405

include/linux/iova.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ struct iova_rcache {
4646
struct iova_fq_entry {
4747
unsigned long iova_pfn;
4848
unsigned long pages;
49-
struct page *freelist;
49+
struct list_head freelist;
5050
u64 counter; /* Flush counter when this entrie was added */
5151
};
5252

@@ -135,7 +135,7 @@ void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
135135
unsigned long size);
136136
void queue_iova(struct iova_domain *iovad,
137137
unsigned long pfn, unsigned long pages,
138-
struct page *freelist);
138+
struct list_head *freelist);
139139
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
140140
unsigned long limit_pfn, bool flush_rcache);
141141
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,

0 commit comments

Comments
 (0)