Skip to content

Commit 06c3750

Browse files
soleen authored and joergroedel committed
iommu/vt-d: add wrapper functions for page allocations
In order to improve observability and accountability of IOMMU layer, we must account the number of pages that are allocated by functions that are calling directly into buddy allocator. This is achieved by first wrapping the allocation related functions into a separate inline functions in new file: drivers/iommu/iommu-pages.h Convert all page allocation calls under iommu/intel to use these new functions. Signed-off-by: Pasha Tatashin <[email protected]> Acked-by: David Rientjes <[email protected]> Tested-by: Bagas Sanjaya <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Joerg Roedel <[email protected]>
1 parent 0bbac3f commit 06c3750

File tree

7 files changed

+201
-63
lines changed

7 files changed

+201
-63
lines changed

drivers/iommu/intel/dmar.c

Lines changed: 9 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -32,6 +32,7 @@
3232

3333
#include "iommu.h"
3434
#include "../irq_remapping.h"
35+
#include "../iommu-pages.h"
3536
#include "perf.h"
3637
#include "trace.h"
3738
#include "perfmon.h"
@@ -1187,7 +1188,7 @@ static void free_iommu(struct intel_iommu *iommu)
11871188
}
11881189

11891190
if (iommu->qi) {
1190-
free_page((unsigned long)iommu->qi->desc);
1191+
iommu_free_page(iommu->qi->desc);
11911192
kfree(iommu->qi->desc_status);
11921193
kfree(iommu->qi);
11931194
}
@@ -1755,7 +1756,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
17551756
int dmar_enable_qi(struct intel_iommu *iommu)
17561757
{
17571758
struct q_inval *qi;
1758-
struct page *desc_page;
1759+
void *desc;
1760+
int order;
17591761

17601762
if (!ecap_qis(iommu->ecap))
17611763
return -ENOENT;
@@ -1776,19 +1778,19 @@ int dmar_enable_qi(struct intel_iommu *iommu)
17761778
* Need two pages to accommodate 256 descriptors of 256 bits each
17771779
* if the remapping hardware supports scalable mode translation.
17781780
*/
1779-
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
1780-
!!ecap_smts(iommu->ecap));
1781-
if (!desc_page) {
1781+
order = ecap_smts(iommu->ecap) ? 1 : 0;
1782+
desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
1783+
if (!desc) {
17821784
kfree(qi);
17831785
iommu->qi = NULL;
17841786
return -ENOMEM;
17851787
}
17861788

1787-
qi->desc = page_address(desc_page);
1789+
qi->desc = desc;
17881790

17891791
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
17901792
if (!qi->desc_status) {
1791-
free_page((unsigned long) qi->desc);
1793+
iommu_free_page(qi->desc);
17921794
kfree(qi);
17931795
iommu->qi = NULL;
17941796
return -ENOMEM;

drivers/iommu/intel/iommu.c

Lines changed: 16 additions & 31 deletions
Original file line number | Diff line number | Diff line change
@@ -27,6 +27,7 @@
2727
#include "iommu.h"
2828
#include "../dma-iommu.h"
2929
#include "../irq_remapping.h"
30+
#include "../iommu-pages.h"
3031
#include "pasid.h"
3132
#include "cap_audit.h"
3233
#include "perfmon.h"
@@ -298,22 +299,6 @@ static int __init intel_iommu_setup(char *str)
298299
}
299300
__setup("intel_iommu=", intel_iommu_setup);
300301

301-
void *alloc_pgtable_page(int node, gfp_t gfp)
302-
{
303-
struct page *page;
304-
void *vaddr = NULL;
305-
306-
page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
307-
if (page)
308-
vaddr = page_address(page);
309-
return vaddr;
310-
}
311-
312-
void free_pgtable_page(void *vaddr)
313-
{
314-
free_page((unsigned long)vaddr);
315-
}
316-
317302
static int domain_type_is_si(struct dmar_domain *domain)
318303
{
319304
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -545,7 +530,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
545530
if (!alloc)
546531
return NULL;
547532

548-
context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
533+
context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
549534
if (!context)
550535
return NULL;
551536

@@ -719,17 +704,17 @@ static void free_context_table(struct intel_iommu *iommu)
719704
for (i = 0; i < ROOT_ENTRY_NR; i++) {
720705
context = iommu_context_addr(iommu, i, 0, 0);
721706
if (context)
722-
free_pgtable_page(context);
707+
iommu_free_page(context);
723708

724709
if (!sm_supported(iommu))
725710
continue;
726711

727712
context = iommu_context_addr(iommu, i, 0x80, 0);
728713
if (context)
729-
free_pgtable_page(context);
714+
iommu_free_page(context);
730715
}
731716

732-
free_pgtable_page(iommu->root_entry);
717+
iommu_free_page(iommu->root_entry);
733718
iommu->root_entry = NULL;
734719
}
735720

@@ -867,7 +852,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
867852
if (!dma_pte_present(pte)) {
868853
uint64_t pteval;
869854

870-
tmp_page = alloc_pgtable_page(domain->nid, gfp);
855+
tmp_page = iommu_alloc_page_node(domain->nid, gfp);
871856

872857
if (!tmp_page)
873858
return NULL;
@@ -879,7 +864,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
879864

880865
if (cmpxchg64(&pte->val, 0ULL, pteval))
881866
/* Someone else set it while we were thinking; use theirs. */
882-
free_pgtable_page(tmp_page);
867+
iommu_free_page(tmp_page);
883868
else
884869
domain_flush_cache(domain, pte, sizeof(*pte));
885870
}
@@ -992,7 +977,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
992977
last_pfn < level_pfn + level_size(level) - 1)) {
993978
dma_clear_pte(pte);
994979
domain_flush_cache(domain, pte, sizeof(*pte));
995-
free_pgtable_page(level_pte);
980+
iommu_free_page(level_pte);
996981
}
997982
next:
998983
pfn += level_size(level);
@@ -1016,7 +1001,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
10161001

10171002
/* free pgd */
10181003
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1019-
free_pgtable_page(domain->pgd);
1004+
iommu_free_page(domain->pgd);
10201005
domain->pgd = NULL;
10211006
}
10221007
}
@@ -1118,7 +1103,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
11181103
{
11191104
struct root_entry *root;
11201105

1121-
root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
1106+
root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
11221107
if (!root) {
11231108
pr_err("Allocating root entry for %s failed\n",
11241109
iommu->name);
@@ -1841,7 +1826,7 @@ static void domain_exit(struct dmar_domain *domain)
18411826
LIST_HEAD(freelist);
18421827

18431828
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
1844-
put_pages_list(&freelist);
1829+
iommu_put_pages_list(&freelist);
18451830
}
18461831

18471832
if (WARN_ON(!list_empty(&domain->devices)))
@@ -2497,7 +2482,7 @@ static int copy_context_table(struct intel_iommu *iommu,
24972482
if (!old_ce)
24982483
goto out;
24992484

2500-
new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
2485+
new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
25012486
if (!new_ce)
25022487
goto out_unmap;
25032488

@@ -3426,7 +3411,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
34263411
start_vpfn, mhp->nr_pages,
34273412
list_empty(&freelist), 0);
34283413
rcu_read_unlock();
3429-
put_pages_list(&freelist);
3414+
iommu_put_pages_list(&freelist);
34303415
}
34313416
break;
34323417
}
@@ -3833,7 +3818,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
38333818
domain->max_addr = 0;
38343819

38353820
/* always allocate the top pgd */
3836-
domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
3821+
domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
38373822
if (!domain->pgd)
38383823
return -ENOMEM;
38393824
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -3987,7 +3972,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
39873972
pte = dmar_domain->pgd;
39883973
if (dma_pte_present(pte)) {
39893974
dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
3990-
free_pgtable_page(pte);
3975+
iommu_free_page(pte);
39913976
}
39923977
dmar_domain->agaw--;
39933978
}
@@ -4141,7 +4126,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
41414126
if (dmar_domain->nested_parent)
41424127
parent_domain_flush(dmar_domain, start_pfn, nrpages,
41434128
list_empty(&gather->freelist));
4144-
put_pages_list(&gather->freelist);
4129+
iommu_put_pages_list(&gather->freelist);
41454130
}
41464131

41474132
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,

drivers/iommu/intel/iommu.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1085,8 +1085,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain);
10851085

10861086
int dmar_ir_support(void);
10871087

1088-
void *alloc_pgtable_page(int node, gfp_t gfp);
1089-
void free_pgtable_page(void *vaddr);
10901088
void iommu_flush_write_buffer(struct intel_iommu *iommu);
10911089
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
10921090
const struct iommu_user_data *user_data);

drivers/iommu/intel/irq_remapping.c

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -22,6 +22,7 @@
2222

2323
#include "iommu.h"
2424
#include "../irq_remapping.h"
25+
#include "../iommu-pages.h"
2526
#include "cap_audit.h"
2627

2728
enum irq_mode {
@@ -527,7 +528,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
527528
struct ir_table *ir_table;
528529
struct fwnode_handle *fn;
529530
unsigned long *bitmap;
530-
struct page *pages;
531+
void *ir_table_base;
531532

532533
if (iommu->ir_table)
533534
return 0;
@@ -536,9 +537,9 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
536537
if (!ir_table)
537538
return -ENOMEM;
538539

539-
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
540-
INTR_REMAP_PAGE_ORDER);
541-
if (!pages) {
540+
ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
541+
INTR_REMAP_PAGE_ORDER);
542+
if (!ir_table_base) {
542543
pr_err("IR%d: failed to allocate pages of order %d\n",
543544
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
544545
goto out_free_table;
@@ -573,7 +574,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
573574
else
574575
iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
575576

576-
ir_table->base = page_address(pages);
577+
ir_table->base = ir_table_base;
577578
ir_table->bitmap = bitmap;
578579
iommu->ir_table = ir_table;
579580

@@ -622,7 +623,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
622623
out_free_bitmap:
623624
bitmap_free(bitmap);
624625
out_free_pages:
625-
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
626+
iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
626627
out_free_table:
627628
kfree(ir_table);
628629

@@ -643,8 +644,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
643644
irq_domain_free_fwnode(fn);
644645
iommu->ir_domain = NULL;
645646
}
646-
free_pages((unsigned long)iommu->ir_table->base,
647-
INTR_REMAP_PAGE_ORDER);
647+
iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
648648
bitmap_free(iommu->ir_table->bitmap);
649649
kfree(iommu->ir_table);
650650
iommu->ir_table = NULL;

drivers/iommu/intel/pasid.c

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -20,6 +20,7 @@
2020

2121
#include "iommu.h"
2222
#include "pasid.h"
23+
#include "../iommu-pages.h"
2324

2425
/*
2526
* Intel IOMMU system wide PASID name space:
@@ -38,7 +39,7 @@ int intel_pasid_alloc_table(struct device *dev)
3839
{
3940
struct device_domain_info *info;
4041
struct pasid_table *pasid_table;
41-
struct page *pages;
42+
struct pasid_dir_entry *dir;
4243
u32 max_pasid = 0;
4344
int order, size;
4445

@@ -59,14 +60,13 @@ int intel_pasid_alloc_table(struct device *dev)
5960

6061
size = max_pasid >> (PASID_PDE_SHIFT - 3);
6162
order = size ? get_order(size) : 0;
62-
pages = alloc_pages_node(info->iommu->node,
63-
GFP_KERNEL | __GFP_ZERO, order);
64-
if (!pages) {
63+
dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
64+
if (!dir) {
6565
kfree(pasid_table);
6666
return -ENOMEM;
6767
}
6868

69-
pasid_table->table = page_address(pages);
69+
pasid_table->table = dir;
7070
pasid_table->order = order;
7171
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
7272
info->pasid_table = pasid_table;
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
9797
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
9898
for (i = 0; i < max_pde; i++) {
9999
table = get_pasid_table_from_pde(&dir[i]);
100-
free_pgtable_page(table);
100+
iommu_free_page(table);
101101
}
102102

103-
free_pages((unsigned long)pasid_table->table, pasid_table->order);
103+
iommu_free_pages(pasid_table->table, pasid_table->order);
104104
kfree(pasid_table);
105105
}
106106

@@ -146,7 +146,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
146146
retry:
147147
entries = get_pasid_table_from_pde(&dir[dir_index]);
148148
if (!entries) {
149-
entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
149+
entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
150150
if (!entries)
151151
return NULL;
152152

@@ -158,7 +158,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
158158
*/
159159
if (cmpxchg64(&dir[dir_index].val, 0ULL,
160160
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
161-
free_pgtable_page(entries);
161+
iommu_free_page(entries);
162162
goto retry;
163163
}
164164
if (!ecap_coherent(info->iommu->ecap)) {

drivers/iommu/intel/svm.c

Lines changed: 5 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -22,6 +22,7 @@
2222
#include "iommu.h"
2323
#include "pasid.h"
2424
#include "perf.h"
25+
#include "../iommu-pages.h"
2526
#include "trace.h"
2627

2728
static irqreturn_t prq_event_thread(int irq, void *d);
@@ -63,16 +64,14 @@ svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
6364
int intel_svm_enable_prq(struct intel_iommu *iommu)
6465
{
6566
struct iopf_queue *iopfq;
66-
struct page *pages;
6767
int irq, ret;
6868

69-
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
70-
if (!pages) {
69+
iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
70+
if (!iommu->prq) {
7171
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
7272
iommu->name);
7373
return -ENOMEM;
7474
}
75-
iommu->prq = page_address(pages);
7675

7776
irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
7877
if (irq <= 0) {
@@ -117,7 +116,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
117116
dmar_free_hwirq(irq);
118117
iommu->pr_irq = 0;
119118
free_prq:
120-
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
119+
iommu_free_pages(iommu->prq, PRQ_ORDER);
121120
iommu->prq = NULL;
122121

123122
return ret;
@@ -140,7 +139,7 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
140139
iommu->iopf_queue = NULL;
141140
}
142141

143-
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
142+
iommu_free_pages(iommu->prq, PRQ_ORDER);
144143
iommu->prq = NULL;
145144

146145
return 0;

0 commit comments

Comments
 (0)