Commit 75114cb

soleen authored and joergroedel committed
iommu/amd: use page allocation function provided by iommu-pages.h

Convert iommu/amd/* files to use the new page allocation functions
provided in iommu-pages.h.

Signed-off-by: Pasha Tatashin <[email protected]>
Acked-by: David Rientjes <[email protected]>
Tested-by: Bagas Sanjaya <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
1 parent 95b18ef commit 75114cb

5 files changed: +62 -79 lines

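The iommu-pages.h header itself is not part of the hunks shown below, so as a reading aid here is a minimal sketch of what the converted call sites assume about its helpers, inferred purely from the before/after code in this commit (the zeroing behaviour and any per-page accounting inside the real header are assumptions, not taken from it):

static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
        /* Presumed: zeroed, order-sized allocation returned as a virtual address. */
        struct page *page = alloc_pages(gfp | __GFP_ZERO, order);

        return page ? page_address(page) : NULL;
}

static inline void iommu_free_pages(void *virt, int order)
{
        /* Presumed: takes the virtual address directly; NULL is a no-op. */
        if (virt)
                free_pages((unsigned long)virt, order);
}

static inline void iommu_free_page(void *virt)
{
        iommu_free_pages(virt, 0);
}

This is the pattern repeated throughout the diff: callers drop __GFP_ZERO and the (void *)/(unsigned long) casts, and pass the pointer straight to the free helper.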

drivers/iommu/amd/amd_iommu.h

Lines changed: 0 additions & 8 deletions
@@ -134,14 +134,6 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
         return PCI_SEG_DEVID_TO_SBDF(seg, devid);
 }
 
-static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
-{
-        struct page *page;
-
-        page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
-        return page ? page_address(page) : NULL;
-}
-
 /*
  * This must be called after device probe completes. During probe
  * use rlookup_amd_iommu() get the iommu.
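The open-coded alloc_pgtable_page() deleted above maps onto the single-page, NUMA-aware allocator that io_pgtable.c switches to further down (iommu_alloc_page_node). A sketch of the presumed equivalent, again inferred from the call sites rather than copied from iommu-pages.h:

static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
{
        /* Presumed: one zeroed page from the requested NUMA node, as a virtual address. */
        struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);

        return page ? page_address(page) : NULL;
}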

drivers/iommu/amd/init.c

Lines changed: 42 additions & 49 deletions
@@ -36,6 +36,7 @@
 
 #include "amd_iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 
 /*
  * definitions for the ACPI scanning code
@@ -649,8 +650,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-                                                      get_order(pci_seg->dev_table_size));
+        pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+                                               get_order(pci_seg->dev_table_size));
         if (!pci_seg->dev_table)
                 return -ENOMEM;
 
@@ -659,17 +660,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        free_pages((unsigned long)pci_seg->dev_table,
-                   get_order(pci_seg->dev_table_size));
+        iommu_free_pages(pci_seg->dev_table,
+                         get_order(pci_seg->dev_table_size));
         pci_seg->dev_table = NULL;
 }
 
 /* Allocate per PCI segment IOMMU rlookup table. */
 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        pci_seg->rlookup_table = (void *)__get_free_pages(
-                        GFP_KERNEL | __GFP_ZERO,
-                        get_order(pci_seg->rlookup_table_size));
+        pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
+                                                   get_order(pci_seg->rlookup_table_size));
         if (pci_seg->rlookup_table == NULL)
                 return -ENOMEM;
 
@@ -678,16 +678,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        free_pages((unsigned long)pci_seg->rlookup_table,
-                   get_order(pci_seg->rlookup_table_size));
+        iommu_free_pages(pci_seg->rlookup_table,
+                         get_order(pci_seg->rlookup_table_size));
         pci_seg->rlookup_table = NULL;
 }
 
 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        pci_seg->irq_lookup_table = (void *)__get_free_pages(
-                        GFP_KERNEL | __GFP_ZERO,
-                        get_order(pci_seg->rlookup_table_size));
+        pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
+                                                      get_order(pci_seg->rlookup_table_size));
         kmemleak_alloc(pci_seg->irq_lookup_table,
                        pci_seg->rlookup_table_size, 1, GFP_KERNEL);
         if (pci_seg->irq_lookup_table == NULL)
@@ -699,17 +698,17 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
         kmemleak_free(pci_seg->irq_lookup_table);
-        free_pages((unsigned long)pci_seg->irq_lookup_table,
-                   get_order(pci_seg->rlookup_table_size));
+        iommu_free_pages(pci_seg->irq_lookup_table,
+                         get_order(pci_seg->rlookup_table_size));
         pci_seg->irq_lookup_table = NULL;
 }
 
 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
         int i;
 
-        pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
-                                                        get_order(pci_seg->alias_table_size));
+        pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
+                                                 get_order(pci_seg->alias_table_size));
         if (!pci_seg->alias_table)
                 return -ENOMEM;
 
@@ -724,8 +723,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 
 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        free_pages((unsigned long)pci_seg->alias_table,
-                   get_order(pci_seg->alias_table_size));
+        iommu_free_pages(pci_seg->alias_table,
+                         get_order(pci_seg->alias_table_size));
         pci_seg->alias_table = NULL;
 }
 
@@ -736,8 +735,8 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                  get_order(CMD_BUFFER_SIZE));
+        iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
+                                           get_order(CMD_BUFFER_SIZE));
 
         return iommu->cmd_buf ? 0 : -ENOMEM;
 }
@@ -845,19 +844,19 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
-        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+        iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
 static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
                                          gfp_t gfp, size_t size)
 {
         int order = get_order(size);
-        void *buf = (void *)__get_free_pages(gfp, order);
+        void *buf = iommu_alloc_pages(gfp, order);
 
         if (buf &&
             check_feature(FEATURE_SNP) &&
             set_memory_4k((unsigned long)buf, (1 << order))) {
-                free_pages((unsigned long)buf, order);
+                iommu_free_pages(buf, order);
                 buf = NULL;
         }
 
@@ -867,7 +866,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-        iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+        iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
                                               EVT_BUFFER_SIZE);
 
         return iommu->evt_buf ? 0 : -ENOMEM;
@@ -901,14 +900,13 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
 {
-        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+        iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_ppr_log(struct amd_iommu *iommu)
 {
-        iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
-                                              PPR_LOG_SIZE);
+        iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL, PPR_LOG_SIZE);
 
         return iommu->ppr_log ? 0 : -ENOMEM;
 }
@@ -937,14 +935,14 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
 
 static void __init free_ppr_log(struct amd_iommu *iommu)
 {
-        free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+        iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
 }
 
 static void free_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-        free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
-        free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+        iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
+        iommu_free_pages(iommu->ga_log_tail, get_order(8));
 #endif
 }
 
@@ -989,13 +987,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
         if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                 return 0;
 
-        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                        get_order(GA_LOG_SIZE));
+        iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
         if (!iommu->ga_log)
                 goto err_out;
 
-        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                        get_order(8));
+        iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
         if (!iommu->ga_log_tail)
                 goto err_out;
 
@@ -1008,15 +1004,15 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-        iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+        iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
 
         return iommu->cmd_sem ? 0 : -ENOMEM;
 }
 
 static void __init free_cwwb_sem(struct amd_iommu *iommu)
 {
         if (iommu->cmd_sem)
-                free_page((unsigned long)iommu->cmd_sem);
+                iommu_free_page((void *)iommu->cmd_sem);
 }
 
 static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1081,7 +1077,6 @@ static bool __copy_device_table(struct amd_iommu *iommu)
         u32 lo, hi, devid, old_devtb_size;
         phys_addr_t old_devtb_phys;
         u16 dom_id, dte_v, irq_v;
-        gfp_t gfp_flag;
         u64 tmp;
 
         /* Each IOMMU use separate device table with the same size */
@@ -1115,9 +1110,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
         if (!old_devtb)
                 return false;
 
-        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-        pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-                                                    get_order(pci_seg->dev_table_size));
+        pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+                                                     get_order(pci_seg->dev_table_size));
         if (pci_seg->old_dev_tbl_cpy == NULL) {
                 pr_err("Failed to allocate memory for copying old device table!\n");
                 memunmap(old_devtb);
@@ -2805,8 +2799,8 @@ static void early_enable_iommus(void)
 
                 for_each_pci_segment(pci_seg) {
                         if (pci_seg->old_dev_tbl_cpy != NULL) {
-                                free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
-                                                get_order(pci_seg->dev_table_size));
+                                iommu_free_pages(pci_seg->old_dev_tbl_cpy,
+                                                 get_order(pci_seg->dev_table_size));
                                 pci_seg->old_dev_tbl_cpy = NULL;
                         }
                 }
@@ -2819,8 +2813,8 @@ static void early_enable_iommus(void)
                 pr_info("Copied DEV table from previous kernel.\n");
 
                 for_each_pci_segment(pci_seg) {
-                        free_pages((unsigned long)pci_seg->dev_table,
-                                   get_order(pci_seg->dev_table_size));
+                        iommu_free_pages(pci_seg->dev_table,
+                                         get_order(pci_seg->dev_table_size));
                         pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
                 }
 
@@ -3022,8 +3016,8 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-                   get_order(MAX_DOMAIN_ID/8));
+        iommu_free_pages(amd_iommu_pd_alloc_bitmap,
+                         get_order(MAX_DOMAIN_ID / 8));
         amd_iommu_pd_alloc_bitmap = NULL;
 
         free_unity_maps();
@@ -3095,9 +3089,8 @@ static int __init early_amd_iommu_init(void)
         /* Device table - directly used by all IOMMUs */
         ret = -ENOMEM;
 
-        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
-                                                GFP_KERNEL | __GFP_ZERO,
-                                                get_order(MAX_DOMAIN_ID/8));
+        amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
+                                                      get_order(MAX_DOMAIN_ID / 8));
         if (amd_iommu_pd_alloc_bitmap == NULL)
                 goto out;
 
drivers/iommu/amd/io_pgtable.c

Lines changed: 7 additions & 6 deletions
@@ -22,6 +22,7 @@
 
 #include "amd_iommu_types.h"
 #include "amd_iommu.h"
+#include "../iommu-pages.h"
 
 static void v1_tlb_flush_all(void *cookie)
 {
@@ -156,7 +157,7 @@ static bool increase_address_space(struct protection_domain *domain,
         bool ret = true;
         u64 *pte;
 
-        pte = alloc_pgtable_page(domain->nid, gfp);
+        pte = iommu_alloc_page_node(domain->nid, gfp);
         if (!pte)
                 return false;
 
@@ -187,7 +188,7 @@ static bool increase_address_space(struct protection_domain *domain,
 
 out:
         spin_unlock_irqrestore(&domain->lock, flags);
-        free_page((unsigned long)pte);
+        iommu_free_page(pte);
 
         return ret;
 }
@@ -250,7 +251,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
                 if (!IOMMU_PTE_PRESENT(__pte) ||
                     pte_level == PAGE_MODE_NONE) {
-                        page = alloc_pgtable_page(domain->nid, gfp);
+                        page = iommu_alloc_page_node(domain->nid, gfp);
 
                         if (!page)
                                 return NULL;
@@ -259,7 +260,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
                 /* pte could have been changed somewhere. */
                 if (!try_cmpxchg64(pte, &__pte, __npte))
-                        free_page((unsigned long)page);
+                        iommu_free_page(page);
                 else if (IOMMU_PTE_PRESENT(__pte))
                         *updated = true;
 
@@ -431,7 +432,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
         }
 
         /* Everything flushed out, free pages now */
-        put_pages_list(&freelist);
+        iommu_put_pages_list(&freelist);
 
         return ret;
 }
@@ -580,7 +581,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
         /* Make changes visible to IOMMUs */
         amd_iommu_domain_update(dom);
 
-        put_pages_list(&freelist);
+        iommu_put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
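Both io_pgtable.c hunks above also swap put_pages_list() for iommu_put_pages_list() when draining the page-table freelist. Going only by the call sites, the new helper is presumed to free every page queued on the list through its lru linkage, much as put_pages_list() did, plus whatever accounting the other iommu-pages.h helpers perform; a sketch under that assumption:

static inline void iommu_put_pages_list(struct list_head *head)
{
        while (!list_empty(head)) {
                /* Freelist entries are assumed to be order-0 page-table pages. */
                struct page *p = list_first_entry(head, struct page, lru);

                list_del(&p->lru);
                __free_pages(p, 0);
        }
}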
