 
 #include "amd_iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 
 /*
  * definitions for the ACPI scanning code
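The new include pulls in the common IOMMU page allocator, and every hunk below replaces an open-coded __get_free_pages()/free_pages() pair with its wrappers. The wrapper names and their order-based interface (iommu_alloc_pages(), iommu_free_pages(), iommu_free_page()) come straight from the diff; the bodies below are only a minimal sketch of the behaviour the call-site changes appear to rely on, most importantly that the allocator zeroes the pages itself, which is why __GFP_ZERO disappears from every converted gfp mask. The real header may do more (for example allocation accounting), so treat this as an illustration, not the upstream implementation.

/*
 * Minimal sketch of the iommu-pages.h helpers as the converted call
 * sites appear to assume them -- not the upstream implementation.
 */
#include <linux/gfp.h>

static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
	/* Always zero the pages, so callers can drop __GFP_ZERO. */
	unsigned long virt = __get_free_pages(gfp | __GFP_ZERO, order);

	return (void *)virt;	/* NULL on allocation failure */
}

static inline void iommu_free_pages(void *virt, int order)
{
	if (!virt)
		return;

	free_pages((unsigned long)virt, order);
}

/* Single-page convenience wrapper, used below for the completion-wait semaphore. */
static inline void iommu_free_page(void *virt)
{
	iommu_free_pages(virt, 0);
}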
@@ -649,8 +650,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg)
 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-						      get_order(pci_seg->dev_table_size));
+	pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+					       get_order(pci_seg->dev_table_size));
 	if (!pci_seg->dev_table)
 		return -ENOMEM;
 
@@ -659,17 +660,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->dev_table,
-		   get_order(pci_seg->dev_table_size));
+	iommu_free_pages(pci_seg->dev_table,
+			 get_order(pci_seg->dev_table_size));
 	pci_seg->dev_table = NULL;
 }
 
 /* Allocate per PCI segment IOMMU rlookup table. */
 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->rlookup_table = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(pci_seg->rlookup_table_size));
+	pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
+						   get_order(pci_seg->rlookup_table_size));
 	if (pci_seg->rlookup_table == NULL)
 		return -ENOMEM;
 
@@ -678,16 +678,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->rlookup_table,
-		   get_order(pci_seg->rlookup_table_size));
+	iommu_free_pages(pci_seg->rlookup_table,
+			 get_order(pci_seg->rlookup_table_size));
 	pci_seg->rlookup_table = NULL;
 }
 
 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->irq_lookup_table = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(pci_seg->rlookup_table_size));
+	pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
+						      get_order(pci_seg->rlookup_table_size));
 	kmemleak_alloc(pci_seg->irq_lookup_table,
 		       pci_seg->rlookup_table_size, 1, GFP_KERNEL);
 	if (pci_seg->irq_lookup_table == NULL)
@@ -699,17 +698,17 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
 	kmemleak_free(pci_seg->irq_lookup_table);
-	free_pages((unsigned long)pci_seg->irq_lookup_table,
-		   get_order(pci_seg->rlookup_table_size));
+	iommu_free_pages(pci_seg->irq_lookup_table,
+			 get_order(pci_seg->rlookup_table_size));
 	pci_seg->irq_lookup_table = NULL;
 }
 
 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
 	int i;
 
-	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
-							get_order(pci_seg->alias_table_size));
+	pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
+						 get_order(pci_seg->alias_table_size));
 	if (!pci_seg->alias_table)
 		return -ENOMEM;
 
@@ -724,8 +723,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 
 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->alias_table,
-		   get_order(pci_seg->alias_table_size));
+	iommu_free_pages(pci_seg->alias_table,
+			 get_order(pci_seg->alias_table_size));
 	pci_seg->alias_table = NULL;
 }
 
@@ -736,8 +735,8 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(CMD_BUFFER_SIZE));
+	iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
+					   get_order(CMD_BUFFER_SIZE));
 
 	return iommu->cmd_buf ? 0 : -ENOMEM;
 }
@@ -834,19 +833,19 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+	iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 				  size_t size)
 {
 	int order = get_order(size);
-	void *buf = (void *)__get_free_pages(gfp, order);
+	void *buf = iommu_alloc_pages(gfp, order);
 
 	if (buf &&
 	    check_feature(FEATURE_SNP) &&
 	    set_memory_4k((unsigned long)buf, (1 << order))) {
-		free_pages((unsigned long)buf, order);
+		iommu_free_pages(buf, order);
 		buf = NULL;
 	}
 
@@ -856,7 +855,7 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
 					      EVT_BUFFER_SIZE);
 
 	return iommu->evt_buf ? 0 : -ENOMEM;
@@ -890,14 +889,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+	iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
 static void free_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
-	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+	iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
+	iommu_free_pages(iommu->ga_log_tail, get_order(8));
 #endif
 }
 
@@ -942,13 +941,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
-	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(GA_LOG_SIZE));
+	iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
 	if (!iommu->ga_log)
 		goto err_out;
 
-	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(8));
+	iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
@@ -961,15 +958,15 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
 
 	return iommu->cmd_sem ? 0 : -ENOMEM;
 }
 
 static void __init free_cwwb_sem(struct amd_iommu *iommu)
 {
 	if (iommu->cmd_sem)
-		free_page((unsigned long)iommu->cmd_sem);
+		iommu_free_page((void *)iommu->cmd_sem);
 }
 
 static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1034,7 +1031,6 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	u32 lo, hi, devid, old_devtb_size;
 	phys_addr_t old_devtb_phys;
 	u16 dom_id, dte_v, irq_v;
-	gfp_t gfp_flag;
 	u64 tmp;
 
 	/* Each IOMMU use separate device table with the same size */
@@ -1068,9 +1064,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	if (!old_devtb)
 		return false;
 
-	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-						    get_order(pci_seg->dev_table_size));
+	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+						     get_order(pci_seg->dev_table_size));
 	if (pci_seg->old_dev_tbl_cpy == NULL) {
 		pr_err("Failed to allocate memory for copying old device table!\n");
 		memunmap(old_devtb);
@@ -2769,8 +2764,8 @@ static void early_enable_iommus(void)
 
 		for_each_pci_segment(pci_seg) {
 			if (pci_seg->old_dev_tbl_cpy != NULL) {
-				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
-						get_order(pci_seg->dev_table_size));
+				iommu_free_pages(pci_seg->old_dev_tbl_cpy,
+						 get_order(pci_seg->dev_table_size));
 				pci_seg->old_dev_tbl_cpy = NULL;
 			}
 		}
@@ -2783,8 +2778,8 @@ static void early_enable_iommus(void)
 		pr_info("Copied DEV table from previous kernel.\n");
 
 		for_each_pci_segment(pci_seg) {
-			free_pages((unsigned long)pci_seg->dev_table,
-				   get_order(pci_seg->dev_table_size));
+			iommu_free_pages(pci_seg->dev_table,
+					 get_order(pci_seg->dev_table_size));
 			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
 		}
 
@@ -2989,8 +2984,8 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-		   get_order(MAX_DOMAIN_ID / 8));
+	iommu_free_pages(amd_iommu_pd_alloc_bitmap,
+			 get_order(MAX_DOMAIN_ID / 8));
 	amd_iommu_pd_alloc_bitmap = NULL;
 
 	free_unity_maps();
@@ -3062,9 +3057,8 @@ static int __init early_amd_iommu_init(void)
 	/* Device table - directly used by all IOMMUs */
 	ret = -ENOMEM;
 
-	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(MAX_DOMAIN_ID/8));
+	amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
+						      get_order(MAX_DOMAIN_ID / 8));
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto out;
 
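Taken together, every call site above follows the same recipe: drop __GFP_ZERO from the gfp mask (keeping any extra flags such as GFP_DMA32), allocate through iommu_alloc_pages(), and free by passing the pointer directly to iommu_free_pages() rather than casting it to unsigned long for free_pages(). A condensed, hypothetical illustration of the pattern (the alloc_seg_table/free_seg_table names and tbl_size parameter are illustrative, not part of the patch):

/* Hypothetical example of the converted allocation pattern. */
static void *alloc_seg_table(size_t tbl_size)
{
	/* Zeroing is implied by the helper; only the real flags remain. */
	return iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, get_order(tbl_size));
}

static void free_seg_table(void *tbl, size_t tbl_size)
{
	/* Takes the virtual address as-is, no (unsigned long) cast. */
	iommu_free_pages(tbl, get_order(tbl_size));
}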