Skip to content

Commit 8232822

Browse files
soleen authored and hansendc committed
x86/mm: Remove P*D_PAGE_MASK and P*D_PAGE_SIZE macros
Other architectures and the common mm/ use P*D_MASK and P*D_SIZE. Remove the duplicated P*D_PAGE_MASK and P*D_PAGE_SIZE, which are only used in x86/*.

Signed-off-by: Pasha Tatashin <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Reviewed-by: Anshuman Khandual <[email protected]>
Acked-by: Mike Rapoport <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 2dff2c3 commit 8232822

File tree

7 files changed

+20
-26
lines changed

7 files changed

+20
-26
lines changed

arch/x86/include/asm/page_types.h

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -11,20 +11,14 @@
1111
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
1212
#define PAGE_MASK (~(PAGE_SIZE-1))
1313

14-
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
15-
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
16-
17-
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
18-
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
19-
2014
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
2115

22-
/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
16+
/* Cast P*D_MASK to a signed type so that it is sign-extended if
2317
virtual addresses are 32-bits but physical addresses are larger
2418
(ie, 32-bit PAE). */
2519
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
26-
#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
27-
#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
20+
#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_MASK) & __PHYSICAL_MASK)
21+
#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_MASK) & __PHYSICAL_MASK)
2822

2923
#define HPAGE_SHIFT PMD_SHIFT
3024
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)

arch/x86/kernel/amd_gart_64.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -504,7 +504,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
504504
}
505505

506506
a = aper + iommu_size;
507-
iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
507+
iommu_size -= round_up(a, PMD_SIZE) - a;
508508

509509
if (iommu_size < 64*1024*1024) {
510510
pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."

arch/x86/kernel/head64.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
203203
load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
204204

205205
/* Is the address not 2M aligned? */
206-
if (load_delta & ~PMD_PAGE_MASK)
206+
if (load_delta & ~PMD_MASK)
207207
for (;;);
208208

209209
/* Include the SME encryption mask in the fixup value */

arch/x86/mm/mem_encrypt_boot.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ SYM_FUNC_START(sme_encrypt_execute)
2626
* RCX - virtual address of the encryption workarea, including:
2727
* - stack page (PAGE_SIZE)
2828
* - encryption routine page (PAGE_SIZE)
29-
* - intermediate copy buffer (PMD_PAGE_SIZE)
29+
* - intermediate copy buffer (PMD_SIZE)
3030
* R8 - physical address of the pagetables to use for encryption
3131
*/
3232

@@ -123,7 +123,7 @@ SYM_FUNC_START(__enc_copy)
123123
wbinvd /* Invalidate any cache entries */
124124

125125
/* Copy/encrypt up to 2MB at a time */
126-
movq $PMD_PAGE_SIZE, %r12
126+
movq $PMD_SIZE, %r12
127127
1:
128128
cmpq %r12, %r9
129129
jnb 2f

arch/x86/mm/mem_encrypt_identity.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ struct sme_populate_pgd_data {
9393
* section is 2MB aligned to allow for simple pagetable setup using only
9494
* PMD entries (see vmlinux.lds.S).
9595
*/
96-
static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
96+
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
9797

9898
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
9999
static char sme_cmdline_on[] __initdata = "on";
@@ -198,8 +198,8 @@ static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
198198
while (ppd->vaddr < ppd->vaddr_end) {
199199
sme_populate_pgd_large(ppd);
200200

201-
ppd->vaddr += PMD_PAGE_SIZE;
202-
ppd->paddr += PMD_PAGE_SIZE;
201+
ppd->vaddr += PMD_SIZE;
202+
ppd->paddr += PMD_SIZE;
203203
}
204204
}
205205

@@ -225,11 +225,11 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
225225
vaddr_end = ppd->vaddr_end;
226226

227227
/* If start is not 2MB aligned, create PTE entries */
228-
ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
228+
ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
229229
__sme_map_range_pte(ppd);
230230

231231
/* Create PMD entries */
232-
ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
232+
ppd->vaddr_end = vaddr_end & PMD_MASK;
233233
__sme_map_range_pmd(ppd);
234234

235235
/* If end is not 2MB aligned, create PTE entries */
@@ -325,7 +325,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
325325

326326
/* Physical addresses gives us the identity mapped virtual addresses */
327327
kernel_start = __pa_symbol(_text);
328-
kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
328+
kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);
329329
kernel_len = kernel_end - kernel_start;
330330

331331
initrd_start = 0;
@@ -355,12 +355,12 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
355355
* executable encryption area size:
356356
* stack page (PAGE_SIZE)
357357
* encryption routine page (PAGE_SIZE)
358-
* intermediate copy buffer (PMD_PAGE_SIZE)
358+
* intermediate copy buffer (PMD_SIZE)
359359
* pagetable structures for the encryption of the kernel
360360
* pagetable structures for workarea (in case not currently mapped)
361361
*/
362362
execute_start = workarea_start;
363-
execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
363+
execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
364364
execute_len = execute_end - execute_start;
365365

366366
/*
@@ -383,7 +383,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
383383
* before it is mapped.
384384
*/
385385
workarea_len = execute_len + pgtable_area_len;
386-
workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
386+
workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);
387387

388388
/*
389389
* Set the address to the start of where newly created pagetable

arch/x86/mm/pat/set_memory.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -743,11 +743,11 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
743743
switch (level) {
744744
case PG_LEVEL_1G:
745745
phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
746-
offset = virt_addr & ~PUD_PAGE_MASK;
746+
offset = virt_addr & ~PUD_MASK;
747747
break;
748748
case PG_LEVEL_2M:
749749
phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
750-
offset = virt_addr & ~PMD_PAGE_MASK;
750+
offset = virt_addr & ~PMD_MASK;
751751
break;
752752
default:
753753
phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
@@ -1037,7 +1037,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
10371037
case PG_LEVEL_1G:
10381038
ref_prot = pud_pgprot(*(pud_t *)kpte);
10391039
ref_pfn = pud_pfn(*(pud_t *)kpte);
1040-
pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
1040+
pfninc = PMD_SIZE >> PAGE_SHIFT;
10411041
lpaddr = address & PUD_MASK;
10421042
lpinc = PMD_SIZE;
10431043
/*

arch/x86/mm/pti.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -592,7 +592,7 @@ static void pti_set_kernel_image_nonglobal(void)
592592
* of the image.
593593
*/
594594
unsigned long start = PFN_ALIGN(_text);
595-
unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
595+
unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);
596596

597597
/*
598598
* This clears _PAGE_GLOBAL from the entire kernel image.

0 commit comments

Comments (0)