
Commit c164fbb

lsgunth authored and torvalds committed
x86/mm: thread pgprot_t through init_memory_mapping()
In preparation to support a pgprot_t argument for arch_add_memory().

The prototype of init_memory_mapping() also has to be moved, since its original location came before the definition of pgprot_t.

Signed-off-by: Logan Gunthorpe <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Dan Williams <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Eric Badger <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Will Deacon <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent f5637d3 commit c164fbb
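
For context, here is a minimal sketch of what the new signature looks like from a caller's point of view. It is illustrative only and not part of the commit: example_map_range() is a hypothetical wrapper, while init_memory_mapping() and PAGE_KERNEL are the kernel symbols actually touched below.

#include <linux/mm.h>
#include <asm/pgtable.h>	/* init_memory_mapping() is now declared here */

/* Hypothetical caller: direct-map a physical range with the default
 * kernel protection.  Every existing caller converted by this commit
 * passes PAGE_KERNEL, so behaviour is unchanged; the point is that a
 * later patch can hand arch_add_memory() a different pgprot_t and have
 * it threaded down to kernel_physical_mapping_init(). */
static unsigned long __init example_map_range(u64 start, u64 size)
{
	return init_memory_mapping(start, start + size, PAGE_KERNEL);
}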

File tree: 8 files changed, +34 -25 lines changed


arch/x86/include/asm/page_types.h

Lines changed: 0 additions & 3 deletions

@@ -71,9 +71,6 @@ static inline phys_addr_t get_max_mapped(void)
 
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-
 extern void initmem_init(void);
 
 #endif /* !__ASSEMBLY__ */

arch/x86/include/asm/pgtable.h

Lines changed: 3 additions & 0 deletions

@@ -1081,6 +1081,9 @@ static inline void __meminit init_trampoline_default(void)
 
 void __init poking_init(void);
 
+unsigned long init_memory_mapping(unsigned long start,
+				  unsigned long end, pgprot_t prot);
+
 # ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
 # else

arch/x86/kernel/amd_gart_64.c

Lines changed: 2 additions & 1 deletion

@@ -744,7 +744,8 @@ int __init gart_iommu_init(void)
 
 	start_pfn = PFN_DOWN(aper_base);
 	if (!pfn_range_is_mapped(start_pfn, end_pfn))
-		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
+				    PAGE_KERNEL);
 
 	pr_info("PCI-DMA: using GART IOMMU.\n");
 	iommu_size = check_iommu_size(info.aper_base, aper_size);

arch/x86/mm/init.c

Lines changed: 5 additions & 4 deletions

@@ -467,7 +467,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
  * the physical memory. To access them they are temporarily mapped.
  */
 unsigned long __ref init_memory_mapping(unsigned long start,
-					unsigned long end)
+					unsigned long end, pgprot_t prot)
 {
 	struct map_range mr[NR_RANGE_MR];
 	unsigned long ret = 0;

@@ -481,7 +481,8 @@ unsigned long __ref init_memory_mapping(unsigned long start,
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-						   mr[i].page_size_mask);
+						   mr[i].page_size_mask,
+						   prot);
 
 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 

@@ -521,7 +522,7 @@ static unsigned long __init init_range_memory_mapping(
 		 */
 		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
-		init_memory_mapping(start, end);
+		init_memory_mapping(start, end, PAGE_KERNEL);
 		mapped_ram_size += end - start;
 		can_use_brk_pgt = true;
 	}

@@ -661,7 +662,7 @@ void __init init_mem_mapping(void)
 #endif
 
 	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
+	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
 
 	/* Init the trampoline, possibly with KASLR memory offset */
 	init_trampoline();

arch/x86/mm/init_32.c

Lines changed: 2 additions & 1 deletion

@@ -257,7 +257,8 @@ static inline int __is_kernel_text(unsigned long addr)
 unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask,
+			     pgprot_t prot)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
 	unsigned long last_map_addr = end;

arch/x86/mm/init_64.c

Lines changed: 18 additions & 14 deletions

@@ -585,7 +585,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
  */
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t _prot, bool init)
 {
 	unsigned long pages = 0, paddr_next;
 	unsigned long paddr_last = paddr_end;

@@ -595,7 +595,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
 		pud_t *pud;
 		pmd_t *pmd;
-		pgprot_t prot = PAGE_KERNEL;
+		pgprot_t prot = _prot;
 
 		vaddr = (unsigned long)__va(paddr);
 		pud = pud_page + pud_index(vaddr);

@@ -644,9 +644,12 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
+
+			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+
 			set_pte_init((pte_t *)pud,
 				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
-					     PAGE_KERNEL_LARGE),
+					     prot),
 				     init);
 			spin_unlock(&init_mm.page_table_lock);
 			paddr_last = paddr_next;

@@ -669,7 +672,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 
 static unsigned long __meminit
 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t prot, bool init)
 {
 	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
 

@@ -679,7 +682,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 
 	if (!pgtable_l5_enabled())
 		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
-				     page_size_mask, init);
+				     page_size_mask, prot, init);
 
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		p4d_t *p4d = p4d_page + p4d_index(vaddr);

@@ -702,13 +705,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 		if (!p4d_none(*p4d)) {
 			pud = pud_offset(p4d, 0);
 			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					page_size_mask, init);
+					page_size_mask, prot, init);
 			continue;
 		}
 
 		pud = alloc_low_page();
 		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		p4d_populate_init(&init_mm, p4d, pud, init);

@@ -722,7 +725,7 @@ static unsigned long __meminit
 __kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
-			       bool init)
+			       pgprot_t prot, bool init)
 {
 	bool pgd_changed = false;
 	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

@@ -743,13 +746,13 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
 			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
-						   init);
+						   prot, init);
 			continue;
 		}
 
 		p4d = alloc_low_page();
 		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		if (pgtable_l5_enabled())

@@ -778,10 +781,10 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
 unsigned long __meminit
 kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask, pgprot_t prot)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, true);
+					      page_size_mask, prot, true);
 }
 
 /*

@@ -796,7 +799,8 @@ kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long page_size_mask)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, false);
+					      page_size_mask, PAGE_KERNEL,
+					      false);
 }
 
 #ifndef CONFIG_NUMA

@@ -863,7 +867,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	init_memory_mapping(start, start + size);
+	init_memory_mapping(start, start + size, PAGE_KERNEL);
 
 	return add_pages(nid, start_pfn, nr_pages, params);
 }
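
One behavioural detail worth noting in the init_64.c hunk at line 644 above: for 1 GiB mappings the caller's protection is no longer replaced wholesale by PAGE_KERNEL_LARGE; instead the large-page bits are OR-ed into it. A standalone fragment of that combination, assuming the usual x86 pgtable_types.h macros:

/* __PAGE_KERNEL_LARGE is the kernel mapping bits with _PAGE_PSE set, so
 * OR-ing it into the caller-supplied prot marks the PUD entry as a
 * 1 GiB page while preserving any extra bits the caller requested. */
pgprot_t large_prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);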

arch/x86/mm/mm_internal.h

Lines changed: 2 additions & 1 deletion

@@ -12,7 +12,8 @@ void early_ioremap_page_table_range_init(void);
 
 unsigned long kernel_physical_mapping_init(unsigned long start,
					   unsigned long end,
-					   unsigned long page_size_mask);
+					   unsigned long page_size_mask,
+					   pgprot_t prot);
 unsigned long kernel_physical_mapping_change(unsigned long start,
					     unsigned long end,
					     unsigned long page_size_mask);

arch/x86/platform/uv/bios_uv.c

Lines changed: 2 additions & 1 deletion

@@ -352,7 +352,8 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 	if (type == EFI_MEMORY_MAPPED_IO)
 		return ioremap(phys_addr, size);
 
-	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
+					   PAGE_KERNEL);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
 		efi_ioremap(top, size - (top - phys_addr), type, attribute);
