
Commit 09a4a03

Merge tag 'powerpc-6.6-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 - Fix boot crash with FLATMEM since set_ptes() introduction

 - Avoid calling arch_enter/leave_lazy_mmu() in set_ptes()

Thanks to Aneesh Kumar K.V and Erhard Furtner.

* tag 'powerpc-6.6-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm: Avoid calling arch_enter/leave_lazy_mmu() in set_ptes
  powerpc/mm: Fix boot crash with FLATMEM
2 parents 750b958 + 47b8def

3 files changed: +24 -11 lines changed

arch/powerpc/kernel/setup-common.c

Lines changed: 2 additions & 0 deletions
@@ -948,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
 
 	/* Parse memory topology */
 	mem_topology_setup();
+	/* Set max_mapnr before paging_init() */
+	set_max_mapnr(max_pfn);
 
 	/*
 	 * Release secondary cpus out of their spinloops at 0x60 now that

arch/powerpc/mm/mem.c

Lines changed: 0 additions & 1 deletion
@@ -288,7 +288,6 @@ void __init mem_init(void)
 #endif
 
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-	set_max_mapnr(max_pfn);
 
 	kasan_late_init();
 
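Taken together, these two hunks move set_max_mapnr(max_pfn) out of mem_init() and up into setup_arch(), right after the memory topology is parsed, so max_mapnr is already valid by the time paging_init() runs. Below is a minimal userspace sketch of why the ordering matters, assuming a FLATMEM-style pfn_valid() that boils down to a bounds check against max_mapnr; the names mirror the kernel's but this is an illustrative toy model, not kernel code.

#include <stdio.h>
#include <stdbool.h>

/* Toy model: assume a FLATMEM-style pfn_valid() that is just a bounds
 * check against max_mapnr (illustrative simplification, not the real
 * kernel implementation). */
static unsigned long max_mapnr;	/* 0 until set_max_mapnr() runs */

static void set_max_mapnr(unsigned long pfn)
{
	max_mapnr = pfn;
}

static bool pfn_valid(unsigned long pfn)
{
	return pfn < max_mapnr;
}

/* Stand-in for code that runs during paging_init() and consults
 * pfn_valid(); hypothetical, only here to show the ordering. */
static void paging_init_uses_pfn_valid(unsigned long pfn)
{
	printf("pfn %#lx valid? %s\n", pfn, pfn_valid(pfn) ? "yes" : "no");
}

int main(void)
{
	unsigned long max_pfn = 0x8000;	/* pretend memory topology result */

	/* Old ordering: pfn_valid() is consulted while max_mapnr is still 0,
	 * so every pfn looks invalid -- the kind of early-boot breakage the
	 * FLATMEM fix addresses. */
	paging_init_uses_pfn_valid(0x100);

	/* New ordering (this commit): set max_mapnr right after the memory
	 * topology is known, before paging_init() needs it. */
	set_max_mapnr(max_pfn);
	paging_init_uses_pfn_valid(0x100);

	return 0;
}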

arch/powerpc/mm/pgtable.c

Lines changed: 22 additions & 10 deletions
@@ -104,6 +104,8 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
 /* Embedded type MMU with HW exec support. This is a bit more complicated
  * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
  * instead we "filter out" the exec permission for non clean pages.
+ *
+ * This is also called once for the folio. So only work with folio->flags here.
  */
 static inline pte_t set_pte_filter(pte_t pte)
 {
@@ -190,29 +192,39 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte, unsigned int nr)
 {
-	/*
-	 * Make sure hardware valid bit is not set. We don't do
-	 * tlb flush for this update.
-	 */
-	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
 
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
-	 * is called.
+	 * is called. Filter the pte value and use the filtered value
+	 * to setup all the ptes in the range.
 	 */
 	pte = set_pte_filter(pte);
 
-	/* Perform the setting of the PTE */
-	arch_enter_lazy_mmu_mode();
+	/*
+	 * We don't need to call arch_enter/leave_lazy_mmu_mode()
+	 * because we expect set_ptes to be only be used on not present
+	 * and not hw_valid ptes. Hence there is no translation cache flush
+	 * involved that need to be batched.
+	 */
 	for (;;) {
+
+		/*
+		 * Make sure hardware valid bit is not set. We don't do
+		 * tlb flush for this update.
+		 */
+		VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+		/* Perform the setting of the PTE */
 		__set_pte_at(mm, addr, ptep, pte, 0);
 		if (--nr == 0)
 			break;
 		ptep++;
 		addr += PAGE_SIZE;
-		pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
+		/*
+		 * increment the pfn.
+		 */
+		pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte)));
 	}
-	arch_leave_lazy_mmu_mode();
 }
 
 void unmap_kernel_page(unsigned long va)
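The reworked loop drops the lazy-MMU bracketing and advances the PTE one page frame per iteration with pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte)) instead of adding 1UL << PTE_RPN_SHIFT to the raw value. Below is a small userspace sketch of that per-iteration step, using an invented PTE layout (PTE_RPN_SHIFT, permission mask) purely to show that the two formulations advance the same pfn while the pgprot bits ride along unchanged; it is not powerpc's real encoding.

#include <stdio.h>
#include <assert.h>

/* Toy PTE encoding: a pfn ("RPN") field above a shift plus low
 * permission bits. These constants are made up for illustration. */
#define PTE_RPN_SHIFT	12UL
#define PTE_PERM_MASK	((1UL << PTE_RPN_SHIFT) - 1)

typedef unsigned long pte_t;

static unsigned long pte_pfn(pte_t pte)
{
	return pte >> PTE_RPN_SHIFT;
}

static unsigned long pte_pgprot(pte_t pte)
{
	return pte & PTE_PERM_MASK;
}

static pte_t pfn_pte(unsigned long pfn, unsigned long pgprot)
{
	return (pfn << PTE_RPN_SHIFT) | pgprot;
}

int main(void)
{
	pte_t pte = pfn_pte(0x1234, 0x7);	/* arbitrary pfn + perms */

	/* Old style: bump the raw PTE value by one RPN unit. */
	pte_t old_way = pte + (1UL << PTE_RPN_SHIFT);

	/* New style (this commit): decompose, increment the pfn, rebuild. */
	pte_t new_way = pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));

	assert(old_way == new_way);
	printf("next pte: pfn=%#lx perms=%#lx\n",
	       pte_pfn(new_way), pte_pgprot(new_way));
	return 0;
}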
