
Commit 7900757

kvaneesh authored and mpe committed
powerpc/hash64: Restrict page table lookup using init_mm with __flush_hash_table_range
This is only used with init_mm currently. Walking init_mm is much simpler because we don't need to handle concurrent page table updates as we do for other mm contexts.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent ec4abf1 commit 7900757
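
The simplification rests on the helper find_init_mm_pte(), which the hash_tlb.c hunk below calls but this diff does not add. A plausible sketch of that helper, assuming it wraps the generic page table walker __find_linux_pte():

	/*
	 * Sketch only: a plausible shape for find_init_mm_pte(), assumed
	 * here rather than taken from this commit. Kernel mappings in
	 * init_mm are never transparent huge pages and are not torn down
	 * concurrently, so the walk needs no is_thp out-parameter and no
	 * mm argument.
	 */
	static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
	{
		pgd_t *pgdir = init_mm.pgd;

		return __find_linux_pte(pgdir, ea, NULL, hshift);
	}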

3 files changed, 5 insertions(+), 16 deletions(-)


arch/powerpc/include/asm/book3s/64/tlbflush-hash.h (1 addition, 2 deletions)

@@ -113,8 +113,7 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
 struct mmu_gather;
 extern void hash__tlb_flush(struct mmu_gather *tlb);
 /* Private function for use by PCI IO mapping code */
-extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
-				     unsigned long end);
+extern void __flush_hash_table_range(unsigned long start, unsigned long end);
 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
 			    unsigned long addr);
 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */

arch/powerpc/kernel/pci_64.c (1 addition, 1 deletion)

@@ -100,7 +100,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 		 pci_name(bus->self));
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
+	__flush_hash_table_range(res->start + _IO_BASE,
 				 res->end + _IO_BASE + 1);
 #endif
 	return 0;

arch/powerpc/mm/book3s64/hash_tlb.c (3 additions, 13 deletions)

@@ -176,7 +176,6 @@ void hash__tlb_flush(struct mmu_gather *tlb)
  * from the hash table (and the TLB). But keeps
  * the linux PTEs intact.
  *
- * @mm		: mm_struct of the target address space (generally init_mm)
  * @start	: starting address
  * @end		: ending address (not included in the flush)
  *
@@ -189,17 +188,14 @@ void hash__tlb_flush(struct mmu_gather *tlb)
  * Because of that usage pattern, it is implemented for small size rather
  * than speed.
  */
-void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
-			      unsigned long end)
+void __flush_hash_table_range(unsigned long start, unsigned long end)
 {
-	bool is_thp;
 	int hugepage_shift;
 	unsigned long flags;
 
 	start = _ALIGN_DOWN(start, PAGE_SIZE);
 	end = _ALIGN_UP(end, PAGE_SIZE);
 
-	BUG_ON(!mm->pgd);
 
 	/*
 	 * Note: Normally, we should only ever use a batch within a
@@ -212,21 +208,15 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
-						  &hugepage_shift);
+		pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
 		unsigned long pte;
 
 		if (ptep == NULL)
 			continue;
 		pte = pte_val(*ptep);
-		if (is_thp)
-			trace_hugepage_invalidate(start, pte);
 		if (!(pte & H_PAGE_HASHPTE))
 			continue;
-		if (unlikely(is_thp))
-			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
-		else
-			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
+		hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);
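
Assembled from the hunks above, the post-patch function reads as follows (a reconstruction from this diff, not verbatim kernel source; the explanatory comment between the alignment and the IRQ-disable section is elided):

	void __flush_hash_table_range(unsigned long start, unsigned long end)
	{
		int hugepage_shift;
		unsigned long flags;

		start = _ALIGN_DOWN(start, PAGE_SIZE);
		end = _ALIGN_UP(end, PAGE_SIZE);

		/* ... (comment about batch usage elided; see hunk above) ... */

		local_irq_save(flags);
		arch_enter_lazy_mmu_mode();
		for (; start < end; start += PAGE_SIZE) {
			/* init_mm walk: no THP case, so no is_thp handling */
			pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
			unsigned long pte;

			if (ptep == NULL)
				continue;
			pte = pte_val(*ptep);
			/* Only PTEs with a hash entry need an HPTE flush */
			if (!(pte & H_PAGE_HASHPTE))
				continue;
			hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
		}
		arch_leave_lazy_mmu_mode();
		local_irq_restore(flags);
	}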
