Commit 837c07c
Merge tag 'powerpc-6.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
 "It's a bit of a big batch for rc6, but just because I didn't send any
  fixes the last week or two while I was on vacation, next week should
  be quieter:

   - Fix a few objtool warnings since we recently enabled objtool.

   - Fix a deadlock with the hash MMU vs perf record.

   - Fix perf profiling of asynchronous interrupt handlers.

   - Revert the IMC PMU nest_init_lock to being a mutex.

   - Two commits fixing problems with the kexec_file FDT size estimation.

   - Two commits fixing problems with strict RWX vs kernels running at a
     non-zero address.

   - Reconnect tlb_flush() to hash__tlb_flush().

  Thanks to Kajol Jain, Nicholas Piggin, Sachin Sant, Sathvika Vasireddy,
  and Sourabh Jain"

* tag 'powerpc-6.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s: Reconnect tlb_flush() to hash__tlb_flush()
  powerpc/kexec_file: Count hot-pluggable memory in FDT estimate
  powerpc/64s/radix: Fix RWX mapping with relocated kernel
  powerpc/64s/radix: Fix crash with unaligned relocated kernel
  powerpc/kexec_file: Fix division by zero in extra size estimation
  powerpc/imc-pmu: Revert nest_init_lock to being a mutex
  powerpc/64: Fix perf profiling asynchronous interrupt handlers
  powerpc/64s: Fix local irq disable when PMIs are disabled
  powerpc/kvm: Fix unannotated intra-function call warning
  powerpc/85xx: Fix unannotated intra-function call warning
2 parents 9507806 + 1665c02 commit 837c07c

10 files changed: +77 −31 lines

arch/powerpc/include/asm/book3s/64/tlbflush.h

Lines changed: 2 additions & 0 deletions
@@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	if (radix_enabled())
 		radix__tlb_flush(tlb);
+
+	return hash__tlb_flush(tlb);
 }
 
 #ifdef CONFIG_SMP
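
For context, the pre-fix code simply fell through after the radix branch, so hash-MMU systems never flushed. A minimal userspace sketch of the bug class, using hypothetical stand-ins (radix_enabled_stub, hash_flush) rather than the kernel's implementations:

#include <stdio.h>
#include <stdbool.h>

static bool radix_enabled_stub;	/* stand-in for radix_enabled() */

static void radix_flush(void) { puts("radix flush"); }
static void hash_flush(void)  { puts("hash flush"); }

/* Buggy shape: a cleanup dropped the hash fallback, so with the hash
 * MMU this function does nothing and stale translations survive. */
static void tlb_flush_buggy(void)
{
	if (radix_enabled_stub)
		radix_flush();
}

/* Fixed shape, equivalent to the hunk above. (The kernel's
 * hash__tlb_flush() drains the pending hash batch, which radix never
 * fills, so the unconditional call in the hunk is harmless; this
 * sketch uses an explicit else for clarity.) */
static void tlb_flush_fixed(void)
{
	if (radix_enabled_stub)
		radix_flush();
	else
		hash_flush();
}

int main(void)
{
	radix_enabled_stub = false;	/* hash MMU */
	tlb_flush_buggy();	/* prints nothing */
	tlb_flush_fixed();	/* prints "hash flush" */
	return 0;
}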

arch/powerpc/include/asm/hw_irq.h

Lines changed: 30 additions & 13 deletions
@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 	return flags;
 }
 
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+	unsigned long flags = irq_soft_mask_return();
+
+	irq_soft_mask_set(flags & ~mask);
+
+	return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
 	return irq_soft_mask_return();
@@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void)
 
 static inline unsigned long arch_local_irq_save(void)
 {
-	return irq_soft_mask_set_return(IRQS_DISABLED);
+	return irq_soft_mask_or_return(IRQS_DISABLED);
 }
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
  * is a different soft-masked interrupt pending that requires hard
  * masking.
  */
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
 		WARN_ON(mfmsr() & MSR_EE);
 	}
 
@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
 	 *
 	 * TODO: Add test for 64e
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
-		return false;
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+		if (!power_pmu_wants_prompt_pmi())
+			return false;
+		/*
+		 * If PMIs are disabled then IRQs should be disabled as well,
+		 * so we shouldn't see this condition, check for it just in
+		 * case because we are about to enable PMIs.
+		 */
+		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+			return false;
+	}
 
 	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
 		return false;
@@ -358,18 +377,16 @@
 
 /*
  * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
  */
 static inline void do_hard_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-		WARN_ON(mfmsr() & MSR_EE);
-	}
 	/*
-	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
-	 * There is no other reason to enable this way.
+	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
+	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
 	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 	__hard_irq_enable();
 }
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	return false;
 }
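
The new irq_soft_mask_andc_return() follows the existing *_or_return() pattern: update the per-CPU soft mask and hand back the previous value so callers can restore it. A small self-contained model of the semantics, with made-up flag values standing in for the kernel's IRQS_* constants:

#include <assert.h>

/* Hypothetical flag values, not the kernel's actual IRQS_* encoding. */
enum {
	FLAGS_DISABLED     = 1u << 0,
	FLAGS_PMI_DISABLED = 1u << 1,
};

static unsigned int soft_mask;	/* models the per-CPU soft-mask word */

/* Clear @mask bits (AND-with-complement) and return the previous mask,
 * mirroring the semantics of irq_soft_mask_andc_return(). */
static unsigned int soft_mask_andc_return(unsigned int mask)
{
	unsigned int old = soft_mask;

	soft_mask = old & ~mask;
	return old;
}

int main(void)
{
	soft_mask = FLAGS_DISABLED | FLAGS_PMI_DISABLED;

	/* do_hard_irq_enable() uses this to let PMIs back in while
	 * ordinary interrupts stay soft-disabled. */
	unsigned int old = soft_mask_andc_return(FLAGS_PMI_DISABLED);

	assert(old == (FLAGS_DISABLED | FLAGS_PMI_DISABLED));
	assert(soft_mask == FLAGS_DISABLED);
	return 0;
}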

arch/powerpc/kernel/dbell.c

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
 
 	ppc_msgsync();
 
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	kvmppc_clear_host_ipi(smp_processor_id());

arch/powerpc/kernel/head_85xx.S

Lines changed: 2 additions & 1 deletion
@@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
  * SPE unavailable trap from kernel - print a message, but let
  * the task use SPE in the kernel until it returns to user mode.
  */
-KernelSPE:
+SYM_FUNC_START_LOCAL(KernelSPE)
 	lwz	r3,_MSR(r1)
 	oris	r3,r3,MSR_SPE@h
 	stw	r3,_MSR(r1)	/* enable use of SPE after return */
@@ -881,6 +881,7 @@ KernelSPE:
 #endif
 	.align	4,0
 
+SYM_FUNC_END(KernelSPE)
 #endif /* CONFIG_SPE */
 
 /*
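
SYM_FUNC_START_LOCAL()/SYM_FUNC_END() come from the kernel's linkage annotations (include/linux/linkage.h); they give KernelSPE a proper symbol type and size so objtool can treat branches that land on it as calls to a real function rather than unannotated intra-function calls. The general shape of the pattern, with a hypothetical symbol name:

SYM_FUNC_START_LOCAL(my_local_helper)	/* hypothetical local asm routine */
	/* ... body ... */
	blr				/* return */
SYM_FUNC_END(my_local_helper)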

arch/powerpc/kernel/irq.c

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
 	irq = static_call(ppc_get_irq)();
 
 	/* We can hard enable interrupts now to allow perf interrupts */
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	/* And finally process it */
arch/powerpc/kernel/time.c

Lines changed: 1 addition & 1 deletion
@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 	}
 
 	/* Conditionally hard-enable interrupts. */
-	if (should_hard_irq_enable()) {
+	if (should_hard_irq_enable(regs)) {
 		/*
 		 * Ensure a positive value is written to the decrementer, or
 		 * else some CPUs will continue to take decrementer exceptions.

arch/powerpc/kexec/file_load_64.c

Lines changed: 7 additions & 4 deletions
@@ -989,10 +989,13 @@ unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
 	 * linux,drconf-usable-memory properties. Get an approximate on the
 	 * number of usable memory entries and use for FDT size estimation.
 	 */
-	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
-		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
-
-	extra_size = (unsigned int)(usm_entries * sizeof(u64));
+	if (drmem_lmb_size()) {
+		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
+			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+		extra_size = (unsigned int)(usm_entries * sizeof(u64));
+	} else {
+		extra_size = 0;
+	}
 
 	/*
 	 * Get the number of CPU nodes in the current DT. This allows to
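
Two of the merged commits meet in this hunk: the divisions are guarded against drmem_lmb_size() being zero, and the estimate is based on memory_hotplug_max() instead of memblock_end_of_DRAM() so memory that may be hot-added later still fits in the FDT. A compilable sketch of the same arithmetic, with hypothetical values in place of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for drmem_lmb_size(), memory_hotplug_max()
 * and resource_size(&crashk_res); not the kernel's implementations. */
static uint64_t lmb_size;			/* 0 when no drconf memory */
static uint64_t hotplug_max = 1ull << 34;	/* 16 GiB incl. hotplug range */
static uint64_t crashk_size = 1ull << 28;	/* 256 MiB crashkernel */

static unsigned int extra_fdt_size(void)
{
	uint64_t usm_entries;

	/* Mirrors the fixed hunk: guard the divisions, and size the
	 * estimate by the hotplug maximum rather than the current end
	 * of DRAM. */
	if (lmb_size)
		usm_entries = hotplug_max / lmb_size +
			      2 * (crashk_size / lmb_size);
	else
		usm_entries = 0;

	return (unsigned int)(usm_entries * sizeof(uint64_t));
}

int main(void)
{
	lmb_size = 0;			/* the old code divided by zero here */
	printf("%u\n", extra_fdt_size());	/* 0 */

	lmb_size = 256 << 20;		/* 256 MiB LMBs */
	printf("%u\n", extra_fdt_size());	/* 66 entries * 8 bytes = 528 */
	return 0;
}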

arch/powerpc/kvm/booke.c

Lines changed: 2 additions & 3 deletions
@@ -912,16 +912,15 @@ static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
 
 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
 {
-	ulong r1, ip, msr, lr;
+	ulong r1, msr, lr;
 
 	asm("mr %0, 1" : "=r"(r1));
 	asm("mflr %0" : "=r"(lr));
 	asm("mfmsr %0" : "=r"(msr));
-	asm("bl 1f; 1: mflr %0" : "=r"(ip));
 
 	memset(regs, 0, sizeof(*regs));
 	regs->gpr[1] = r1;
-	regs->nip = ip;
+	regs->nip = _THIS_IP_;
 	regs->msr = msr;
 	regs->link = lr;
 }
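
The removed asm("bl 1f; 1: mflr %0") was a branch-and-link to the next instruction, which objtool flags as an unannotated intra-function call. _THIS_IP_ captures the current instruction address without any branch; it is defined in the kernel along these lines (see include/linux/instruction_pointer.h), demonstrated here as a standalone GCC/Clang build:

#include <stdio.h>

/* Roughly how the kernel defines _THIS_IP_, using the labels-as-values
 * extension (a sketch of the idea, not a verbatim copy). */
#define THIS_IP ({ __label__ __here; __here: (unsigned long)&&__here; })

int main(void)
{
	/* Takes the current code address without a branch-and-link, so
	 * there is no intra-function call for objtool to complain about. */
	printf("ip = %#lx\n", THIS_IP);
	return 0;
}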

arch/powerpc/mm/book3s64/radix_pgtable.c

Lines changed: 24 additions & 0 deletions
@@ -234,6 +234,14 @@ void radix__mark_rodata_ro(void)
 	end = (unsigned long)__end_rodata;
 
 	radix__change_memory_range(start, end, _PAGE_WRITE);
+
+	for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
+		end = start + PAGE_SIZE;
+		if (overlaps_interrupt_vector_text(start, end))
+			radix__change_memory_range(start, end, _PAGE_WRITE);
+		else
+			break;
+	}
 }
 
 void radix__mark_initmem_nx(void)
@@ -262,6 +270,22 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
 static unsigned long next_boundary(unsigned long addr, unsigned long end)
 {
 #ifdef CONFIG_STRICT_KERNEL_RWX
+	unsigned long stext_phys;
+
+	stext_phys = __pa_symbol(_stext);
+
+	// Relocatable kernel running at non-zero real address
+	if (stext_phys != 0) {
+		// The end of interrupts code at zero is a rodata boundary
+		unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
+		if (addr < end_intr)
+			return end_intr;
+
+		// Start of relocated kernel text is a rodata boundary
+		if (addr < stext_phys)
+			return stext_phys;
+	}
+
 	if (addr < __pa_symbol(__srwx_boundary))
 		return __pa_symbol(__srwx_boundary);
 #endif
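
The ordering matters: for a relocated kernel, next_boundary() must report the end of the copied-down interrupt vectors first, then the start of the relocated text, and only then the usual strict-RWX boundary. A worked example with assumed addresses (all values illustrative, and the kernel's (addr, end) signature reduced to addr):

#include <assert.h>

/* Assumed layout: relocatable kernel loaded at 64 MiB real, interrupt
 * vectors copied down to zero. None of these are real-machine values. */
#define STEXT_PHYS	0x4000000UL	/* relocated _stext */
#define END_INTR	0x3000UL	/* __end_interrupts - _stext */
#define SRWX_BOUNDARY	0x4800000UL	/* __srwx_boundary equivalent */

static unsigned long next_boundary(unsigned long addr)
{
	/* Mirrors the fixed hunk for the relocated case. */
	if (STEXT_PHYS != 0) {
		if (addr < END_INTR)
			return END_INTR;	/* vectors at 0 stay a boundary */
		if (addr < STEXT_PHYS)
			return STEXT_PHYS;	/* gap below the kernel is data */
	}
	if (addr < SRWX_BOUNDARY)
		return SRWX_BOUNDARY;
	return addr;
}

int main(void)
{
	assert(next_boundary(0x0)        == END_INTR);	/* stop at end of vectors */
	assert(next_boundary(END_INTR)   == STEXT_PHYS);	/* then at kernel text */
	assert(next_boundary(STEXT_PHYS) == SRWX_BOUNDARY);
	return 0;
}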

arch/powerpc/perf/imc-pmu.c

Lines changed: 7 additions & 7 deletions
@@ -22,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_SPINLOCK(nest_init_lock);
+static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
@@ -1629,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
 	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-		spin_lock(&nest_init_lock);
+		mutex_lock(&nest_init_lock);
 		if (nest_pmus == 1) {
 			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
 			kfree(nest_imc_refc);
@@ -1639,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 
 		if (nest_pmus > 0)
 			nest_pmus--;
-		spin_unlock(&nest_init_lock);
+		mutex_unlock(&nest_init_lock);
 	}
 
 	/* Free core_imc memory */
@@ -1796,27 +1796,27 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 		 * rest. To handle the cpuhotplug callback unregister, we track
 		 * the number of nest pmus in "nest_pmus".
 		 */
-		spin_lock(&nest_init_lock);
+		mutex_lock(&nest_init_lock);
 		if (nest_pmus == 0) {
 			ret = init_nest_pmu_ref();
 			if (ret) {
-				spin_unlock(&nest_init_lock);
+				mutex_unlock(&nest_init_lock);
 				kfree(per_nest_pmu_arr);
 				per_nest_pmu_arr = NULL;
 				goto err_free_mem;
 			}
 			/* Register for cpu hotplug notification. */
 			ret = nest_pmu_cpumask_init();
 			if (ret) {
-				spin_unlock(&nest_init_lock);
+				mutex_unlock(&nest_init_lock);
 				kfree(nest_imc_refc);
 				kfree(per_nest_pmu_arr);
 				per_nest_pmu_arr = NULL;
 				goto err_free_mem;
 			}
 		}
 		nest_pmus++;
-		spin_unlock(&nest_init_lock);
+		mutex_unlock(&nest_init_lock);
 		break;
 	case IMC_DOMAIN_CORE:
 		ret = core_imc_pmu_cpumask_init();
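
The revert restores nest_init_lock to a mutex because the critical section calls functions that can sleep, such as cpuhp_remove_state() and the allocations behind init_nest_pmu_ref(), which is not permitted while holding a spinlock. A loose userspace analogy of the pattern, with pthreads standing in for the kernel mutex and a hypothetical helper in place of the real registration calls:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t nest_init_lock = PTHREAD_MUTEX_INITIALIZER;
static int nest_pmus;

/* First registrant does the one-time setup; the count tracks how many
 * nest PMUs share it, as in init_imc_pmu() above. The setup may block
 * (allocation, callback registration), hence a sleeping lock. */
static int register_nest_pmu(void)
{
	int ret = 0;

	pthread_mutex_lock(&nest_init_lock);
	if (nest_pmus == 0) {
		void *refc = malloc(128);	/* models init_nest_pmu_ref() */
		if (!refc) {
			ret = -1;
			goto out;
		}
		free(refc);	/* keep the sketch leak-free */
	}
	nest_pmus++;
out:
	pthread_mutex_unlock(&nest_init_lock);
	return ret;
}

int main(void)
{
	return register_nest_pmu();
}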
