Commit ba1004f

Marc Zyngier committed
arm64: smp: Support non-SGIs for IPIs
The arm64 arch has so far relied on GIC architectural software generated interrupts (SGIs) to handle IPIs. Those are per-cpu software generated interrupts, and the arm64 architecture code that allocates the IPIs' virtual IRQs and IRQ descriptors was written accordingly.

On GICv5 systems, IPIs are implemented using LPIs, which are not per-cpu interrupts - they are just normal routable IRQs.

Add arch code to set up IPIs on systems where they are handled using normal routable IRQs. For those systems, force the IRQ affinity (and make it immutable) to the cpu a given IRQ was assigned to.

Signed-off-by: Timothy Hayes <[email protected]>
[lpieralisi: changed affinity set-up, log]
Signed-off-by: Lorenzo Pieralisi <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Catalin Marinas <[email protected]>
Acked-by: Catalin Marinas <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
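For context, a minimal driver-side sketch of how the new entry point is meant to be called (the names gicv5_smp_init() and gicv5_alloc_ipi_lpis() below are illustrative placeholders, not part of this commit): a driver whose IPIs are ordinary routable IRQs allocates nr_ipi interrupts per CPU as one consecutive range and passes the CPU count, while SGI-based drivers keep using the unchanged set_smp_ipi_range() wrapper.

	/* Hypothetical GICv5-style caller; the allocator is a stand-in. */
	extern int gicv5_alloc_ipi_lpis(int count);

	static void __init gicv5_smp_init(void)
	{
		int ncpus = num_possible_cpus();
		int base = gicv5_alloc_ipi_lpis(NR_IPI * ncpus);

		/* One block of NR_IPI routable IRQs per possible CPU */
		set_smp_ipi_range_percpu(base, NR_IPI, ncpus);
	}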
1 parent 988699f

2 files changed, 98 insertions(+), 34 deletions(-)

arch/arm64/include/asm/smp.h

Lines changed: 6 additions & 1 deletion

@@ -53,7 +53,12 @@ extern void smp_init_cpus(void);
 /*
  * Register IPI interrupts with the arch SMP code
  */
-extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
+extern void set_smp_ipi_range_percpu(int ipi_base, int nr_ipi, int ncpus);
+
+static inline void set_smp_ipi_range(int ipi_base, int n)
+{
+	set_smp_ipi_range_percpu(ipi_base, n, 0);
+}
 
 /*
  * Called from the secondary holding pen, this is the secondary CPU entry point.
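The wrapper keeps every existing SGI-based caller source- and behaviour-compatible: passing ncpus == 0 leaves percpu_ipi_descs false, so the arch code takes the per-cpu SGI path exactly as before. A minimal illustration of the two call forms:

	set_smp_ipi_range(ipi_base, nr_ipi);		/* SGIs: ncpus == 0 */
	set_smp_ipi_range_percpu(ipi_base, nr_ipi,
				 num_possible_cpus());	/* routable per-CPU IRQs */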

arch/arm64/kernel/smp.c

Lines changed: 92 additions & 33 deletions
@@ -83,7 +83,16 @@ enum ipi_msg_type {
 
 static int ipi_irq_base __ro_after_init;
 static int nr_ipi __ro_after_init = NR_IPI;
-static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+
+struct ipi_descs {
+	struct irq_desc *descs[MAX_IPI];
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct ipi_descs, pcpu_ipi_desc);
+
+#define get_ipi_desc(__cpu, __ipi)	(per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
+
+static bool percpu_ipi_descs __ro_after_init;
 
 static bool crash_stop;
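The per-cpu table replaces the single global ipi_desc[] array. Conceptually, once setup completes (see ipi_setup_sgi() and ipi_setup_lpi() below), the slots hold:

	/*
	 * SGI systems: every CPU's slot aliases the same descriptor,
	 *	get_ipi_desc(cpu, i) == irq_to_desc(ipi_irq_base + i)
	 * LPI systems: one routable IRQ per CPU per IPI,
	 *	get_ipi_desc(cpu, i) == irq_to_desc(ipi_irq_base + cpu * nr_ipi + i)
	 */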

@@ -844,7 +853,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
+			seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}

@@ -917,9 +926,20 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 #endif
 }
 
+static void arm64_send_ipi(const cpumask_t *mask, unsigned int nr)
+{
+	unsigned int cpu;
+
+	if (!percpu_ipi_descs)
+		__ipi_send_mask(get_ipi_desc(0, nr), mask);
+	else
+		for_each_cpu(cpu, mask)
+			__ipi_send_single(get_ipi_desc(cpu, nr), cpu);
+}
+
 static void arm64_backtrace_ipi(cpumask_t *mask)
 {
-	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+	arm64_send_ipi(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
@@ -944,7 +964,7 @@ void kgdb_roundup_cpus(void)
 		if (cpu == this_cpu)
 			continue;
 
-		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+		__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
 	}
 }
 #endif
@@ -1013,14 +1033,16 @@ static void do_handle_IPI(int ipinr)
 
 static irqreturn_t ipi_handler(int irq, void *data)
 {
-	do_handle_IPI(irq - ipi_irq_base);
+	unsigned int ipi = (irq - ipi_irq_base) % nr_ipi;
+
+	do_handle_IPI(ipi);
 	return IRQ_HANDLED;
 }
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
 	trace_ipi_raise(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	arm64_send_ipi(target, ipinr);
 }
 
 static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
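The modulo recovers the IPI number on systems where each CPU owns a private block of nr_ipi IRQs. A worked example with assumed values (ipi_irq_base = 64, nr_ipi = 8): CPU 2's IPI 3 is requested as irq = 64 + 2 * 8 + 3 = 83, and the handler computes (83 - 64) % 8 = 3. On SGI systems irq - ipi_irq_base is already below nr_ipi, so the modulo is a no-op.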
@@ -1046,11 +1068,15 @@ static void ipi_setup(int cpu)
 		return;
 
 	for (i = 0; i < nr_ipi; i++) {
-		if (ipi_should_be_nmi(i)) {
-			prepare_percpu_nmi(ipi_irq_base + i);
-			enable_percpu_nmi(ipi_irq_base + i, 0);
+		if (!percpu_ipi_descs) {
+			if (ipi_should_be_nmi(i)) {
+				prepare_percpu_nmi(ipi_irq_base + i);
+				enable_percpu_nmi(ipi_irq_base + i, 0);
+			} else {
+				enable_percpu_irq(ipi_irq_base + i, 0);
+			}
 		} else {
-			enable_percpu_irq(ipi_irq_base + i, 0);
+			enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
 		}
 	}
 }
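Note the asymmetry this introduces: per-cpu SGIs are still enabled and disabled with the percpu IRQ/NMI helpers, while routable per-CPU IRQs are requested once at boot with IRQF_NO_AUTOEN (see ipi_setup_lpi() below) and then switched per CPU from the hotplug path:

	/* LPI systems, conceptual hotplug pairing */
	request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI", NULL);	/* boot: registered but masked */
	enable_irq(irq);	/* ipi_setup(): CPU coming online */
	disable_irq(irq);	/* ipi_teardown(): CPU going offline */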
@@ -1064,44 +1090,77 @@ static void ipi_teardown(int cpu)
 		return;
 
 	for (i = 0; i < nr_ipi; i++) {
-		if (ipi_should_be_nmi(i)) {
-			disable_percpu_nmi(ipi_irq_base + i);
-			teardown_percpu_nmi(ipi_irq_base + i);
+		if (!percpu_ipi_descs) {
+			if (ipi_should_be_nmi(i)) {
+				disable_percpu_nmi(ipi_irq_base + i);
+				teardown_percpu_nmi(ipi_irq_base + i);
+			} else {
+				disable_percpu_irq(ipi_irq_base + i);
+			}
 		} else {
-			disable_percpu_irq(ipi_irq_base + i);
+			disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
 		}
 	}
 }
 #endif
 
-void __init set_smp_ipi_range(int ipi_base, int n)
+static void ipi_setup_sgi(int ipi)
 {
-	int i;
+	int err, irq, cpu;
 
-	WARN_ON(n < MAX_IPI);
-	nr_ipi = min(n, MAX_IPI);
+	irq = ipi_irq_base + ipi;
 
-	for (i = 0; i < nr_ipi; i++) {
-		int err;
+	if (ipi_should_be_nmi(irq)) {
+		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
+		WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
+	} else {
+		err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
+		WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
+	}
 
-		if (ipi_should_be_nmi(i)) {
-			err = request_percpu_nmi(ipi_base + i, ipi_handler,
-						 "IPI", &irq_stat);
-			WARN(err, "Could not request IPI %d as NMI, err=%d\n",
-			     i, err);
-		} else {
-			err = request_percpu_irq(ipi_base + i, ipi_handler,
-						 "IPI", &irq_stat);
-			WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
-			     i, err);
-		}
+	for_each_possible_cpu(cpu)
+		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
+
+	irq_set_status_flags(irq, IRQ_HIDDEN);
+}
+
+static void ipi_setup_lpi(int ipi, int ncpus)
+{
+	for (int cpu = 0; cpu < ncpus; cpu++) {
+		int err, irq;
 
-		ipi_desc[i] = irq_to_desc(ipi_base + i);
-		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+		irq = ipi_irq_base + (cpu * nr_ipi) + ipi;
+
+		err = irq_force_affinity(irq, cpumask_of(cpu));
+		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
+
+		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
+				  NULL);
+		WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
+
+		irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
+
+		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
 	}
+}
 
+void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	percpu_ipi_descs = !!ncpus;
 	ipi_irq_base = ipi_base;
 
+	for (i = 0; i < nr_ipi; i++) {
+		if (!percpu_ipi_descs)
+			ipi_setup_sgi(i);
+		else
+			ipi_setup_lpi(i, ncpus);
+	}
+
 	/* Setup the boot CPU immediately */
 	ipi_setup(smp_processor_id());
 }
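To make the resulting numbering concrete, here is a small standalone C sketch (hypothetical values for ipi_irq_base, nr_ipi and ncpus) that prints the per-CPU IRQ layout built by ipi_setup_lpi() and checks it against the inverse mapping used in ipi_handler():

	#include <stdio.h>

	int main(void)
	{
		const int ipi_irq_base = 64, nr_ipi = 8, ncpus = 4;	/* assumed values */

		for (int cpu = 0; cpu < ncpus; cpu++)
			for (int ipi = 0; ipi < nr_ipi; ipi++) {
				/* layout from ipi_setup_lpi() */
				int irq = ipi_irq_base + cpu * nr_ipi + ipi;
				/* inverse from ipi_handler() */
				int back = (irq - ipi_irq_base) % nr_ipi;

				printf("cpu %d ipi %d -> irq %d -> ipi %d\n",
				       cpu, ipi, irq, back);
			}
		return 0;
	}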
