
Commit c19f897
Author: Marc Zyngier

irqchip/apple-aic: Move over to core ipi-mux
Now that the complexity of the AIC IPI mux has been copied into the core
code for the benefit of the riscv architecture, shrink the AIC driver by
the same amount by using that infrastructure.

Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Anup Patel <[email protected]>
Acked-by: Hector Martin <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Parent: 835a486
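As context for the diff below: the adoption pattern the core infrastructure
asks of a driver is small. Here is a minimal sketch of that pattern, assuming
a kernel with GENERIC_IRQ_IPI_MUX selected. The my_* names are hypothetical
placeholders for a driver's own hardware accessors (AIC's equivalents appear
in the diff), while ipi_mux_create(), ipi_mux_process() and
set_smp_ipi_range() are the actual calls this commit uses:

#include <linux/irq.h>
#include <linux/smp.h>

#define MY_NR_SWIPI	8	/* number of muxed vIPIs, like AIC_NR_SWIPI */

/* mux_send callback: raise the single HW IPI that backs all vIPIs. */
static void my_ipi_send_single(unsigned int cpu)
{
	my_hw_send_ipi(cpu);			/* hypothetical MMIO write */
}

static int __init my_init_smp(void)
{
	int base_ipi;

	/* Create MY_NR_SWIPI virtual IPIs multiplexed onto one HW IPI. */
	base_ipi = ipi_mux_create(MY_NR_SWIPI, my_ipi_send_single);
	if (WARN_ON(base_ipi <= 0))
		return -ENODEV;

	/* Hand the resulting Linux IRQ range to the arch SMP code. */
	set_smp_ipi_range(base_ipi, MY_NR_SWIPI);
	return 0;
}

/* HW IPI handler: ack the hardware, then let the core demux the vIPIs. */
static void my_handle_hw_ipi(void)
{
	my_hw_ack_ipi();			/* hypothetical ack write */
	ipi_mux_process();
}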

2 files changed, 9 insertions(+), 153 deletions(-)


drivers/irqchip/Kconfig: 1 addition, 0 deletions

@@ -658,6 +658,7 @@ config APPLE_AIC
 	bool "Apple Interrupt Controller (AIC)"
 	depends on ARM64
 	depends on ARCH_APPLE || COMPILE_TEST
+	select GENERIC_IRQ_IPI_MUX
 	help
 	  Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
 	  such as the M1.

drivers/irqchip/irq-apple-aic.c: 8 additions, 153 deletions

@@ -292,7 +292,6 @@ struct aic_irq_chip {
 	void __iomem *base;
 	void __iomem *event;
 	struct irq_domain *hw_domain;
-	struct irq_domain *ipi_domain;
 	struct {
 		cpumask_t aff;
 	} *fiq_aff[AIC_NR_FIQ];
@@ -307,9 +306,6 @@ struct aic_irq_chip {
 
 static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
 
-static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
-
 static struct aic_irq_chip *aic_irqc;
 
 static void aic_handle_ipi(struct pt_regs *regs);
@@ -751,98 +747,8 @@ static void aic_ipi_send_fast(int cpu)
 	isb();
 }
 
-static void aic_ipi_mask(struct irq_data *d)
-{
-	u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-	/* No specific ordering requirements needed here. */
-	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-}
-
-static void aic_ipi_unmask(struct irq_data *d)
-{
-	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-	u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-
-	/*
-	 * The atomic_or() above must complete before the atomic_read()
-	 * below to avoid racing aic_ipi_send_mask().
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
-	 * No barriers needed here since this is a self-IPI.
-	 */
-	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
-		if (static_branch_likely(&use_fast_ipi))
-			aic_ipi_send_fast(smp_processor_id());
-		else
-			aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
-	}
-}
-
-static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
-{
-	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-	u32 irq_bit = BIT(irqd_to_hwirq(d));
-	u32 send = 0;
-	int cpu;
-	unsigned long pending;
-
-	for_each_cpu(cpu, mask) {
-		/*
-		 * This sequence is the mirror of the one in aic_ipi_unmask();
-		 * see the comment there. Additionally, release semantics
-		 * ensure that the vIPI flag set is ordered after any shared
-		 * memory accesses that precede it. This therefore also pairs
-		 * with the atomic_fetch_andnot in aic_handle_ipi().
-		 */
-		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
-
-		/*
-		 * The atomic_fetch_or_release() above must complete before the
-		 * atomic_read() below to avoid racing aic_ipi_unmask().
-		 */
-		smp_mb__after_atomic();
-
-		if (!(pending & irq_bit) &&
-		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
-			if (static_branch_likely(&use_fast_ipi))
-				aic_ipi_send_fast(cpu);
-			else
-				send |= AIC_IPI_SEND_CPU(cpu);
-		}
-	}
-
-	/*
-	 * The flag writes must complete before the physical IPI is issued
-	 * to another CPU. This is implied by the control dependency on
-	 * the result of atomic_read_acquire() above, which is itself
-	 * already ordered after the vIPI flag write.
-	 */
-	if (send)
-		aic_ic_write(ic, AIC_IPI_SEND, send);
-}
-
-static struct irq_chip ipi_chip = {
-	.name = "AIC-IPI",
-	.irq_mask = aic_ipi_mask,
-	.irq_unmask = aic_ipi_unmask,
-	.ipi_send_mask = aic_ipi_send_mask,
-};
-
-/*
- * IPI IRQ domain
- */
-
 static void aic_handle_ipi(struct pt_regs *regs)
 {
-	int i;
-	unsigned long enabled, firing;
-
 	/*
 	 * Ack the IPI. We need to order this after the AIC event read, but
 	 * that is enforced by normal MMIO ordering guarantees.
@@ -857,27 +763,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
 		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
 	}
 
-	/*
-	 * The mask read does not need to be ordered. Only we can change
-	 * our own mask anyway, so no races are possible here, as long as
-	 * we are properly in the interrupt handler (which is covered by
-	 * the barrier that is part of the top-level AIC handler's readl()).
-	 */
-	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
-
-	/*
-	 * Clear the IPIs we are about to handle. This pairs with the
-	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
-	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
-	 * before IPI handling code (to avoid races handling vIPIs before they
-	 * are signaled). The former is taken care of by the release semantics
-	 * of the write portion, while the latter is taken care of by the
-	 * acquire semantics of the read portion.
-	 */
-	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
-
-	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
-		generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+	ipi_mux_process();
 
 	/*
 	 * No ordering needed here; at worst this just changes the timing of
@@ -887,55 +773,24 @@ static void aic_handle_ipi(struct pt_regs *regs)
 	aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
 }
 
-static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
-			 unsigned int nr_irqs, void *args)
+static void aic_ipi_send_single(unsigned int cpu)
 {
-	int i;
-
-	for (i = 0; i < nr_irqs; i++) {
-		irq_set_percpu_devid(virq + i);
-		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
-				    handle_percpu_devid_irq, NULL, NULL);
-	}
-
-	return 0;
-}
-
-static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
-{
-	/* Not freeing IPIs */
+	if (static_branch_likely(&use_fast_ipi))
+		aic_ipi_send_fast(cpu);
+	else
+		aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
 }
 
-static const struct irq_domain_ops aic_ipi_domain_ops = {
-	.alloc = aic_ipi_alloc,
-	.free = aic_ipi_free,
-};
-
 static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
-	struct irq_domain *ipi_domain;
 	int base_ipi;
 
-	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
-					      &aic_ipi_domain_ops, irqc);
-	if (WARN_ON(!ipi_domain))
-		return -ENODEV;
-
-	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
-	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-
-	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
-					   NUMA_NO_NODE, NULL, false, NULL);
-
-	if (WARN_ON(!base_ipi)) {
-		irq_domain_remove(ipi_domain);
+	base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
+	if (WARN_ON(base_ipi <= 0))
 		return -ENODEV;
-	}
 
 	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
 
-	irqc->ipi_domain = ipi_domain;
-
 	return 0;
 }
 
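For readers comparing before and after: the "complexity of the AIC IPI mux"
that the commit message says was copied into the core boils down to the
per-CPU pending/enable bitmap pair visible in the deleted lines above. A
condensed sketch of that scheme follows, grounded in the deleted driver code;
the sketch_* and vipi_* names, mux_send(), NR_VIPI and vipi_domain are
illustrative placeholders, not the actual internals of kernel/irq/ipi-mux.c,
which differ in detail:

static DEFINE_PER_CPU(atomic_t, vipi_flag);	/* pending muxed IPIs */
static DEFINE_PER_CPU(atomic_t, vipi_enable);	/* unmasked muxed IPIs */

/*
 * Sender side: mark the vIPI pending, then kick the HW IPI only if it
 * was not already pending and is currently enabled on the target CPU.
 */
static void sketch_send(unsigned int ipi, unsigned int cpu)
{
	u32 bit = BIT(ipi);
	u32 pending;

	/* Release ordering pairs with the fetch_andnot in sketch_process(). */
	pending = atomic_fetch_or_release(bit, per_cpu_ptr(&vipi_flag, cpu));
	smp_mb__after_atomic();

	if (!(pending & bit) &&
	    (atomic_read(per_cpu_ptr(&vipi_enable, cpu)) & bit))
		mux_send(cpu);	/* driver callback, e.g. aic_ipi_send_single() */
}

/*
 * Receiver side, called from the HW IPI handler: atomically claim every
 * enabled pending vIPI, then dispatch each one's virtual interrupt.
 */
static void sketch_process(void)
{
	unsigned long enabled, firing;
	int i;

	enabled = atomic_read(this_cpu_ptr(&vipi_enable));
	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&vipi_flag)) & enabled;

	for_each_set_bit(i, &firing, NR_VIPI)
		generic_handle_domain_irq(vipi_domain, i);
}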
