// SPDX-License-Identifier: GPL-2.0-only
/*
 * EN751221 Interrupt Controller Driver.
 *
 * The EcoNet EN751221 Interrupt Controller is a simple interrupt controller
 * designed for the MIPS 34Kc MT SMP processor with 2 VPEs. Each interrupt can
 * be routed to either VPE but not both, so to support per-CPU interrupts, a
 * secondary IRQ number is allocated to control masking/unmasking on VPE#1. In
 * this driver, these are called "shadow interrupts". The assignment of shadow
 * interrupts is defined by the SoC integrator when wiring the interrupt lines,
 * so they are configurable in the device tree.
 *
 * If an interrupt (say 30) needs per-CPU capability, the SoC integrator
 * allocates another IRQ number (say 29) to be its shadow. The device tree
 * reflects this by adding the pair <30 29> to the "econet,shadow-interrupts"
 * property.
 *
 * When VPE#1 requests IRQ 30, the driver manipulates the mask bit for IRQ 29,
 * telling the hardware to mask VPE#1's view of IRQ 30.
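 *
 * A hypothetical device tree node using that pair might look as follows
 * (the unit address, reg values and parent interrupt below are purely
 * illustrative, not taken from real hardware):
 *
 *     intc: interrupt-controller@1fb40000 {
 *         compatible = "econet,en751221-intc";
 *         reg = <0x1fb40000 0x100>;
 *         interrupt-controller;
 *         #interrupt-cells = <1>;
 *         interrupt-parent = <&cpuintc>;
 *         interrupts = <2>;
 *         econet,shadow-interrupts = <30 29>;
 *     };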
 *
 * Copyright (C) 2025 Caleb James DeLisle <[email protected]>
 */

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define IRQ_COUNT 40

#define NOT_PERCPU 0xff
#define IS_SHADOW 0xfe

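/* Two register banks: the *0 registers cover hwirqs 0-31, the *1 registers hwirqs 32-39 */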
#define REG_MASK0 0x04
#define REG_MASK1 0x50
#define REG_PENDING0 0x08
#define REG_PENDING1 0x54

/**
 * @membase: Base address of the interrupt controller registers
 * @interrupt_shadows: One entry per interrupt; each value is one of:
 * - NOT_PERCPU: This interrupt is not per-cpu, so it has no shadow
 * - IS_SHADOW: This interrupt is a shadow of another per-cpu interrupt
 * - else: This is a per-cpu interrupt whose shadow is the value
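 *
 * For example, with the shadow pair <30 29> described above,
 * interrupt_shadows[30] == 29, interrupt_shadows[29] == IS_SHADOW, and
 * every other entry is NOT_PERCPU.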
 */
static struct {
	void __iomem *membase;
	u8 interrupt_shadows[IRQ_COUNT];
} econet_intc __ro_after_init;

static DEFINE_RAW_SPINLOCK(irq_lock);

/* IRQs must be disabled */
static void econet_wreg(u32 reg, u32 val, u32 mask)
{
	u32 v;

	guard(raw_spinlock)(&irq_lock);

	v = ioread32(econet_intc.membase + reg);
	v &= ~mask;
	v |= val & mask;
	iowrite32(v, econet_intc.membase + reg);
}

/* IRQs must be disabled */
static void econet_chmask(u32 hwirq, bool unmask)
{
	u32 reg, mask;
	u8 shadow;

	/*
	 * If the IRQ is a shadow, it should never be manipulated directly.
	 * It should only be masked/unmasked as a result of the "real" per-cpu
	 * irq being manipulated by a thread running on VPE#1.
	 * If it is per-cpu (has a shadow), and we're on VPE#1, the shadow is
	 * what we mask. This SoC has a single core with two VPEs, so
	 * smp_processor_id() never exceeds 1.
	 */
	shadow = econet_intc.interrupt_shadows[hwirq];
	if (WARN_ON_ONCE(shadow == IS_SHADOW))
		return;
	else if (shadow != NOT_PERCPU && smp_processor_id() == 1)
		hwirq = shadow;

	if (hwirq >= 32) {
		reg = REG_MASK1;
		mask = BIT(hwirq - 32);
	} else {
		reg = REG_MASK0;
		mask = BIT(hwirq);
	}

	econet_wreg(reg, unmask ? mask : 0, mask);
}

/* IRQs must be disabled */
static void econet_intc_mask(struct irq_data *d)
{
	econet_chmask(d->hwirq, false);
}

/* IRQs must be disabled */
static void econet_intc_unmask(struct irq_data *d)
{
	econet_chmask(d->hwirq, true);
}

static void econet_mask_all(void)
{
	/* IRQs are generally disabled during init, but guarding here makes it non-obligatory. */
	guard(irqsave)();
	econet_wreg(REG_MASK0, 0, ~0);
	econet_wreg(REG_MASK1, 0, ~0);
}

static void econet_intc_handle_pending(struct irq_domain *d, u32 pending, u32 offset)
{
	int hwirq;

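	/* Service the highest pending bit first; fls() returns its 1-based position. */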
	while (pending) {
		hwirq = fls(pending) - 1;
		generic_handle_domain_irq(d, hwirq + offset);
		pending &= ~BIT(hwirq);
	}
}

static void econet_intc_from_parent(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain;
	u32 pending0, pending1;

	chained_irq_enter(chip, desc);

	pending0 = ioread32(econet_intc.membase + REG_PENDING0);
	pending1 = ioread32(econet_intc.membase + REG_PENDING1);

	if (unlikely(!(pending0 | pending1))) {
		spurious_interrupt();
	} else {
		domain = irq_desc_get_handler_data(desc);
		econet_intc_handle_pending(domain, pending0, 0);
		econet_intc_handle_pending(domain, pending1, 32);
	}

	chained_irq_exit(chip, desc);
}

static const struct irq_chip econet_irq_chip;

static int econet_intc_map(struct irq_domain *d, u32 irq, irq_hw_number_t hwirq)
{
	int ret;

	if (hwirq >= IRQ_COUNT) {
		pr_err("%s: hwirq %lu out of range\n", __func__, hwirq);
		return -EINVAL;
	} else if (econet_intc.interrupt_shadows[hwirq] == IS_SHADOW) {
		pr_err("%s: can't map hwirq %lu, it is a shadow interrupt\n", __func__, hwirq);
		return -EINVAL;
	}

	if (econet_intc.interrupt_shadows[hwirq] == NOT_PERCPU) {
		irq_set_chip_and_handler(irq, &econet_irq_chip, handle_level_irq);
	} else {
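		/*
		 * Per-CPU IRQs are requested with request_percpu_irq() and
		 * must be enabled on each VPE individually via
		 * enable_percpu_irq().
		 */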
		irq_set_chip_and_handler(irq, &econet_irq_chip, handle_percpu_devid_irq);
		ret = irq_set_percpu_devid(irq);
		if (ret)
			pr_warn("%s: Failed irq_set_percpu_devid for %u: %d\n", d->name, irq, ret);
	}

	irq_set_chip_data(irq, NULL);
	return 0;
}

static const struct irq_chip econet_irq_chip = {
	.name = "en751221-intc",
	.irq_unmask = econet_intc_unmask,
	.irq_mask = econet_intc_mask,
	.irq_mask_ack = econet_intc_mask,
};

static const struct irq_domain_ops econet_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = econet_intc_map,
};

static int __init get_shadow_interrupts(struct device_node *node)
{
	const char *field = "econet,shadow-interrupts";
	int num_shadows;

	num_shadows = of_property_count_u32_elems(node, field);

	memset(econet_intc.interrupt_shadows, NOT_PERCPU,
	       sizeof(econet_intc.interrupt_shadows));

	if (num_shadows <= 0) {
		return 0;
	} else if (num_shadows % 2) {
		pr_err("%pOF: %s count is odd, ignoring\n", node, field);
		return 0;
	}

	u32 *shadows __free(kfree) = kmalloc_array(num_shadows, sizeof(u32), GFP_KERNEL);
	if (!shadows)
		return -ENOMEM;

	if (of_property_read_u32_array(node, field, shadows, num_shadows)) {
		pr_err("%pOF: Failed to read %s\n", node, field);
		return -EINVAL;
	}

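	/* Each pair is <target shadow>; validate both ends, then link them in both directions. */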
	for (int i = 0; i < num_shadows; i += 2) {
		u32 shadow = shadows[i + 1];
		u32 target = shadows[i];

		if (shadow >= IRQ_COUNT) {
			pr_err("%pOF: %s[%d] shadow(%d) out of range\n",
			       node, field, i + 1, shadow);
			continue;
		}

		if (target >= IRQ_COUNT) {
			pr_err("%pOF: %s[%d] target(%d) out of range\n", node, field, i, target);
			continue;
		}

		if (econet_intc.interrupt_shadows[target] != NOT_PERCPU) {
			pr_err("%pOF: %s[%d] target(%d) already has a shadow\n",
			       node, field, i, target);
			continue;
		}

		if (econet_intc.interrupt_shadows[shadow] != NOT_PERCPU) {
			pr_err("%pOF: %s[%d] shadow(%d) already has a target\n",
			       node, field, i + 1, shadow);
			continue;
		}

		econet_intc.interrupt_shadows[target] = shadow;
		econet_intc.interrupt_shadows[shadow] = IS_SHADOW;
	}

	return 0;
}

static int __init econet_intc_of_init(struct device_node *node, struct device_node *parent)
{
	struct irq_domain *domain;
	struct resource res;
	int ret, irq;

	ret = get_shadow_interrupts(node);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		pr_err("%pOF: DT: Failed to get IRQ from 'interrupts'\n", node);
		return -EINVAL;
	}

	if (of_address_to_resource(node, 0, &res)) {
		pr_err("%pOF: DT: Failed to get 'reg'\n", node);
		ret = -EINVAL;
		goto err_dispose_mapping;
	}

	if (!request_mem_region(res.start, resource_size(&res), res.name)) {
		pr_err("%pOF: Failed to request memory\n", node);
		ret = -EBUSY;
		goto err_dispose_mapping;
	}

	econet_intc.membase = ioremap(res.start, resource_size(&res));
	if (!econet_intc.membase) {
		pr_err("%pOF: Failed to remap membase\n", node);
		ret = -ENOMEM;
		goto err_release;
	}

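	/* Start with every source masked; the core unmasks lines as drivers request them. */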
	econet_mask_all();

	domain = irq_domain_add_linear(node, IRQ_COUNT, &econet_domain_ops, NULL);
	if (!domain) {
		pr_err("%pOF: Failed to add irqdomain\n", node);
		ret = -ENOMEM;
		goto err_unmap;
	}

	irq_set_chained_handler_and_data(irq, econet_intc_from_parent, domain);

	return 0;

err_unmap:
	iounmap(econet_intc.membase);
err_release:
	release_mem_region(res.start, resource_size(&res));
err_dispose_mapping:
	irq_dispose_mapping(irq);
	return ret;
}

IRQCHIP_DECLARE(econet_en751221_intc, "econet,en751221-intc", econet_intc_of_init);