
Commit 3e36c82

Thangaraj Samynathan authored and broonie committed
spi: spi-pci1xxxx: Add support for per-instance DMA interrupt vectors
Add support for dedicated DMA interrupt vectors for each SPI hardware
instance in the pci1xxxx driver. This improves scalability and interrupt
handling for systems using multiple SPI instances with DMA.

Introduce a constant `NUM_VEC_PER_INST` to define the number of IRQ
vectors per instance (main, DMA write, DMA read). Update the
`pci1xxxx_spi_internal` structure to use an IRQ array.

Refactor IRQ allocation and DMA initialization logic:
- Assign separate IRQ vectors for DMA read and write interrupts.
- Split the original DMA ISR into two handlers: `pci1xxxx_spi_isr_dma_rd`
  and `pci1xxxx_spi_isr_dma_wr`.
- Configure IMWR registers per instance using cached MSI data.
- Move DMA register configuration into a new helper function,
  `pci1xxxx_spi_dma_config()`.

Invoke the DMA initialization after all instances are configured to
ensure correct IRQ vector mapping.

Signed-off-by: Thangaraj Samynathan <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Mark Brown <[email protected]>
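For reference, the vector layout this change establishes can be sketched with a small standalone C program (illustrative only, not part of the driver; the instance count and printed output are made up): with N instances, MSI vectors 0..N-1 carry the per-instance main SPI interrupts, followed by one (DMA write, DMA read) pair per instance, for N * NUM_VEC_PER_INST vectors in total.

/* Standalone sketch of the per-instance MSI vector layout implied by the
 * patch. Assumes hw_inst_cnt = 2 purely for demonstration. */
#include <stdio.h>

#define NUM_VEC_PER_INST 3

int main(void)
{
	int hw_inst_cnt = 2;                            /* e.g. two SPI instances */
	int num_vector = hw_inst_cnt * NUM_VEC_PER_INST;

	for (int i = 0; i < hw_inst_cnt; i++) {
		int main_vec = i;                       /* irq[0]: main SPI interrupt */
		int dma_wr   = hw_inst_cnt + 2 * i;     /* irq[1]: DMA write done/abort */
		int dma_rd   = hw_inst_cnt + 2 * i + 1; /* irq[2]: DMA read done/abort */

		printf("instance %d: main=%d dma_wr=%d dma_rd=%d (of %d vectors)\n",
		       i, main_vec, dma_wr, dma_rd, num_vector);
	}
	return 0;
}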
1 parent 1256eb4 commit 3e36c82

1 file changed (+144 -68 lines)


drivers/spi/spi-pci1xxxx.c

Lines changed: 144 additions & 68 deletions
@@ -97,8 +97,8 @@
 #define SPI_DMA_CH1_DONE_INT BIT(1)
 #define SPI_DMA_CH0_ABORT_INT BIT(16)
 #define SPI_DMA_CH1_ABORT_INT BIT(17)
-#define SPI_DMA_DONE_INT_MASK (SPI_DMA_CH0_DONE_INT | SPI_DMA_CH1_DONE_INT)
-#define SPI_DMA_ABORT_INT_MASK (SPI_DMA_CH0_ABORT_INT | SPI_DMA_CH1_ABORT_INT)
+#define SPI_DMA_DONE_INT_MASK(x) (1 << (x))
+#define SPI_DMA_ABORT_INT_MASK(x) (1 << (16 + (x)))
 #define DMA_CH_CONTROL_LIE BIT(3)
 #define DMA_CH_CONTROL_RIE BIT(4)
 #define DMA_INTR_EN (DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
@@ -132,10 +132,12 @@
 #define SPI_SUSPEND_CONFIG 0x101
 #define SPI_RESUME_CONFIG 0x203
 
+#define NUM_VEC_PER_INST 3
+
 struct pci1xxxx_spi_internal {
 	u8 hw_inst;
 	u8 clkdiv;
-	int irq;
+	int irq[NUM_VEC_PER_INST];
 	int mode;
 	bool spi_xfer_in_progress;
 	void *rx_buf;
@@ -193,6 +195,9 @@ static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, pci1xxxx_spi_pci_id_table);
 
+static irqreturn_t pci1xxxx_spi_isr_dma_rd(int irq, void *dev);
+static irqreturn_t pci1xxxx_spi_isr_dma_wr(int irq, void *dev);
+
 static int pci1xxxx_set_sys_lock(struct pci1xxxx_spi *par)
 {
 	writel(SPI_SYSLOCK, par->reg_base + SPI_SYSLOCK_REG);
@@ -213,13 +218,16 @@ static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
 	writel(0x0, par->reg_base + SPI_SYSLOCK_REG);
 }
 
-static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
+static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int hw_inst, int num_vector)
 {
 	struct pci_dev *pdev = spi_bus->dev;
 	u32 pf_num;
 	u32 regval;
 	int ret;
 
+	if (num_vector != hw_inst * NUM_VEC_PER_INST)
+		return -EOPNOTSUPP;
+
 	/*
 	 * DEV REV Registers is a system register, HW Syslock bit
 	 * should be acquired before accessing the register
@@ -247,16 +255,6 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
 	if (spi_bus->dev_rev < 0xC0 || pf_num)
 		return -EOPNOTSUPP;
 
-	/*
-	 * DMA Supported only with MSI Interrupts
-	 * One of the SPI instance's MSI vector address and data
-	 * is used for DMA Interrupt
-	 */
-	if (!irq_get_msi_desc(irq)) {
-		dev_warn(&pdev->dev, "Error MSI Interrupt not supported, will operate in PIO mode\n");
-		return -EOPNOTSUPP;
-	}
-
 	spi_bus->dma_offset_bar = pcim_iomap(pdev, 2, pci_resource_len(pdev, 2));
 	if (!spi_bus->dma_offset_bar) {
 		dev_warn(&pdev->dev, "Error failed to map dma bar, will operate in PIO mode\n");
@@ -273,29 +271,90 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
 	return 0;
 }
 
-static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int irq)
+static void pci1xxxx_spi_dma_config(struct pci1xxxx_spi *spi_bus)
 {
+	struct pci1xxxx_spi_internal *spi_sub_ptr;
+	u8 iter, irq_index;
 	struct msi_msg msi;
+	u32 regval;
+	u16 data;
+
+	irq_index = spi_bus->total_hw_instances;
+	for (iter = 0; iter < spi_bus->total_hw_instances; iter++) {
+		spi_sub_ptr = spi_bus->spi_int[iter];
+		get_cached_msi_msg(spi_sub_ptr->irq[1], &msi);
+		if (iter == 0) {
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WDONE_HIGH);
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WABORT_HIGH);
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RDONE_HIGH);
+			writel(msi.address_hi, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RABORT_HIGH);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WDONE_LOW);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_WABORT_LOW);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RDONE_LOW);
+			writel(msi.address_lo, spi_bus->dma_offset_bar +
+			       SPI_DMA_INTR_IMWR_RABORT_LOW);
+			writel(0, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+			writel(0, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+		}
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+		data = msi.data + irq_index;
+		writel((regval | (data << (iter * 16))), spi_bus->dma_offset_bar +
+		       SPI_DMA_INTR_WR_IMWR_DATA);
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
+		irq_index++;
+
+		data = msi.data + irq_index;
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+		writel(regval | (data << (iter * 16)), spi_bus->dma_offset_bar +
+		       SPI_DMA_INTR_RD_IMWR_DATA);
+		regval = readl(spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+		irq_index++;
+	}
+}
+
+static int pci1xxxx_spi_dma_init(struct pci1xxxx_spi *spi_bus, int hw_inst, int num_vector)
+{
+	struct pci1xxxx_spi_internal *spi_sub_ptr;
+	u8 iter, irq_index;
 	int ret;
 
-	ret = pci1xxxx_check_spi_can_dma(spi_bus, irq);
+	irq_index = hw_inst;
+	ret = pci1xxxx_check_spi_can_dma(spi_bus, hw_inst, num_vector);
 	if (ret)
 		return ret;
 
 	spin_lock_init(&spi_bus->dma_reg_lock);
-	get_cached_msi_msg(irq, &msi);
 	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN);
 	writel(SPI_DMA_ENGINE_EN, spi_bus->dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH);
-	writel(msi.address_hi, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW);
-	writel(msi.address_lo, spi_bus->dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW);
-	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA);
-	writel(msi.data, spi_bus->dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA);
+
+	for (iter = 0; iter < hw_inst; iter++) {
+		spi_sub_ptr = spi_bus->spi_int[iter];
+		spi_sub_ptr->irq[1] = pci_irq_vector(spi_bus->dev, irq_index);
+		ret = devm_request_irq(&spi_bus->dev->dev, spi_sub_ptr->irq[1],
+				       pci1xxxx_spi_isr_dma_wr, PCI1XXXX_IRQ_FLAGS,
+				       pci_name(spi_bus->dev), spi_sub_ptr);
+		if (ret < 0)
+			return ret;
+
+		irq_index++;
+
+		spi_sub_ptr->irq[2] = pci_irq_vector(spi_bus->dev, irq_index);
+		ret = devm_request_irq(&spi_bus->dev->dev, spi_sub_ptr->irq[2],
+				       pci1xxxx_spi_isr_dma_rd, PCI1XXXX_IRQ_FLAGS,
+				       pci_name(spi_bus->dev), spi_sub_ptr);
+		if (ret < 0)
+			return ret;
+
+		irq_index++;
+	}
+	pci1xxxx_spi_dma_config(spi_bus);
 	dma_set_max_seg_size(&spi_bus->dev->dev, PCI1XXXX_SPI_BUFFER_SIZE);
 	spi_bus->can_dma = true;
 	return 0;
@@ -401,13 +460,13 @@ static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
 	writel(regval, par->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
 }
 
-static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p, u8 hw_inst)
+static void pci1xxxx_start_spi_xfer(struct pci1xxxx_spi_internal *p)
 {
 	u32 regval;
 
-	regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+	regval = readl(p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
 	regval |= SPI_MST_CTL_GO;
-	writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(hw_inst));
+	writel(regval, p->parent->reg_base + SPI_MST_CTL_REG_OFFSET(p->hw_inst));
 }
 
 static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
@@ -451,7 +510,7 @@ static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
 				    &tx_buf[bytes_transfered], len);
 			bytes_transfered += len;
 			pci1xxxx_spi_setup(par, p->hw_inst, spi->mode, clkdiv, len);
-			pci1xxxx_start_spi_xfer(p, p->hw_inst);
+			pci1xxxx_start_spi_xfer(p);
 
 			/* Wait for DMA_TERM interrupt */
 			result = wait_for_completion_timeout(&p->spi_xfer_done,
@@ -627,7 +686,7 @@ static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p
 	}
 }
 
-static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+static irqreturn_t pci1xxxx_spi_isr_dma_rd(int irq, void *dev)
 {
 	struct pci1xxxx_spi_internal *p = dev;
 	irqreturn_t spi_int_fired = IRQ_NONE;
@@ -637,36 +696,53 @@ static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
 	spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
 	/* Clear the DMA RD INT and start spi xfer*/
 	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_RD_STS);
-	if (regval & SPI_DMA_DONE_INT_MASK) {
-		if (regval & SPI_DMA_CH0_DONE_INT)
-			pci1xxxx_start_spi_xfer(p, SPI0);
-		if (regval & SPI_DMA_CH1_DONE_INT)
-			pci1xxxx_start_spi_xfer(p, SPI1);
-		spi_int_fired = IRQ_HANDLED;
-	}
-	if (regval & SPI_DMA_ABORT_INT_MASK) {
-		p->dma_aborted_rd = true;
-		spi_int_fired = IRQ_HANDLED;
+	if (regval) {
+		if (regval & SPI_DMA_DONE_INT_MASK(p->hw_inst)) {
+			pci1xxxx_start_spi_xfer(p);
+			spi_int_fired = IRQ_HANDLED;
+		}
+		if (regval & SPI_DMA_ABORT_INT_MASK(p->hw_inst)) {
+			p->dma_aborted_rd = true;
+			spi_int_fired = IRQ_HANDLED;
+		}
 	}
-	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
+	writel((SPI_DMA_DONE_INT_MASK(p->hw_inst) | SPI_DMA_ABORT_INT_MASK(p->hw_inst)),
+	       p->parent->dma_offset_bar + SPI_DMA_INTR_RD_CLR);
+	spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+	return spi_int_fired;
+}
 
+static irqreturn_t pci1xxxx_spi_isr_dma_wr(int irq, void *dev)
+{
+	struct pci1xxxx_spi_internal *p = dev;
+	irqreturn_t spi_int_fired = IRQ_NONE;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&p->parent->dma_reg_lock, flags);
 	/* Clear the DMA WR INT */
 	regval = readl(p->parent->dma_offset_bar + SPI_DMA_INTR_WR_STS);
-	if (regval & SPI_DMA_DONE_INT_MASK) {
-		if (regval & SPI_DMA_CH0_DONE_INT)
-			pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI0]);
-
-		if (regval & SPI_DMA_CH1_DONE_INT)
-			pci1xxxx_spi_setup_next_dma_transfer(p->parent->spi_int[SPI1]);
-
-		spi_int_fired = IRQ_HANDLED;
-	}
-	if (regval & SPI_DMA_ABORT_INT_MASK) {
-		p->dma_aborted_wr = true;
-		spi_int_fired = IRQ_HANDLED;
+	if (regval) {
+		if (regval & SPI_DMA_DONE_INT_MASK(p->hw_inst)) {
+			pci1xxxx_spi_setup_next_dma_transfer(p);
+			spi_int_fired = IRQ_HANDLED;
+		}
+		if (regval & SPI_DMA_ABORT_INT_MASK(p->hw_inst)) {
+			p->dma_aborted_wr = true;
+			spi_int_fired = IRQ_HANDLED;
+		}
 	}
-	writel(regval, p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
+	writel((SPI_DMA_DONE_INT_MASK(p->hw_inst) | SPI_DMA_ABORT_INT_MASK(p->hw_inst)),
+	       p->parent->dma_offset_bar + SPI_DMA_INTR_WR_CLR);
 	spin_unlock_irqrestore(&p->parent->dma_reg_lock, flags);
+	return spi_int_fired;
+}
+
+static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
+{
+	struct pci1xxxx_spi_internal *p = dev;
+	irqreturn_t spi_int_fired = IRQ_NONE;
+	u32 regval;
 
 	/* Clear the SPI GO_BIT Interrupt */
 	regval = readl(p->parent->reg_base + SPI_MST_EVENT_REG_OFFSET(p->hw_inst));
@@ -764,7 +840,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 	if (!spi_bus->reg_base)
 		return -EINVAL;
 
-	num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
+	num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt * NUM_VEC_PER_INST,
 					   PCI_IRQ_INTX | PCI_IRQ_MSI);
 	if (num_vector < 0) {
 		dev_err(&pdev->dev, "Error allocating MSI vectors\n");
@@ -778,27 +854,23 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 			regval &= ~SPI_INTR;
 			writel(regval, spi_bus->reg_base +
 			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
-			spi_sub_ptr->irq = pci_irq_vector(pdev, 0);
+			spi_sub_ptr->irq[0] = pci_irq_vector(pdev, 0);
 
 			if (num_vector >= hw_inst_cnt)
-				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0],
 						       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
 						       pci_name(pdev), spi_sub_ptr);
 			else
-				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0],
 						       pci1xxxx_spi_shared_isr,
 						       PCI1XXXX_IRQ_FLAGS | IRQF_SHARED,
 						       pci_name(pdev), spi_bus);
 			if (ret < 0) {
 				dev_err(&pdev->dev, "Unable to request irq : %d",
-					spi_sub_ptr->irq);
+					spi_sub_ptr->irq[0]);
 				return -ENODEV;
 			}
 
-			ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq);
-			if (ret && ret != -EOPNOTSUPP)
-				return ret;
-
 			/* This register is only applicable for 1st instance */
 			regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0));
 			if (!only_sec_inst)
@@ -820,13 +892,13 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 			writel(regval, spi_bus->reg_base +
 			       SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst));
 			if (num_vector >= hw_inst_cnt) {
-				spi_sub_ptr->irq = pci_irq_vector(pdev, iter);
-				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq,
+				spi_sub_ptr->irq[0] = pci_irq_vector(pdev, iter);
+				ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq[0],
 						       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
 						       pci_name(pdev), spi_sub_ptr);
 				if (ret < 0) {
 					dev_err(&pdev->dev, "Unable to request irq : %d",
-						spi_sub_ptr->irq);
+						spi_sub_ptr->irq[0]);
 					return -ENODEV;
 				}
 			}
@@ -849,6 +921,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
 		if (ret)
 			return ret;
 	}
+	ret = pci1xxxx_spi_dma_init(spi_bus, hw_inst_cnt, num_vector);
+	if (ret && ret != -EOPNOTSUPP)
+		return ret;
+
 	pci_set_drvdata(pdev, spi_bus);
 
 	return 0;
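A side note on the pci1xxxx_spi_dma_config() hunk above: each 32-bit IMWR DATA register carries one 16-bit MSI data word per DMA channel, shifted by 16 bits per instance. The following standalone C sketch shows only that packing step; the data values are made up for the demo, whereas the driver derives them from the cached MSI message of each instance's DMA write vector.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: pack one 16-bit MSI data word per DMA channel into a
 * 32-bit IMWR DATA value, mirroring "regval | (data << (iter * 16))" in
 * pci1xxxx_spi_dma_config(). */
static uint32_t imwr_pack(uint32_t regval, uint16_t data, unsigned int iter)
{
	return regval | ((uint32_t)data << (iter * 16));
}

int main(void)
{
	uint32_t wr_imwr = 0;
	uint16_t data_inst0 = 0x0042;   /* hypothetical MSI data, instance 0 */
	uint16_t data_inst1 = 0x0044;   /* hypothetical MSI data, instance 1 */

	wr_imwr = imwr_pack(wr_imwr, data_inst0, 0);
	wr_imwr = imwr_pack(wr_imwr, data_inst1, 1);

	/* Prints "WR IMWR DATA = 0x00440042" */
	printf("WR IMWR DATA = 0x%08x\n", (unsigned int)wr_imwr);
	return 0;
}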
