/* Per-channel DMA interrupt status bits: DONE in the low half-word,
 * ABORT starting at bit 16.
 */
#define SPI_DMA_CH1_DONE_INT		BIT(1)
#define SPI_DMA_CH0_ABORT_INT		BIT(16)
#define SPI_DMA_CH1_ABORT_INT		BIT(17)

/*
 * Per-instance mask helpers. NOTE: there must be no space between the
 * macro name and the parameter list — "MASK ( x )" would define an
 * object-like macro expanding to "( x ) (1 << (x))" and break every
 * use site. Unsigned literal avoids shifting into the sign bit.
 */
#define SPI_DMA_DONE_INT_MASK(x)	(1U << (x))
#define SPI_DMA_ABORT_INT_MASK(x)	(1U << (16 + (x)))

#define DMA_CH_CONTROL_LIE		BIT(3)
#define DMA_CH_CONTROL_RIE		BIT(4)
#define DMA_INTR_EN			(DMA_CH_CONTROL_RIE | DMA_CH_CONTROL_LIE)
#define SPI_SUSPEND_CONFIG		0x101
#define SPI_RESUME_CONFIG		0x203

/*
 * Each SPI instance consumes three MSI vectors: one for the SPI
 * transfer-done interrupt and one each for the DMA write and DMA
 * read completion interrupts.
 */
#define NUM_VEC_PER_INST		3
135
137
struct pci1xxxx_spi_internal {
136
138
u8 hw_inst ;
137
139
u8 clkdiv ;
138
- int irq ;
140
+ int irq [ NUM_VEC_PER_INST ] ;
139
141
int mode ;
140
142
bool spi_xfer_in_progress ;
141
143
void * rx_buf ;
@@ -193,6 +195,9 @@ static const struct pci_device_id pci1xxxx_spi_pci_id_table[] = {
193
195
194
196
MODULE_DEVICE_TABLE (pci , pci1xxxx_spi_pci_id_table );
195
197
198
+ static irqreturn_t pci1xxxx_spi_isr_dma_rd (int irq , void * dev );
199
+ static irqreturn_t pci1xxxx_spi_isr_dma_wr (int irq , void * dev );
200
+
196
201
static int pci1xxxx_set_sys_lock (struct pci1xxxx_spi * par )
197
202
{
198
203
writel (SPI_SYSLOCK , par -> reg_base + SPI_SYSLOCK_REG );
@@ -213,13 +218,16 @@ static void pci1xxxx_release_sys_lock(struct pci1xxxx_spi *par)
213
218
writel (0x0 , par -> reg_base + SPI_SYSLOCK_REG );
214
219
}
215
220
216
- static int pci1xxxx_check_spi_can_dma (struct pci1xxxx_spi * spi_bus , int irq )
221
+ static int pci1xxxx_check_spi_can_dma (struct pci1xxxx_spi * spi_bus , int hw_inst , int num_vector )
217
222
{
218
223
struct pci_dev * pdev = spi_bus -> dev ;
219
224
u32 pf_num ;
220
225
u32 regval ;
221
226
int ret ;
222
227
228
+ if (num_vector != hw_inst * NUM_VEC_PER_INST )
229
+ return - EOPNOTSUPP ;
230
+
223
231
/*
224
232
* DEV REV Registers is a system register, HW Syslock bit
225
233
* should be acquired before accessing the register
@@ -247,16 +255,6 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
247
255
if (spi_bus -> dev_rev < 0xC0 || pf_num )
248
256
return - EOPNOTSUPP ;
249
257
250
- /*
251
- * DMA Supported only with MSI Interrupts
252
- * One of the SPI instance's MSI vector address and data
253
- * is used for DMA Interrupt
254
- */
255
- if (!irq_get_msi_desc (irq )) {
256
- dev_warn (& pdev -> dev , "Error MSI Interrupt not supported, will operate in PIO mode\n" );
257
- return - EOPNOTSUPP ;
258
- }
259
-
260
258
spi_bus -> dma_offset_bar = pcim_iomap (pdev , 2 , pci_resource_len (pdev , 2 ));
261
259
if (!spi_bus -> dma_offset_bar ) {
262
260
dev_warn (& pdev -> dev , "Error failed to map dma bar, will operate in PIO mode\n" );
@@ -273,29 +271,90 @@ static int pci1xxxx_check_spi_can_dma(struct pci1xxxx_spi *spi_bus, int irq)
273
271
return 0 ;
274
272
}
275
273
276
- static int pci1xxxx_spi_dma_init (struct pci1xxxx_spi * spi_bus , int irq )
274
+ static void pci1xxxx_spi_dma_config (struct pci1xxxx_spi * spi_bus )
277
275
{
276
+ struct pci1xxxx_spi_internal * spi_sub_ptr ;
277
+ u8 iter , irq_index ;
278
278
struct msi_msg msi ;
279
+ u32 regval ;
280
+ u16 data ;
281
+
282
+ irq_index = spi_bus -> total_hw_instances ;
283
+ for (iter = 0 ; iter < spi_bus -> total_hw_instances ; iter ++ ) {
284
+ spi_sub_ptr = spi_bus -> spi_int [iter ];
285
+ get_cached_msi_msg (spi_sub_ptr -> irq [1 ], & msi );
286
+ if (iter == 0 ) {
287
+ writel (msi .address_hi , spi_bus -> dma_offset_bar +
288
+ SPI_DMA_INTR_IMWR_WDONE_HIGH );
289
+ writel (msi .address_hi , spi_bus -> dma_offset_bar +
290
+ SPI_DMA_INTR_IMWR_WABORT_HIGH );
291
+ writel (msi .address_hi , spi_bus -> dma_offset_bar +
292
+ SPI_DMA_INTR_IMWR_RDONE_HIGH );
293
+ writel (msi .address_hi , spi_bus -> dma_offset_bar +
294
+ SPI_DMA_INTR_IMWR_RABORT_HIGH );
295
+ writel (msi .address_lo , spi_bus -> dma_offset_bar +
296
+ SPI_DMA_INTR_IMWR_WDONE_LOW );
297
+ writel (msi .address_lo , spi_bus -> dma_offset_bar +
298
+ SPI_DMA_INTR_IMWR_WABORT_LOW );
299
+ writel (msi .address_lo , spi_bus -> dma_offset_bar +
300
+ SPI_DMA_INTR_IMWR_RDONE_LOW );
301
+ writel (msi .address_lo , spi_bus -> dma_offset_bar +
302
+ SPI_DMA_INTR_IMWR_RABORT_LOW );
303
+ writel (0 , spi_bus -> dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA );
304
+ writel (0 , spi_bus -> dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA );
305
+ }
306
+ regval = readl (spi_bus -> dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA );
307
+ data = msi .data + irq_index ;
308
+ writel ((regval | (data << (iter * 16 ))), spi_bus -> dma_offset_bar +
309
+ SPI_DMA_INTR_WR_IMWR_DATA );
310
+ regval = readl (spi_bus -> dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA );
311
+ irq_index ++ ;
312
+
313
+ data = msi .data + irq_index ;
314
+ regval = readl (spi_bus -> dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA );
315
+ writel (regval | (data << (iter * 16 )), spi_bus -> dma_offset_bar +
316
+ SPI_DMA_INTR_RD_IMWR_DATA );
317
+ regval = readl (spi_bus -> dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA );
318
+ irq_index ++ ;
319
+ }
320
+ }
321
+
322
+ static int pci1xxxx_spi_dma_init (struct pci1xxxx_spi * spi_bus , int hw_inst , int num_vector )
323
+ {
324
+ struct pci1xxxx_spi_internal * spi_sub_ptr ;
325
+ u8 iter , irq_index ;
279
326
int ret ;
280
327
281
- ret = pci1xxxx_check_spi_can_dma (spi_bus , irq );
328
+ irq_index = hw_inst ;
329
+ ret = pci1xxxx_check_spi_can_dma (spi_bus , hw_inst , num_vector );
282
330
if (ret )
283
331
return ret ;
284
332
285
333
spin_lock_init (& spi_bus -> dma_reg_lock );
286
- get_cached_msi_msg (irq , & msi );
287
334
writel (SPI_DMA_ENGINE_EN , spi_bus -> dma_offset_bar + SPI_DMA_GLOBAL_WR_ENGINE_EN );
288
335
writel (SPI_DMA_ENGINE_EN , spi_bus -> dma_offset_bar + SPI_DMA_GLOBAL_RD_ENGINE_EN );
289
- writel (msi .address_hi , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_HIGH );
290
- writel (msi .address_hi , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_HIGH );
291
- writel (msi .address_hi , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_HIGH );
292
- writel (msi .address_hi , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_HIGH );
293
- writel (msi .address_lo , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_WDONE_LOW );
294
- writel (msi .address_lo , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_WABORT_LOW );
295
- writel (msi .address_lo , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_RDONE_LOW );
296
- writel (msi .address_lo , spi_bus -> dma_offset_bar + SPI_DMA_INTR_IMWR_RABORT_LOW );
297
- writel (msi .data , spi_bus -> dma_offset_bar + SPI_DMA_INTR_WR_IMWR_DATA );
298
- writel (msi .data , spi_bus -> dma_offset_bar + SPI_DMA_INTR_RD_IMWR_DATA );
336
+
337
+ for (iter = 0 ; iter < hw_inst ; iter ++ ) {
338
+ spi_sub_ptr = spi_bus -> spi_int [iter ];
339
+ spi_sub_ptr -> irq [1 ] = pci_irq_vector (spi_bus -> dev , irq_index );
340
+ ret = devm_request_irq (& spi_bus -> dev -> dev , spi_sub_ptr -> irq [1 ],
341
+ pci1xxxx_spi_isr_dma_wr , PCI1XXXX_IRQ_FLAGS ,
342
+ pci_name (spi_bus -> dev ), spi_sub_ptr );
343
+ if (ret < 0 )
344
+ return ret ;
345
+
346
+ irq_index ++ ;
347
+
348
+ spi_sub_ptr -> irq [2 ] = pci_irq_vector (spi_bus -> dev , irq_index );
349
+ ret = devm_request_irq (& spi_bus -> dev -> dev , spi_sub_ptr -> irq [2 ],
350
+ pci1xxxx_spi_isr_dma_rd , PCI1XXXX_IRQ_FLAGS ,
351
+ pci_name (spi_bus -> dev ), spi_sub_ptr );
352
+ if (ret < 0 )
353
+ return ret ;
354
+
355
+ irq_index ++ ;
356
+ }
357
+ pci1xxxx_spi_dma_config (spi_bus );
299
358
dma_set_max_seg_size (& spi_bus -> dev -> dev , PCI1XXXX_SPI_BUFFER_SIZE );
300
359
spi_bus -> can_dma = true;
301
360
return 0 ;
@@ -401,13 +460,13 @@ static void pci1xxxx_spi_setup(struct pci1xxxx_spi *par, u8 hw_inst, u32 mode,
401
460
writel (regval , par -> reg_base + SPI_MST_CTL_REG_OFFSET (hw_inst ));
402
461
}
403
462
404
- static void pci1xxxx_start_spi_xfer (struct pci1xxxx_spi_internal * p , u8 hw_inst )
463
+ static void pci1xxxx_start_spi_xfer (struct pci1xxxx_spi_internal * p )
405
464
{
406
465
u32 regval ;
407
466
408
- regval = readl (p -> parent -> reg_base + SPI_MST_CTL_REG_OFFSET (hw_inst ));
467
+ regval = readl (p -> parent -> reg_base + SPI_MST_CTL_REG_OFFSET (p -> hw_inst ));
409
468
regval |= SPI_MST_CTL_GO ;
410
- writel (regval , p -> parent -> reg_base + SPI_MST_CTL_REG_OFFSET (hw_inst ));
469
+ writel (regval , p -> parent -> reg_base + SPI_MST_CTL_REG_OFFSET (p -> hw_inst ));
411
470
}
412
471
413
472
static int pci1xxxx_spi_transfer_with_io (struct spi_controller * spi_ctlr ,
@@ -451,7 +510,7 @@ static int pci1xxxx_spi_transfer_with_io(struct spi_controller *spi_ctlr,
451
510
& tx_buf [bytes_transfered ], len );
452
511
bytes_transfered += len ;
453
512
pci1xxxx_spi_setup (par , p -> hw_inst , spi -> mode , clkdiv , len );
454
- pci1xxxx_start_spi_xfer (p , p -> hw_inst );
513
+ pci1xxxx_start_spi_xfer (p );
455
514
456
515
/* Wait for DMA_TERM interrupt */
457
516
result = wait_for_completion_timeout (& p -> spi_xfer_done ,
@@ -627,7 +686,7 @@ static void pci1xxxx_spi_setup_next_dma_transfer(struct pci1xxxx_spi_internal *p
627
686
}
628
687
}
629
688
630
- static irqreturn_t pci1xxxx_spi_isr_dma (int irq , void * dev )
689
+ static irqreturn_t pci1xxxx_spi_isr_dma_rd (int irq , void * dev )
631
690
{
632
691
struct pci1xxxx_spi_internal * p = dev ;
633
692
irqreturn_t spi_int_fired = IRQ_NONE ;
@@ -637,36 +696,53 @@ static irqreturn_t pci1xxxx_spi_isr_dma(int irq, void *dev)
637
696
spin_lock_irqsave (& p -> parent -> dma_reg_lock , flags );
638
697
/* Clear the DMA RD INT and start spi xfer*/
639
698
regval = readl (p -> parent -> dma_offset_bar + SPI_DMA_INTR_RD_STS );
640
- if (regval & SPI_DMA_DONE_INT_MASK ) {
641
- if (regval & SPI_DMA_CH0_DONE_INT )
642
- pci1xxxx_start_spi_xfer (p , SPI0 );
643
- if (regval & SPI_DMA_CH1_DONE_INT )
644
- pci1xxxx_start_spi_xfer (p , SPI1 );
645
- spi_int_fired = IRQ_HANDLED ;
646
- }
647
- if (regval & SPI_DMA_ABORT_INT_MASK ) {
648
- p -> dma_aborted_rd = true;
649
- spi_int_fired = IRQ_HANDLED ;
699
+ if (regval ) {
700
+ if (regval & SPI_DMA_DONE_INT_MASK (p -> hw_inst )) {
701
+ pci1xxxx_start_spi_xfer (p );
702
+ spi_int_fired = IRQ_HANDLED ;
703
+ }
704
+ if (regval & SPI_DMA_ABORT_INT_MASK (p -> hw_inst )) {
705
+ p -> dma_aborted_rd = true;
706
+ spi_int_fired = IRQ_HANDLED ;
707
+ }
650
708
}
651
- writel (regval , p -> parent -> dma_offset_bar + SPI_DMA_INTR_RD_CLR );
709
+ writel ((SPI_DMA_DONE_INT_MASK (p -> hw_inst ) | SPI_DMA_ABORT_INT_MASK (p -> hw_inst )),
710
+ p -> parent -> dma_offset_bar + SPI_DMA_INTR_RD_CLR );
711
+ spin_unlock_irqrestore (& p -> parent -> dma_reg_lock , flags );
712
+ return spi_int_fired ;
713
+ }
652
714
715
+ static irqreturn_t pci1xxxx_spi_isr_dma_wr (int irq , void * dev )
716
+ {
717
+ struct pci1xxxx_spi_internal * p = dev ;
718
+ irqreturn_t spi_int_fired = IRQ_NONE ;
719
+ unsigned long flags ;
720
+ u32 regval ;
721
+
722
+ spin_lock_irqsave (& p -> parent -> dma_reg_lock , flags );
653
723
/* Clear the DMA WR INT */
654
724
regval = readl (p -> parent -> dma_offset_bar + SPI_DMA_INTR_WR_STS );
655
- if (regval & SPI_DMA_DONE_INT_MASK ) {
656
- if (regval & SPI_DMA_CH0_DONE_INT )
657
- pci1xxxx_spi_setup_next_dma_transfer (p -> parent -> spi_int [SPI0 ]);
658
-
659
- if (regval & SPI_DMA_CH1_DONE_INT )
660
- pci1xxxx_spi_setup_next_dma_transfer (p -> parent -> spi_int [SPI1 ]);
661
-
662
- spi_int_fired = IRQ_HANDLED ;
663
- }
664
- if (regval & SPI_DMA_ABORT_INT_MASK ) {
665
- p -> dma_aborted_wr = true;
666
- spi_int_fired = IRQ_HANDLED ;
725
+ if (regval ) {
726
+ if (regval & SPI_DMA_DONE_INT_MASK (p -> hw_inst )) {
727
+ pci1xxxx_spi_setup_next_dma_transfer (p );
728
+ spi_int_fired = IRQ_HANDLED ;
729
+ }
730
+ if (regval & SPI_DMA_ABORT_INT_MASK (p -> hw_inst )) {
731
+ p -> dma_aborted_wr = true;
732
+ spi_int_fired = IRQ_HANDLED ;
733
+ }
667
734
}
668
- writel (regval , p -> parent -> dma_offset_bar + SPI_DMA_INTR_WR_CLR );
735
+ writel ((SPI_DMA_DONE_INT_MASK (p -> hw_inst ) | SPI_DMA_ABORT_INT_MASK (p -> hw_inst )),
736
+ p -> parent -> dma_offset_bar + SPI_DMA_INTR_WR_CLR );
669
737
spin_unlock_irqrestore (& p -> parent -> dma_reg_lock , flags );
738
+ return spi_int_fired ;
739
+ }
740
+
741
+ static irqreturn_t pci1xxxx_spi_isr_dma (int irq , void * dev )
742
+ {
743
+ struct pci1xxxx_spi_internal * p = dev ;
744
+ irqreturn_t spi_int_fired = IRQ_NONE ;
745
+ u32 regval ;
670
746
671
747
/* Clear the SPI GO_BIT Interrupt */
672
748
regval = readl (p -> parent -> reg_base + SPI_MST_EVENT_REG_OFFSET (p -> hw_inst ));
@@ -764,7 +840,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
764
840
if (!spi_bus -> reg_base )
765
841
return - EINVAL ;
766
842
767
- num_vector = pci_alloc_irq_vectors (pdev , 1 , hw_inst_cnt ,
843
+ num_vector = pci_alloc_irq_vectors (pdev , 1 , hw_inst_cnt * NUM_VEC_PER_INST ,
768
844
PCI_IRQ_INTX | PCI_IRQ_MSI );
769
845
if (num_vector < 0 ) {
770
846
dev_err (& pdev -> dev , "Error allocating MSI vectors\n" );
@@ -778,27 +854,23 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
778
854
regval &= ~SPI_INTR ;
779
855
writel (regval , spi_bus -> reg_base +
780
856
SPI_MST_EVENT_MASK_REG_OFFSET (spi_sub_ptr -> hw_inst ));
781
- spi_sub_ptr -> irq = pci_irq_vector (pdev , 0 );
857
+ spi_sub_ptr -> irq [ 0 ] = pci_irq_vector (pdev , 0 );
782
858
783
859
if (num_vector >= hw_inst_cnt )
784
- ret = devm_request_irq (& pdev -> dev , spi_sub_ptr -> irq ,
860
+ ret = devm_request_irq (& pdev -> dev , spi_sub_ptr -> irq [ 0 ] ,
785
861
pci1xxxx_spi_isr , PCI1XXXX_IRQ_FLAGS ,
786
862
pci_name (pdev ), spi_sub_ptr );
787
863
else
788
- ret = devm_request_irq (& pdev -> dev , spi_sub_ptr -> irq ,
864
+ ret = devm_request_irq (& pdev -> dev , spi_sub_ptr -> irq [ 0 ] ,
789
865
pci1xxxx_spi_shared_isr ,
790
866
PCI1XXXX_IRQ_FLAGS | IRQF_SHARED ,
791
867
pci_name (pdev ), spi_bus );
792
868
if (ret < 0 ) {
793
869
dev_err (& pdev -> dev , "Unable to request irq : %d" ,
794
- spi_sub_ptr -> irq );
870
+ spi_sub_ptr -> irq [ 0 ] );
795
871
return - ENODEV ;
796
872
}
797
873
798
- ret = pci1xxxx_spi_dma_init (spi_bus , spi_sub_ptr -> irq );
799
- if (ret && ret != - EOPNOTSUPP )
800
- return ret ;
801
-
802
874
/* This register is only applicable for 1st instance */
803
875
regval = readl (spi_bus -> reg_base + SPI_PCI_CTRL_REG_OFFSET (0 ));
804
876
if (!only_sec_inst )
@@ -820,13 +892,13 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
820
892
writel (regval , spi_bus -> reg_base +
821
893
SPI_MST_EVENT_MASK_REG_OFFSET (spi_sub_ptr -> hw_inst ));
822
894
if (num_vector >= hw_inst_cnt ) {
823
- spi_sub_ptr -> irq = pci_irq_vector (pdev , iter );
824
- ret = devm_request_irq (& pdev -> dev , spi_sub_ptr -> irq ,
895
+ spi_sub_ptr -> irq [ 0 ] = pci_irq_vector (pdev , iter );
896
+ ret = devm_request_irq (& pdev -> dev , spi_sub_ptr -> irq [ 0 ] ,
825
897
pci1xxxx_spi_isr , PCI1XXXX_IRQ_FLAGS ,
826
898
pci_name (pdev ), spi_sub_ptr );
827
899
if (ret < 0 ) {
828
900
dev_err (& pdev -> dev , "Unable to request irq : %d" ,
829
- spi_sub_ptr -> irq );
901
+ spi_sub_ptr -> irq [ 0 ] );
830
902
return - ENODEV ;
831
903
}
832
904
}
@@ -849,6 +921,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
849
921
if (ret )
850
922
return ret ;
851
923
}
924
+ ret = pci1xxxx_spi_dma_init (spi_bus , hw_inst_cnt , num_vector );
925
+ if (ret && ret != - EOPNOTSUPP )
926
+ return ret ;
927
+
852
928
pci_set_drvdata (pdev , spi_bus );
853
929
854
930
return 0 ;
0 commit comments