@@ -2,6 +2,9 @@
// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.

#include <linux/clk.h>
+ #include <linux/dmaengine.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
@@ -63,6 +66,15 @@
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

+ #define GSI_LOOPBACK_EN		BIT(0)
+ #define GSI_CS_TOGGLE		BIT(3)
+ #define GSI_CPHA		BIT(4)
+ #define GSI_CPOL		BIT(5)
+
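+ /* Sizing limits and timeout presumably used by the GPI DMA transfer path */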
+ #define MAX_TX_SG		3
+ #define NUM_SPI_XFER		8
+ #define SPI_XFER_TIMEOUT_MS	250
+
struct spi_geni_master {
	struct geni_se se;
	struct device *dev;
@@ -84,6 +96,9 @@ struct spi_geni_master {
	int irq;
	bool cs_flag;
	bool abort_failed;
+ 	struct dma_chan *tx;
+ 	struct dma_chan *rx;
+ 	int cur_xfer_mode;
};

static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -330,34 +345,197 @@ static int setup_fifo_params(struct spi_device *spi_slv,
	return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}

+ static void
+ spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
+ {
+ 	struct spi_master *spi = cb;
+
+ 	if (result->result != DMA_TRANS_NOERROR) {
+ 		dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
+ 		return;
+ 	}
+
+ 	if (!result->residue) {
+ 		dev_dbg(&spi->dev, "DMA txn completed\n");
+ 		spi_finalize_current_transfer(spi);
+ 	} else {
+ 		dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
+ 	}
+ }
+
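+ /*
+  * Queue one transfer on the GPI DMA channels. The return value of 1
+  * tells the SPI core the transfer is in flight, so the core waits for
+  * spi_gsi_callback_result() above to call spi_finalize_current_transfer()
+  * from the TX completion.
+  */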
+ static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
+ 			  struct spi_device *spi_slv, struct spi_master *spi)
+ {
+ 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ 	struct dma_slave_config config = {};
+ 	struct gpi_spi_config peripheral = {};
+ 	struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+ 	int ret;
+
+ 	config.peripheral_config = &peripheral;
+ 	config.peripheral_size = sizeof(peripheral);
+ 	peripheral.set_config = true;
+
+ 	if (xfer->bits_per_word != mas->cur_bits_per_word ||
+ 	    xfer->speed_hz != mas->cur_speed_hz) {
+ 		mas->cur_bits_per_word = xfer->bits_per_word;
+ 		mas->cur_speed_hz = xfer->speed_hz;
+ 	}
+
+ 	if (xfer->tx_buf && xfer->rx_buf) {
+ 		peripheral.cmd = SPI_DUPLEX;
+ 	} else if (xfer->tx_buf) {
+ 		peripheral.cmd = SPI_TX;
+ 		peripheral.rx_len = 0;
+ 	} else if (xfer->rx_buf) {
+ 		peripheral.cmd = SPI_RX;
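+ 		/*
+ 		 * rx_len counts SPI words, not bytes: e.g. a 32-byte
+ 		 * buffer at 16 bits per word is (32 << 3) / 16 = 16
+ 		 * words; word sizes that are not a multiple of
+ 		 * MIN_WORD_LEN use a rounded-up bytes-per-word instead.
+ 		 */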
+ 		if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
+ 			peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
+ 		} else {
+ 			int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
+
+ 			peripheral.rx_len = (xfer->len / bytes_per_word);
+ 		}
+ 	}
+
+ 	peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
+ 	peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
+ 	peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
+ 	peripheral.cs = spi_slv->chip_select;
+ 	peripheral.pack_en = true;
+ 	peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;
+
+ 	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
+ 			      &peripheral.clk_src, &peripheral.clk_div);
+ 	if (ret) {
+ 		dev_err(mas->dev, "Err in get_spi_clk_cfg(): %d\n", ret);
+ 		return ret;
+ 	}
+
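+ 	/*
+ 	 * For all but the last transfer of a message with cs_change unset,
+ 	 * mark the TRE as a fragment, which presumably keeps CS asserted
+ 	 * into the next transfer.
+ 	 */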
415
+ if (!xfer -> cs_change ) {
416
+ if (!list_is_last (& xfer -> transfer_list , & spi -> cur_msg -> transfers ))
417
+ peripheral .fragmentation = FRAGMENTATION ;
418
+ }
419
+
420
+ if (peripheral .cmd & SPI_RX ) {
421
+ dmaengine_slave_config (mas -> rx , & config );
422
+ rx_desc = dmaengine_prep_slave_sg (mas -> rx , xfer -> rx_sg .sgl , xfer -> rx_sg .nents ,
423
+ DMA_DEV_TO_MEM , flags );
424
+ if (!rx_desc ) {
425
+ dev_err (mas -> dev , "Err setting up rx desc\n" );
426
+ return - EIO ;
427
+ }
428
+ }
429
+
430
+ /*
431
+ * Prepare the TX always, even for RX or tx_buf being null, we would
432
+ * need TX to be prepared per GSI spec
433
+ */
434
+ dmaengine_slave_config (mas -> tx , & config );
435
+ tx_desc = dmaengine_prep_slave_sg (mas -> tx , xfer -> tx_sg .sgl , xfer -> tx_sg .nents ,
436
+ DMA_MEM_TO_DEV , flags );
437
+ if (!tx_desc ) {
438
+ dev_err (mas -> dev , "Err setting up tx desc\n" );
439
+ return - EIO ;
440
+ }
441
+
442
+ tx_desc -> callback_result = spi_gsi_callback_result ;
443
+ tx_desc -> callback_param = spi ;
444
+
445
+ if (peripheral .cmd & SPI_RX )
446
+ dmaengine_submit (rx_desc );
447
+ dmaengine_submit (tx_desc );
448
+
449
+ if (peripheral .cmd & SPI_RX )
450
+ dma_async_issue_pending (mas -> rx );
451
+
452
+ dma_async_issue_pending (mas -> tx );
453
+ return 1 ;
454
+ }
455
+
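+ /*
+  * With can_dma() returning true, the SPI core DMA-maps each transfer's
+  * buffers into the xfer->tx_sg/rx_sg scatterlists (using dma_map_dev,
+  * set below in probe) before transfer_one() runs; those are what
+  * setup_gsi_xfer() hands to dmaengine_prep_slave_sg().
+  */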
+ static bool geni_can_dma(struct spi_controller *ctlr,
+ 			 struct spi_device *slv, struct spi_transfer *xfer)
+ {
+ 	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+
+ 	/* check if dma is supported */
+ 	return mas->cur_xfer_mode != GENI_SE_FIFO;
+ }
+
static int spi_geni_prepare_message(struct spi_master *spi,
				    struct spi_message *spi_msg)
{
- 	int ret;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ 	int ret;

- 	if (spi_geni_is_abort_still_pending(mas))
- 		return -EBUSY;
+ 	switch (mas->cur_xfer_mode) {
+ 	case GENI_SE_FIFO:
+ 		if (spi_geni_is_abort_still_pending(mas))
+ 			return -EBUSY;
+ 		ret = setup_fifo_params(spi_msg->spi, spi);
+ 		if (ret)
+ 			dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ 		return ret;

- 	ret = setup_fifo_params(spi_msg->spi, spi);
- 	if (ret)
- 		dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ 	case GENI_GPI_DMA:
+ 		/* nothing to do for GPI DMA */
+ 		return 0;
+ 	}
+
+ 	dev_err(mas->dev, "Mode not supported %d\n", mas->cur_xfer_mode);
+ 	return -EINVAL;
+ }
+
+ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
+ {
+ 	int ret;
+
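+ 	/* "tx"/"rx" are looked up via the dmas/dma-names DT properties */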
+ 	mas->tx = dma_request_chan(mas->dev, "tx");
+ 	if (IS_ERR(mas->tx)) {
+ 		ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx), "Failed to get tx DMA ch\n");
+ 		goto err_tx;
+ 	}
+
+ 	mas->rx = dma_request_chan(mas->dev, "rx");
+ 	if (IS_ERR(mas->rx)) {
+ 		ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx), "Failed to get rx DMA ch\n");
+ 		goto err_rx;
+ 	}
+
+ 	return 0;
+
+ err_rx:
+ 	mas->rx = NULL;
+ 	dma_release_channel(mas->tx);
+ err_tx:
+ 	mas->tx = NULL;
	return ret;
}

+ static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
+ {
+ 	if (mas->rx) {
+ 		dma_release_channel(mas->rx);
+ 		mas->rx = NULL;
+ 	}
+
+ 	if (mas->tx) {
+ 		dma_release_channel(mas->tx);
+ 		mas->tx = NULL;
+ 	}
+ }
+
static int spi_geni_init(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int proto, major, minor, ver;
- 	u32 spi_tx_cfg;
+ 	u32 spi_tx_cfg, fifo_disable;
+ 	int ret = -ENXIO;

	pm_runtime_get_sync(mas->dev);

	proto = geni_se_read_proto(se);
	if (proto != GENI_SE_SPI) {
		dev_err(mas->dev, "Invalid proto %d\n", proto);
- 		pm_runtime_put(mas->dev);
- 		return -ENXIO;
+ 		goto out_pm;
	}
	mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

@@ -380,15 +558,38 @@ static int spi_geni_init(struct spi_geni_master *mas)
	else
		mas->oversampling = 1;

- 	geni_se_select_mode(se, GENI_SE_FIFO);
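+ 	/*
+ 	 * GENI_IF_DISABLE_RO reports whether firmware has disabled this
+ 	 * SE's FIFO interface; if it has, GPI DMA is the intended mode.
+ 	 */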
+ 	fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ 	switch (fifo_disable) {
+ 	case 1:
+ 		ret = spi_geni_grab_gpi_chan(mas);
+ 		if (!ret) { /* success case */
+ 			mas->cur_xfer_mode = GENI_GPI_DMA;
+ 			geni_se_select_mode(se, GENI_GPI_DMA);
+ 			dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
+ 			break;
+ 		}
+ 		/*
+ 		 * If we fail to get a DMA channel, we can still do FIFO
+ 		 * mode, so fall through
+ 		 */
+ 		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
+ 		fallthrough;
+
+ 	case 0:
+ 		mas->cur_xfer_mode = GENI_SE_FIFO;
+ 		geni_se_select_mode(se, GENI_SE_FIFO);
+ 		ret = 0;
+ 		break;
+ 	}

	/* We always control CS manually */
	spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
	spi_tx_cfg &= ~CS_TOGGLE;
	writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);

+ out_pm:
	pm_runtime_put(mas->dev);
- 	return 0;
+ 	return ret;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
@@ -569,8 +770,11 @@ static int spi_geni_transfer_one(struct spi_master *spi,
	if (!xfer->len)
		return 0;

- 	setup_fifo_xfer(xfer, mas, slv->mode, spi);
- 	return 1;
+ 	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ 		setup_fifo_xfer(xfer, mas, slv->mode, spi);
+ 		return 1;
+ 	}
+ 	return setup_gsi_xfer(xfer, mas, slv, spi);
}

static irqreturn_t geni_spi_isr(int irq, void *data)
@@ -665,6 +869,13 @@ static int spi_geni_probe(struct platform_device *pdev)
	if (irq < 0)
		return irq;

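+ 	/*
+ 	 * Prefer a 64-bit DMA mask and fall back to 32 bits, failing the
+ 	 * probe only if neither can be set.
+ 	 */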
+ 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ 	if (ret) {
+ 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ 		if (ret)
+ 			return dev_err_probe(dev, ret, "could not set DMA mask\n");
+ 	}
+
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);
@@ -704,9 +915,10 @@ static int spi_geni_probe(struct platform_device *pdev)
	spi->max_speed_hz = 50000000;
	spi->prepare_message = spi_geni_prepare_message;
	spi->transfer_one = spi_geni_transfer_one;
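+ 	/*
+ 	 * DMA-map buffers against the parent device (the QUP wrapper),
+ 	 * which presumably carries the DMA/IOMMU configuration that the
+ 	 * GPI engine uses to master the bus.
+ 	 */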
+ 	spi->can_dma = geni_can_dma;
+ 	spi->dma_map_dev = dev->parent;
	spi->auto_runtime_pm = true;
	spi->handle_err = handle_fifo_timeout;
- 	spi->set_cs = spi_geni_set_cs;
	spi->use_gpio_descriptors = true;

	init_completion(&mas->cs_done);
@@ -732,9 +944,17 @@ static int spi_geni_probe(struct platform_device *pdev)
	if (ret)
		goto spi_geni_probe_runtime_disable;

+ 	/*
+ 	 * Check the mode supported and set set_cs for FIFO mode only;
+ 	 * for DMA (GSI) mode, the GSI engine sets CS based on the params
+ 	 * passed in the TRE.
+ 	 */
+ 	if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ 		spi->set_cs = spi_geni_set_cs;
+
	ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
	if (ret)
- 		goto spi_geni_probe_runtime_disable;
+ 		goto spi_geni_release_dma;

	ret = spi_register_master(spi);
	if (ret)
@@ -743,6 +963,8 @@ static int spi_geni_probe(struct platform_device *pdev)
	return 0;
spi_geni_probe_free_irq:
	free_irq(mas->irq, spi);
+ spi_geni_release_dma:
+ 	spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
	pm_runtime_disable(dev);
	return ret;
@@ -756,6 +978,8 @@ static int spi_geni_remove(struct platform_device *pdev)
	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_master(spi);

+ 	spi_geni_release_dma_chan(mas);
+
	free_irq(mas->irq, spi);
	pm_runtime_disable(&pdev->dev);
	return 0;