@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 		       ATC_SPIP_BOUNDARY(first->boundary));
 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
 		       ATC_DPIP_BOUNDARY(first->boundary));
+	/* Don't allow CPU to reorder channel enable. */
+	wmb();
 	dma_writel(atdma, CHER, atchan->mask);
 
 	vdbg_dump_regs(atchan);
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr, trials;
+	u32 ctrla, dscr;
+	unsigned int i;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 		dscr = channel_readl(atchan, DSCR);
 		rmb(); /* ensure DSCR is read before CTRLA */
 		ctrla = channel_readl(atchan, CTRLA);
-		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+		for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
 			u32 new_dscr;
 
 			rmb(); /* ensure DSCR is read after CTRLA */
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 			rmb(); /* ensure DSCR is read before CTRLA */
 			ctrla = channel_readl(atchan, CTRLA);
 		}
-		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+		if (unlikely(i == ATC_MAX_DSCR_TRIALS))
 			return -ETIMEDOUT;
 
 		/* for the first descriptor we can be more accurate */
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
-	/* If the transfer was a memset, free our temporary buffer */
-	if (desc->memset_buffer) {
-		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-			      desc->memset_paddr);
-		desc->memset_buffer = false;
-	}
-
-	/* move children to free_list */
-	list_splice_init(&desc->tx_list, &atchan->free_list);
-	/* move myself to free_list */
-	list_move(&desc->desc_node, &atchan->free_list);
-
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	dma_descriptor_unmap(txd);
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 
 	dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-	struct at_desc *desc, *_desc;
-	LIST_HEAD(list);
-	unsigned long flags;
-
-	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
 	spin_lock_irqsave(&atchan->lock, flags);
-
-	/*
-	 * Submit queued descriptors ASAP, i.e. before we go through
-	 * the completed ones.
-	 */
-	if (!list_empty(&atchan->queue))
-		atc_dostart(atchan, atc_first_queued(atchan));
-	/* empty active_list now it is completed */
-	list_splice_init(&atchan->active_list, &list);
-	/* empty queue list by moving descriptors (if any) to active_list */
-	list_splice_init(&atchan->queue, &atchan->active_list);
-
+	/* move children to free_list */
+	list_splice_init(&desc->tx_list, &atchan->free_list);
+	/* add myself to free_list */
+	list_add(&desc->desc_node, &atchan->free_list);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset_buffer) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset_buffer = false;
+	}
 }
 
 /**
@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	struct at_desc *desc;
 	unsigned long flags;
-	int ret;
 
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	ret = atc_chan_is_enabled(atchan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
-	if (ret)
-		return;
-
-	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list))
-		return atc_complete_all(atchan);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	atc_chain_complete(atchan, atc_first_active(atchan));
+	desc = atc_first_active(atchan);
+	/* Remove the transfer node from the active list. */
+	list_del_init(&desc->desc_node);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	atc_chain_complete(atchan, desc);
 
 	/* advance work */
 	spin_lock_irqsave(&atchan->lock, flags);
-	atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
+	struct at_desc *desc;
 	struct at_desc *child;
 	unsigned long flags;
 
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	bad_desc = atc_first_active(atchan);
 	list_del_init(&bad_desc->desc_node);
 
-	/* As we are stopped, take advantage to push queued descriptors
-	 * in active_list */
-	list_splice_init(&atchan->queue, atchan->active_list.prev);
-
 	/* Try to restart the controller */
-	if (!list_empty(&atchan->active_list))
-		atc_dostart(atchan, atc_first_active(atchan));
+	if (!list_empty(&atchan->active_list)) {
+		desc = atc_first_queued(atchan);
+		list_move_tail(&desc->desc_node, &atchan->active_list);
+		atc_dostart(atchan, desc);
+	}
 
 	/*
 	 * KERN_CRITICAL may seem harsh, but since this only happens
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&atchan->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
-	if (list_empty(&atchan->active_list)) {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-				desc->txd.cookie);
-		atc_dostart(atchan, desc);
-		list_add_tail(&desc->desc_node, &atchan->active_list);
-	} else {
-		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-				desc->txd.cookie);
-		list_add_tail(&desc->desc_node, &atchan->queue);
-	}
-
+	list_add_tail(&desc->desc_node, &atchan->queue);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
+	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+		 desc->txd.cookie);
 	return cookie;
 }
 
@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	int			chan_id = atchan->chan_common.chan_id;
-	struct at_desc		*desc, *_desc;
 	unsigned long		flags;
 
-	LIST_HEAD(list);
-
 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
 	/*
@@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
 		cpu_relax();
 
 	/* active_list entries will end up before queued entries */
-	list_splice_init(&atchan->queue, &list);
-	list_splice_init(&atchan->active_list, &list);
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
-
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		atc_chain_complete(atchan, desc);
+	list_splice_tail_init(&atchan->queue, &atchan->free_list);
+	list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
 	clear_bit(ATC_IS_PAUSED, &atchan->status);
 	/* if channel dedicated to cyclic operations, free it */
 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	return 0;
 }
 
@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
-	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-	/* Not needed for cyclic transfers */
-	if (atc_chan_is_cyclic(atchan))
-		return;
+	spin_lock_irqsave(&atchan->lock, flags);
+	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+		return spin_unlock_irqrestore(&atchan->lock, flags);
 
-	atc_advance_work(atchan);
+	desc = atc_first_queued(atchan);
+	list_move_tail(&desc->desc_node, &atchan->active_list);
+	atc_dostart(atchan, desc);
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
 	  plat_dat->nr_channels);
 
-	dma_async_device_register(&atdma->dma_common);
+	err = dma_async_device_register(&atdma->dma_common);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+		goto err_dma_async_device_register;
+	}
 
 	/*
 	 * Do not return an error if the dmac node is not present in order to
@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
 	dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
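
For context on the queue-then-start flow introduced above: with this diff, atc_tx_submit() only queues the descriptor and the hardware is kicked from atc_issue_pending() (the driver's device_issue_pending callback). Below is a minimal, hypothetical client-side sketch using the generic Linux dmaengine API, not code from this commit; the channel name "tx", the caller-provided dev/dst/src/len, and the polling loop are illustrative assumptions.

/*
 * Hypothetical dmaengine client sketch (not part of this patch): shows the
 * prep -> submit -> issue_pending sequence that atc_tx_submit() and
 * atc_issue_pending() now implement on the provider side.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static int example_memcpy(struct device *dev, dma_addr_t dst, dma_addr_t src,
			  size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "tx");	/* channel name is an assumption */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* With this patch, submit only adds the descriptor to atchan->queue. */
	cookie = dmaengine_submit(tx);

	/* The transfer is actually started here, via atc_issue_pending(). */
	dma_async_issue_pending(chan);

	/* Poll for completion; a real client would normally use a callback. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}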