
Commit 3f134c9

Merge branch 'fixes' into next
Merge due to at_hdmac driver dependency
Parents: 739153a, c47e640

14 files changed: +150, -121 lines

drivers/dma/apple-admac.c (1 addition, 1 deletion)

@@ -585,7 +585,7 @@ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
                 return NULL;
         }
 
-        return &ad->channels[index].chan;
+        return dma_get_slave_channel(&ad->channels[index].chan);
 }
 
 static int admac_drain_reports(struct admac_data *ad, int channo)
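
The one-line change above is easy to miss: returning a raw pointer from an of_xlate() callback hands out the channel without claiming it, so two consumers can end up sharing it. dma_get_slave_channel() is the dmaengine helper that atomically marks the channel as taken and returns NULL if it is already busy. A minimal sketch of the corrected pattern, using hypothetical driver names rather than the admac ones:

/* Illustrative of_xlate() sketch; struct my_dma and my_dma_of_xlate() are
 * placeholders, not symbols from the apple-admac driver.
 */
static struct dma_chan *my_dma_of_xlate(struct of_phandle_args *dma_spec,
                                        struct of_dma *ofdma)
{
        struct my_dma *md = ofdma->of_dma_data;
        unsigned int index = dma_spec->args[0];

        if (dma_spec->args_count != 1 || index >= md->nchannels)
                return NULL;

        /* Claim the channel through the dmaengine core; a second consumer
         * asking for the same specifier now gets NULL instead of a pointer
         * to a channel that is already in use.
         */
        return dma_get_slave_channel(&md->channels[index].chan);
}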

drivers/dma/at_hdmac.c (60 additions, 93 deletions)

@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
                        ATC_SPIP_BOUNDARY(first->boundary));
         channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
                        ATC_DPIP_BOUNDARY(first->boundary));
+        /* Don't allow CPU to reorder channel enable. */
+        wmb();
         dma_writel(atdma, CHER, atchan->mask);
 
         vdbg_dump_regs(atchan);
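
The wmb() added here orders the preceding channel-register writes against the CHER write that enables the channel; the driver's register wrappers are presumably relaxed accessors, so nothing else guarantees the controller observes the configuration before the enable bit. A generic sketch of the same pattern, with made-up register names:

/* Generic ordering sketch; struct foo_chan and the FOO_* offsets are
 * illustrative, not at_hdmac registers.
 */
static void foo_start_channel(struct foo_chan *fc, struct foo_desc *d)
{
        writel_relaxed(d->src, fc->base + FOO_SRC);
        writel_relaxed(d->dst, fc->base + FOO_DST);
        writel_relaxed(d->ctrl, fc->base + FOO_CTRL);

        /* The configuration above must be visible to the device before the
         * enable bit is set, otherwise the channel can start with stale
         * parameters.
         */
        wmb();
        writel_relaxed(BIT(fc->id), fc->base + FOO_ENABLE);
}
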
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
         struct at_desc *desc_first = atc_first_active(atchan);
         struct at_desc *desc;
         int ret;
-        u32 ctrla, dscr, trials;
+        u32 ctrla, dscr;
+        unsigned int i;
 
         /*
          * If the cookie doesn't match to the currently running transfer then
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
         dscr = channel_readl(atchan, DSCR);
         rmb(); /* ensure DSCR is read before CTRLA */
         ctrla = channel_readl(atchan, CTRLA);
-        for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+        for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
                 u32 new_dscr;
 
                 rmb(); /* ensure DSCR is read after CTRLA */
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                 rmb(); /* ensure DSCR is read before CTRLA */
                 ctrla = channel_readl(atchan, CTRLA);
         }
-        if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+        if (unlikely(i == ATC_MAX_DSCR_TRIALS))
                 return -ETIMEDOUT;
 
         /* for the first descriptor we can be more accurate */
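
Renaming trials to a plain unsigned index and testing i == ATC_MAX_DSCR_TRIALS after the loop follows the usual kernel idiom for bounded retries: break out on success and treat "counter reached the limit" as a timeout. Sketched generically, with placeholder helpers:

/* Bounded-retry sketch; foo_read_status() and FOO_STABLE are placeholders. */
static int foo_wait_stable(struct foo_dev *fd)
{
        unsigned int i;

        for (i = 0; i < MAX_TRIALS; ++i) {
                if (foo_read_status(fd) == FOO_STABLE)
                        break;          /* success leaves i < MAX_TRIALS */
                cpu_relax();
        }
        if (i == MAX_TRIALS)            /* every attempt failed */
                return -ETIMEDOUT;

        return 0;
}
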
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
         if (!atc_chan_is_cyclic(atchan))
                 dma_cookie_complete(txd);
 
-        /* If the transfer was a memset, free our temporary buffer */
-        if (desc->memset_buffer) {
-                dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-                              desc->memset_paddr);
-                desc->memset_buffer = false;
-        }
-
-        /* move children to free_list */
-        list_splice_init(&desc->tx_list, &atchan->free_list);
-        /* move myself to free_list */
-        list_move(&desc->desc_node, &atchan->free_list);
-
         spin_unlock_irqrestore(&atchan->lock, flags);
 
         dma_descriptor_unmap(txd);
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
         dmaengine_desc_get_callback_invoke(txd, NULL);
 
         dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-        struct at_desc *desc, *_desc;
-        LIST_HEAD(list);
-        unsigned long flags;
-
-        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
         spin_lock_irqsave(&atchan->lock, flags);
-
-        /*
-         * Submit queued descriptors ASAP, i.e. before we go through
-         * the completed ones.
-         */
-        if (!list_empty(&atchan->queue))
-                atc_dostart(atchan, atc_first_queued(atchan));
-        /* empty active_list now it is completed */
-        list_splice_init(&atchan->active_list, &list);
-        /* empty queue list by moving descriptors (if any) to active_list */
-        list_splice_init(&atchan->queue, &atchan->active_list);
-
+        /* move children to free_list */
+        list_splice_init(&desc->tx_list, &atchan->free_list);
+        /* add myself to free_list */
+        list_add(&desc->desc_node, &atchan->free_list);
         spin_unlock_irqrestore(&atchan->lock, flags);
 
-        list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                atc_chain_complete(atchan, desc);
+        /* If the transfer was a memset, free our temporary buffer */
+        if (desc->memset_buffer) {
+                dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+                              desc->memset_paddr);
+                desc->memset_buffer = false;
+        }
 }
 
 /**
@@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan)
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+        struct at_desc *desc;
         unsigned long flags;
-        int ret;
 
         dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
         spin_lock_irqsave(&atchan->lock, flags);
-        ret = atc_chan_is_enabled(atchan);
-        spin_unlock_irqrestore(&atchan->lock, flags);
-        if (ret)
-                return;
-
-        if (list_empty(&atchan->active_list) ||
-            list_is_singular(&atchan->active_list))
-                return atc_complete_all(atchan);
+        if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+                return spin_unlock_irqrestore(&atchan->lock, flags);
 
-        atc_chain_complete(atchan, atc_first_active(atchan));
+        desc = atc_first_active(atchan);
+        /* Remove the transfer node from the active list. */
+        list_del_init(&desc->desc_node);
+        spin_unlock_irqrestore(&atchan->lock, flags);
+        atc_chain_complete(atchan, desc);
 
         /* advance work */
         spin_lock_irqsave(&atchan->lock, flags);
-        atc_dostart(atchan, atc_first_active(atchan));
+        if (!list_empty(&atchan->active_list)) {
+                desc = atc_first_queued(atchan);
+                list_move_tail(&desc->desc_node, &atchan->active_list);
+                atc_dostart(atchan, desc);
+        }
         spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
         struct at_desc *bad_desc;
+        struct at_desc *desc;
         struct at_desc *child;
         unsigned long flags;
 
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
         bad_desc = atc_first_active(atchan);
         list_del_init(&bad_desc->desc_node);
 
-        /* As we are stopped, take advantage to push queued descriptors
-         * in active_list */
-        list_splice_init(&atchan->queue, atchan->active_list.prev);
-
         /* Try to restart the controller */
-        if (!list_empty(&atchan->active_list))
-                atc_dostart(atchan, atc_first_active(atchan));
+        if (!list_empty(&atchan->active_list)) {
+                desc = atc_first_queued(atchan);
+                list_move_tail(&desc->desc_node, &atchan->active_list);
+                atc_dostart(atchan, desc);
+        }
 
         /*
          * KERN_CRITICAL may seem harsh, but since this only happens
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
         spin_lock_irqsave(&atchan->lock, flags);
         cookie = dma_cookie_assign(tx);
 
-        if (list_empty(&atchan->active_list)) {
-                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-                         desc->txd.cookie);
-                atc_dostart(atchan, desc);
-                list_add_tail(&desc->desc_node, &atchan->active_list);
-        } else {
-                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-                         desc->txd.cookie);
-                list_add_tail(&desc->desc_node, &atchan->queue);
-        }
-
+        list_add_tail(&desc->desc_node, &atchan->queue);
         spin_unlock_irqrestore(&atchan->lock, flags);
 
+        dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+                 desc->txd.cookie);
         return cookie;
 }
 
@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
         struct at_dma *atdma = to_at_dma(chan->device);
         int chan_id = atchan->chan_common.chan_id;
-        struct at_desc *desc, *_desc;
         unsigned long flags;
 
-        LIST_HEAD(list);
-
         dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
         /*
@@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
                 cpu_relax();
 
         /* active_list entries will end up before queued entries */
-        list_splice_init(&atchan->queue, &list);
-        list_splice_init(&atchan->active_list, &list);
-
-        spin_unlock_irqrestore(&atchan->lock, flags);
-
-        /* Flush all pending and queued descriptors */
-        list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                atc_chain_complete(atchan, desc);
+        list_splice_tail_init(&atchan->queue, &atchan->free_list);
+        list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
         clear_bit(ATC_IS_PAUSED, &atchan->status);
         /* if channel dedicated to cyclic operations, free it */
         clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
+        spin_unlock_irqrestore(&atchan->lock, flags);
+
         return 0;
 }
 
@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
-        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
+        struct at_dma_chan *atchan = to_at_dma_chan(chan);
+        struct at_desc *desc;
+        unsigned long flags;
 
         dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-        /* Not needed for cyclic transfers */
-        if (atc_chan_is_cyclic(atchan))
-                return;
+        spin_lock_irqsave(&atchan->lock, flags);
+        if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+                return spin_unlock_irqrestore(&atchan->lock, flags);
 
-        atc_advance_work(atchan);
+        desc = atc_first_queued(atchan);
+        list_move_tail(&desc->desc_node, &atchan->active_list);
+        atc_dostart(atchan, desc);
+        spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
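
With atc_tx_submit() now only queueing the descriptor, the transfer is actually started in atc_issue_pending(), which matches the dmaengine contract: a client prepares a descriptor, submits it, then calls dma_async_issue_pending() to kick the queue. A minimal client-side sketch (error handling trimmed; my_done() is a placeholder callback):

/* Standard dmaengine client sequence, sketched for illustration. */
static int my_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                     size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        tx->callback = my_done;         /* invoked from atc_chain_complete() */
        cookie = dmaengine_submit(tx);  /* atc_tx_submit(): only queued now */
        if (dma_submit_error(cookie))
                return -EINVAL;

        dma_async_issue_pending(chan);  /* atc_issue_pending(): starts the queue */
        return 0;
}
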
@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
                  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
                  plat_dat->nr_channels);
 
-        dma_async_device_register(&atdma->dma_common);
+        err = dma_async_device_register(&atdma->dma_common);
+        if (err) {
+                dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+                goto err_dma_async_device_register;
+        }
 
         /*
          * Do not return an error if the dmac node is not present in order to
@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 err_of_dma_controller_register:
         dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
         dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
         dma_pool_destroy(atdma->dma_desc_pool);
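
The probe fix checks the return value of dma_async_device_register() and threads a new label into the existing unwind ladder, so only the resources that were already set up get torn down. A generic sketch of the idiom, with placeholder helpers and labels:

/* Unwind-ladder sketch; setup_pools(), register_dma_device() and friends
 * are placeholders, not at_hdmac functions.
 */
static int my_probe(struct platform_device *pdev)
{
        int err;

        err = setup_pools(pdev);
        if (err)
                return err;

        err = register_dma_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Unable to register: %d.\n", err);
                goto err_register;              /* undo only what succeeded */
        }

        err = register_of_controller(pdev);
        if (err)
                goto err_of_register;

        return 0;

err_of_register:
        unregister_dma_device(pdev);            /* mirrors register_dma_device() */
err_register:
        teardown_pools(pdev);                   /* mirrors setup_pools() */
        return err;
}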

drivers/dma/at_hdmac_regs.h (5 additions, 5 deletions)

@@ -186,13 +186,13 @@
 /* LLI == Linked List Item; aka DMA buffer descriptor */
 struct at_lli {
         /* values that are not changed by hardware */
-        dma_addr_t      saddr;
-        dma_addr_t      daddr;
+        u32 saddr;
+        u32 daddr;
         /* value that may get written back: */
-        u32             ctrla;
+        u32 ctrla;
         /* more values that are not changed by hardware */
-        u32             ctrlb;
-        dma_addr_t      dscr;   /* chain to next lli */
+        u32 ctrlb;
+        u32 dscr; /* chain to next lli */
 };
 
 /**
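
The switch from dma_addr_t to u32 pins down the in-memory layout of struct at_lli: the controller walks these descriptors directly and expects 32-bit fields, while dma_addr_t can be 64 bits wide depending on CONFIG_ARCH_DMA_ADDR_T_64BIT. A sketch of how a hardware-visible descriptor can guard its layout (illustrative struct, not the driver's):

#include <linux/build_bug.h>
#include <linux/types.h>

/* Example hardware descriptor; the field names mirror the LLI above but the
 * struct itself is illustrative, not kernel code.
 */
struct example_hw_lli {
        u32 saddr;      /* bus address of the source buffer */
        u32 daddr;      /* bus address of the destination buffer */
        u32 ctrla;
        u32 ctrlb;
        u32 dscr;       /* bus address of the next descriptor in the chain */
};

/* The device reads exactly five 32-bit words per descriptor, so catch any
 * accidental layout change at compile time.
 */
static_assert(sizeof(struct example_hw_lli) == 5 * sizeof(u32));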

drivers/dma/idxd/cdev.c (18 additions, 0 deletions)

@@ -312,6 +312,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
         if (idxd->state != IDXD_DEV_ENABLED)
                 return -ENXIO;
 
+        /*
+         * User type WQ is enabled only when SVA is enabled for two reasons:
+         *   - If no IOMMU or IOMMU Passthrough without SVA, userspace
+         *     can directly access physical address through the WQ.
+         *   - The IDXD cdev driver does not provide any ways to pin
+         *     user pages and translate the address from user VA to IOVA or
+         *     PA without IOMMU SVA. Therefore the application has no way
+         *     to instruct the device to perform DMA function. This makes
+         *     the cdev not usable for normal application usage.
+         */
+        if (!device_user_pasid_enabled(idxd)) {
+                idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
+                dev_dbg(&idxd->pdev->dev,
+                        "User type WQ cannot be enabled without SVA.\n");
+
+                return -EOPNOTSUPP;
+        }
+
         mutex_lock(&wq->wq_lock);
         wq->type = IDXD_WQT_USER;
         rc = drv_enable_wq(wq);

drivers/dma/idxd/device.c (17 additions, 9 deletions)

@@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
         clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
         memset(wq->name, 0, WQ_NAME_SIZE);
         wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
-        wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
         if (wq->opcap_bmap)
                 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 }
@@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
-        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
-                return;
+        /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
+        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+                /*
+                 * Clearing wq state is protected by wq lock.
+                 * So no need to be protected by device lock.
+                 */
+                idxd_device_wqs_clear_state(idxd);
+
+                spin_lock(&idxd->dev_lock);
+                idxd_groups_clear_state(idxd);
+                idxd_engines_clear_state(idxd);
+        } else {
+                spin_lock(&idxd->dev_lock);
+        }
 
-        idxd_device_wqs_clear_state(idxd);
-        spin_lock(&idxd->dev_lock);
-        idxd_groups_clear_state(idxd);
-        idxd_engines_clear_state(idxd);
         idxd->state = IDXD_DEV_DISABLED;
         spin_unlock(&idxd->dev_lock);
 }
@@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 
         /* bytes 12-15 */
         wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
-        wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+        idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
 
         /* bytes 32-63 */
         if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
@@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
         wq->priority = wq->wqcfg->priority;
 
         wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
-        wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+        idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
 
         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
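
idxd_wq_set_max_batch_size() and idxd_wqcfg_set_max_batch_shift() are helpers introduced alongside this change; their bodies are not part of this diff. Routing every write through a helper keyed on idxd->data->type suggests that some device types have no max-batch capability and should not have the field programmed. A plausible sketch, with the type check treated as an assumption rather than the upstream implementation:

/* Sketch only: the real helper lives in the idxd headers. The IDXD_TYPE_IAX
 * check and its behaviour here are assumptions inferred from the diff.
 */
static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
                                              u32 max_batch_size)
{
        if (idxd_type == IDXD_TYPE_IAX)
                return;                 /* assumed: no max-batch field on this type */

        wq->max_batch_size = max_batch_size;
}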
