Commit 51c42ae

rohitvisavalia authored and vinodkoul committed
dmaengine: xilinx: dpdma: Add support for cyclic dma mode
This patch adds support for DPDMA cyclic DMA mode; cyclic DMA transfers are required by audio streaming.

Signed-off-by: Rohit Visavalia <[email protected]>
Signed-off-by: Radhey Shyam Pandey <[email protected]>
Signed-off-by: Vishal Sagar <[email protected]>
Reviewed-by: Tomi Valkeinen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Vinod Koul <[email protected]>
1 parent 654beb7 commit 51c42ae
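In cyclic mode the engine loops over a single buffer split into period-sized chunks, which is the model ALSA-style audio streaming expects. Below is a minimal sketch, not part of this commit, of how a client driver might drive this mode through the generic dmaengine API; the channel name "aud0", the period and buffer sizes, and the callback are illustrative assumptions.

/* Illustrative client-side usage; channel name, sizes and callback are
 * assumptions, not part of this commit. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static void audio_period_done(void *arg)
{
	/* Runs once per completed period; each hardware descriptor in the
	 * ring is built with XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR set. */
}

static int audio_start_cyclic(struct device *dev)
{
	size_t period_len = 4096;		/* one audio period */
	size_t buf_len = 8 * period_len;	/* whole ring, a multiple of the period */
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_addr_t dma_addr;
	void *buf;

	chan = dma_request_chan(dev, "aud0");	/* hypothetical channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	buf = dma_alloc_coherent(dev, buf_len, &dma_addr, GFP_KERNEL);
	if (!buf) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	/* Resolves to xilinx_dpdma_prep_dma_cyclic() on a DPDMA channel. */
	desc = dmaengine_prep_dma_cyclic(chan, dma_addr, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_free_coherent(dev, buf_len, buf, dma_addr);
		dma_release_channel(chan);
		return -EINVAL;
	}

	desc->callback = audio_period_done;
	desc->callback_param = NULL;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}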

File tree

1 file changed: +97 -0


drivers/dma/xilinx/xilinx_dpdma.c

Lines changed: 97 additions & 0 deletions
@@ -670,6 +670,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
 	kfree(desc);
 }
 
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of a single period
+ * @flags: tx flags argument passed in to prepare function
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+			      dma_addr_t buf_addr, size_t buf_len,
+			      size_t period_len, unsigned long flags)
+{
+	struct xilinx_dpdma_tx_desc *tx_desc;
+	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+	unsigned int periods = buf_len / period_len;
+	unsigned int i;
+
+	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+	if (!tx_desc)
+		return NULL;
+
+	for (i = 0; i < periods; i++) {
+		struct xilinx_dpdma_hw_desc *hw_desc;
+
+		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+			dev_err(chan->xdev->dev,
+				"buffer should be aligned at %d B\n",
+				XILINX_DPDMA_ALIGN_BYTES);
+			goto error;
+		}
+
+		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+		if (!sw_desc)
+			goto error;
+
+		xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
+						   &buf_addr, 1);
+		hw_desc = &sw_desc->hw;
+		hw_desc->xfer_size = period_len;
+		hw_desc->hsize_stride =
+			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
+				   period_len) |
+			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
+				   period_len);
+		hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
+				   XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
+				   XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+		list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+		buf_addr += period_len;
+		last = sw_desc;
+	}
+
+	sw_desc = list_first_entry(&tx_desc->descriptors,
+				   struct xilinx_dpdma_sw_desc, node);
+	last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
+	if (chan->xdev->ext_addr)
+		last->hw.addr_ext |=
+			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
+				   upper_32_bits(sw_desc->dma_addr));
+
+	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+	return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);
+
+error:
+	xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
+
+	return NULL;
+}
+
 /**
  * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
  * descriptor
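To make the ring construction concrete: with buf_len = 32768 and period_len = 4096, the loop above builds eight hardware descriptors, each raising a completion interrupt, and the final descriptor's next_desc is pointed back at the first so the engine loops with no software intervention. The following simplified, self-contained sketch shows only that linking pattern; the types are hypothetical stand-ins, not the driver's xilinx_dpdma_sw_desc machinery.

/* Hypothetical stand-in types; only the linking pattern mirrors the driver. */
#include <stddef.h>
#include <stdint.h>

struct demo_desc {
	uint32_t next_desc;	/* low 32 bits of the next descriptor's address */
	size_t xfer_size;	/* one period per descriptor */
};

static void demo_link_ring(struct demo_desc *descs, const uint32_t *addrs,
			   size_t periods, size_t period_len)
{
	size_t i;

	for (i = 0; i < periods; i++) {
		descs[i].xfer_size = period_len;
		/* Each descriptor points at its successor; the modulo wraps
		 * the last descriptor back to the first, matching
		 * last->hw.next_desc = lower_32_bits(first->dma_addr) above. */
		descs[i].next_desc = addrs[(i + 1) % periods];
	}
}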
@@ -1189,6 +1267,23 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
 /* -----------------------------------------------------------------------------
  * DMA Engine Operations
  */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+			     size_t buf_len, size_t period_len,
+			     enum dma_transfer_direction direction,
+			     unsigned long flags)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	if (direction != DMA_MEM_TO_DEV)
+		return NULL;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+					     period_len, flags);
+}
 
 static struct dma_async_tx_descriptor *
 xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
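This wrapper enforces the two preconditions the core helper relies on: DPDMA only moves data toward the display/audio subsystem, so anything other than DMA_MEM_TO_DEV is rejected, and the buffer must divide evenly into periods. For example, a hypothetical call dmaengine_prep_dma_cyclic(chan, addr, 10000, 4096, DMA_MEM_TO_DEV, 0) would return NULL because 10000 is not a multiple of 4096.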
@@ -1672,13 +1767,15 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
 	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
 	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
 	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
 	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
 
 	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
 	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
 	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
 	/* TODO: Can we achieve better granularity ? */
 	ddev->device_tx_status = dma_cookie_status;
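With DMA_CYCLIC added to cap_mask and device_prep_dma_cyclic wired up in probe, generic clients can test for the capability before preparing a transfer. A small illustrative check, assuming a chan obtained as in the earlier sketch:

	/* Illustrative capability check; not part of this commit. */
	if (!dma_has_cap(DMA_CYCLIC, chan->device->cap_mask)) {
		dma_release_channel(chan);
		return -ENODEV;
	}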