
Commit 3e184e6

Jan Kuliga authored and Vinod Koul committed
dmaengine: xilinx: xdma: Prepare the introduction of interleaved DMA transfers
Make generic code generic. Since the descriptor-filling logic stays the
same regardless of a dmaengine transfer type, the descriptor-filling
function can be written generically and reused by every type of transfer
preparation callback.

Signed-off-by: Jan Kuliga <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Vinod Koul <[email protected]>
1 parent fd0e1d8 commit 3e184e6
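The refactoring pattern is easiest to see outside the driver: one helper fills hardware descriptors for a single contiguous chunk, and every preparation callback reduces to a loop over that helper. Below is a minimal, self-contained C sketch of that shape; the names (fill_chunk, prep_sg, prep_cyclic, MAX_LEN) and the simplified flat descriptor array are illustrative stand-ins, not the driver's API.

#include <stdint.h>
#include <stdio.h>

#define MAX_LEN 16u /* stand-in for XDMA_DESC_BLEN_MAX */

struct hw_desc {
        uint32_t bytes;
        uint64_t src;
        uint64_t dst;
};

/* Generic part: fill descriptors for one contiguous block, starting at
 * index "filled" in a flat array, and return how many were used. */
static uint32_t fill_chunk(struct hw_desc *descs, uint64_t src, uint64_t dst,
                           uint32_t size, uint32_t filled)
{
        uint32_t left = size, n = filled, len;

        do {
                len = left < MAX_LEN ? left : MAX_LEN;
                descs[n].bytes = len;
                descs[n].src = src;
                descs[n].dst = dst;
                n++;
                src += len;
                dst += len;
                left -= len;
        } while (left);

        return n - filled;
}

/* Scatter-gather prep: one fill_chunk() call per entry. */
static uint32_t prep_sg(struct hw_desc *d, const uint64_t *addrs,
                        const uint32_t *lens, int nents, uint64_t dev)
{
        uint32_t n = 0;

        for (int i = 0; i < nents; i++) {
                n += fill_chunk(d, addrs[i], dev, lens[i], n);
                dev += lens[i];
        }
        return n;
}

/* Cyclic prep (memory-to-device shown): one call per period. */
static uint32_t prep_cyclic(struct hw_desc *d, uint64_t buf, uint64_t dev,
                            uint32_t period, int periods)
{
        uint32_t n = 0;

        for (int i = 0; i < periods; i++)
                n += fill_chunk(d, buf + (uint64_t)i * period, dev, period, n);
        return n;
}

int main(void)
{
        struct hw_desc d[8];
        uint64_t addrs[] = { 0x1000 };
        uint32_t lens[] = { 40 }; /* 40 bytes -> descriptors of 16 + 16 + 8 */

        uint32_t n = prep_sg(d, addrs, lens, 1, 0x9000);
        printf("sg: %u descriptors\n", (unsigned)n); /* sg: 3 descriptors */

        n = prep_cyclic(d, 0x2000, 0x9000, 16, 4);
        printf("cyclic: %u descriptors\n", (unsigned)n); /* cyclic: 4 descriptors */
        return 0;
}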

File tree: 1 file changed (+57, -44 lines)


drivers/dma/xilinx/xdma.c

Lines changed: 57 additions & 44 deletions
@@ -542,6 +542,43 @@ static void xdma_synchronize(struct dma_chan *chan)
 	vchan_synchronize(&xdma_chan->vchan);
 }
 
+/**
+ * xdma_fill_descs - Fill hardware descriptors with contiguous memory block addresses
+ * @sw_desc: tx descriptor state container
+ * @src_addr: Value for a ->src_addr field of a first descriptor
+ * @dst_addr: Value for a ->dst_addr field of a first descriptor
+ * @size: Total size of a contiguous memory block
+ * @filled_descs_num: Number of filled hardware descriptors for corresponding sw_desc
+ */
+static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
+				  u64 dst_addr, u32 size, u32 filled_descs_num)
+{
+	u32 left = size, len, desc_num = filled_descs_num;
+	struct xdma_desc_block *dblk;
+	struct xdma_hw_desc *desc;
+
+	dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
+	desc = dblk->virt_addr;
+	desc += desc_num & XDMA_DESC_ADJACENT_MASK;
+	do {
+		len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
+		/* set hardware descriptor */
+		desc->bytes = cpu_to_le32(len);
+		desc->src_addr = cpu_to_le64(src_addr);
+		desc->dst_addr = cpu_to_le64(dst_addr);
+		if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
+			desc = (++dblk)->virt_addr;
+		else
+			desc++;
+
+		src_addr += len;
+		dst_addr += len;
+		left -= len;
+	} while (left);
+
+	return desc_num - filled_descs_num;
+}
+
 /**
  * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
  * @chan: DMA channel pointer
@@ -558,13 +595,10 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 {
 	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
 	struct dma_async_tx_descriptor *tx_desc;
-	u32 desc_num = 0, i, len, rest;
-	struct xdma_desc_block *dblk;
-	struct xdma_hw_desc *desc;
 	struct xdma_desc *sw_desc;
-	u64 dev_addr, *src, *dst;
+	u32 desc_num = 0, i;
+	u64 addr, dev_addr, *src, *dst;
 	struct scatterlist *sg;
-	u64 addr;
 
 	for_each_sg(sgl, sg, sg_len, i)
 		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
@@ -584,32 +618,11 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		dst = &addr;
 	}
 
-	dblk = sw_desc->desc_blocks;
-	desc = dblk->virt_addr;
-	desc_num = 1;
+	desc_num = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
 		addr = sg_dma_address(sg);
-		rest = sg_dma_len(sg);
-
-		do {
-			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
-			/* set hardware descriptor */
-			desc->bytes = cpu_to_le32(len);
-			desc->src_addr = cpu_to_le64(*src);
-			desc->dst_addr = cpu_to_le64(*dst);
-
-			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
-				dblk++;
-				desc = dblk->virt_addr;
-			} else {
-				desc++;
-			}
-
-			desc_num++;
-			dev_addr += len;
-			addr += len;
-			rest -= len;
-		} while (rest);
+		desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
+		dev_addr += sg_dma_len(sg);
 	}
 
 	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
@@ -643,9 +656,9 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	struct xdma_device *xdev = xdma_chan->xdev_hdl;
 	unsigned int periods = size / period_size;
 	struct dma_async_tx_descriptor *tx_desc;
-	struct xdma_desc_block *dblk;
-	struct xdma_hw_desc *desc;
 	struct xdma_desc *sw_desc;
+	u64 addr, dev_addr, *src, *dst;
+	u32 desc_num;
 	unsigned int i;
 
 	/*
@@ -670,21 +683,21 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
 	sw_desc->period_size = period_size;
 	sw_desc->dir = dir;
 
-	dblk = sw_desc->desc_blocks;
-	desc = dblk->virt_addr;
+	addr = address;
+	if (dir == DMA_MEM_TO_DEV) {
+		dev_addr = xdma_chan->cfg.dst_addr;
+		src = &addr;
+		dst = &dev_addr;
+	} else {
+		dev_addr = xdma_chan->cfg.src_addr;
+		src = &dev_addr;
+		dst = &addr;
+	}
 
-	/* fill hardware descriptor */
+	desc_num = 0;
 	for (i = 0; i < periods; i++) {
-		desc->bytes = cpu_to_le32(period_size);
-		if (dir == DMA_MEM_TO_DEV) {
-			desc->src_addr = cpu_to_le64(address + i * period_size);
-			desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
-		} else {
-			desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
-			desc->dst_addr = cpu_to_le64(address + i * period_size);
-		}
-
-		desc++;
+		desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
+		addr += i * period_size;
 	}
 
 	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
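What makes xdma_fill_descs() callable from any preparation callback is the indexing arithmetic at its top: hardware descriptors live in blocks of XDMA_DESC_ADJACENT entries, and because that constant is a power of two, a running descriptor count maps to a (block, slot) pair with a divide and a mask, while !(++desc_num & XDMA_DESC_ADJACENT_MASK) fires exactly when the next descriptor starts a new block. A standalone sketch of that arithmetic, with an illustrative block size of 4 (the real constants and struct layout live in the driver's headers):

#include <stdint.h>
#include <stdio.h>

#define ADJACENT 4u                  /* stand-in for XDMA_DESC_ADJACENT */
#define ADJACENT_MASK (ADJACENT - 1) /* stand-in for XDMA_DESC_ADJACENT_MASK */

int main(void)
{
        for (uint32_t desc_num = 0; desc_num < 10; desc_num++) {
                uint32_t block = desc_num / ADJACENT;       /* which block */
                uint32_t slot = desc_num & ADJACENT_MASK;   /* which entry */

                /* The driver's wrap test: the post-incremented count masked
                 * to zero means the next descriptor opens a new block. */
                printf("desc %2u -> block %u, slot %u%s\n",
                       (unsigned)desc_num, (unsigned)block, (unsigned)slot,
                       ((desc_num + 1) & ADJACENT_MASK) ? "" : "  (next starts a new block)");
        }
        return 0;
}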
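Both callbacks also rely on the small pointer trick visible in the cyclic hunk: instead of branching on the transfer direction inside the fill loop, the code points src and dst at whichever of the two address variables plays each role, once, up front. A minimal sketch of the same idea with illustrative names (the enum stands in for dma_transfer_direction):

#include <stdint.h>
#include <stdio.h>

enum dir { MEM_TO_DEV, DEV_TO_MEM }; /* stand-in for enum dma_transfer_direction */

int main(void)
{
        uint64_t addr = 0x1000;     /* memory-side address (advances per chunk) */
        uint64_t dev_addr = 0x9000; /* device-side address */
        uint64_t *src, *dst;
        enum dir d = DEV_TO_MEM;

        /* Decide the roles once; the fill loop then just dereferences. */
        if (d == MEM_TO_DEV) {
                src = &addr;
                dst = &dev_addr;
        } else {
                src = &dev_addr;
                dst = &addr;
        }

        /* Any later update of addr/dev_addr is seen through the pointers. */
        addr += 0x100;
        printf("src=0x%llx dst=0x%llx\n",
               (unsigned long long)*src, (unsigned long long)*dst);
        /* prints: src=0x9000 dst=0x1100 */
        return 0;
}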
