Skip to content

Commit 51f3422

Browse files
talih0kartben
authored and committed
drivers: dma: dma_xmc4xxx: Add multi-block support
Adds DMA multi-block support for xmc4xxx SoCs. Signed-off-by: Andriy Gelman <[email protected]>
1 parent 57d77ac commit 51f3422

File tree

2 files changed

+170
-33
lines changed

2 files changed

+170
-33
lines changed

drivers/dma/Kconfig.xmc4xxx

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,11 @@ config DMA_XMC4XXX
99
depends on DT_HAS_INFINEON_XMC4XXX_DMA_ENABLED
1010
help
1111
DMA driver for Infineon xmc4xxx series MCUs.
12+
13+
config DMA_XMC4XXX_NUM_DESCRIPTORS
14+
int "Max DMA descriptors in a linked list"
15+
default 4
16+
depends on DMA_XMC4XXX
17+
help
18+
Maximum number of blocks in a DMA block transfer configuration.
19+
Only supported by dma0 channels 0 and 1.

drivers/dma/dma_xmc4xxx.c

Lines changed: 162 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,12 @@ LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL);
2424
#define DLR_SRSEL_RS_BITSIZE 4
2525
#define DLR_SRSEL_RS_MSK 0xf
2626

27+
#define MULTI_BLOCK_NUM_CHANNELS 2
28+
29+
#define XMC_DMA_CTLL_MEMORY_TO_MEMORY 0
30+
#define XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL 1
31+
#define XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY 2
32+
2733
#define ALL_EVENTS \
2834
(XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE | \
2935
XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE | \
@@ -38,8 +44,27 @@ struct dma_xmc4xxx_channel {
3844
uint8_t dlr_line;
3945
uint8_t channel_direction;
4046
uint8_t dest_addr_adj;
47+
bool multi_block;
48+
};
49+
50+
struct dma_xmc4xxx_descriptor {
51+
uint32_t sar; /* source address */
52+
uint32_t dar; /* destination address */
53+
uint32_t llp; /* linked-list pointer to the next descriptor or null if last descriptor */
54+
uint32_t ctll; /* control register low */
55+
uint32_t ctlh; /* control register high */
56+
uint32_t dstat; /* status register fetched from address DSTATAR after block completes*/
57+
} __packed;
58+
59+
struct dma_xmc4xxx_scatter_gather {
60+
bool enabled;
61+
uint32_t interval;
62+
uint16_t count;
4163
};
4264

65+
static struct dma_xmc4xxx_descriptor descriptor_list[MULTI_BLOCK_NUM_CHANNELS]
66+
[CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS];
67+
4368
struct dma_xmc4xxx_config {
4469
XMC_DMA_t *dma;
4570
void (*irq_configure)(void);
@@ -123,13 +148,63 @@ static void dma_xmc4xxx_isr(const struct device *dev)
123148
}
124149
}
125150

151+
static uint32_t dma_xmc4xxx_reg_ctll(struct dma_block_config *block, struct dma_config *config)
152+
{
153+
uint32_t ctll;
154+
155+
ctll = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
156+
config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
157+
block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
158+
block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
159+
config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
160+
config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
161+
BIT(GPDMA0_CH_CTLL_INT_EN_Pos);
162+
163+
/* Only GPDMA flow controller supported */
164+
if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
165+
ctll |= XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL << GPDMA0_CH_CTLL_TT_FC_Pos;
166+
}
167+
168+
if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
169+
ctll |= XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY << GPDMA0_CH_CTLL_TT_FC_Pos;
170+
}
171+
172+
if (block->source_gather_en && block->source_gather_count > 0) {
173+
ctll |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
174+
}
175+
176+
if (block->dest_scatter_en && block->dest_scatter_count > 0) {
177+
ctll |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
178+
}
179+
180+
return ctll;
181+
}
182+
183+
#define SET_CHECK_SCATTER_GATHER(type) \
184+
do { \
185+
if (block->type##_en && block->type##_count > 0 && !type.enabled) { \
186+
type.enabled = true; \
187+
type.interval = block->type##_interval; \
188+
type.count = block->type##_count; \
189+
} else if (block->type##_en && type.enabled) { \
190+
if (block->type##_interval != type.interval || \
191+
block->type##_count != type.count) { \
192+
LOG_ERR(STRINGIFY(type) " parameters must be consistent " \
193+
"across enabled blocks"); \
194+
return -EINVAL; \
195+
} \
196+
} \
197+
} while (0)
198+
126199
static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
127200
{
128201
struct dma_xmc4xxx_data *dev_data = dev->data;
129202
const struct dma_xmc4xxx_config *dev_cfg = dev->config;
130203
struct dma_block_config *block = config->head_block;
131204
XMC_DMA_t *dma = dev_cfg->dma;
132205
uint8_t dlr_line = DLR_LINE_UNSET;
206+
struct dma_xmc4xxx_scatter_gather source_gather = { 0 };
207+
struct dma_xmc4xxx_scatter_gather dest_scatter = { 0 };
133208

134209
if (channel >= dev_data->ctx.dma_channels) {
135210
LOG_ERR("Invalid channel number");
@@ -153,14 +228,14 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct
153228
return -EINVAL;
154229
}
155230

156-
if (config->block_count != 1) {
157-
LOG_ERR("Invalid block count");
231+
if (config->block_count > CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS) {
232+
LOG_ERR("Block count exceeds descriptor array size");
158233
return -EINVAL;
159234
}
160235

161-
if (block->source_gather_en || block->dest_scatter_en) {
162-
if (dma != XMC_DMA0 || channel >= 2) {
163-
LOG_ERR("Gather/scatter only supported on DMA0 on ch0 and ch1");
236+
if (block->source_gather_en || block->dest_scatter_en || config->block_count != 1) {
237+
if ((uint32_t)dma != (uint32_t)XMC_DMA0 || channel >= 2) {
238+
LOG_ERR("Multi-block and gather/scatter only supported on DMA0 on ch0 and ch1");
164239
return -EINVAL;
165240
}
166241
}
@@ -202,41 +277,79 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct
202277
XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS);
203278

204279
/* check dma slot number */
205-
dma->CH[channel].SAR = block->source_address;
206-
dma->CH[channel].DAR = block->dest_address;
207-
dma->CH[channel].LLP = 0;
280+
if (config->block_count == 1) {
281+
uint32_t ctll;
282+
283+
dma->CH[channel].SAR = block->source_address;
284+
dma->CH[channel].DAR = block->dest_address;
285+
dma->CH[channel].LLP = 0;
286+
287+
/* set number of transactions */
288+
dma->CH[channel].CTLH = block->block_size / config->source_data_size;
289+
290+
ctll = dma_xmc4xxx_reg_ctll(block, config);
291+
292+
SET_CHECK_SCATTER_GATHER(source_gather);
293+
SET_CHECK_SCATTER_GATHER(dest_scatter);
294+
295+
dma->CH[channel].CTLL = ctll;
296+
297+
} else {
298+
struct dma_xmc4xxx_descriptor *desc;
299+
300+
dma->CH[channel].LLP = (uint32_t)&descriptor_list[channel][0];
301+
dma->CH[channel].CTLL = BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
302+
BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
303+
for (int i = 0; i < config->block_count; i++) {
304+
uint32_t ctll;
305+
306+
desc = &descriptor_list[channel][i];
307+
308+
desc->sar = block->source_address;
309+
desc->dar = block->dest_address;
310+
desc->ctlh = block->block_size / config->source_data_size;
311+
312+
ctll = dma_xmc4xxx_reg_ctll(block, config);
313+
314+
if (i < config->block_count - 1) {
315+
desc->llp = (uint32_t)&descriptor_list[channel][i + 1];
316+
ctll |= BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
317+
BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
318+
} else {
319+
desc->llp = 0;
320+
}
321+
322+
desc->ctll = ctll;
323+
324+
SET_CHECK_SCATTER_GATHER(source_gather);
325+
SET_CHECK_SCATTER_GATHER(dest_scatter);
326+
327+
block = block->next_block;
328+
}
329+
}
330+
331+
block = config->head_block;
208332

209-
/* set number of transactions */
210-
dma->CH[channel].CTLH = block->block_size / config->source_data_size;
211333
/* set priority and software handshaking for src/dst. if hardware hankshaking is used */
212334
/* it will be enabled later in the code */
213335
dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) |
214336
GPDMA0_CH_CFGL_HS_SEL_SRC_Msk | GPDMA0_CH_CFGL_HS_SEL_DST_Msk;
215337

216338
dma->CH[channel].CFGH = 0;
217339

218-
dma->CH[channel].CTLL = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
219-
config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
220-
block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
221-
block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
222-
config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
223-
config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
224-
BIT(GPDMA0_CH_CTLL_INT_EN_Pos);
225-
226-
dma->CH[channel].CFGH = 0;
227340
if (config->channel_direction == MEMORY_TO_PERIPHERAL ||
228341
config->channel_direction == PERIPHERAL_TO_MEMORY) {
229342
uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot);
230343
uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot);
231344

232345
dlr_line = dlr_line_reg;
233-
if (dma == XMC_DMA0 && dlr_line > 7) {
346+
if ((uint32_t)dma == (uint32_t)XMC_DMA0 && dlr_line > 7) {
234347
LOG_ERR("Unsupported request line %d for DMA0."
235348
"Should be in range [0,7]", dlr_line);
236349
return -EINVAL;
237350
}
238351

239-
if (dma == XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
352+
if ((uint32_t)dma == (uint32_t)XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
240353
LOG_ERR("Unsupported request line %d for DMA1."
241354
"Should be in range [8,11]", dlr_line);
242355
return -EINVAL;
@@ -249,12 +362,12 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct
249362
DLR->LNEN |= BIT(dlr_line);
250363

251364
/* connect DMA Line to SR */
252-
if (dma == XMC_DMA0) {
365+
if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
253366
DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
254367
DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
255368
}
256369

257-
if (dma == XMC_DMA1) {
370+
if ((uint32_t)dma == (uint32_t)XMC_DMA1) {
258371
dlr_line_reg -= 8;
259372
DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
260373
DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
@@ -264,32 +377,36 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct
264377
if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
265378
dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4;
266379
dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos);
267-
dma->CH[channel].CTLL |= 1 << GPDMA0_CH_CTLL_TT_FC_Pos;
268380
}
269381

270382
if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
271383
dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4;
272384
dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos);
273-
dma->CH[channel].CTLL |= 2 << GPDMA0_CH_CTLL_TT_FC_Pos;
274385
}
275386
}
276387

277388
if (block->fifo_mode_control > 0) {
278389
dma->CH[channel].CFGH |= GPDMA0_CH_CFGH_FIFO_MODE_Msk;
279390
}
280391

281-
if (block->source_gather_en) {
282-
dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
392+
if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
393+
if (channel == 0 || channel == 1) {
394+
/* reset scatter/gather registers */
395+
dma->CH[channel].SGR = 0;
396+
dma->CH[channel].DSR = 0;
397+
}
398+
}
399+
400+
if (source_gather.enabled) {
283401
/* truncate if we are out of range */
284-
dma->CH[channel].SGR = (block->source_gather_interval & GPDMA0_CH_SGR_SGI_Msk) |
285-
block->source_gather_count << GPDMA0_CH_SGR_SGC_Pos;
402+
dma->CH[channel].SGR = (source_gather.interval & GPDMA0_CH_SGR_SGI_Msk) |
403+
source_gather.count << GPDMA0_CH_SGR_SGC_Pos;
286404
}
287405

288-
if (block->dest_scatter_en) {
289-
dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
406+
if (dest_scatter.enabled) {
290407
/* truncate if we are out of range */
291-
dma->CH[channel].DSR = (block->dest_scatter_interval & GPDMA0_CH_DSR_DSI_Msk) |
292-
block->dest_scatter_count << GPDMA0_CH_DSR_DSC_Pos;
408+
dma->CH[channel].DSR = (dest_scatter.interval & GPDMA0_CH_DSR_DSI_Msk) |
409+
dest_scatter.count << GPDMA0_CH_DSR_DSC_Pos;
293410
}
294411

295412
dev_data->channels[channel].cb = config->dma_callback;
@@ -301,6 +418,12 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct
301418
dev_data->channels[channel].dest_addr_adj = block->dest_addr_adj;
302419
dev_data->channels[channel].dest_address = block->dest_address;
303420

421+
if (config->block_count > 1) {
422+
dev_data->channels[channel].multi_block = true;
423+
} else {
424+
dev_data->channels[channel].multi_block = false;
425+
}
426+
304427
XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS);
305428
XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE);
306429

@@ -412,6 +535,12 @@ static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel,
412535

413536
stat->busy = XMC_DMA_CH_IsEnabled(dma, channel);
414537

538+
if (dma_channel->multi_block) {
539+
/* not supported for multi-block transfers */
540+
stat->pending_length = 0;
541+
return 0;
542+
}
543+
415544
/* Use DAR to check for transferred bytes when possible. Value CTL.BLOCK_TS does not */
416545
/* appear to guarantee that the last value is fully transferred to dest. */
417546
if (dma_channel->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {

0 commit comments

Comments
 (0)