@@ -24,6 +24,12 @@ LOG_MODULE_REGISTER(dma_xmc4xxx, CONFIG_DMA_LOG_LEVEL);
 #define DLR_SRSEL_RS_BITSIZE 4
 #define DLR_SRSEL_RS_MSK 0xf
 
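+/* multi-block transfers are limited to channels 0 and 1 of DMA0 (see the config check below) */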
+#define MULTI_BLOCK_NUM_CHANNELS 2
+
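+/* TT_FC (transfer type / flow control) field encodings, with the GPDMA as flow controller */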
+#define XMC_DMA_CTLL_MEMORY_TO_MEMORY     0
+#define XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL 1
+#define XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY 2
+
 #define ALL_EVENTS \
 	(XMC_DMA_CH_EVENT_TRANSFER_COMPLETE | XMC_DMA_CH_EVENT_BLOCK_TRANSFER_COMPLETE | \
 	 XMC_DMA_CH_EVENT_SRC_TRANSACTION_COMPLETE | XMC_DMA_CH_EVENT_DST_TRANSACTION_COMPLETE | \
@@ -38,8 +44,27 @@ struct dma_xmc4xxx_channel {
 	uint8_t dlr_line;
 	uint8_t channel_direction;
 	uint8_t dest_addr_adj;
+	bool multi_block;
+};
+
+struct dma_xmc4xxx_descriptor {
+	uint32_t sar;   /* source address */
+	uint32_t dar;   /* destination address */
+	uint32_t llp;   /* linked-list pointer to the next descriptor or null if last descriptor */
+	uint32_t ctll;  /* control register low */
+	uint32_t ctlh;  /* control register high */
+	uint32_t dstat; /* status register fetched from address DSTATAR after block completes */
+} __packed;
+
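+/* scatter/gather parameters latched from the first enabled block; SGR/DSR are */
+/* single per-channel registers, so every later block must use the same values */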
+struct dma_xmc4xxx_scatter_gather {
+	bool enabled;
+	uint32_t interval;
+	uint16_t count;
 };
 
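+/* descriptor chains for multi-block transfers; the GPDMA fetches each entry */
+/* through the channel's LLP register as the previous block completes */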
+static struct dma_xmc4xxx_descriptor descriptor_list[MULTI_BLOCK_NUM_CHANNELS]
+						     [CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS];
+
 struct dma_xmc4xxx_config {
 	XMC_DMA_t *dma;
 	void (*irq_configure)(void);
@@ -123,13 +148,63 @@ static void dma_xmc4xxx_isr(const struct device *dev)
 	}
 }
 
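+/* assemble the CTLL (control register low) value for one block; used both for */
+/* the single-block register write and for each multi-block descriptor */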
+static uint32_t dma_xmc4xxx_reg_ctll(struct dma_block_config *block, struct dma_config *config)
+{
+	uint32_t ctll;
+
+	ctll = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
+	       config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
+	       block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
+	       block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
+	       config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
+	       config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
+	       BIT(GPDMA0_CH_CTLL_INT_EN_Pos);
+
+	/* Only GPDMA flow controller supported */
+	if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
+		ctll |= XMC_DMA_CTLL_MEMORY_TO_PERIPHERAL << GPDMA0_CH_CTLL_TT_FC_Pos;
+	}
+
+	if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
+		ctll |= XMC_DMA_CTLL_PERIPHERAL_TO_MEMORY << GPDMA0_CH_CTLL_TT_FC_Pos;
+	}
+
+	if (block->source_gather_en && block->source_gather_count > 0) {
+		ctll |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
+	}
+
+	if (block->dest_scatter_en && block->dest_scatter_count > 0) {
+		ctll |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
+	}
+
+	return ctll;
+}
+
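+/* records the scatter/gather settings of the first enabled block and fails */
+/* with -EINVAL if a later block requests different settings */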
+#define SET_CHECK_SCATTER_GATHER(type) \
+	do { \
+		if (block->type##_en && block->type##_count > 0 && !type.enabled) { \
+			type.enabled = true; \
+			type.interval = block->type##_interval; \
+			type.count = block->type##_count; \
+		} else if (block->type##_en && type.enabled) { \
+			if (block->type##_interval != type.interval || \
+			    block->type##_count != type.count) { \
+				LOG_ERR(STRINGIFY(type) " parameters must be consistent " \
+					"across enabled blocks"); \
+				return -EINVAL; \
+			} \
+		} \
+	} while (0)
+
 static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 {
 	struct dma_xmc4xxx_data *dev_data = dev->data;
 	const struct dma_xmc4xxx_config *dev_cfg = dev->config;
 	struct dma_block_config *block = config->head_block;
 	XMC_DMA_t *dma = dev_cfg->dma;
 	uint8_t dlr_line = DLR_LINE_UNSET;
+	struct dma_xmc4xxx_scatter_gather source_gather = { 0 };
+	struct dma_xmc4xxx_scatter_gather dest_scatter = { 0 };
 
 	if (channel >= dev_data->ctx.dma_channels) {
 		LOG_ERR("Invalid channel number");
@@ -153,14 +228,14 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		return -EINVAL;
 	}
 
-	if (config->block_count != 1) {
-		LOG_ERR("Invalid block count");
+	if (config->block_count > CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS) {
+		LOG_ERR("Block count exceeds descriptor array size");
 		return -EINVAL;
 	}
 
-	if (block->source_gather_en || block->dest_scatter_en) {
-		if (dma != XMC_DMA0 || channel >= 2) {
-			LOG_ERR("Gather/scatter only supported on DMA0 on ch0 and ch1");
+	if (block->source_gather_en || block->dest_scatter_en || config->block_count != 1) {
+		if ((uint32_t)dma != (uint32_t)XMC_DMA0 || channel >= 2) {
+			LOG_ERR("Multi-block and gather/scatter only supported on DMA0 on ch0 and ch1");
 			return -EINVAL;
 		}
 	}
@@ -202,41 +277,79 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 	XMC_DMA_CH_ClearEventStatus(dma, channel, ALL_EVENTS);
 
 	/* check dma slot number */
-	dma->CH[channel].SAR = block->source_address;
-	dma->CH[channel].DAR = block->dest_address;
-	dma->CH[channel].LLP = 0;
+	if (config->block_count == 1) {
+		uint32_t ctll;
+
+		dma->CH[channel].SAR = block->source_address;
+		dma->CH[channel].DAR = block->dest_address;
+		dma->CH[channel].LLP = 0;
+
+		/* set number of transactions */
+		dma->CH[channel].CTLH = block->block_size / config->source_data_size;
+
+		ctll = dma_xmc4xxx_reg_ctll(block, config);
+
+		SET_CHECK_SCATTER_GATHER(source_gather);
+		SET_CHECK_SCATTER_GATHER(dest_scatter);
+
+		dma->CH[channel].CTLL = ctll;
+
+	} else {
+		struct dma_xmc4xxx_descriptor *desc;
+
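+		/* point the channel at the first descriptor; the controller reloads */
+		/* SAR/DAR/CTL from each descriptor and follows its llp link */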
+		dma->CH[channel].LLP = (uint32_t)&descriptor_list[channel][0];
+		dma->CH[channel].CTLL = BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
+					BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
+		for (int i = 0; i < config->block_count; i++) {
+			uint32_t ctll;
+
+			desc = &descriptor_list[channel][i];
+
+			desc->sar = block->source_address;
+			desc->dar = block->dest_address;
+			desc->ctlh = block->block_size / config->source_data_size;
+
+			ctll = dma_xmc4xxx_reg_ctll(block, config);
+
+			if (i < config->block_count - 1) {
+				desc->llp = (uint32_t)&descriptor_list[channel][i + 1];
+				ctll |= BIT(GPDMA0_CH_CTLL_LLP_DST_EN_Pos) |
+					BIT(GPDMA0_CH_CTLL_LLP_SRC_EN_Pos);
+			} else {
+				desc->llp = 0;
+			}
+
+			desc->ctll = ctll;
+
+			SET_CHECK_SCATTER_GATHER(source_gather);
+			SET_CHECK_SCATTER_GATHER(dest_scatter);
+
+			block = block->next_block;
+		}
+	}
+
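+	/* rewind to the head block for the channel-wide setup below */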
+	block = config->head_block;
 
-	/* set number of transactions */
-	dma->CH[channel].CTLH = block->block_size / config->source_data_size;
 	/* set priority and software handshaking for src/dst. if hardware handshaking is used */
 	/* it will be enabled later in the code */
 	dma->CH[channel].CFGL = (config->channel_priority << GPDMA0_CH_CFGL_CH_PRIOR_Pos) |
 				GPDMA0_CH_CFGL_HS_SEL_SRC_Msk | GPDMA0_CH_CFGL_HS_SEL_DST_Msk;
 
 	dma->CH[channel].CFGH = 0;
 
-	dma->CH[channel].CTLL = config->dest_data_size / 2 << GPDMA0_CH_CTLL_DST_TR_WIDTH_Pos |
-				config->source_data_size / 2 << GPDMA0_CH_CTLL_SRC_TR_WIDTH_Pos |
-				block->dest_addr_adj << GPDMA0_CH_CTLL_DINC_Pos |
-				block->source_addr_adj << GPDMA0_CH_CTLL_SINC_Pos |
-				config->dest_burst_length / 4 << GPDMA0_CH_CTLL_DEST_MSIZE_Pos |
-				config->source_burst_length / 4 << GPDMA0_CH_CTLL_SRC_MSIZE_Pos |
-				BIT(GPDMA0_CH_CTLL_INT_EN_Pos);
-
-	dma->CH[channel].CFGH = 0;
 	if (config->channel_direction == MEMORY_TO_PERIPHERAL ||
 	    config->channel_direction == PERIPHERAL_TO_MEMORY) {
 		uint8_t request_source = XMC4XXX_DMA_GET_REQUEST_SOURCE(config->dma_slot);
 		uint8_t dlr_line_reg = XMC4XXX_DMA_GET_LINE(config->dma_slot);
 
 		dlr_line = dlr_line_reg;
-		if (dma == XMC_DMA0 && dlr_line > 7) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA0 && dlr_line > 7) {
 			LOG_ERR("Unsupported request line %d for DMA0."
 				"Should be in range [0,7]", dlr_line);
 			return -EINVAL;
 		}
 
-		if (dma == XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA1 && (dlr_line < 8 || dlr_line > 11)) {
 			LOG_ERR("Unsupported request line %d for DMA1."
 				"Should be in range [8,11]", dlr_line);
 			return -EINVAL;
@@ -249,12 +362,12 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		DLR->LNEN |= BIT(dlr_line);
 
 		/* connect DMA Line to SR */
-		if (dma == XMC_DMA0) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
 			DLR->SRSEL0 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
 			DLR->SRSEL0 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
 		}
 
-		if (dma == XMC_DMA1) {
+		if ((uint32_t)dma == (uint32_t)XMC_DMA1) {
 			dlr_line_reg -= 8;
 			DLR->SRSEL1 &= ~(DLR_SRSEL_RS_MSK << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE));
 			DLR->SRSEL1 |= request_source << (dlr_line_reg * DLR_SRSEL_RS_BITSIZE);
@@ -264,32 +377,36 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 		if (config->channel_direction == MEMORY_TO_PERIPHERAL) {
 			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_DEST_PER_Pos) | 4;
 			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_DST_Pos);
-			dma->CH[channel].CTLL |= 1 << GPDMA0_CH_CTLL_TT_FC_Pos;
 		}
 
 		if (config->channel_direction == PERIPHERAL_TO_MEMORY) {
 			dma->CH[channel].CFGH = (dlr_line_reg << GPDMA0_CH_CFGH_SRC_PER_Pos) | 4;
 			dma->CH[channel].CFGL &= ~BIT(GPDMA0_CH_CFGL_HS_SEL_SRC_Pos);
-			dma->CH[channel].CTLL |= 2 << GPDMA0_CH_CTLL_TT_FC_Pos;
 		}
 	}
 
 	if (block->fifo_mode_control > 0) {
 		dma->CH[channel].CFGH |= GPDMA0_CH_CFGH_FIFO_MODE_Msk;
 	}
 
-	if (block->source_gather_en) {
-		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_SRC_GATHER_EN_Pos);
+	if ((uint32_t)dma == (uint32_t)XMC_DMA0) {
+		if (channel == 0 || channel == 1) {
+			/* reset scatter/gather registers */
+			dma->CH[channel].SGR = 0;
+			dma->CH[channel].DSR = 0;
+		}
+	}
+
+	if (source_gather.enabled) {
 		/* truncate if we are out of range */
-		dma->CH[channel].SGR = (block->source_gather_interval & GPDMA0_CH_SGR_SGI_Msk) |
-				       block->source_gather_count << GPDMA0_CH_SGR_SGC_Pos;
+		dma->CH[channel].SGR = (source_gather.interval & GPDMA0_CH_SGR_SGI_Msk) |
+				       source_gather.count << GPDMA0_CH_SGR_SGC_Pos;
 	}
 
-	if (block->dest_scatter_en) {
-		dma->CH[channel].CTLL |= BIT(GPDMA0_CH_CTLL_DST_SCATTER_EN_Pos);
+	if (dest_scatter.enabled) {
 		/* truncate if we are out of range */
-		dma->CH[channel].DSR = (block->dest_scatter_interval & GPDMA0_CH_DSR_DSI_Msk) |
-				       block->dest_scatter_count << GPDMA0_CH_DSR_DSC_Pos;
+		dma->CH[channel].DSR = (dest_scatter.interval & GPDMA0_CH_DSR_DSI_Msk) |
+				       dest_scatter.count << GPDMA0_CH_DSR_DSC_Pos;
 	}
 
 	dev_data->channels[channel].cb = config->dma_callback;
@@ -301,6 +418,12 @@ static int dma_xmc4xxx_config(const struct device *dev, uint32_t channel, struct dma_config *config)
 	dev_data->channels[channel].dest_addr_adj = block->dest_addr_adj;
 	dev_data->channels[channel].dest_address = block->dest_address;
 
+	if (config->block_count > 1) {
+		dev_data->channels[channel].multi_block = true;
+	} else {
+		dev_data->channels[channel].multi_block = false;
+	}
+
 	XMC_DMA_CH_DisableEvent(dma, channel, ALL_EVENTS);
 	XMC_DMA_CH_EnableEvent(dma, channel, XMC_DMA_CH_EVENT_TRANSFER_COMPLETE);
@@ -412,6 +535,12 @@ static int dma_xmc4xxx_get_status(const struct device *dev, uint32_t channel,
 
 	stat->busy = XMC_DMA_CH_IsEnabled(dma, channel);
 
+	if (dma_channel->multi_block) {
+		/* not supported for multi-block transfers */
+		stat->pending_length = 0;
+		return 0;
+	}
+
 	/* Use DAR to check for transferred bytes when possible. Value CTL.BLOCK_TS does not */
 	/* appear to guarantee that the last value is fully transferred to dest. */
 	if (dma_channel->dest_addr_adj == DMA_ADDR_ADJ_INCREMENT) {
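
For reference, a minimal sketch of how a caller could drive the new multi-block path through Zephyr's generic DMA API. The device handle, channel number, buffer sizes, and burst/width values below are illustrative assumptions, not part of this patch; the driver accepts multi-block configs only on DMA0 channel 0 or 1 and only up to CONFIG_DMA_XMC4XXX_NUM_DESCRIPTORS blocks:

#include <zephyr/drivers/dma.h>

static uint8_t src_a[64], src_b[64], dst_a[64], dst_b[64];

int start_two_block_copy(const struct device *dma_dev)
{
	/* second block; next_block == NULL terminates the chain */
	struct dma_block_config block1 = {
		.source_address = (uint32_t)src_b,
		.dest_address = (uint32_t)dst_b,
		.block_size = sizeof(src_b),
	};
	/* first block links to the second one; the addr_adj fields */
	/* default to DMA_ADDR_ADJ_INCREMENT (0) */
	struct dma_block_config block0 = {
		.source_address = (uint32_t)src_a,
		.dest_address = (uint32_t)dst_a,
		.block_size = sizeof(src_a),
		.next_block = &block1,
	};
	struct dma_config cfg = {
		.channel_direction = MEMORY_TO_MEMORY,
		.source_data_size = 1,  /* byte-wide transfers */
		.dest_data_size = 1,
		.source_burst_length = 4,
		.dest_burst_length = 4,
		.block_count = 2,       /* block_count > 1 selects the descriptor path */
		.head_block = &block0,
	};
	int ret;

	/* multi-block transfers are only accepted on DMA0 channel 0 or 1 */
	ret = dma_config(dma_dev, 0, &cfg);
	if (ret < 0) {
		return ret;
	}

	return dma_start(dma_dev, 0);
}

The block configs can live on the stack here because this driver consumes the whole block list inside dma_config(), copying it into the channel registers and the static descriptor_list before returning.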