@@ -55,6 +55,8 @@ struct i2s_esp32_stream_data {
	struct k_msgq queue;
	struct intr_handle_data_t *irq_handle;
	bool dma_pending;
+	uint8_t chunks_rem;
+	uint8_t chunk_idx;
};

struct i2s_esp32_stream_conf {
@@ -188,6 +190,47 @@ static void i2s_esp32_rx_callback(void *arg, int status)
		}
	}

+#if SOC_GDMA_SUPPORTED
+	const i2s_hal_context_t *hal = &(dev_cfg->hal);
+	uint16_t chunk_len;
+
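+	/*
+	 * More chunks of this memory block are still pending: advance to the next
+	 * chunk, reload and restart the DMA channel for it, and return without
+	 * handing the block to the RX queue yet.
+	 */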
+	if (stream->data->chunks_rem) {
+		uint32_t dst;
+
+		stream->data->chunk_idx++;
+		stream->data->chunks_rem--;
+		if (stream->data->chunks_rem) {
+			chunk_len = I2S_ESP32_DMA_BUFFER_MAX_SIZE;
+		} else {
+			chunk_len = stream->data->mem_block_len % I2S_ESP32_DMA_BUFFER_MAX_SIZE;
+			if (chunk_len == 0) {
+				chunk_len = I2S_ESP32_DMA_BUFFER_MAX_SIZE;
+			}
+		}
+
+		dst = (uint32_t)stream->data->mem_block + (stream->data->chunk_idx *
+							   I2S_ESP32_DMA_BUFFER_MAX_SIZE);
+		err = dma_reload(stream->conf->dma_dev, stream->conf->dma_channel, (uint32_t)NULL,
+				 (uint32_t)dst, chunk_len);
+		if (err < 0) {
+			LOG_ERR("Failed to reload DMA channel: %" PRIu32, stream->conf->dma_channel);
+			goto rx_disable;
+		}
+
+		i2s_ll_rx_set_eof_num(hal->dev, chunk_len);
+
+		err = dma_start(stream->conf->dma_dev, stream->conf->dma_channel);
+		if (err < 0) {
+			LOG_ERR("Failed to start DMA channel: %" PRIu32, stream->conf->dma_channel);
+			goto rx_disable;
+		}
+
+		stream->data->dma_pending = true;
+
+		return;
+	}
+#endif /* SOC_GDMA_SUPPORTED */
+
	struct queue_item item = {
		.buffer = stream->data->mem_block,
		.size = stream->data->mem_block_len
@@ -259,16 +302,9 @@ static int i2s_esp32_rx_start_transfer(const struct device *dev)
	}
	stream->data->mem_block_len = stream->data->i2s_cfg.block_size;

-	i2s_hal_rx_stop(hal);
-	i2s_hal_rx_reset(hal);
-#if !SOC_GDMA_SUPPORTED
-	i2s_hal_rx_reset_dma(hal);
-#endif /* !SOC_GDMA_SUPPORTED */
-	i2s_hal_rx_reset_fifo(hal);
-
	err = i2s_esp32_start_dma(dev, I2S_DIR_RX);
	if (err < 0) {
-		LOG_DBG("Failed to start RX DMA transfer: %d", err);
+		LOG_ERR("Failed to start RX DMA transfer: %d", err);
		return -EIO;
	}

@@ -358,7 +394,6 @@ static void i2s_esp32_tx_callback(void *arg, int status)
			stream->data->state = I2S_STATE_READY;
			goto tx_disable;
		}
-		/*else: DRAIN trigger, so continue until queue is empty*/
	}

	if (stream->data->last_block) {
@@ -369,7 +404,7 @@ static void i2s_esp32_tx_callback(void *arg, int status)
	err = k_msgq_get(&stream->data->queue, &item, K_NO_WAIT);
	if (err < 0) {
		stream->data->state = I2S_STATE_ERROR;
-		LOG_WRN("TX queue empty: %d", err);
+		LOG_ERR("TX queue empty: %d", err);
		goto tx_disable;
	}

@@ -430,16 +465,9 @@ static int i2s_esp32_tx_start_transfer(const struct device *dev)
	stream->data->mem_block = item.buffer;
	stream->data->mem_block_len = item.size;

-	i2s_hal_tx_stop(hal);
-	i2s_hal_tx_reset(hal);
-#if !SOC_GDMA_SUPPORTED
-	i2s_hal_tx_reset_dma(hal);
-#endif /* !SOC_GDMA_SUPPORTED */
-	i2s_hal_tx_reset_fifo(hal);
-
	err = i2s_esp32_start_dma(dev, I2S_DIR_TX);
	if (err < 0) {
-		LOG_DBG("Failed to start TX DMA transfer: %d", err);
+		LOG_ERR("Failed to start TX DMA transfer: %d", err);
		return -EIO;
	}

@@ -512,19 +540,19 @@ int i2s_esp32_config_dma(const struct device *dev, enum i2s_dir dir,

	err = dma_config(stream->conf->dma_dev, stream->conf->dma_channel, &dma_cfg);
	if (err < 0) {
-		LOG_DBG("Failed to configure DMA channel: %" PRIu32, stream->conf->dma_channel);
+		LOG_ERR("Failed to configure DMA channel: %" PRIu32, stream->conf->dma_channel);
		return -EINVAL;
	}
#else
	lldesc_t *desc_iter = stream->conf->dma_desc;

	if (!mem_block) {
-		LOG_DBG("At least one dma block is required");
+		LOG_ERR("At least one dma block is required");
		return -EINVAL;
	}

	if (!esp_ptr_dma_capable((void *)mem_block)) {
-		LOG_DBG("Buffer is not in DMA capable memory: %p",
+		LOG_ERR("Buffer is not in DMA capable memory: %p",
			(uint32_t *)mem_block);

		return -EINVAL;
@@ -566,7 +594,7 @@ int i2s_esp32_config_dma(const struct device *dev, enum i2s_dir dir,

	if (desc_iter->empty) {
		stream->data->dma_pending = false;
-		LOG_DBG("Run out of descriptors. Increase CONFIG_I2S_ESP32_DMA_DESC_NUM_MAX");
+		LOG_ERR("Run out of descriptors. Increase CONFIG_I2S_ESP32_DMA_DESC_NUM_MAX");
		return -EINVAL;
	}
#endif /* SOC_GDMA_SUPPORTED */
@@ -587,39 +615,62 @@ static int i2s_esp32_start_dma(const struct device *dev, enum i2s_dir dir)
	} else if (dir == I2S_DIR_TX) {
		stream = &dev_cfg->tx;
	} else {
-		LOG_DBG("Invalid DMA direction");
+		LOG_ERR("Invalid DMA direction");
		return -EINVAL;
	}

	key = irq_lock();

	err = i2s_esp32_config_dma(dev, dir, stream);
	if (err < 0) {
-		LOG_DBG("Dma configuration failed: %i", err);
+		LOG_ERR("Dma configuration failed: %i", err);
		goto unlock;
	}

+#if I2S_ESP32_IS_DIR_EN(rx)
	if (dir == I2S_DIR_RX) {
-		i2s_ll_rx_set_eof_num(hal->dev, stream->data->mem_block_len);
+		uint16_t chunk_len;
+
+#if SOC_GDMA_SUPPORTED
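+		/*
+		 * With GDMA, a block larger than I2S_ESP32_DMA_BUFFER_MAX_SIZE is
+		 * received in chunks of at most that size; chunks_rem counts the
+		 * chunks remaining after this first one.
+		 */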
+		if (stream->data->mem_block_len < I2S_ESP32_DMA_BUFFER_MAX_SIZE) {
+			chunk_len = stream->data->mem_block_len;
+			stream->data->chunks_rem = 0;
+		} else {
+			chunk_len = I2S_ESP32_DMA_BUFFER_MAX_SIZE;
+			stream->data->chunks_rem = ((stream->data->mem_block_len +
+						    (I2S_ESP32_DMA_BUFFER_MAX_SIZE - 1)) /
+						    I2S_ESP32_DMA_BUFFER_MAX_SIZE) - 1;
+		}
+		stream->data->chunk_idx = 0;
+#else
+		chunk_len = stream->data->mem_block_len;
+#endif
+		i2s_ll_rx_set_eof_num(hal->dev, chunk_len);
	}
+#endif /* I2S_ESP32_IS_DIR_EN(rx) */

#if SOC_GDMA_SUPPORTED
	err = dma_start(stream->conf->dma_dev, stream->conf->dma_channel);
	if (err < 0) {
-		LOG_DBG("Failed to start DMA channel: %" PRIu32, stream->conf->dma_channel);
+		LOG_ERR("Failed to start DMA channel: %" PRIu32, stream->conf->dma_channel);
		goto unlock;
	}
	stream->data->dma_pending = true;
#else
+#if I2S_ESP32_IS_DIR_EN(rx)
	if (dir == I2S_DIR_RX) {
		i2s_hal_rx_enable_dma(hal);
		i2s_hal_rx_enable_intr(hal);
		i2s_hal_rx_start_link(hal, (uint32_t)&(stream->conf->dma_desc[0]));
-	} else {
+	}
+#endif /* I2S_ESP32_IS_DIR_EN(rx) */
+
+#if I2S_ESP32_IS_DIR_EN(tx)
+	if (dir == I2S_DIR_TX) {
		i2s_hal_tx_enable_dma(hal);
		i2s_hal_tx_enable_intr(hal);
		i2s_hal_tx_start_link(hal, (uint32_t)&(stream->conf->dma_desc[0]));
	}
+#endif /* I2S_ESP32_IS_DIR_EN(tx) */
#endif /* SOC_GDMA_SUPPORTED */

unlock:
@@ -646,61 +697,74 @@ static int i2s_esp32_restart_dma(const struct device *dev, enum i2s_dir dir)
#if SOC_GDMA_SUPPORTED
	void *src = NULL, *dst = NULL;

-	if (dir == I2S_DIR_RX) {
#if I2S_ESP32_IS_DIR_EN(rx)
+	uint16_t chunk_len;
+
+	if (dir == I2S_DIR_RX) {
		dst = stream->data->mem_block;
+
+		if (stream->data->mem_block_len < I2S_ESP32_DMA_BUFFER_MAX_SIZE) {
+			chunk_len = stream->data->mem_block_len;
+			stream->data->chunks_rem = 0;
+		} else {
+			chunk_len = I2S_ESP32_DMA_BUFFER_MAX_SIZE;
+			stream->data->chunks_rem = ((stream->data->mem_block_len +
+						    (I2S_ESP32_DMA_BUFFER_MAX_SIZE - 1)) /
+						    I2S_ESP32_DMA_BUFFER_MAX_SIZE) - 1;
+		}
+		stream->data->chunk_idx = 0;
+	}
#endif /* I2S_ESP32_IS_DIR_EN(rx) */
-	} else {
+
#if I2S_ESP32_IS_DIR_EN(tx)
+	if (dir == I2S_DIR_TX) {
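+		/* TX is not chunked: the whole memory block is reloaded at once. */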
		src = stream->data->mem_block;
-#endif /* I2S_ESP32_IS_DIR_EN(tx) */
+		chunk_len = stream->data->mem_block_len;
	}
+#endif /* I2S_ESP32_IS_DIR_EN(tx) */

	err = dma_reload(stream->conf->dma_dev, stream->conf->dma_channel, (uint32_t)src,
-			 (uint32_t)dst, stream->data->mem_block_len);
+			 (uint32_t)dst, chunk_len);
	if (err < 0) {
-		LOG_DBG("Failed to reload DMA channel: %" PRIu32, stream->conf->dma_channel);
-	} else {
+		LOG_ERR("Failed to reload DMA channel: %" PRIu32, stream->conf->dma_channel);
+		return -EIO;
+	}
+
#if I2S_ESP32_IS_DIR_EN(rx)
-		if (dir == I2S_DIR_RX) {
-			i2s_ll_rx_set_eof_num(hal->dev, stream->data->mem_block_len);
-		}
+	if (dir == I2S_DIR_RX) {
+		i2s_ll_rx_set_eof_num(hal->dev, chunk_len);
+	}
#endif /* I2S_ESP32_IS_DIR_EN(rx) */

-		err = dma_start(stream->conf->dma_dev, stream->conf->dma_channel);
-		if (err < 0) {
-			LOG_DBG("Failed to start DMA channel: %" PRIu32, stream->conf->dma_channel);
-			return -EIO;
-		}
+	err = dma_start(stream->conf->dma_dev, stream->conf->dma_channel);
+	if (err < 0) {
+		LOG_ERR("Failed to start DMA channel: %" PRIu32, stream->conf->dma_channel);
+		return -EIO;
	}
-	stream->data->dma_pending = true;
#else
	err = i2s_esp32_config_dma(dev, dir, stream);
	if (err < 0) {
-		LOG_DBG("Failed to configure DMA");
-	} else {
-		if (dir == I2S_DIR_RX) {
+		LOG_ERR("Failed to configure DMA");
+		return -EIO;
+	}
+
#if I2S_ESP32_IS_DIR_EN(rx)
-			i2s_ll_rx_set_eof_num(hal->dev, stream->data->mem_block_len);
-			i2s_hal_rx_enable_intr(hal);
-			i2s_hal_rx_enable_dma(hal);
-			i2s_hal_rx_start_link(hal, (uint32_t)stream->conf->dma_desc);
+	if (dir == I2S_DIR_RX) {
+		i2s_ll_rx_set_eof_num(hal->dev, stream->data->mem_block_len);
+		i2s_hal_rx_start_link(hal, (uint32_t)stream->conf->dma_desc);
+	}
#endif /* I2S_ESP32_IS_DIR_EN(rx) */
-		} else {
+
#if I2S_ESP32_IS_DIR_EN(tx)
-			i2s_hal_tx_enable_intr(hal);
-			i2s_hal_tx_enable_dma(hal);
-			i2s_hal_tx_start_link(hal, (uint32_t)stream->conf->dma_desc);
-#endif /* I2S_ESP32_IS_DIR_EN(tx) */
-		}
+	if (dir == I2S_DIR_TX) {
+		i2s_hal_tx_start_link(hal, (uint32_t)stream->conf->dma_desc);
	}
+#endif /* I2S_ESP32_IS_DIR_EN(tx) */
#endif /* SOC_GDMA_SUPPORTED */

-	if (err < 0) {
-		LOG_ERR("Error restarting DMA: %i", err);
-	}
+	stream->data->dma_pending = true;

-	return err;
+	return 0;
}

static int i2s_esp32_initialize(const struct device *dev)
@@ -1116,6 +1180,8 @@ static const struct i2s_config *i2s_esp32_config_get(const struct device *dev, e
static int i2s_esp32_trigger_stream(const struct device *dev, const struct i2s_esp32_stream *stream,
				    enum i2s_dir dir, enum i2s_trigger_cmd cmd)
{
+	const struct i2s_esp32_cfg *dev_cfg = dev->config;
+	const i2s_hal_context_t *hal = &dev_cfg->hal;
	unsigned int key;
	int err;

@@ -1127,6 +1193,23 @@ static int i2s_esp32_trigger_stream(const struct device *dev, const struct i2s_e
		}

		key = irq_lock();
+
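+		/*
+		 * Stop and reset the channel and its FIFO (and, without GDMA, its
+		 * DMA) before starting the transfer; this was previously done inside
+		 * the rx/tx start_transfer helpers.
+		 */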
+		if (dir == I2S_DIR_RX) {
+			i2s_hal_rx_stop(hal);
+			i2s_hal_rx_reset(hal);
+#if !SOC_GDMA_SUPPORTED
+			i2s_hal_rx_reset_dma(hal);
+#endif /* !SOC_GDMA_SUPPORTED */
+			i2s_hal_rx_reset_fifo(hal);
+		} else if (dir == I2S_DIR_TX) {
+			i2s_hal_tx_stop(hal);
+			i2s_hal_tx_reset(hal);
+#if !SOC_GDMA_SUPPORTED
+			i2s_hal_tx_reset_dma(hal);
+#endif /* !SOC_GDMA_SUPPORTED */
+			i2s_hal_tx_reset_fifo(hal);
+		}
+
		err = stream->conf->start_transfer(dev);
		if (err < 0) {
			LOG_ERR("START - Transfer start failed: %d", err);
@@ -1166,8 +1249,8 @@ static int i2s_esp32_trigger_stream(const struct device *dev, const struct i2s_e
			return -EIO;
		}

-		if (dir == I2S_DIR_TX) {
#if I2S_ESP32_IS_DIR_EN(tx)
+		if (dir == I2S_DIR_TX) {
			if (k_msgq_num_used_get(&stream->data->queue) > 0 ||
			    stream->data->dma_pending) {
				stream->data->stop_without_draining = false;
@@ -1176,9 +1259,11 @@ static int i2s_esp32_trigger_stream(const struct device *dev, const struct i2s_e
				stream->conf->stop_transfer(dev);
				stream->data->state = I2S_STATE_READY;
			}
+		}
#endif /* I2S_ESP32_IS_DIR_EN(tx) */
-		} else if (dir == I2S_DIR_RX) {
+
#if I2S_ESP32_IS_DIR_EN(rx)
+		if (dir == I2S_DIR_RX) {
			if (stream->data->dma_pending) {
				stream->data->stop_without_draining = true;
				stream->data->state = I2S_STATE_STOPPING;
@@ -1187,12 +1272,8 @@ static int i2s_esp32_trigger_stream(const struct device *dev, const struct i2s_e
				stream->data->last_block = true;
				stream->data->state = I2S_STATE_READY;
			}
-#endif /* I2S_ESP32_IS_DIR_EN(rx) */
-		} else {
-			irq_unlock(key);
-			LOG_ERR("Invalid direction: %d", (int)dir);
-			return -EINVAL;
		}
+#endif /* I2S_ESP32_IS_DIR_EN(rx) */

		irq_unlock(key);
		break;