
Commit 5c97f96

Merge branch 'features/twai_send_isr' into 'master'
features(twai): Added support for calling twai_node_transmit() from ISR contexts

See merge request espressif/esp-idf!41383
2 parents 47a659c + e263cd2 commit 5c97f96

8 files changed (+246, −18 lines)


components/esp_driver_twai/Kconfig

Lines changed: 7 additions & 0 deletions

@@ -8,6 +8,13 @@ menu "ESP-Driver:TWAI Configurations"
         help
             Place the TWAI ISR in to IRAM to reduce latency and increase performance
 
+    config TWAI_IO_FUNC_IN_IRAM
+        bool "Place TWAI I/O functions in IRAM"
+        select TWAI_OBJ_CACHE_SAFE
+        default n
+        help
+            Place certain TWAI I/O functions (like twai_transmit) in IRAM to reduce latency
+
     config TWAI_ISR_CACHE_SAFE
         bool "Allow TWAI ISR execute when cache disabled" if !SPI_FLASH_AUTO_SUSPEND
         select TWAI_ISR_IN_IRAM

components/esp_driver_twai/esp_twai.c

Lines changed: 2 additions & 2 deletions

@@ -136,8 +136,8 @@ esp_err_t twai_node_get_info(twai_node_handle_t node, twai_node_status_t *status
 
 esp_err_t twai_node_transmit(twai_node_handle_t node, const twai_frame_t *frame, int timeout_ms)
 {
-    ESP_RETURN_ON_FALSE(node && frame, ESP_ERR_INVALID_ARG, TAG, "invalid argument: null");
-    ESP_RETURN_ON_FALSE(node->transmit, ESP_ERR_NOT_SUPPORTED, TAG, "transmit is not supported");
+    ESP_RETURN_ON_FALSE_ISR(node && frame, ESP_ERR_INVALID_ARG, TAG, "invalid argument: null");
+    ESP_RETURN_ON_FALSE_ISR(node->transmit, ESP_ERR_NOT_SUPPORTED, TAG, "transmit is not supported");
 
     return node->transmit(node, frame, timeout_ms);
 }

components/esp_driver_twai/esp_twai_onchip.c

Lines changed: 34 additions & 11 deletions

@@ -582,15 +582,15 @@ static esp_err_t _node_queue_tx(twai_node_handle_t node, const twai_frame_t *fra
 {
     twai_onchip_ctx_t *twai_ctx = __containerof(node, twai_onchip_ctx_t, api_base);
     if (frame->header.dlc && frame->buffer_len) {
-        ESP_RETURN_ON_FALSE(frame->header.dlc == twaifd_len2dlc(frame->buffer_len), ESP_ERR_INVALID_ARG, TAG, "unmatched dlc and buffer_len");
+        ESP_RETURN_ON_FALSE_ISR(frame->header.dlc == twaifd_len2dlc(frame->buffer_len), ESP_ERR_INVALID_ARG, TAG, "unmatched dlc and buffer_len");
     }
 #if !SOC_TWAI_SUPPORT_FD
-    ESP_RETURN_ON_FALSE(!frame->header.fdf || frame->buffer_len <= TWAI_FRAME_MAX_LEN, ESP_ERR_INVALID_ARG, TAG, "fdf flag or buffer_len not supported");
+    ESP_RETURN_ON_FALSE_ISR(!frame->header.fdf || frame->buffer_len <= TWAI_FRAME_MAX_LEN, ESP_ERR_INVALID_ARG, TAG, "fdf flag or buffer_len not supported");
 #endif
-    ESP_RETURN_ON_FALSE(frame->buffer_len <= (frame->header.fdf ? TWAIFD_FRAME_MAX_LEN : TWAI_FRAME_MAX_LEN), ESP_ERR_INVALID_ARG, TAG, "illegal transfer length (buffer_len %ld)", frame->buffer_len);
-    ESP_RETURN_ON_FALSE((!frame->header.brs) || (twai_ctx->valid_fd_timing), ESP_ERR_INVALID_ARG, TAG, "brs can't be used without config data_timing");
-    ESP_RETURN_ON_FALSE(!twai_ctx->hal->enable_listen_only, ESP_ERR_NOT_SUPPORTED, TAG, "node is config as listen only");
-    ESP_RETURN_ON_FALSE(atomic_load(&twai_ctx->state) != TWAI_ERROR_BUS_OFF, ESP_ERR_INVALID_STATE, TAG, "node is bus off");
+    ESP_RETURN_ON_FALSE_ISR(frame->buffer_len <= (frame->header.fdf ? TWAIFD_FRAME_MAX_LEN : TWAI_FRAME_MAX_LEN), ESP_ERR_INVALID_ARG, TAG, "illegal transfer length (buffer_len %ld)", frame->buffer_len);
+    ESP_RETURN_ON_FALSE_ISR((!frame->header.brs) || (twai_ctx->valid_fd_timing), ESP_ERR_INVALID_ARG, TAG, "brs can't be used without config data_timing");
+    ESP_RETURN_ON_FALSE_ISR(!twai_ctx->hal->enable_listen_only, ESP_ERR_NOT_SUPPORTED, TAG, "node is config as listen only");
+    ESP_RETURN_ON_FALSE_ISR(atomic_load(&twai_ctx->state) != TWAI_ERROR_BUS_OFF, ESP_ERR_INVALID_STATE, TAG, "node is bus off");
     TickType_t ticks_to_wait = (timeout == -1) ? portMAX_DELAY : pdMS_TO_TICKS(timeout);
 
     xEventGroupClearBits(twai_ctx->event_group, TWAI_IDLE_EVENT_BIT); //going to send, clear the idle event
@@ -599,15 +599,38 @@ static esp_err_t _node_queue_tx(twai_node_handle_t node, const twai_frame_t *fra
         twai_ctx->p_curr_tx = frame;
         _node_start_trans(twai_ctx);
     } else {
-        //options in following steps (in_queue->2nd_check->pop_queue) should exec ASAP
-        //within about 50us (minimum time for one msg), to ensure data safe
-        ESP_RETURN_ON_FALSE(xQueueSend(twai_ctx->tx_mount_queue, &frame, ticks_to_wait), ESP_ERR_TIMEOUT, TAG, "tx queue full");
+        // Hardware busy, need to queue the frame
+        BaseType_t is_isr_context = xPortInIsrContext();
+        BaseType_t yield_required = pdFALSE;
+
+        if (is_isr_context) {
+            // In ISR context - use ISR-safe queue operations
+            ESP_RETURN_ON_FALSE_ISR(xQueueSendFromISR(twai_ctx->tx_mount_queue, &frame, &yield_required), ESP_ERR_TIMEOUT, TAG, "tx queue full");
+        } else {
+            // In task context - use normal queue operations
+            ESP_RETURN_ON_FALSE(xQueueSend(twai_ctx->tx_mount_queue, &frame, ticks_to_wait), ESP_ERR_TIMEOUT, TAG, "tx queue full");
+        }
+
+        // Second chance check for hardware availability
         false_var = false;
         if (atomic_compare_exchange_strong(&twai_ctx->hw_busy, &false_var, true)) {
-            if (xQueueReceive(twai_ctx->tx_mount_queue, &twai_ctx->p_curr_tx, 0) != pdTRUE) {
+            BaseType_t dequeue_result;
+            if (is_isr_context) {
+                dequeue_result = xQueueReceiveFromISR(twai_ctx->tx_mount_queue, &twai_ctx->p_curr_tx, &yield_required);
+            } else {
+                dequeue_result = xQueueReceive(twai_ctx->tx_mount_queue, &twai_ctx->p_curr_tx, 0);
+            }
+
+            if (dequeue_result == pdTRUE) {
+                _node_start_trans(twai_ctx);
+            } else {
                 assert(false && "should always get frame at this moment");
             }
-            _node_start_trans(twai_ctx);
+        }
+
+        // Handle ISR yield if required
+        if (is_isr_context && yield_required) {
+            portYIELD_FROM_ISR();
         }
     }
     return ESP_OK;

components/esp_driver_twai/linker.lf

Lines changed: 4 additions & 0 deletions

@@ -7,6 +7,10 @@ entries:
     esp_twai_onchip: _node_start_trans (noflash)
     esp_twai_onchip: _node_parse_rx (noflash)
 
+    if TWAI_IO_FUNC_IN_IRAM = y:
+        esp_twai_onchip: _node_queue_tx (noflash)
+        esp_twai: twai_node_transmit (noflash)
+
 [mapping:twai_hal]
 archive: libhal.a
 entries:

components/esp_driver_twai/test_apps/test_twai/main/test_twai_common.cpp

Lines changed: 138 additions & 1 deletion

@@ -48,6 +48,8 @@ TEST_CASE("twai install uninstall (loopback)", "[twai]")
     twai_onchip_node_config_t node_config = {};
     node_config.io_cfg.tx = TEST_TX_GPIO;
     node_config.io_cfg.rx = TEST_TX_GPIO; // Using same pin for test without transceiver
+    node_config.io_cfg.quanta_clk_out = GPIO_NUM_NC;
+    node_config.io_cfg.bus_off_indicator = GPIO_NUM_NC;
     node_config.bit_timing.bitrate = 1000000;
     node_config.tx_queue_depth = TEST_TWAI_QUEUE_DEPTH;
     node_config.flags.enable_self_test = true;
@@ -126,7 +128,7 @@ static void test_twai_baudrate_correctness(twai_clock_source_t clk_src, uint32_t
     TEST_ESP_OK(uart_detect_bitrate_start(UART_NUM_1, &detect_config));
 
     twai_frame_t tx_frame = {};
-    tx_frame.header.id = 0x55555;
+    tx_frame.header.id = 0x15555555;
     tx_frame.header.dlc = 8;
     tx_frame.header.ide = true;
     tx_frame.buffer = (uint8_t []) {
@@ -183,6 +185,8 @@ TEST_CASE("twai transmit stop resume (loopback)", "[twai]")
     twai_onchip_node_config_t node_config = {};
     node_config.io_cfg.tx = TEST_TX_GPIO;
     node_config.io_cfg.rx = TEST_TX_GPIO; // Using same pin for test without transceiver
+    node_config.io_cfg.quanta_clk_out = GPIO_NUM_NC;
+    node_config.io_cfg.bus_off_indicator = GPIO_NUM_NC;
     node_config.bit_timing.bitrate = 200000;
     node_config.tx_queue_depth = TEST_TWAI_QUEUE_DEPTH;
     node_config.flags.enable_self_test = true;
@@ -280,6 +284,8 @@ TEST_CASE("twai mask filter (loopback)", "[twai]")
     twai_onchip_node_config_t node_config = {};
     node_config.io_cfg.tx = TEST_TX_GPIO;
     node_config.io_cfg.rx = TEST_TX_GPIO; // Using same pin for test without transceiver
+    node_config.io_cfg.quanta_clk_out = GPIO_NUM_NC;
+    node_config.io_cfg.bus_off_indicator = GPIO_NUM_NC;
     node_config.bit_timing.bitrate = 1000000;
     node_config.tx_queue_depth = TEST_TWAI_QUEUE_DEPTH;
     node_config.flags.enable_self_test = true;
@@ -363,6 +369,8 @@ TEST_CASE("twai dual 16bit mask filter (loopback)", "[twai]")
     twai_onchip_node_config_t node_config = {};
     node_config.io_cfg.tx = TEST_TX_GPIO;
     node_config.io_cfg.rx = TEST_TX_GPIO; // Using same pin for test without transceiver
+    node_config.io_cfg.quanta_clk_out = GPIO_NUM_NC;
+    node_config.io_cfg.bus_off_indicator = GPIO_NUM_NC;
     node_config.bit_timing.bitrate = 1000000;
     node_config.tx_queue_depth = TEST_TWAI_QUEUE_DEPTH;
     node_config.flags.enable_self_test = true;
@@ -436,6 +444,8 @@ TEST_CASE("twai driver cache safe (loopback)", "[twai]")
     twai_onchip_node_config_t node_config = {};
     node_config.io_cfg.tx = TEST_TX_GPIO;
     node_config.io_cfg.rx = TEST_TX_GPIO; // Using same pin for test without transceiver
+    node_config.io_cfg.quanta_clk_out = GPIO_NUM_NC;
+    node_config.io_cfg.bus_off_indicator = GPIO_NUM_NC;
     node_config.bit_timing.bitrate = 50000; //slow bitrate to ensure cache disabled before tx_queue finish
     node_config.tx_queue_depth = TEST_FRAME_NUM;
     node_config.flags.enable_loopback = true;
@@ -577,3 +587,130 @@ TEST_CASE("twai tx_wait_all_done thread safe", "[twai]")
     TEST_ESP_OK(twai_node_disable(node_hdl));
     TEST_ESP_OK(twai_node_delete(node_hdl));
 }
+
+// Test data for ISR send functionality
+typedef struct {
+    twai_node_handle_t node;
+    uint32_t rx_count;
+    uint32_t tx_isr_send_count;
+    uint32_t rx_isr_send_count;
+    bool test_completed;
+} isr_send_test_ctx_t;
+
+static IRAM_ATTR bool test_tx_isr_send_cb(twai_node_handle_t handle, const twai_tx_done_event_data_t *edata, void *user_ctx)
+{
+    isr_send_test_ctx_t *ctx = (isr_send_test_ctx_t *)user_ctx;
+
+    // Test sending from TX ISR context
+    if (ctx->tx_isr_send_count < 3) {
+        twai_frame_t isr_frame = {};
+        isr_frame.header.id = 0x200 + ctx->tx_isr_send_count;
+        isr_frame.header.dlc = 1;
+        isr_frame.buffer = (uint8_t*)(&ctx->tx_isr_send_count);
+        isr_frame.buffer_len = 1;
+
+        esp_err_t err = twai_node_transmit(handle, &isr_frame, 0); // timeout must be 0 in ISR
+        if (err == ESP_OK) {
+            ctx->tx_isr_send_count++;
+        }
+    }
+
+    return false;
+}
+
+static IRAM_ATTR bool test_rx_isr_send_cb(twai_node_handle_t handle, const twai_rx_done_event_data_t *edata, void *user_ctx)
+{
+    isr_send_test_ctx_t *ctx = (isr_send_test_ctx_t *)user_ctx;
+    twai_frame_t rx_frame = {};
+    uint8_t buffer[8];
+    rx_frame.buffer = buffer;
+    rx_frame.buffer_len = sizeof(buffer);
+
+    if (ESP_OK == twai_node_receive_from_isr(handle, &rx_frame)) {
+        ctx->rx_count++;
+
+        // Test sending from RX ISR context (response pattern)
+        if ((rx_frame.header.id >= 0x100) && (rx_frame.header.id < 0x103) && (ctx->rx_isr_send_count < 3)) {
+            twai_frame_t response_frame = {};
+            response_frame.header.id = 0x300 + ctx->rx_isr_send_count;
+            response_frame.header.dlc = 1;
+            response_frame.buffer = (uint8_t*)(&ctx->rx_isr_send_count);
+            response_frame.buffer_len = 1;
+
+            esp_err_t err = twai_node_transmit(handle, &response_frame, 0); // timeout must be 0 in ISR
+            if (err == ESP_OK) {
+                ctx->rx_isr_send_count++;
+            }
+        }
+
+        // Mark test completed when we receive expected frames
+        if (ctx->rx_count >= 9) { // 3 initial + 3 tx_isr + 3 rx_isr responses
+            ctx->test_completed = true;
+        }
+    }
+    return false;
+}
+
+TEST_CASE("twai send from ISR context (loopback)", "[twai]")
+{
+    isr_send_test_ctx_t test_ctx = {};
+
+    twai_onchip_node_config_t node_config = {};
+    node_config.io_cfg.tx = TEST_TX_GPIO;
+    node_config.io_cfg.rx = TEST_TX_GPIO; // Using same pin for test without transceiver
+    node_config.io_cfg.quanta_clk_out = GPIO_NUM_NC;
+    node_config.io_cfg.bus_off_indicator = GPIO_NUM_NC;
+    node_config.bit_timing.bitrate = 500000;
+    node_config.tx_queue_depth = 10; // Larger queue for ISR sends
+    node_config.flags.enable_self_test = true;
+    node_config.flags.enable_loopback = true;
+
+    TEST_ESP_OK(twai_new_node_onchip(&node_config, &test_ctx.node));
+
+    twai_event_callbacks_t user_cbs = {};
+    user_cbs.on_rx_done = test_rx_isr_send_cb;
+    user_cbs.on_tx_done = test_tx_isr_send_cb;
+    TEST_ESP_OK(twai_node_register_event_callbacks(test_ctx.node, &user_cbs, &test_ctx));
+    TEST_ESP_OK(twai_node_enable(test_ctx.node));
+
+    printf("Testing ISR context sending...\n");
+
+    // Send initial frames to trigger RX ISR responses
+    for (int i = 0; i < 3; i++) {
+        twai_frame_t trigger_frame = {};
+        trigger_frame.header.id = 0x100 + i;
+        trigger_frame.header.dlc = 1;
+        trigger_frame.buffer = (uint8_t[]) {
+            (uint8_t)i
+        };
+        trigger_frame.buffer_len = 1;
+
+        TEST_ESP_OK(twai_node_transmit(test_ctx.node, &trigger_frame, 500));
+        printf("Sent trigger frame 0x%" PRIx32 "\n", trigger_frame.header.id);
+        vTaskDelay(pdMS_TO_TICKS(50)); // Allow ISR processing
+    }
+
+    // Wait for test completion
+    int timeout_count = 0;
+    while (!test_ctx.test_completed && timeout_count < 100) {
+        vTaskDelay(pdMS_TO_TICKS(10));
+        timeout_count++;
+    }
+
+    printf("Test results:\n");
+    printf("  RX count: %" PRIu32 "\n", test_ctx.rx_count);
+    printf("  TX ISR sends: %" PRIu32 "\n", test_ctx.tx_isr_send_count);
+    printf("  RX ISR sends: %" PRIu32 "\n", test_ctx.rx_isr_send_count);
+    printf("  Test completed: %s\n", test_ctx.test_completed ? "YES" : "NO");
+
+    // Verify test results
+    TEST_ASSERT_TRUE(test_ctx.test_completed);
+    TEST_ASSERT_EQUAL_UINT32(3, test_ctx.tx_isr_send_count); // 3 sends from TX ISR
+    TEST_ASSERT_EQUAL_UINT32(3, test_ctx.rx_isr_send_count); // 3 sends from RX ISR
+    TEST_ASSERT_GREATER_OR_EQUAL_UINT32(9, test_ctx.rx_count); // At least 9 received frames
+
+    TEST_ESP_OK(twai_node_disable(test_ctx.node));
+    TEST_ESP_OK(twai_node_delete(test_ctx.node));
+
+    printf("ISR send test passed!\n");
+}

Lines changed: 1 addition & 0 deletions

@@ -1,6 +1,7 @@
 CONFIG_COMPILER_DUMP_RTL_FILES=y
 CONFIG_TWAI_ISR_CACHE_SAFE=y
 CONFIG_FREERTOS_IN_IRAM=y
+CONFIG_TWAI_IO_FUNC_IN_IRAM=y
 CONFIG_COMPILER_OPTIMIZATION_NONE=y
 CONFIG_COMPILER_OPTIMIZATION_CHECKS_SILENT=y
 CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE=y

docs/en/api-reference/peripherals/twai.rst

Lines changed: 30 additions & 2 deletions

@@ -122,7 +122,7 @@ To reduce performance overhead caused by memory copying, the TWAI driver uses po
     ESP_ERROR_CHECK(twai_node_transmit(node_hdl, &tx_msg, 0)); // Timeout = 0: returns immediately if queue is full
     ESP_ERROR_CHECK(twai_node_transmit_wait_all_done(node_hdl, -1)); // Wait for transmission to finish
 
-In this example, :cpp:member:`twai_frame_t::header::id` specifies the ID of the message as 0x01. Message IDs are typically used to indicate the type of message in an application and also play a role in bus arbitration during transmission—lower values indicate higher priority on the bus. :cpp:member:`twai_frame_t::buffer` points to the memory address where the data to be transmitted is stored, and :cpp:member:`twai_frame_t::buffer_len` specifies the length of that data.
+In this example, :cpp:member:`twai_frame_t::header::id` specifies the ID of the message as 0x01. Message IDs are typically used to indicate the type of message in an application and also play a role in bus arbitration during transmission—lower values indicate higher priority on the bus. :cpp:member:`twai_frame_t::buffer` points to the memory address where the data to be transmitted is stored, and :cpp:member:`twai_frame_t::buffer_len` specifies the length of that data. The :cpp:func:`twai_node_transmit` function is thread-safe and can also be called from an ISR. When called from an ISR, the ``timeout`` parameter is ignored, and the function will not block.
 
 Note that :cpp:member:`twai_frame_t::header::dlc` can also specify the length of the data in the frame. The DLC (Data Length Code) is mapped to the actual data length as defined in ISO 11898-1. You can use either :cpp:func:`twaifd_dlc2len` or :cpp:func:`twaifd_len2dlc` for conversion. If both dlc and buffer_len are non-zero, they must represent the same length.
 
@@ -181,6 +181,32 @@ After understanding the basic usage, you can further explore more advanced capab
 .. image:: ../../../_static/diagrams/twai/full_flow.drawio.svg
     :align: center
 
+Transmit from ISR
+^^^^^^^^^^^^^^^^^
+
+The TWAI driver supports transmitting messages from an Interrupt Service Routine (ISR). This is particularly useful for applications requiring low-latency responses or periodic transmissions triggered by hardware timers. For example, you can trigger a new transmission from within the ``on_tx_done`` callback, which is executed in an ISR context.
+
+.. code:: c
+
+    static bool twai_tx_done_cb(twai_node_handle_t handle, const twai_tx_done_event_data_t *edata, void *user_ctx)
+    {
+        // A frame has been successfully transmitted. Queue another one.
+        // The frame and its data buffer must be valid until transmission is complete.
+        static const uint8_t data_buffer[] = {1, 2, 3, 4};
+        static const twai_frame_t tx_frame = {
+            .header.id = 0x2,
+            .buffer = (uint8_t *)data_buffer,
+            .buffer_len = sizeof(data_buffer),
+        };
+
+        // The `twai_node_transmit` is safe to be called in an ISR context
+        twai_node_transmit(handle, &tx_frame, 0);
+        return false;
+    }
+
+.. note::
+    When calling :cpp:func:`twai_node_transmit` from an ISR, the ``timeout`` parameter is ignored, and the function will not block. If the transmit queue is full, the function will return immediately with an error. It is the application's responsibility to handle cases where the queue is full.
+
 Bit Timing Customization
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -318,7 +344,9 @@ The driver guarantees thread safety for all public TWAI APIs. You can safely cal
 Performance
 ^^^^^^^^^^^
 
-To improve the real-time performance of interrupt handling, the driver provides the :ref:`CONFIG_TWAI_ISR_IN_IRAM` option. When enabled, the TWAI ISR (Interrupt Service Routine) is placed in internal RAM, reducing latency caused by instruction fetching from Flash.
+To improve the real-time performance of interrupt handling, the driver provides the :ref:`CONFIG_TWAI_ISR_IN_IRAM` option. When enabled, the TWAI ISR (Interrupt Service Routine) and receive operations are placed in internal RAM, reducing latency caused by instruction fetching from Flash.
+
+For applications that require high-performance transmit operations, the driver provides the :ref:`CONFIG_TWAI_IO_FUNC_IN_IRAM` option to place transmit functions in IRAM. This is particularly beneficial for time-critical applications that frequently call :cpp:func:`twai_node_transmit` from user tasks.
 
 .. note::