Skip to content

Commit 5cbd1f6

Browse files
SynchronicIT authored and nashif committed
usb: device_next: cdc_acm: Prevent polling for buffer in TX
In case the host doesn't pull the new data from the endpoint, the work task would schedule itself again delayed (at the max. priority). When there is no terminal program or active application reading the endpoint, this results in constant polling of the endpoint, burning up to 5% of the CPU cycles. By using an atomic flag for TX busy, the polling is solved and changed into a postponed execution of the next work task, which saves up to 5% of CPU cycles and allows better real-time behavior for other tasks. Secondly, if the TX interrupt is disabled but there is still data in the TX FIFO (ring buffer), the implementation will continue to trigger subsequent TX work and attempt to flush the data to the host. Signed-off-by: Vincent van der Locht <[email protected]>
1 parent 744338f commit 5cbd1f6

File tree

1 file changed

+23
-1
lines changed

1 file changed

+23
-1
lines changed

subsys/usb/device_next/class/usbd_cdc_acm.c

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ UDC_BUF_POOL_DEFINE(cdc_acm_ep_pool,
4949
#define CDC_ACM_IRQ_RX_ENABLED 2
5050
#define CDC_ACM_IRQ_TX_ENABLED 3
5151
#define CDC_ACM_RX_FIFO_BUSY 4
52+
#define CDC_ACM_TX_FIFO_BUSY 5
5253

5354
static struct k_work_q cdc_acm_work_q;
5455
static K_KERNEL_STACK_DEFINE(cdc_acm_stack,
@@ -228,6 +229,10 @@ static int usbd_cdc_acm_request(struct usbd_class_data *const c_data,
228229
atomic_clear_bit(&data->state, CDC_ACM_RX_FIFO_BUSY);
229230
}
230231

232+
if (bi->ep == cdc_acm_get_bulk_in(c_data)) {
233+
atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
234+
}
235+
231236
goto ep_request_error;
232237
}
233238

@@ -250,6 +255,14 @@ static int usbd_cdc_acm_request(struct usbd_class_data *const c_data,
250255
if (data->cb) {
251256
cdc_acm_work_submit(&data->irq_cb_work);
252257
}
258+
259+
atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
260+
261+
if (!ring_buf_is_empty(data->tx_fifo.rb)) {
262+
/* Queue pending TX data on IN endpoint */
263+
cdc_acm_work_schedule(&data->tx_fifo_work, K_NO_WAIT);
264+
}
265+
253266
}
254267

255268
if (bi->ep == cdc_acm_get_int_in(c_data)) {
@@ -548,8 +561,14 @@ static void cdc_acm_tx_fifo_handler(struct k_work *work)
548561
return;
549562
}
550563

564+
if (atomic_test_and_set_bit(&data->state, CDC_ACM_TX_FIFO_BUSY)) {
565+
LOG_DBG("TX transfer already in progress");
566+
return;
567+
}
568+
551569
buf = cdc_acm_buf_alloc(cdc_acm_get_bulk_in(c_data));
552570
if (buf == NULL) {
571+
atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
553572
cdc_acm_work_schedule(&data->tx_fifo_work, K_MSEC(1));
554573
return;
555574
}
@@ -561,6 +580,7 @@ static void cdc_acm_tx_fifo_handler(struct k_work *work)
561580
if (ret) {
562581
LOG_ERR("Failed to enqueue");
563582
net_buf_unref(buf);
583+
atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
564584
}
565585
}
566586

@@ -828,7 +848,9 @@ static void cdc_acm_irq_cb_handler(struct k_work *work)
828848

829849
if (data->tx_fifo.altered) {
830850
LOG_DBG("tx fifo altered, submit work");
831-
cdc_acm_work_schedule(&data->tx_fifo_work, K_NO_WAIT);
851+
if (!atomic_test_bit(&data->state, CDC_ACM_TX_FIFO_BUSY)) {
852+
cdc_acm_work_schedule(&data->tx_fifo_work, K_NO_WAIT);
853+
}
832854
}
833855

834856
if (atomic_test_bit(&data->state, CDC_ACM_IRQ_RX_ENABLED) &&

0 commit comments

Comments
 (0)