
Commit d62a69c

nordic-krch authored and nordicjm committed
[nrf fromtree] drivers: serial: nrfx_uarte: Refactor to use RX async struct pointer
Refactor the RX asynchronous API functions to use a pointer to the RX async data structure instead of the top-level data structure pointer. This improves readability and makes the code more concise.

Signed-off-by: Krzysztof Chruściński <[email protected]>

(cherry picked from commit d82808e)
1 parent 6bd3a7e commit d62a69c
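The change is mechanical: each affected function takes a local struct uarte_async_rx pointer to data->async->rx once and then accesses all RX state through it. A minimal, self-contained sketch of the pattern is shown below; the struct names and fields are simplified stand-ins for illustration only, not the driver's real definitions (those live in drivers/serial/uart_nrfx_uarte.c).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical, reduced stand-ins for the driver's data structures. */
struct async_rx_state {
	uint8_t *buf;
	size_t buf_len;
	size_t offset;
	bool enabled;
};

struct async_state {
	struct async_rx_state rx;
};

struct drv_data {
	struct async_state *async;
};

static void rx_setup(struct drv_data *data, uint8_t *buf, size_t len)
{
	/* Take the pointer to the nested RX state once... */
	struct async_rx_state *async_rx = &data->async->rx;

	/* ...so every access below avoids repeating data->async->rx. */
	async_rx->buf = buf;
	async_rx->buf_len = len;
	async_rx->offset = 0;
	async_rx->enabled = true;
}

int main(void)
{
	static struct async_state async;
	struct drv_data data = { .async = &async };
	uint8_t buf[32];

	rx_setup(&data, buf, sizeof(buf));

	return data.async->rx.enabled ? 0 : 1;
}

The diff below applies exactly this substitution across the driver's RX enable, buffer-response, disable, timeout, and ISR paths.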

File tree

1 file changed (+66, -64 lines)


drivers/serial/uart_nrfx_uarte.c

Lines changed: 66 additions & 64 deletions
@@ -816,6 +816,7 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
 				int32_t timeout)
 {
 	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
 	const struct uarte_nrfx_config *cfg = dev->config;
 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
 	int ret = 0;
@@ -829,34 +830,34 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
 	 * for the RXTO event after a call to uart_rx_disable() to discard
 	 * data from the UARTE internal RX FIFO.
 	 */
-	if (data->async->rx.enabled || data->async->rx.discard_fifo) {
+	if (async_rx->enabled || async_rx->discard_fifo) {
 		return -EBUSY;
 	}
 
-	data->async->rx.timeout = timeout;
-	data->async->rx.timeout_slab = timeout / RX_TIMEOUT_DIV;
+	async_rx->timeout = timeout;
+	async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV;
 
-	data->async->rx.buf = buf;
-	data->async->rx.buf_len = len;
-	data->async->rx.offset = 0;
-	data->async->rx.next_buf = NULL;
-	data->async->rx.next_buf_len = 0;
+	async_rx->buf = buf;
+	async_rx->buf_len = len;
+	async_rx->offset = 0;
+	async_rx->next_buf = NULL;
+	async_rx->next_buf_len = 0;
 
 	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
-		if (data->async->rx.flush_cnt) {
-			int cpy_len = MIN(len, data->async->rx.flush_cnt);
+		if (async_rx->flush_cnt) {
+			int cpy_len = MIN(len, async_rx->flush_cnt);
 
-			memcpy(buf, data->async->rx.flush_buffer, cpy_len);
+			memcpy(buf, async_rx->flush_buffer, cpy_len);
 			buf += cpy_len;
 			len -= cpy_len;
 
 			/* If flush content filled whole new buffer complete the
 			 * request and indicate rx being disabled.
 			 */
 			if (!len) {
-				data->async->rx.flush_cnt -= cpy_len;
+				async_rx->flush_cnt -= cpy_len;
 				notify_uart_rx_rdy(dev, cpy_len);
-				rx_buf_release(dev, &data->async->rx.buf);
+				rx_buf_release(dev, &async_rx->buf);
 				notify_rx_disable(dev);
 				return 0;
 			}
@@ -868,7 +869,7 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
 	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
 
-	data->async->rx.enabled = true;
+	async_rx->enabled = true;
 	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
 		unsigned int key = irq_lock();
 
@@ -885,15 +886,16 @@ static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
 				 size_t len)
 {
 	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
 	int err;
 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
 	unsigned int key = irq_lock();
 
-	if (data->async->rx.buf == NULL) {
+	if (async_rx->buf == NULL) {
 		err = -EACCES;
-	} else if (data->async->rx.next_buf == NULL) {
-		data->async->rx.next_buf = buf;
-		data->async->rx.next_buf_len = len;
+	} else if (async_rx->next_buf == NULL) {
+		async_rx->next_buf = buf;
+		async_rx->next_buf_len = len;
 		nrf_uarte_rx_buffer_set(uarte, buf, len);
 		nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
 		err = 0;
@@ -925,19 +927,20 @@ static int uarte_nrfx_callback_set(const struct device *dev,
 static int uarte_nrfx_rx_disable(const struct device *dev)
 {
 	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
 
-	if (data->async->rx.buf == NULL) {
+	if (async_rx->buf == NULL) {
 		return -EFAULT;
 	}
-	if (data->async->rx.next_buf != NULL) {
+	if (async_rx->next_buf != NULL) {
 		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
 		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
 	}
 
-	k_timer_stop(&data->async->rx.timer);
-	data->async->rx.enabled = false;
-	data->async->rx.discard_fifo = true;
+	k_timer_stop(&async_rx->timer);
+	async_rx->enabled = false;
+	async_rx->discard_fifo = true;
 
 	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
 
@@ -961,11 +964,12 @@ static void tx_timeout(struct k_timer *timer)
 static void rx_timeout(struct k_timer *timer)
 {
 	struct uarte_nrfx_data *data = k_timer_user_data_get(timer);
+	struct uarte_async_rx *async_rx = &data->async->rx;
 	const struct device *dev = data->dev;
 	const struct uarte_nrfx_config *cfg = dev->config;
 	uint32_t read;
 
-	if (data->async->rx.is_in_irq) {
+	if (async_rx->is_in_irq) {
 		return;
 	}
 
@@ -978,21 +982,20 @@ static void rx_timeout(struct k_timer *timer)
 	if (HW_RX_COUNTING_ENABLED(cfg)) {
 		read = nrfx_timer_capture(&cfg->timer, 0);
 	} else {
-		read = data->async->rx.cnt.cnt;
+		read = async_rx->cnt.cnt;
 	}
 
 	/* Check if data was received since last function call */
-	if (read != data->async->rx.total_byte_cnt) {
-		data->async->rx.total_byte_cnt = read;
-		data->async->rx.timeout_left = data->async->rx.timeout;
+	if (read != async_rx->total_byte_cnt) {
+		async_rx->total_byte_cnt = read;
+		async_rx->timeout_left = async_rx->timeout;
 	}
 
 	/* Check if there is data that was not sent to user yet
 	 * Note though that 'len' is a count of data bytes received, but not
 	 * necessarily the amount available in the current buffer
 	 */
-	int32_t len = data->async->rx.total_byte_cnt
-		- data->async->rx.total_user_byte_cnt;
+	int32_t len = async_rx->total_byte_cnt - async_rx->total_user_byte_cnt;
 
 	if (!HW_RX_COUNTING_ENABLED(cfg) &&
 	    (len < 0)) {
@@ -1001,7 +1004,7 @@ static void rx_timeout(struct k_timer *timer)
 		 * At this point, the number of received bytes is at least
 		 * equal to what was reported to the user.
 		 */
-		data->async->rx.cnt.cnt = data->async->rx.total_user_byte_cnt;
+		async_rx->cnt.cnt = async_rx->total_user_byte_cnt;
 		len = 0;
 	}
 
@@ -1013,31 +1016,28 @@ static void rx_timeout(struct k_timer *timer)
 	 */
 	bool clipped = false;
 
-	if (len + data->async->rx.offset > data->async->rx.buf_len) {
-		len = data->async->rx.buf_len - data->async->rx.offset;
+	if (len + async_rx->offset > async_rx->buf_len) {
+		len = async_rx->buf_len - async_rx->offset;
 		clipped = true;
 	}
 
 	if (len > 0) {
-		if (clipped ||
-			(data->async->rx.timeout_left
-				< data->async->rx.timeout_slab)) {
+		if (clipped || (async_rx->timeout_left < async_rx->timeout_slab)) {
 			/* rx_timeout us elapsed since last receiving */
-			if (data->async->rx.buf != NULL) {
+			if (async_rx->buf != NULL) {
 				notify_uart_rx_rdy(dev, len);
-				data->async->rx.offset += len;
-				data->async->rx.total_user_byte_cnt += len;
+				async_rx->offset += len;
+				async_rx->total_user_byte_cnt += len;
 			}
 		} else {
-			data->async->rx.timeout_left -=
-				data->async->rx.timeout_slab;
+			async_rx->timeout_left -= async_rx->timeout_slab;
 		}
 
 		/* If there's nothing left to report until the buffers are
 		 * switched then the timer can be stopped
 		 */
 		if (clipped) {
-			k_timer_stop(&data->async->rx.timer);
+			k_timer_stop(&async_rx->timer);
 		}
 	}
 
@@ -1083,22 +1083,23 @@ static void rxstarted_isr(const struct device *dev)
 static void endrx_isr(const struct device *dev)
 {
 	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
 	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
 
-	data->async->rx.is_in_irq = true;
+	async_rx->is_in_irq = true;
 
 	/* ensure rx timer is stopped - it will be restarted in RXSTARTED
 	 * handler if needed
 	 */
-	k_timer_stop(&data->async->rx.timer);
+	k_timer_stop(&async_rx->timer);
 
 	/* this is the amount that the EasyDMA controller has copied into the
 	 * buffer
 	 */
 	const int rx_amount = nrf_uarte_rx_amount_get(uarte) +
-				data->async->rx.flush_cnt;
+				async_rx->flush_cnt;
 
-	data->async->rx.flush_cnt = 0;
+	async_rx->flush_cnt = 0;
 
 	/* The 'rx_offset' can be bigger than 'rx_amount', so it the length
 	 * of data we report back the user may need to be clipped.
@@ -1107,25 +1108,25 @@ static void endrx_isr(const struct device *dev)
 	 * here to handle this buffer. (The next buffer is now already active
 	 * because of the ENDRX_STARTRX shortcut)
 	 */
-	int rx_len = rx_amount - data->async->rx.offset;
+	int rx_len = rx_amount - async_rx->offset;
 
 	if (rx_len < 0) {
 		rx_len = 0;
 	}
 
-	data->async->rx.total_user_byte_cnt += rx_len;
+	async_rx->total_user_byte_cnt += rx_len;
 
 	/* Only send the RX_RDY event if there is something to send */
 	if (rx_len > 0) {
 		notify_uart_rx_rdy(dev, rx_len);
 	}
 
-	if (!data->async->rx.enabled) {
-		data->async->rx.is_in_irq = false;
+	if (!async_rx->enabled) {
+		async_rx->is_in_irq = false;
 		return;
 	}
 
-	rx_buf_release(dev, &data->async->rx.buf);
+	rx_buf_release(dev, &async_rx->buf);
 
 	/* If there is a next buffer, then STARTRX will have already been
 	 * invoked by the short (the next buffer will be filling up already)
@@ -1134,13 +1135,13 @@ static void endrx_isr(const struct device *dev)
 	 */
 	unsigned int key = irq_lock();
 
-	if (data->async->rx.next_buf) {
-		data->async->rx.buf = data->async->rx.next_buf;
-		data->async->rx.buf_len = data->async->rx.next_buf_len;
-		data->async->rx.next_buf = NULL;
-		data->async->rx.next_buf_len = 0;
+	if (async_rx->next_buf) {
+		async_rx->buf = async_rx->next_buf;
+		async_rx->buf_len = async_rx->next_buf_len;
+		async_rx->next_buf = NULL;
+		async_rx->next_buf_len = 0;
 
-		data->async->rx.offset = 0;
+		async_rx->offset = 0;
 		/* Check is based on assumption that ISR handler handles
 		 * ENDRX before RXSTARTED so if short was set on time, RXSTARTED
 		 * event will be set.
@@ -1156,7 +1157,7 @@ static void endrx_isr(const struct device *dev)
 
 	irq_unlock(key);
 
-	data->async->rx.is_in_irq = false;
+	async_rx->is_in_irq = false;
 }
 
 /* Function for flushing internal RX fifo. Function can be called in case
@@ -1253,9 +1254,10 @@ static void rxto_isr(const struct device *dev)
 {
 	const struct uarte_nrfx_config *config = dev->config;
 	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
 
-	rx_buf_release(dev, &data->async->rx.buf);
-	rx_buf_release(dev, &data->async->rx.next_buf);
+	rx_buf_release(dev, &async_rx->buf);
+	rx_buf_release(dev, &async_rx->next_buf);
 
 	/* This point can be reached in two cases:
 	 * 1. RX is disabled because all provided RX buffers have been filled.
@@ -1265,17 +1267,17 @@ static void rxto_isr(const struct device *dev)
 	 * In the second case, additionally, data from the UARTE internal RX
 	 * FIFO need to be discarded.
 	 */
-	data->async->rx.enabled = false;
-	if (data->async->rx.discard_fifo) {
+	async_rx->enabled = false;
+	if (async_rx->discard_fifo) {
 		uint8_t flushed;
 
-		data->async->rx.discard_fifo = false;
+		async_rx->discard_fifo = false;
 		flushed = rx_flush(dev, NULL, 0);
 		if (HW_RX_COUNTING_ENABLED(config)) {
 			/* It need to be included because TIMER+PPI got RXDRDY events
 			 * and counted those flushed bytes.
 			 */
-			data->async->rx.total_user_byte_cnt += flushed;
+			async_rx->total_user_byte_cnt += flushed;
 		}
 	}
 
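Not part of this commit, but useful context: the refactored functions back Zephyr's asynchronous UART API (uart_rx_enable(), uart_rx_buf_rsp(), uart_rx_disable()). The sketch below shows roughly how an application drives these paths with CONFIG_UART_ASYNC_API=y; the uart0 node label, buffer sizes, timeout value, and sleep duration are placeholder choices for illustration, not anything prescribed by the commit.

#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>

static uint8_t rx_bufs[2][64];

static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	static int next;

	switch (evt->type) {
	case UART_RX_RDY:
		/* evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len
		 * fresh bytes; this is the event notify_uart_rx_rdy() produces.
		 */
		break;
	case UART_RX_BUF_REQUEST:
		/* Provide the next buffer; handled by uarte_nrfx_rx_buf_rsp(). */
		next ^= 1;
		uart_rx_buf_rsp(dev, rx_bufs[next], sizeof(rx_bufs[next]));
		break;
	case UART_RX_DISABLED:
		/* Reached once the driver's rxto_isr() has torn RX down. */
		break;
	default:
		break;
	}
}

int main(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));

	if (!device_is_ready(uart)) {
		return 1;
	}

	uart_callback_set(uart, uart_cb, NULL);

	/* Enters uarte_nrfx_rx_enable(); 10000 us is the inactivity timeout
	 * that rx_timeout() divides into RX_TIMEOUT_DIV slabs.
	 */
	uart_rx_enable(uart, rx_bufs[0], sizeof(rx_bufs[0]), 10000);

	k_sleep(K_SECONDS(5));

	/* Ends up in uarte_nrfx_rx_disable() and, later, rxto_isr(). */
	uart_rx_disable(uart);

	return 0;
}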