@@ -875,6 +875,78 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
 static void rx_timeout(struct k_timer *timer);
 static void tx_timeout(struct k_timer *timer);
 
+static void user_callback(const struct device *dev, struct uart_event *evt)
+{
+	struct uarte_nrfx_data *data = dev->data;
+
+	if (data->async->user_callback) {
+		data->async->user_callback(dev, evt, data->async->user_data);
+	}
+}
+
+static void rx_buf_release(const struct device *dev, uint8_t *buf)
+{
+	struct uart_event evt = {
+		.type = UART_RX_BUF_RELEASED,
+		.data.rx_buf.buf = buf,
+	};
+
+	user_callback(dev, &evt);
+}
+
+static void notify_rx_disable(const struct device *dev)
+{
+	const struct uarte_nrfx_config *cfg = dev->config;
+	struct uart_event evt = {
+		.type = UART_RX_DISABLED,
+	};
+
+	if (LOW_POWER_ENABLED(cfg)) {
+		uint32_t key = irq_lock();
+
+		uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX);
+		irq_unlock(key);
+	}
+
+	user_callback(dev, (struct uart_event *)&evt);
+
+	/* Runtime PM is put after the callback. In case uart is re-enabled from that
+	 * callback we avoid suspending/resuming the device.
+	 */
+	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
+		pm_device_runtime_put_async(dev, K_NO_WAIT);
+	}
+}
+
+static int uarte_nrfx_rx_disable(const struct device *dev)
+{
+	struct uarte_nrfx_data *data = dev->data;
+	struct uarte_async_rx *async_rx = &data->async->rx;
+	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
+	int key;
+
+	if (async_rx->buf == NULL) {
+		return -EFAULT;
+	}
+
+	k_timer_stop(&async_rx->timer);
+
+	key = irq_lock();
+
+	if (async_rx->next_buf != NULL) {
+		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
+		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
+	}
+
+	async_rx->enabled = false;
+	async_rx->discard_fifo = true;
+
+	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
+	irq_unlock(key);
+
+	return 0;
+}
+
 #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX)
 static void timer_handler(nrf_timer_event_t event_type, void *p_context) { }
 
@@ -1073,15 +1145,6 @@ static int uarte_nrfx_tx_abort(const struct device *dev)
 	return 0;
 }
 
-static void user_callback(const struct device *dev, struct uart_event *evt)
-{
-	struct uarte_nrfx_data *data = dev->data;
-
-	if (data->async->user_callback) {
-		data->async->user_callback(dev, evt, data->async->user_data);
-	}
-}
-
 static void notify_uart_rx_rdy(const struct device *dev, size_t len)
 {
 	struct uarte_nrfx_data *data = dev->data;
@@ -1095,29 +1158,6 @@ static void notify_uart_rx_rdy(const struct device *dev, size_t len)
 	user_callback(dev, &evt);
 }
 
-static void rx_buf_release(const struct device *dev, uint8_t *buf)
-{
-	struct uart_event evt = {
-		.type = UART_RX_BUF_RELEASED,
-		.data.rx_buf.buf = buf,
-	};
-
-	user_callback(dev, &evt);
-}
-
-static void notify_rx_disable(const struct device *dev)
-{
-	struct uart_event evt = {
-		.type = UART_RX_DISABLED,
-	};
-
-	user_callback(dev, (struct uart_event *)&evt);
-
-	if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
-		pm_device_runtime_put_async(dev, K_NO_WAIT);
-	}
-}
-
 #ifdef UARTE_HAS_FRAME_TIMEOUT
 static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout)
 {
@@ -1344,35 +1384,6 @@ static int uarte_nrfx_callback_set(const struct device *dev,
 	return 0;
 }
 
-static int uarte_nrfx_rx_disable(const struct device *dev)
-{
-	struct uarte_nrfx_data *data = dev->data;
-	struct uarte_async_rx *async_rx = &data->async->rx;
-	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
-	int key;
-
-	if (async_rx->buf == NULL) {
-		return -EFAULT;
-	}
-
-	k_timer_stop(&async_rx->timer);
-
-	key = irq_lock();
-
-	if (async_rx->next_buf != NULL) {
-		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
-		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
-	}
-
-	async_rx->enabled = false;
-	async_rx->discard_fifo = true;
-
-	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
-	irq_unlock(key);
-
-	return 0;
-}
-
 static void tx_timeout(struct k_timer *timer)
 {
 	const struct device *dev = k_timer_user_data_get(timer);
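For context, here is a minimal application-side sketch of the situation the relocated notify_rx_disable() comment describes: a user callback that re-enables RX from within the UART_RX_DISABLED event, which the patched driver handles without a spurious suspend/resume because the runtime-PM put is issued only after the callback returns. This is not part of the patch; the device name, buffer sizes and the uart0 devicetree label are assumptions for illustration, and it requires CONFIG_UART_ASYNC_API=y.

/* Application-side sketch (not part of this commit). Assumes a devicetree
 * node labelled uart0 and CONFIG_UART_ASYNC_API=y; names are illustrative.
 */
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

#define RX_BUF_LEN    64
#define RX_TIMEOUT_US 10000

static const struct device *const uart_dev = DEVICE_DT_GET(DT_NODELABEL(uart0));
static uint8_t rx_buf[2][RX_BUF_LEN];
static uint8_t next_buf_idx;

static void uart_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	switch (evt->type) {
	case UART_RX_BUF_REQUEST:
		/* Hand over the spare buffer so reception continues seamlessly. */
		uart_rx_buf_rsp(dev, rx_buf[next_buf_idx], RX_BUF_LEN);
		next_buf_idx ^= 1;
		break;
	case UART_RX_RDY:
		/* evt->data.rx.buf + evt->data.rx.offset holds evt->data.rx.len new bytes. */
		break;
	case UART_RX_DISABLED:
		/* Re-enable reception directly from the callback. With the driver
		 * change above, the runtime-PM put happens after this callback
		 * returns, so the device is not suspended and resumed in between.
		 */
		next_buf_idx = 1;
		uart_rx_enable(dev, rx_buf[0], RX_BUF_LEN, RX_TIMEOUT_US);
		break;
	default:
		break;
	}
}

int rx_start(void)
{
	int err = uart_callback_set(uart_dev, uart_cb, NULL);

	if (err) {
		return err;
	}

	next_buf_idx = 1;
	return uart_rx_enable(uart_dev, rx_buf[0], RX_BUF_LEN, RX_TIMEOUT_US);
}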