Skip to content

Commit 71d3ac0

Browse files
committed
[nrf fromlist] drivers: serial: nrfx_uarte: Add support for DMM and cache
Add support for DMM, which manages cache and dedicated memory spaces. Add support for data cache for buffers which are not DMM managed. Upstream PR: zephyrproject-rtos/zephyr#75462 Signed-off-by: Krzysztof Chruściński <[email protected]>
1 parent 7185cbf commit 71d3ac0

File tree

1 file changed

+108
-1
lines changed

1 file changed

+108
-1
lines changed

drivers/serial/uart_nrfx_uarte.c

Lines changed: 108 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,15 +22,21 @@
2222
#include <helpers/nrfx_gppi.h>
2323
#include <zephyr/logging/log.h>
2424
LOG_MODULE_REGISTER(uart_nrfx_uarte, 0);
25-
25+
#ifdef CONFIG_HAS_NORDIC_DMM
26+
#include <dmm.h>
27+
#else
2628
#define DMM_MEMORY_SECTION(...)
29+
#define DMM_IS_REG_CACHEABLE(...) false
30+
#endif
2731

2832
#define RX_FLUSH_WORKAROUND 1
2933

3034
#define UARTE(idx) DT_NODELABEL(uart##idx)
3135
#define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop)
3236
#define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop)
3337

38+
#define UARTE_IS_CACHEABLE(idx) DMM_IS_REG_CACHEABLE(DT_PHANDLE(UARTE(idx), memory_regions))
39+
3440
/* Execute macro f(x) for all instances. */
3541
#define UARTE_FOR_EACH_INSTANCE(f, sep, off_code, ...) \
3642
NRFX_FOREACH_PRESENT(UARTE, f, sep, off_code, __VA_ARGS__)
@@ -91,6 +97,12 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, 0);
9197
#define UARTE_HAS_FRAME_TIMEOUT 1
9298
#endif
9399

100+
#define INSTANCE_NEEDS_CACHE_MGMT(unused, prefix, i, prop) UARTE_IS_CACHEABLE(prefix##i)
101+
102+
#if UARTE_FOR_EACH_INSTANCE(INSTANCE_NEEDS_CACHE_MGMT, (+), (0), _)
103+
#define UARTE_ANY_CACHE 1
104+
#endif
105+
94106
/*
95107
* RX timeout is divided into time slabs, this define tells how many divisions
96108
* should be made. More divisions - higher timeout accuracy and processor usage.
@@ -115,6 +127,10 @@ struct uarte_async_tx {
115127

116128
struct uarte_async_rx {
117129
struct k_timer timer;
130+
#ifdef CONFIG_HAS_NORDIC_DMM
131+
uint8_t *usr_buf;
132+
uint8_t *next_usr_buf;
133+
#endif
118134
uint8_t *buf;
119135
size_t buf_len;
120136
size_t offset;
@@ -236,6 +252,9 @@ struct uarte_nrfx_config {
236252
uint32_t flags;
237253
bool disable_rx;
238254
const struct pinctrl_dev_config *pcfg;
255+
#ifdef CONFIG_HAS_NORDIC_DMM
256+
void *mem_reg;
257+
#endif
239258
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
240259
/* None-zero in case of high speed instances. Baudrate is adjusted by that ratio. */
241260
uint32_t clock_freq;
@@ -582,6 +601,10 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
582601
}
583602
#endif
584603

604+
if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
605+
sys_cache_data_flush_range((void *)buf, len);
606+
}
607+
585608
nrf_uarte_tx_buffer_set(uarte, buf, len);
586609
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
587610
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);
@@ -881,6 +904,19 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
881904
return -EBUSY;
882905
}
883906

907+
#ifdef CONFIG_HAS_NORDIC_DMM
908+
uint8_t *dma_buf;
909+
int ret;
910+
911+
ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf);
912+
if (ret < 0) {
913+
return ret;
914+
}
915+
916+
rdata->usr_buf = buf;
917+
buf = dma_buf;
918+
#endif
919+
884920
#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
885921
#ifdef UARTE_HAS_FRAME_TIMEOUT
886922
if (timeout && (timeout != SYS_FOREVER_US)) {
@@ -913,8 +949,18 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
913949
if (rdata->flush_cnt) {
914950
int cpy_len = MIN(len, rdata->flush_cnt);
915951

952+
if (IS_ENABLED(UARTE_ANY_CACHE) &&
953+
(cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
954+
sys_cache_data_invd_range(cfg->rx_flush_buf, cpy_len);
955+
}
956+
916957
memcpy(buf, cfg->rx_flush_buf, cpy_len);
917958

959+
if (IS_ENABLED(UARTE_ANY_CACHE) &&
960+
(cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
961+
sys_cache_data_flush_range(buf, cpy_len);
962+
}
963+
918964
buf += cpy_len;
919965
len -= cpy_len;
920966

@@ -925,6 +971,11 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
925971
rdata->flush_cnt -= cpy_len;
926972
memmove(cfg->rx_flush_buf, &cfg->rx_flush_buf[cpy_len],
927973
rdata->flush_cnt);
974+
if (IS_ENABLED(UARTE_ANY_CACHE) &&
975+
(cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) {
976+
sys_cache_data_flush_range(cfg->rx_flush_buf,
977+
rdata->flush_cnt);
978+
}
928979
atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO);
929980
NRFX_IRQ_PENDING_SET(nrfx_get_irq_number(uarte));
930981
return 0;
@@ -969,6 +1020,17 @@ static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
9691020
if (rdata->buf == NULL) {
9701021
err = -EACCES;
9711022
} else if (rdata->next_buf == NULL) {
1023+
#ifdef CONFIG_HAS_NORDIC_DMM
1024+
uint8_t *dma_buf;
1025+
const struct uarte_nrfx_config *config = dev->config;
1026+
1027+
err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf);
1028+
if (err < 0) {
1029+
return err;
1030+
}
1031+
rdata->next_usr_buf = buf;
1032+
buf = dma_buf;
1033+
#endif
9721034
rdata->next_buf = buf;
9731035
rdata->next_buf_len = len;
9741036
nrf_uarte_rx_buffer_set(uarte, buf, len);
@@ -1230,6 +1292,14 @@ static void endrx_isr(const struct device *dev)
12301292
*/
12311293
const int rx_amount = nrf_uarte_rx_amount_get(uarte) + rdata->flush_cnt;
12321294

1295+
#ifdef CONFIG_HAS_NORDIC_DMM
1296+
const struct uarte_nrfx_config *config = dev->config;
1297+
int err = dmm_buffer_in_release(config->mem_reg, rdata->usr_buf, rx_amount, rdata->buf);
1298+
1299+
(void)err;
1300+
__ASSERT_NO_MSG(err == 0);
1301+
rdata->buf = rdata->usr_buf;
1302+
#endif
12331303
rdata->flush_cnt = 0;
12341304

12351305
/* The 'rx_offset' can be bigger than 'rx_amount', so the length
@@ -1257,6 +1327,9 @@ static void endrx_isr(const struct device *dev)
12571327
rx_buf_release(dev, rdata->buf);
12581328
rdata->buf = rdata->next_buf;
12591329
rdata->buf_len = rdata->next_buf_len;
1330+
#ifdef CONFIG_HAS_NORDIC_DMM
1331+
rdata->usr_buf = rdata->next_usr_buf;
1332+
#endif
12601333
rdata->next_buf = NULL;
12611334
rdata->next_buf_len = 0;
12621335
rdata->offset = 0;
@@ -1320,11 +1393,15 @@ static uint8_t rx_flush(const struct device *dev, uint8_t *buf)
13201393
/* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo*/
13211394
static const uint8_t dirty = 0xAA;
13221395
NRF_UARTE_Type *uarte = get_uarte_instance(dev);
1396+
const struct uarte_nrfx_config *config = dev->config;
13231397
uint32_t prev_rx_amount;
13241398
uint32_t rx_amount;
13251399

13261400
if (IS_ENABLED(RX_FLUSH_WORKAROUND)) {
13271401
memset(buf, dirty, UARTE_HW_RX_FIFO_SIZE);
1402+
if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1403+
sys_cache_data_flush_range(buf, UARTE_HW_RX_FIFO_SIZE);
1404+
}
13281405
prev_rx_amount = nrf_uarte_rx_amount_get(uarte);
13291406
} else {
13301407
prev_rx_amount = 0;
@@ -1355,6 +1432,10 @@ static uint8_t rx_flush(const struct device *dev, uint8_t *buf)
13551432
return 0;
13561433
}
13571434

1435+
if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1436+
sys_cache_data_invd_range(buf, UARTE_HW_RX_FIFO_SIZE);
1437+
}
1438+
13581439
for (int i = 0; i < rx_amount; i++) {
13591440
if (buf[i] != dirty) {
13601441
return rx_amount;
@@ -1374,6 +1455,10 @@ static void rxto_isr(const struct device *dev)
13741455
struct uarte_async_rx *rdata = &data->async->rx;
13751456

13761457
if (rdata->buf) {
1458+
#ifdef CONFIG_HAS_NORDIC_DMM
1459+
(void)dmm_buffer_in_release(config->mem_reg, rdata->usr_buf, 0, rdata->buf);
1460+
rdata->buf = rdata->usr_buf;
1461+
#endif
13771462
rx_buf_release(dev, rdata->buf);
13781463
rdata->buf = NULL;
13791464
}
@@ -1593,6 +1678,16 @@ static void uarte_nrfx_isr_async(const void *arg)
15931678
}
15941679

15951680
if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) {
1681+
#ifdef CONFIG_HAS_NORDIC_DMM
1682+
int ret;
1683+
1684+
ret = dmm_buffer_in_release(config->mem_reg, rdata->usr_buf,
1685+
rdata->buf_len, rdata->buf);
1686+
1687+
(void)ret;
1688+
__ASSERT_NO_MSG(ret == 0);
1689+
rdata->buf = rdata->usr_buf;
1690+
#endif
15961691
notify_uart_rx_rdy(dev, rdata->buf_len);
15971692
rx_buf_release(dev, rdata->buf);
15981693
rdata->buf_len = 0;
@@ -1628,6 +1723,10 @@ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c)
16281723
return -1;
16291724
}
16301725

1726+
if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1727+
sys_cache_data_invd_range(config->poll_in_byte, 1);
1728+
}
1729+
16311730
*c = *config->poll_in_byte;
16321731

16331732
/* clear the interrupt */
@@ -1722,6 +1821,10 @@ static int uarte_nrfx_fifo_read(const struct device *dev,
17221821
/* Clear the interrupt */
17231822
nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
17241823

1824+
if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) {
1825+
sys_cache_data_invd_range(config->poll_in_byte, 1);
1826+
}
1827+
17251828
/* Receive a character */
17261829
rx_data[num_rx++] = *config->poll_in_byte;
17271830

@@ -2233,11 +2336,15 @@ static int uarte_nrfx_pm_action(const struct device *dev,
22332336
.hw_config = UARTE_NRF_CONFIG(idx),)) \
22342337
.pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \
22352338
.uarte_regs = _CONCAT(NRF_UARTE, idx), \
2339+
IF_ENABLED(CONFIG_HAS_NORDIC_DMM, \
2340+
(.mem_reg = DMM_DEV_TO_REG(UARTE(idx)),)) \
22362341
.flags = \
22372342
(IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ? \
22382343
UARTE_CFG_FLAG_PPI_ENDTX : 0) | \
22392344
(IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC) ? \
22402345
UARTE_CFG_FLAG_HW_BYTE_COUNTING : 0) | \
2346+
(!IS_ENABLED(CONFIG_HAS_NORDIC_DMM) ? 0 : \
2347+
(UARTE_IS_CACHEABLE(idx) ? UARTE_CFG_FLAG_CACHEABLE : 0)) | \
22412348
USE_LOW_POWER(idx), \
22422349
UARTE_DISABLE_RX_INIT(UARTE(idx)), \
22432350
.poll_out_byte = &uarte##idx##_poll_out_byte, \

0 commit comments

Comments
 (0)