
Commit 62d0246

[nrf fromtree] drivers: mspi: mspi_dw: Add DMA support
Initial DMA support. The DMA path targets implementations of the SSI IP that use a vendor-specific DMA in the wrapper; the DMA setup is done in mspi_dw_vendor_specific.h.

Signed-off-by: David Jewsbury <[email protected]>
(cherry picked from commit d9677bb)
1 parent f93e971 commit 62d0246

7 files changed: +439, -76 lines

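The driver core delegates everything DMA-related to vendor-specific hooks. As a rough sketch only, the wrapper is expected to provide roughly the interface below; the prototypes are assumptions inferred from the call sites in the mspi_dw.c diff further down, and the real declarations live in mspi_dw_vendor_specific.h, which is not part of this excerpt.

/* Hypothetical sketch of the vendor-specific DMA hooks used by mspi_dw.c.
 * Prototypes are assumptions inferred from the call sites in this commit;
 * the actual declarations are in mspi_dw_vendor_specific.h (not shown here).
 */
#include <stdbool.h>
#include <zephyr/device.h>

/* Per-instance wrapper state; contents are entirely vendor-defined. */
#define VENDOR_SPECIFIC_DATA_DEFINE(inst) /* e.g. static struct vendor_dma_data data##inst; */
#define VENDOR_SPECIFIC_DATA_GET(inst)    NULL

/* True when the wrapper's DMA signals end of the current transfer. */
bool vendor_specific_read_dma_irq(const struct device *dev);

/* Clear the wrapper's DMA interrupt after it has been handled. */
void vendor_specific_irq_clear(const struct device *dev);

/* True if the given buffer lies in memory the wrapper's DMA can reach. */
bool vendor_specific_dma_accessible_check(const struct device *dev, const void *buf);

/* Program the wrapper's DMA for the prepared packet and start it. */
void vendor_specific_start_dma_xfer(const struct device *dev);

The SSI core side only enables its DMA handshake (DMACR/DMATDLR/DMARDLR); actually moving the data is left entirely to the wrapper.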

doc/hardware/peripherals/mspi.rst

Lines changed: 1 addition & 0 deletions
@@ -194,6 +194,7 @@ Related configuration options:
 * :kconfig:option:`CONFIG_MSPI_TIMING`
 * :kconfig:option:`CONFIG_MSPI_INIT_PRIORITY`
 * :kconfig:option:`CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE`
+* :kconfig:option:`CONFIG_MSPI_DMA`
 
 API Reference
 *************

drivers/mspi/Kconfig

Lines changed: 6 additions & 0 deletions
@@ -55,6 +55,12 @@ config MSPI_TIMING
	  Enables mspi_timing_config calls in device drivers for those
	  controllers that need this to proper function at high frequencies.
 
+config MSPI_DMA
+	bool "DMA support"
+	help
+	  Enables DMA capabilities, depending on the driver and hardware it
+	  runs on.
+
 module = MSPI
 module-str = mspi
 source "subsys/logging/Kconfig.template.log_config"

drivers/mspi/Kconfig.dw

Lines changed: 0 additions & 1 deletion
@@ -7,7 +7,6 @@ config MSPI_DW
 	default y
 	depends on DT_HAS_SNPS_DESIGNWARE_SSI_ENABLED
 	select PINCTRL if $(dt_compat_any_has_prop,$(DT_COMPAT_SNPS_DESIGNWARE_SSI),pinctrl-0)
-	imply MSPI_XIP
 	imply MSPI_TIMING
 
 if MSPI_DW

drivers/mspi/mspi_dw.c

Lines changed: 154 additions & 73 deletions
@@ -95,6 +95,7 @@ struct mspi_dw_data {
 
 struct mspi_dw_config {
 	DEVICE_MMIO_ROM;
+	void *wrapper_regs;
 	void (*irq_config)(void);
 	uint32_t clock_frequency;
 #if defined(CONFIG_PINCTRL)
@@ -112,6 +113,11 @@ struct mspi_dw_config {
 	uint8_t max_queued_dummy_bytes;
 	uint8_t tx_fifo_threshold;
 	uint8_t rx_fifo_threshold;
+#ifdef CONFIG_MSPI_DMA
+	uint8_t dma_tx_data_level;
+	uint8_t dma_rx_data_level;
+#endif
+	void *vendor_specific_data;
 	DECLARE_REG_ACCESS();
 	bool sw_multi_periph;
 	enum mspi_op_mode op_mode;
@@ -139,6 +145,11 @@ DEFINE_MM_REG_RD_WR(dr, 0x60)
 DEFINE_MM_REG_WR(rx_sample_dly, 0xf0)
 DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4)
 DEFINE_MM_REG_WR(txd_drive_edge, 0xf8)
+#if defined(CONFIG_MSPI_DMA)
+DEFINE_MM_REG_WR(dmacr, 0x4C)
+DEFINE_MM_REG_WR(dmatdlr, 0x50)
+DEFINE_MM_REG_WR(dmardlr, 0x54)
+#endif
 
 #if defined(CONFIG_MSPI_XIP)
 DEFINE_MM_REG_WR(xip_incr_inst, 0x100)
@@ -541,6 +552,19 @@ static void fifo_work_handler(struct k_work *work)
 
 static void mspi_dw_isr(const struct device *dev)
 {
+#if defined(CONFIG_MSPI_DMA)
+	struct mspi_dw_data *dev_data = dev->data;
+
+	if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		if (vendor_specific_read_dma_irq(dev)) {
+			set_imr(dev, 0);
+			handle_end_of_packet(dev_data);
+		}
+		vendor_specific_irq_clear(dev);
+		return;
+	}
+#endif
+
 #if defined(CONFIG_MSPI_DW_HANDLE_FIFOS_IN_SYSTEM_WORKQUEUE)
 	struct mspi_dw_data *dev_data = dev->data;
 	int rc;
@@ -1067,7 +1091,7 @@ static int start_next_packet(const struct device *dev)
 					    (false));
 	unsigned int key;
 	uint32_t packet_frames;
-	uint32_t imr;
+	uint32_t imr = 0;
 	int rc = 0;
 
 	if (packet->num_bytes == 0 &&
@@ -1115,6 +1139,18 @@ static int start_next_packet(const struct device *dev)
 		return -EINVAL;
 	}
 
+#if defined(CONFIG_MSPI_DMA)
+	if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		/* Check if the packet buffer is accessible */
+		if (packet->num_bytes > 0 &&
+		    !vendor_specific_dma_accessible_check(dev, packet->data_buf)) {
+			LOG_ERR("Buffer not DMA accessible: ptr=0x%lx, size=%u",
+				(uintptr_t)packet->data_buf, packet->num_bytes);
+			return -EINVAL;
+		}
+	}
+#endif
+
 	if (packet->dir == MSPI_TX || packet->num_bytes == 0) {
 		imr = IMR_TXEIM_BIT;
 		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK,
@@ -1123,6 +1159,12 @@ static int start_next_packet(const struct device *dev)
 						   dev_data->xfer.tx_dummy);
 
 		write_rxftlr(dev, 0);
+#if defined(CONFIG_MSPI_DMA)
+	} else if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, CTRLR0_TMOD_RX);
+		dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
+						   dev_data->xfer.rx_dummy);
+#endif
 	} else {
 		uint32_t tmod;
 		uint8_t rx_fifo_threshold;
@@ -1211,95 +1253,124 @@ static int start_next_packet(const struct device *dev)
 		irq_unlock(key);
 	}
 
-	dev_data->buf_pos = packet->data_buf;
-	dev_data->buf_end = &packet->data_buf[packet->num_bytes];
-
-	/* Set the TX FIFO threshold and its transmit start level. */
-	if (packet->num_bytes) {
-		/* If there is some data to send/receive, set the threshold to
-		 * the value configured for the driver instance and the start
-		 * level to the maximum possible value (it will be updated later
-		 * in tx_fifo() or tx_dummy_bytes() when TX is to be finished).
-		 * This helps avoid a situation when the TX FIFO becomes empty
-		 * before the transfer is complete and the SSI core finishes the
-		 * transaction and deactivates the CE line. This could occur
-		 * right before the data phase in enhanced SPI modes, when the
-		 * clock stretching feature does not work yet, or in Standard
-		 * SPI mode, where the clock stretching is not available at all.
-		 */
-		uint8_t start_level = dev_data->dummy_bytes != 0
-				    ? dev_config->max_queued_dummy_bytes - 1
-				    : dev_config->tx_fifo_depth_minus_1;
+#if defined(CONFIG_MSPI_DMA)
+	if (dev_data->xfer.xfer_mode == MSPI_DMA) {
+		/* For DMA mode, set start level based on transfer length to prevent underflow */
+		uint32_t total_transfer_bytes = packet->num_bytes + dev_data->xfer.addr_length +
+						dev_data->xfer.cmd_length;
+		uint32_t transfer_frames = total_transfer_bytes >> dev_data->bytes_per_frame_exp;
 
-		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
-				  FIELD_PREP(TXFTLR_TFT_MASK,
-					     dev_config->tx_fifo_threshold));
+		/* Use minimum of transfer length or FIFO depth, but at least 1 */
+		uint8_t dma_start_level = MIN(transfer_frames - 1,
+					      dev_config->tx_fifo_depth_minus_1);
+
+		dma_start_level = (dma_start_level > 0 ? dma_start_level : 1);
+
+		/* Only TXFTHR needs to be set to the minimum number of frames */
+		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, dma_start_level));
+		write_dmatdlr(dev, FIELD_PREP(DMATDLR_DMATDL_MASK, dev_config->dma_tx_data_level));
+		write_dmardlr(dev, FIELD_PREP(DMARDLR_DMARDL_MASK, dev_config->dma_rx_data_level));
+		write_dmacr(dev, DMACR_TDMAE_BIT | DMACR_RDMAE_BIT);
+		write_imr(dev, 0);
+		write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+
+		vendor_specific_start_dma_xfer(dev);
 	} else {
-		uint32_t total_tx_entries = 0;
+#endif
+		/* PIO mode */
+		dev_data->buf_pos = packet->data_buf;
+		dev_data->buf_end = &packet->data_buf[packet->num_bytes];
+		/* Set the TX FIFO threshold and its transmit start level. */
+		if (packet->num_bytes) {
+			/* If there is some data to send/receive, set the threshold to
+			 * the value configured for the driver instance and the start
+			 * level to the maximum possible value (it will be updated later
+			 * in tx_fifo() or tx_dummy_bytes() when TX is to be finished).
+			 * This helps avoid a situation when the TX FIFO becomes empty
			 * before the transfer is complete and the SSI core finishes the
+			 * transaction and deactivates the CE line. This could occur
+			 * right before the data phase in enhanced SPI modes, when the
+			 * clock stretching feature does not work yet, or in Standard
+			 * SPI mode, where the clock stretching is not available at all.
+			 */
+			uint8_t start_level = dev_data->dummy_bytes != 0
+					    ? dev_config->max_queued_dummy_bytes - 1
+					    : dev_config->tx_fifo_depth_minus_1;
 
-		/* It the whole transfer is to contain only the command and/or
-		 * address, set up the transfer to start right after entries
-		 * for those appear in the TX FIFO, and the threshold to 0,
-		 * so that the interrupt occurs when the TX FIFO gets emptied.
-		 */
-		if (dev_data->xfer.cmd_length) {
-			if (dev_data->standard_spi) {
-				total_tx_entries += dev_data->xfer.cmd_length;
-			} else {
-				total_tx_entries += 1;
+			write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
+					  FIELD_PREP(TXFTLR_TFT_MASK,
						     dev_config->tx_fifo_threshold));
+
+		} else {
+			uint32_t total_tx_entries = 0;
+
+			/* It the whole transfer is to contain only the command and/or
+			 * address, set up the transfer to start right after entries
+			 * for those appear in the TX FIFO, and the threshold to 0,
+			 * so that the interrupt occurs when the TX FIFO gets emptied.
+			 */
+			if (dev_data->xfer.cmd_length) {
+				if (dev_data->standard_spi) {
+					total_tx_entries += dev_data->xfer.cmd_length;
+				} else {
+					total_tx_entries += 1;
+				}
 			}
-		}
 
-		if (dev_data->xfer.addr_length) {
-			if (dev_data->standard_spi) {
-				total_tx_entries += dev_data->xfer.addr_length;
-			} else {
-				total_tx_entries += 1;
+			if (dev_data->xfer.addr_length) {
+				if (dev_data->standard_spi) {
+					total_tx_entries += dev_data->xfer.addr_length;
+				} else {
+					total_tx_entries += 1;
+				}
 			}
+
+			write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
+						     total_tx_entries - 1));
 		}
 
-		write_txftlr(dev, FIELD_PREP(TXFTLR_TXFTHR_MASK,
-					     total_tx_entries - 1));
-	}
+		/* Ensure that there will be no interrupt from the controller yet. */
+		write_imr(dev, 0);
+		/* Enable the controller. This must be done before DR is written. */
+		write_ssienr(dev, SSIENR_SSIC_EN_BIT);
 
-	/* Ensure that there will be no interrupt from the controller yet. */
-	write_imr(dev, 0);
-	/* Enable the controller. This must be done before DR is written. */
-	write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+		/* Since the FIFO depth in SSI is always at least 8, it can be safely
		 * assumed that the command and address fields (max. 2 and 4 bytes,
+		 * respectively) can be written here before the TX FIFO gets filled up.
+		 */
+		if (dev_data->standard_spi) {
+			if (dev_data->xfer.cmd_length) {
+				tx_control_field(dev, packet->cmd,
						 dev_data->xfer.cmd_length);
+			}
 
-	/* Since the FIFO depth in SSI is always at least 8, it can be safely
-	 * assumed that the command and address fields (max. 2 and 4 bytes,
-	 * respectively) can be written here before the TX FIFO gets filled up.
-	 */
-	if (dev_data->standard_spi) {
-		if (dev_data->xfer.cmd_length) {
-			tx_control_field(dev, packet->cmd,
-					 dev_data->xfer.cmd_length);
-		}
+			if (dev_data->xfer.addr_length) {
+				tx_control_field(dev, packet->address,
						 dev_data->xfer.addr_length);
+			}
+		} else {
+			if (dev_data->xfer.cmd_length) {
+				write_dr(dev, packet->cmd);
+			}
 
-		if (dev_data->xfer.addr_length) {
-			tx_control_field(dev, packet->address,
-					 dev_data->xfer.addr_length);
-		}
-	} else {
-		if (dev_data->xfer.cmd_length) {
-			write_dr(dev, packet->cmd);
+			if (dev_data->xfer.addr_length) {
+				write_dr(dev, packet->address);
+			}
 		}
 
-		if (dev_data->xfer.addr_length) {
-			write_dr(dev, packet->address);
+		/* Prefill TX FIFO with any data we can */
+		if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) {
+			imr = IMR_RXFIM_BIT;
+		} else if (packet->dir == MSPI_TX && packet->num_bytes) {
+			tx_data(dev, packet);
 		}
-	}
 
-	/* Prefill TX FIFO with any data we can */
-	if (dev_data->dummy_bytes && tx_dummy_bytes(dev, NULL)) {
-		imr = IMR_RXFIM_BIT;
-	} else if (packet->dir == MSPI_TX && packet->num_bytes) {
-		tx_data(dev, packet);
+		/* Enable interrupts now and wait until the packet is done unless async. */
+		write_imr(dev, imr);
+#if defined(CONFIG_MSPI_DMA)
 	}
+#endif
 
-	/* Enable interrupts now */
-	write_imr(dev, imr);
 	/* Write SER to start transfer */
 	write_ser(dev, BIT(dev_data->dev_id->dev_idx));
 
@@ -1867,9 +1938,16 @@ static DEVICE_API(mspi, drv_api) = {
 		DT_INST_PROP_OR(inst, rx_fifo_threshold,		\
 				1 * RX_FIFO_DEPTH(inst) / 8 - 1)
 
+#define MSPI_DW_DMA_DATA_LEVELS(inst)					\
+	.dma_tx_data_level =						\
+		DT_INST_PROP_OR(inst, dma_transmit_data_level, 0),	\
+	.dma_rx_data_level =						\
+		DT_INST_PROP_OR(inst, dma_receive_data_level, 0)
+
 #define MSPI_DW_INST(inst)						\
 	PM_DEVICE_DT_INST_DEFINE(inst, dev_pm_action_cb);		\
 	IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);))	\
+	VENDOR_SPECIFIC_DATA_DEFINE(inst);				\
 	static void irq_config##inst(void)				\
 	{								\
 		LISTIFY(DT_INST_NUM_IRQS(inst),				\
@@ -1878,13 +1956,16 @@ static DEVICE_API(mspi, drv_api) = {
 	static struct mspi_dw_data dev##inst##_data;			\
 	static const struct mspi_dw_config dev##inst##_config = {	\
 		MSPI_DW_MMIO_ROM_INIT(DT_DRV_INST(inst)),		\
+		.wrapper_regs = (void *)DT_INST_REG_ADDR(inst),		\
 		.irq_config = irq_config##inst,				\
 		.clock_frequency = MSPI_DW_CLOCK_FREQUENCY(inst),	\
 		IF_ENABLED(CONFIG_PINCTRL,				\
 			(.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \
 		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, ce_gpios),	\
 			(MSPI_DW_CE_GPIOS(inst),))			\
 		MSPI_DW_FIFO_PROPS(inst),				\
+		IF_ENABLED(CONFIG_MSPI_DMA, (MSPI_DW_DMA_DATA_LEVELS(inst),)) \
+		.vendor_specific_data = VENDOR_SPECIFIC_DATA_GET(inst),	\
 		DEFINE_REG_ACCESS(inst)					\
 		.sw_multi_periph =					\
 			DT_INST_PROP(inst, software_multiperipheral),	\
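From the application side, the new path is selected through the standard MSPI transfer descriptor. Below is a minimal, hypothetical sketch: the device handle, opcode, command/address lengths, dummy cycles and timeout are placeholders, the packet and transfer field names are the ones the driver reads in the hunks above, and the rest follows the Zephyr MSPI API. The buffer must pass vendor_specific_dma_accessible_check(), so it has to live in memory the wrapper's DMA can reach.

/* Hypothetical usage sketch: request a read in DMA mode via the Zephyr MSPI API.
 * Opcode, address/command lengths, dummy cycles and timeout are placeholders.
 */
#include <zephyr/drivers/mspi.h>

static uint8_t rx_buf[256]; /* assumed to be reachable by the wrapper's DMA */

int read_packet_with_dma(const struct device *mspi_ctrl,
			 const struct mspi_dev_id *dev_id)
{
	const struct mspi_xfer_packet packet = {
		.dir       = MSPI_RX,
		.cmd       = 0x0B,            /* placeholder read opcode */
		.address   = 0x000000,
		.num_bytes = sizeof(rx_buf),
		.data_buf  = rx_buf,
	};
	const struct mspi_xfer xfer = {
		.xfer_mode   = MSPI_DMA,      /* take the new DMA path */
		.cmd_length  = 1,
		.addr_length = 3,
		.rx_dummy    = 8,             /* placeholder dummy cycles */
		.packets     = &packet,
		.num_packet  = 1,
		.timeout     = 100,
	};

	/* The driver returns -EINVAL if rx_buf is not DMA-accessible. */
	return mspi_transceive(mspi_ctrl, dev_id, &xfer);
}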

drivers/mspi/mspi_dw.h

Lines changed: 16 additions & 0 deletions
@@ -173,6 +173,22 @@
 #define XIP_WRITE_CTRL_FRF_QUAD		2UL
 #define XIP_WRITE_CTRL_FRF_OCTAL	3UL
 
+/* DMACR - DMA Control Register */
+#define DMACR_ATW_MASK		GENMASK(4, 3)
+#define DMACR_ATW_1		0UL
+#define DMACR_ATW_2		1UL
+#define DMACR_ATW_4		2UL
+#define DMACR_ATW_8		3UL
+#define DMACR_IDMAE_BIT		BIT(2)
+#define DMACR_TDMAE_BIT		BIT(1)
+#define DMACR_RDMAE_BIT		BIT(0)
+
+/* DMATDLR - DMA Transmit Data Level */
+#define DMATDLR_DMATDL_MASK	GENMASK(3, 0)
+
+/* DMARDLR - DMA Receive Data Level */
+#define DMARDLR_DMARDL_MASK	GENMASK(3, 0)
+
 /* Register access helpers. */
 #define USES_AUX_REG(inst)	+ DT_INST_PROP(inst, aux_reg_enable)
 #define AUX_REG_INSTANCES	(0 DT_INST_FOREACH_STATUS_OKAY(USES_AUX_REG))
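For reference, DMATDLR/DMARDLR set the FIFO watermarks at which the controller asserts its TX/RX DMA requests, and DMACR gates those requests; the ATW and IDMAE fields are defined here but not used in the hunks shown above. A condensed sketch of how the driver programs these registers in DMA mode, pulled from the start_next_packet() changes (the write_*() helpers are the DEFINE_MM_REG_WR() accessors added in mspi_dw.c, and the data levels come from the dma-transmit-data-level / dma-receive-data-level devicetree properties read via DT_INST_PROP_OR()):

/* Condensed from the mspi_dw.c changes above; not a standalone program.
 * dev_config->dma_*_data_level originate from the (optional) devicetree
 * properties picked up by MSPI_DW_DMA_DATA_LEVELS().
 */
static void mspi_dw_enable_dma_handshake(const struct device *dev)
{
	const struct mspi_dw_config *dev_config = dev->config;

	/* FIFO watermarks for the TX and RX DMA request lines. */
	write_dmatdlr(dev, FIELD_PREP(DMATDLR_DMATDL_MASK, dev_config->dma_tx_data_level));
	write_dmardlr(dev, FIELD_PREP(DMARDLR_DMARDL_MASK, dev_config->dma_rx_data_level));

	/* Enable both transmit and receive DMA requests. */
	write_dmacr(dev, DMACR_TDMAE_BIT | DMACR_RDMAE_BIT);
}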
