|
| 1 | +/* |
| 2 | + * Copyright (c) 2021 BrainCo Inc. |
| 3 | + * |
| 4 | + * SPDX-License-Identifier: Apache-2.0 |
| 5 | + */ |
| 6 | + |
| 7 | +#define DT_DRV_COMPAT gd_gd32_spi |
| 8 | + |
| 9 | +#include <errno.h> |
| 10 | +#include <kernel.h> |
| 11 | +#include <drivers/pinctrl.h> |
| 12 | +#include <drivers/spi.h> |
| 13 | +#include <soc.h> |
| 14 | + |
| 15 | +#include <logging/log.h> |
| 16 | +LOG_MODULE_REGISTER(spi_gd32); |
| 17 | + |
| 18 | +#include "spi_context.h" |
| 19 | + |
| 20 | +/* SPI error status mask. */ |
| 21 | +#define SPI_GD32_ERR_MASK (SPI_STAT_RXORERR | SPI_STAT_CONFERR | SPI_STAT_CRCERR) |
| 22 | + |
| 23 | +#define GD32_SPI_PSC_MAX 0x7U |
| 24 | + |
| 25 | +#if defined(CONFIG_SOC_SERIES_GD32F4XX) || \ |
| 26 | + defined(CONFIG_SOC_SERIES_GD32F403) || \ |
| 27 | + defined(CONFIG_SOC_SERIES_GD32VF103) || \ |
| 28 | + defined(CONFIG_SOC_SERIES_GD32E10X) |
| 29 | +#define RCU_APB1EN_OFFSET APB1EN_REG_OFFSET |
| 30 | +#elif defined(CONFIG_SOC_SERIES_GD32F3X0) |
| 31 | +#define RCU_APB1EN_OFFSET IDX_APB1EN |
| 32 | +#else |
| 33 | +#error Unknown GD32 soc series |
| 34 | +#endif |
| 35 | + |
| 36 | +/* Obtain RCU register offset from RCU clock value */ |
| 37 | +#define RCU_CLOCK_OFFSET(rcu_clock) ((rcu_clock) >> 6U) |
| 38 | + |
/* Per-instance constant configuration (from devicetree, stored in ROM). */
struct spi_gd32_config {
	uint32_t reg;                          /* SPI peripheral register base address. */
	uint32_t rcu_periph_clock;             /* RCU peripheral clock id for this SPI. */
	const struct pinctrl_dev_config *pcfg; /* Pinctrl states for the SPI pins. */
};

/* Per-instance mutable run-time state. */
struct spi_gd32_data {
	struct spi_context ctx; /* Generic SPI context: lock, buffers, CS handling. */
};
| 48 | + |
| 49 | +static int spi_gd32_get_err(const struct spi_gd32_config *cfg) |
| 50 | +{ |
| 51 | + uint32_t stat = SPI_STAT(cfg->reg); |
| 52 | + |
| 53 | + if (stat & SPI_GD32_ERR_MASK) { |
| 54 | + LOG_ERR("spi%u error status detected, err = %u", |
| 55 | + cfg->reg, stat & (uint32_t)SPI_GD32_ERR_MASK); |
| 56 | + |
| 57 | + return -EIO; |
| 58 | + } |
| 59 | + |
| 60 | + return 0; |
| 61 | +} |
| 62 | + |
| 63 | +static bool spi_gd32_transfer_ongoing(struct spi_gd32_data *data) |
| 64 | +{ |
| 65 | + return spi_context_tx_on(&data->ctx) || |
| 66 | + spi_context_rx_on(&data->ctx); |
| 67 | +} |
| 68 | + |
| 69 | +static uint32_t spi_gd32_bus_freq_get(uint32_t rcu_periph_clock) |
| 70 | +{ |
| 71 | + uint32_t rcu_bus; |
| 72 | + |
| 73 | + if (RCU_CLOCK_OFFSET(rcu_periph_clock) == RCU_APB1EN_OFFSET) { |
| 74 | + rcu_bus = CK_APB1; |
| 75 | + } else { |
| 76 | + rcu_bus = CK_APB2; |
| 77 | + } |
| 78 | + |
| 79 | + return rcu_clock_freq_get(rcu_bus); |
| 80 | +} |
| 81 | + |
| 82 | +static int spi_gd32_configure(const struct device *dev, |
| 83 | + const struct spi_config *config) |
| 84 | +{ |
| 85 | + struct spi_gd32_data *data = dev->data; |
| 86 | + const struct spi_gd32_config *cfg = dev->config; |
| 87 | + uint32_t bus_freq; |
| 88 | + |
| 89 | + if (spi_context_configured(&data->ctx, config)) { |
| 90 | + return 0; |
| 91 | + } |
| 92 | + |
| 93 | + if (SPI_OP_MODE_GET(config->operation) == SPI_OP_MODE_SLAVE) { |
| 94 | + LOG_ERR("Slave mode not supported"); |
| 95 | + return -ENOTSUP; |
| 96 | + } |
| 97 | + |
| 98 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SPIEN; |
| 99 | + |
| 100 | + SPI_CTL0(cfg->reg) |= SPI_MASTER; |
| 101 | + SPI_CTL0(cfg->reg) &= ~SPI_TRANSMODE_BDTRANSMIT; |
| 102 | + |
| 103 | + if (SPI_WORD_SIZE_GET(config->operation) == 8) { |
| 104 | + SPI_CTL0(cfg->reg) |= SPI_FRAMESIZE_8BIT; |
| 105 | + } else { |
| 106 | + SPI_CTL0(cfg->reg) |= SPI_FRAMESIZE_16BIT; |
| 107 | + } |
| 108 | + |
| 109 | + /* Reset to hardware NSS mode. */ |
| 110 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SWNSSEN; |
| 111 | + if (config->cs != NULL) { |
| 112 | + SPI_CTL0(cfg->reg) |= SPI_CTL0_SWNSSEN; |
| 113 | + } else { |
| 114 | + /* |
| 115 | + * For single master env, |
| 116 | + * hardware NSS mode also need to set the NSSDRV bit. |
| 117 | + */ |
| 118 | + SPI_CTL1(cfg->reg) |= SPI_CTL1_NSSDRV; |
| 119 | + } |
| 120 | + |
| 121 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_LF; |
| 122 | + if (config->operation & SPI_TRANSFER_LSB) { |
| 123 | + SPI_CTL0(cfg->reg) |= SPI_CTL0_LF; |
| 124 | + } |
| 125 | + |
| 126 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_CKPL; |
| 127 | + if (config->operation & SPI_MODE_CPOL) { |
| 128 | + SPI_CTL0(cfg->reg) |= SPI_CTL0_CKPL; |
| 129 | + } |
| 130 | + |
| 131 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_CKPH; |
| 132 | + if (config->operation & SPI_MODE_CPHA) { |
| 133 | + SPI_CTL0(cfg->reg) |= SPI_CTL0_CKPH; |
| 134 | + } |
| 135 | + |
| 136 | + bus_freq = spi_gd32_bus_freq_get(cfg->rcu_periph_clock); |
| 137 | + |
| 138 | + for (uint8_t i = 0U; i <= GD32_SPI_PSC_MAX; i++) { |
| 139 | + bus_freq = bus_freq >> 1U; |
| 140 | + if (bus_freq <= config->frequency) { |
| 141 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_PSC; |
| 142 | + SPI_CTL0(cfg->reg) |= CTL0_PSC(i); |
| 143 | + break; |
| 144 | + } |
| 145 | + } |
| 146 | + |
| 147 | + data->ctx.config = config; |
| 148 | + |
| 149 | + return 0; |
| 150 | +} |
| 151 | + |
| 152 | +static int spi_gd32_frame_exchange(const struct device *dev) |
| 153 | +{ |
| 154 | + struct spi_gd32_data *data = dev->data; |
| 155 | + const struct spi_gd32_config *cfg = dev->config; |
| 156 | + struct spi_context *ctx = &data->ctx; |
| 157 | + uint16_t tx_frame = 0U, rx_frame = 0U; |
| 158 | + |
| 159 | + if (SPI_WORD_SIZE_GET(ctx->config->operation) == 8) { |
| 160 | + if (spi_context_tx_buf_on(ctx)) { |
| 161 | + tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf)); |
| 162 | + } |
| 163 | + /* For 8 bits mode, spi will forced SPI_DATA[15:8] to 0. */ |
| 164 | + SPI_DATA(cfg->reg) = tx_frame; |
| 165 | + |
| 166 | + spi_context_update_tx(ctx, 1, 1); |
| 167 | + } else { |
| 168 | + if (spi_context_tx_buf_on(ctx)) { |
| 169 | + tx_frame = UNALIGNED_GET((uint8_t *)(data->ctx.tx_buf)); |
| 170 | + } |
| 171 | + SPI_DATA(cfg->reg) = tx_frame; |
| 172 | + |
| 173 | + spi_context_update_tx(ctx, 2, 1); |
| 174 | + } |
| 175 | + |
| 176 | + while ((SPI_STAT(cfg->reg) & SPI_STAT_RBNE) == 0) { |
| 177 | + /* NOP */ |
| 178 | + } |
| 179 | + |
| 180 | + if (SPI_WORD_SIZE_GET(data->ctx.config->operation) == 8) { |
| 181 | + /* For 8 bits mode, spi will forced SPI_DATA[15:8] to 0. */ |
| 182 | + rx_frame = SPI_DATA(cfg->reg); |
| 183 | + if (spi_context_rx_buf_on(ctx)) { |
| 184 | + UNALIGNED_PUT(rx_frame, (uint8_t *)data->ctx.rx_buf); |
| 185 | + } |
| 186 | + |
| 187 | + spi_context_update_rx(ctx, 1, 1); |
| 188 | + } else { |
| 189 | + rx_frame = SPI_DATA(cfg->reg); |
| 190 | + if (spi_context_rx_buf_on(ctx)) { |
| 191 | + UNALIGNED_PUT(rx_frame, (uint16_t *)data->ctx.rx_buf); |
| 192 | + } |
| 193 | + |
| 194 | + spi_context_update_rx(ctx, 2, 1); |
| 195 | + } |
| 196 | + |
| 197 | + return spi_gd32_get_err(cfg); |
| 198 | +} |
| 199 | + |
| 200 | +static int spi_gd32_transceive(const struct device *dev, |
| 201 | + const struct spi_config *config, |
| 202 | + const struct spi_buf_set *tx_bufs, |
| 203 | + const struct spi_buf_set *rx_bufs) |
| 204 | +{ |
| 205 | + struct spi_gd32_data *data = dev->data; |
| 206 | + const struct spi_gd32_config *cfg = dev->config; |
| 207 | + int ret; |
| 208 | + |
| 209 | + spi_context_lock(&data->ctx, false, NULL, config); |
| 210 | + |
| 211 | + ret = spi_gd32_configure(dev, config); |
| 212 | + if (ret < 0) { |
| 213 | + goto error; |
| 214 | + } |
| 215 | + |
| 216 | + SPI_CTL0(cfg->reg) |= SPI_CTL0_SPIEN; |
| 217 | + |
| 218 | + spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, 1); |
| 219 | + |
| 220 | + spi_context_cs_control(&data->ctx, true); |
| 221 | + |
| 222 | + do { |
| 223 | + ret = spi_gd32_frame_exchange(dev); |
| 224 | + if (ret < 0) { |
| 225 | + break; |
| 226 | + } |
| 227 | + } while (spi_gd32_transfer_ongoing(data)); |
| 228 | + |
| 229 | + spi_context_cs_control(&data->ctx, false); |
| 230 | + |
| 231 | + SPI_CTL0(cfg->reg) &= ~SPI_CTL0_SPIEN; |
| 232 | + |
| 233 | +error: |
| 234 | + spi_context_release(&data->ctx, ret); |
| 235 | + |
| 236 | + return ret; |
| 237 | +} |
| 238 | + |
| 239 | +static int spi_gd32_release(const struct device *dev, |
| 240 | + const struct spi_config *config) |
| 241 | +{ |
| 242 | + struct spi_gd32_data *data = dev->data; |
| 243 | + |
| 244 | + spi_context_unlock_unconditionally(&data->ctx); |
| 245 | + |
| 246 | + return 0; |
| 247 | +} |
| 248 | + |
| 249 | +static struct spi_driver_api spi_gd32_driver_api = { |
| 250 | + .transceive = spi_gd32_transceive, |
| 251 | + .release = spi_gd32_release |
| 252 | +}; |
| 253 | + |
| 254 | +int spi_gd32_init(const struct device *dev) |
| 255 | +{ |
| 256 | + struct spi_gd32_data *data = dev->data; |
| 257 | + const struct spi_gd32_config *cfg = dev->config; |
| 258 | + int ret; |
| 259 | + |
| 260 | + rcu_periph_clock_enable(cfg->rcu_periph_clock); |
| 261 | + |
| 262 | + ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); |
| 263 | + if (ret) { |
| 264 | + LOG_ERR("Failed to apply pinctrl state"); |
| 265 | + return ret; |
| 266 | + } |
| 267 | + |
| 268 | + ret = spi_context_cs_configure_all(&data->ctx); |
| 269 | + if (ret < 0) { |
| 270 | + return ret; |
| 271 | + } |
| 272 | + |
| 273 | + spi_context_unlock_unconditionally(&data->ctx); |
| 274 | + |
| 275 | + return 0; |
| 276 | +} |
| 277 | + |
| 278 | +#define GD32_SPI_INIT(idx) \ |
| 279 | + PINCTRL_DT_INST_DEFINE(idx); \ |
| 280 | + static struct spi_gd32_data spi_gd32_data_##idx = { \ |
| 281 | + SPI_CONTEXT_INIT_LOCK(spi_gd32_data_##idx, ctx), \ |
| 282 | + SPI_CONTEXT_INIT_SYNC(spi_gd32_data_##idx, ctx), \ |
| 283 | + SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(idx), ctx) \ |
| 284 | + }; \ |
| 285 | + static struct spi_gd32_config spi_gd32_config_##idx = { \ |
| 286 | + .reg = DT_INST_REG_ADDR(idx), \ |
| 287 | + .rcu_periph_clock = DT_INST_PROP(idx, rcu_periph_clock), \ |
| 288 | + .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \ |
| 289 | + }; \ |
| 290 | + DEVICE_DT_INST_DEFINE(idx, &spi_gd32_init, NULL, &spi_gd32_data_##idx, \ |
| 291 | + &spi_gd32_config_##idx, POST_KERNEL, \ |
| 292 | + CONFIG_SPI_INIT_PRIORITY, &spi_gd32_driver_api); |
| 293 | + |
| 294 | +DT_INST_FOREACH_STATUS_OKAY(GD32_SPI_INIT) |
0 commit comments