diff --git a/drivers/mspi/CMakeLists.txt b/drivers/mspi/CMakeLists.txt
index f248cb0b533..78f704393f4 100644
--- a/drivers/mspi/CMakeLists.txt
+++ b/drivers/mspi/CMakeLists.txt
@@ -4,4 +4,5 @@ zephyr_syscall_header(${ZEPHYR_BASE}/include/zephyr/drivers/mspi.h)
 zephyr_library()
 
 zephyr_library_sources_ifdef(CONFIG_MSPI_AMBIQ_AP3 mspi_ambiq_ap3.c)
+zephyr_library_sources_ifdef(CONFIG_MSPI_DW mspi_dw.c)
 zephyr_library_sources_ifdef(CONFIG_MSPI_EMUL mspi_emul.c)
diff --git a/drivers/mspi/Kconfig b/drivers/mspi/Kconfig
index 0adb2a34393..269d8d16f04 100644
--- a/drivers/mspi/Kconfig
+++ b/drivers/mspi/Kconfig
@@ -60,6 +60,7 @@ module-str = mspi
 source "subsys/logging/Kconfig.template.log_config"
 
 source "drivers/mspi/Kconfig.ambiq"
+source "drivers/mspi/Kconfig.dw"
 source "drivers/mspi/Kconfig.mspi_emul"
 
 endif # MSPI
diff --git a/drivers/mspi/Kconfig.dw b/drivers/mspi/Kconfig.dw
new file mode 100644
index 00000000000..1ab82da0e85
--- /dev/null
+++ b/drivers/mspi/Kconfig.dw
@@ -0,0 +1,9 @@
+# Copyright (c) 2024 Nordic Semiconductor ASA
+# SPDX-License-Identifier: Apache-2.0
+
+config MSPI_DW
+	bool "DesignWare SSI controller driver"
+	default y
+	depends on DT_HAS_SNPS_DESIGNWARE_SSI_ENABLED
+	select PINCTRL if $(dt_compat_any_has_prop,$(DT_COMPAT_SNPS_DESIGNWARE_SSI),pinctrl-0)
+	imply MSPI_XIP
diff --git a/drivers/mspi/mspi_dw.c b/drivers/mspi/mspi_dw.c
new file mode 100644
index 00000000000..886d24462a2
--- /dev/null
+++ b/drivers/mspi/mspi_dw.c
@@ -0,0 +1,1355 @@
+/*
+ * Copyright (c) 2024 Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#define DT_DRV_COMPAT snps_designware_ssi
+
+#include <zephyr/drivers/mspi.h>
+#include <zephyr/drivers/pinctrl.h>
+#include <zephyr/irq.h>
+#include <zephyr/logging/log.h>
+#include <zephyr/pm/device.h>
+#include <zephyr/pm/device_runtime.h>
+#include <zephyr/sys/byteorder.h>
+#include <zephyr/sys/util.h>
+
+#include "mspi_dw.h"
+#include "mspi_dw_vendor_specific.h"
+
+LOG_MODULE_REGISTER(mspi_dw, CONFIG_MSPI_LOG_LEVEL);
+
+#define DUMMY_BYTE 0xAA
+
+#if defined(CONFIG_MSPI_XIP)
+struct xip_params {
+	uint32_t read_cmd;
+	uint32_t write_cmd;
+	uint16_t rx_dummy;
+	uint16_t tx_dummy;
+	uint8_t cmd_length;
+	uint8_t addr_length;
+	enum mspi_io_mode io_mode;
+};
+
+struct xip_ctrl {
+	uint32_t read;
+	uint32_t write;
+};
+#endif
+
+struct mspi_dw_data {
+	const struct mspi_dev_id *dev_id;
+	uint32_t packets_done;
+	uint8_t *buf_pos;
+	const uint8_t *buf_end;
+
+	uint32_t ctrlr0;
+	uint32_t spi_ctrlr0;
+	uint32_t baudr;
+
+#if defined(CONFIG_MSPI_XIP)
+	uint32_t xip_freq;
+	struct xip_params xip_params_stored;
+	struct xip_params xip_params_active;
+	uint16_t xip_enabled;
+	enum mspi_cpp_mode xip_cpp;
+#endif
+
+	uint16_t dummy_bytes;
+	uint8_t bytes_to_discard;
+	uint8_t bytes_per_frame_exp;
+	bool standard_spi;
+
+	struct k_sem finished;
+	struct k_sem ctx_lock;
+	struct k_sem cfg_lock;
+	struct mspi_xfer xfer;
+};
+
+struct mspi_dw_config {
+	DEVICE_MMIO_ROM;
+	void (*irq_config)(void);
+	uint32_t clock_frequency;
+#if defined(CONFIG_PINCTRL)
+	const struct pinctrl_dev_config *pcfg;
+#endif
+	const struct gpio_dt_spec *ce_gpios;
+	uint8_t ce_gpios_len;
+	uint8_t tx_fifo_depth_minus_1;
+	uint8_t tx_fifo_threshold;
+	uint8_t rx_fifo_threshold;
+	DECLARE_REG_ACCESS();
+};
+
+/* Register access helpers. */
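+/* Each DEFINE_MM_REG_RD/DEFINE_MM_REG_WR invocation below expands to an
+ * inline read_<reg>()/write_<reg>() accessor for the register at the given
+ * offset, using the access method selected in mspi_dw.h.
+ */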
+#define DEFINE_MM_REG_RD_WR(reg, off) \
+	DEFINE_MM_REG_RD(reg, off) \
+	DEFINE_MM_REG_WR(reg, off)
+
+DEFINE_MM_REG_WR(ctrlr0, 0x00)
+DEFINE_MM_REG_WR(ctrlr1, 0x04)
+DEFINE_MM_REG_WR(ssienr, 0x08)
+DEFINE_MM_REG_WR(ser, 0x10)
+DEFINE_MM_REG_WR(baudr, 0x14)
+DEFINE_MM_REG_RD_WR(txftlr, 0x18)
+DEFINE_MM_REG_RD_WR(rxftlr, 0x1c)
+DEFINE_MM_REG_RD(txflr, 0x20)
+DEFINE_MM_REG_RD(rxflr, 0x24)
+DEFINE_MM_REG_RD(sr, 0x28)
+DEFINE_MM_REG_WR(imr, 0x2c)
+DEFINE_MM_REG_RD(isr, 0x30)
+DEFINE_MM_REG_RD_WR(dr, 0x60)
+DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4)
+
+#if defined(CONFIG_MSPI_XIP)
+DEFINE_MM_REG_WR(xip_incr_inst, 0x100)
+DEFINE_MM_REG_WR(xip_wrap_inst, 0x104)
+DEFINE_MM_REG_WR(xip_ctrl, 0x108)
+DEFINE_MM_REG_WR(xip_write_incr_inst, 0x140)
+DEFINE_MM_REG_WR(xip_write_wrap_inst, 0x144)
+DEFINE_MM_REG_WR(xip_write_ctrl, 0x148)
+#endif
+
+static void tx_data(const struct device *dev,
+		    const struct mspi_xfer_packet *packet)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	const struct mspi_dw_config *dev_config = dev->config;
+	const uint8_t *buf_pos = dev_data->buf_pos;
+	const uint8_t *buf_end = dev_data->buf_end;
+	/* When this function is called, it is known that at least one item
+	 * can be written to the FIFO. The loop below writes to the FIFO
+	 * the number of items that is known to fit, then updates that
+	 * number based on the actual FIFO level (some data may get shifted
+	 * out while the FIFO is being filled, which happens frequently at
+	 * high clock frequencies), and continues until the FIFO is filled
+	 * up or the end of the buffer is reached.
+	 */
+	uint32_t room = 1;
+	uint8_t bytes_per_frame_exp = dev_data->bytes_per_frame_exp;
+	uint8_t tx_fifo_depth = dev_config->tx_fifo_depth_minus_1 + 1;
+	uint32_t data;
+
+	do {
+		if (bytes_per_frame_exp == 2) {
+			data = sys_get_be32(buf_pos);
+			buf_pos += 4;
+		} else if (bytes_per_frame_exp == 1) {
+			data = sys_get_be16(buf_pos);
+			buf_pos += 2;
+		} else {
+			data = *buf_pos;
+			buf_pos += 1;
+		}
+		write_dr(dev, data);
+
+		if (buf_pos >= buf_end) {
+			write_txftlr(dev, 0);
+			break;
+		}
+
+		if (--room == 0) {
+			room = tx_fifo_depth
+			     - FIELD_GET(TXFLR_TXTFL_MASK, read_txflr(dev));
+		}
+	} while (room);
+
+	dev_data->buf_pos = (uint8_t *)buf_pos;
+}
+
+static bool make_rx_cycles(const struct device *dev)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	const struct mspi_dw_config *dev_config = dev->config;
+	uint16_t dummy_bytes = dev_data->dummy_bytes;
+	/* See tx_data(). */
+	uint32_t room = 1;
+	uint8_t tx_fifo_depth = dev_config->tx_fifo_depth_minus_1 + 1;
+
+	do {
+		write_dr(dev, DUMMY_BYTE);
+
+		--dummy_bytes;
+		if (!dummy_bytes) {
+			dev_data->dummy_bytes = 0;
+			return true;
+		}
+
+		if (--room == 0) {
+			room = tx_fifo_depth
+			     - FIELD_GET(TXFLR_TXTFL_MASK, read_txflr(dev));
+		}
+	} while (room);
+
+	dev_data->dummy_bytes = dummy_bytes;
+	return false;
+}
+
+static void read_rx_fifo(const struct device *dev,
+			 const struct mspi_xfer_packet *packet)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	const struct mspi_dw_config *dev_config = dev->config;
+	uint8_t bytes_to_discard = dev_data->bytes_to_discard;
+	uint8_t *buf_pos = dev_data->buf_pos;
+	const uint8_t *buf_end = &packet->data_buf[packet->num_bytes];
+	uint8_t bytes_per_frame_exp = dev_data->bytes_per_frame_exp;
+	/* See `room` in tx_data().
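+	 * Here, `in_fifo` plays the same role for the RX FIFO: it is
+	 * refreshed from RXFLR whenever it drops to zero.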
*/ + uint32_t in_fifo = 1; + uint32_t remaining_frames; + + do { + uint32_t data = read_dr(dev); + + if (bytes_to_discard) { + --bytes_to_discard; + } else { + if (bytes_per_frame_exp == 2) { + sys_put_be32(data, buf_pos); + buf_pos += 4; + } else if (bytes_per_frame_exp == 1) { + sys_put_be16(data, buf_pos); + buf_pos += 2; + } else { + *buf_pos = (uint8_t)data; + buf_pos += 1; + } + + if (buf_pos >= buf_end) { + dev_data->bytes_to_discard = bytes_to_discard; + dev_data->buf_pos = buf_pos; + return; + } + } + + if (--in_fifo == 0) { + in_fifo = FIELD_GET(RXFLR_RXTFL_MASK, read_rxflr(dev)); + } + } while (in_fifo); + + remaining_frames = (bytes_to_discard + buf_end - buf_pos) + >> bytes_per_frame_exp; + if (remaining_frames - 1 < dev_config->rx_fifo_threshold) { + write_rxftlr(dev, remaining_frames - 1); + } + + dev_data->bytes_to_discard = bytes_to_discard; + dev_data->buf_pos = buf_pos; +} + +static void mspi_dw_isr(const struct device *dev) +{ + struct mspi_dw_data *dev_data = dev->data; + const struct mspi_xfer_packet *packet = + &dev_data->xfer.packets[dev_data->packets_done]; + uint32_t int_status = read_isr(dev); + + if (int_status & ISR_RXFIS_BIT) { + read_rx_fifo(dev, packet); + } + + if (dev_data->buf_pos >= dev_data->buf_end) { + write_imr(dev, 0); + /* It may happen that at this point the controller is still + * shifting out the last frame (the last interrupt occurs when + * the TX FIFO is empty). Wait if it signals that it is busy. + */ + while (read_sr(dev) & SR_BUSY_BIT) { + } + + k_sem_give(&dev_data->finished); + } else { + if (int_status & ISR_TXEIS_BIT) { + if (dev_data->dummy_bytes) { + if (make_rx_cycles(dev)) { + write_imr(dev, IMR_RXFIM_BIT); + } + } else { + tx_data(dev, packet); + } + } + } + + vendor_specific_irq_clear(dev); +} + +static int api_config(const struct mspi_dt_spec *spec) +{ + ARG_UNUSED(spec); + + return -ENOTSUP; +} + +static bool apply_io_mode(struct mspi_dw_data *dev_data, + enum mspi_io_mode io_mode) +{ + dev_data->ctrlr0 &= ~CTRLR0_SPI_FRF_MASK; + dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_TRANS_TYPE_MASK; + + /* Frame format used for transferring data. 
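+	 * (CTRLR0.SPI_FRF: Standard, Dual, Quad, or Octal frame format)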
*/ + + if (io_mode == MSPI_IO_MODE_SINGLE) { + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK, + CTRLR0_SPI_FRF_STANDARD); + dev_data->standard_spi = true; + return true; + } + + dev_data->standard_spi = false; + + switch (io_mode) { + case MSPI_IO_MODE_DUAL: + case MSPI_IO_MODE_DUAL_1_1_2: + case MSPI_IO_MODE_DUAL_1_2_2: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK, + CTRLR0_SPI_FRF_DUAL); + break; + case MSPI_IO_MODE_QUAD: + case MSPI_IO_MODE_QUAD_1_1_4: + case MSPI_IO_MODE_QUAD_1_4_4: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK, + CTRLR0_SPI_FRF_QUAD); + break; + case MSPI_IO_MODE_OCTAL: + case MSPI_IO_MODE_OCTAL_1_1_8: + case MSPI_IO_MODE_OCTAL_1_8_8: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK, + CTRLR0_SPI_FRF_OCTAL); + break; + default: + LOG_ERR("IO mode %d not supported", io_mode); + return false; + } + + /* Transfer format used for Address and Instruction: */ + + switch (io_mode) { + case MSPI_IO_MODE_DUAL_1_1_2: + case MSPI_IO_MODE_QUAD_1_1_4: + case MSPI_IO_MODE_OCTAL_1_1_8: + /* - both sent in Standard SPI mode */ + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_TRANS_TYPE_MASK, + SPI_CTRLR0_TRANS_TYPE_TT0); + break; + case MSPI_IO_MODE_DUAL_1_2_2: + case MSPI_IO_MODE_QUAD_1_4_4: + case MSPI_IO_MODE_OCTAL_1_8_8: + /* - Instruction sent in Standard SPI mode, + * Address sent the same way as data + */ + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_TRANS_TYPE_MASK, + SPI_CTRLR0_TRANS_TYPE_TT1); + break; + default: + /* - both sent the same way as data. */ + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_TRANS_TYPE_MASK, + SPI_CTRLR0_TRANS_TYPE_TT2); + break; + } + + return true; +} + +static bool apply_cmd_length(struct mspi_dw_data *dev_data, uint32_t cmd_length) +{ + switch (cmd_length) { + case 0: + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_INST_L_MASK, + SPI_CTRLR0_INST_L0); + break; + case 1: + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_INST_L_MASK, + SPI_CTRLR0_INST_L8); + break; + case 2: + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_INST_L_MASK, + SPI_CTRLR0_INST_L16); + break; + default: + LOG_ERR("Command length %d not supported", cmd_length); + return false; + } + + return true; +} + +static bool apply_addr_length(struct mspi_dw_data *dev_data, + uint32_t addr_length) +{ + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_ADDR_L_MASK, + addr_length * 2); + + return true; +} + +#if defined(CONFIG_MSPI_XIP) +static bool apply_xip_io_mode(const struct mspi_dw_data *dev_data, + struct xip_ctrl *ctrl) +{ + enum mspi_io_mode io_mode = dev_data->xip_params_active.io_mode; + + /* Frame format used for transferring data. 
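+	 * (XIP_CTRL.FRF for XIP reads, XIP_WRITE_CTRL.FRF for XIP writes)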
*/ + + if (io_mode == MSPI_IO_MODE_SINGLE) { + LOG_ERR("XIP not available in single line mode"); + return false; + } + + switch (io_mode) { + case MSPI_IO_MODE_DUAL: + case MSPI_IO_MODE_DUAL_1_1_2: + case MSPI_IO_MODE_DUAL_1_2_2: + ctrl->read |= FIELD_PREP(XIP_CTRL_FRF_MASK, + XIP_CTRL_FRF_DUAL); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_FRF_MASK, + XIP_WRITE_CTRL_FRF_DUAL); + break; + case MSPI_IO_MODE_QUAD: + case MSPI_IO_MODE_QUAD_1_1_4: + case MSPI_IO_MODE_QUAD_1_4_4: + ctrl->read |= FIELD_PREP(XIP_CTRL_FRF_MASK, + XIP_CTRL_FRF_QUAD); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_FRF_MASK, + XIP_WRITE_CTRL_FRF_QUAD); + break; + case MSPI_IO_MODE_OCTAL: + case MSPI_IO_MODE_OCTAL_1_1_8: + case MSPI_IO_MODE_OCTAL_1_8_8: + ctrl->read |= FIELD_PREP(XIP_CTRL_FRF_MASK, + XIP_CTRL_FRF_OCTAL); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_FRF_MASK, + XIP_WRITE_CTRL_FRF_OCTAL); + break; + default: + LOG_ERR("IO mode %d not supported", io_mode); + return false; + } + + /* Transfer format used for Address and Instruction: */ + + switch (io_mode) { + case MSPI_IO_MODE_DUAL_1_1_2: + case MSPI_IO_MODE_QUAD_1_1_4: + case MSPI_IO_MODE_OCTAL_1_1_8: + /* - both sent in Standard SPI mode */ + ctrl->read |= FIELD_PREP(XIP_CTRL_TRANS_TYPE_MASK, + XIP_CTRL_TRANS_TYPE_TT0); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_TRANS_TYPE_MASK, + XIP_WRITE_CTRL_TRANS_TYPE_TT0); + break; + case MSPI_IO_MODE_DUAL_1_2_2: + case MSPI_IO_MODE_QUAD_1_4_4: + case MSPI_IO_MODE_OCTAL_1_8_8: + /* - Instruction sent in Standard SPI mode, + * Address sent the same way as data + */ + ctrl->read |= FIELD_PREP(XIP_CTRL_TRANS_TYPE_MASK, + XIP_CTRL_TRANS_TYPE_TT1); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_TRANS_TYPE_MASK, + XIP_WRITE_CTRL_TRANS_TYPE_TT1); + break; + default: + /* - both sent the same way as data. 
*/ + ctrl->read |= FIELD_PREP(XIP_CTRL_TRANS_TYPE_MASK, + XIP_CTRL_TRANS_TYPE_TT2); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_TRANS_TYPE_MASK, + XIP_WRITE_CTRL_TRANS_TYPE_TT2); + break; + } + + return true; +} + +static bool apply_xip_cmd_length(const struct mspi_dw_data *dev_data, + struct xip_ctrl *ctrl) +{ + uint8_t cmd_length = dev_data->xip_params_active.cmd_length; + + switch (cmd_length) { + case 0: + ctrl->read |= FIELD_PREP(XIP_CTRL_INST_L_MASK, + XIP_CTRL_INST_L0); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_INST_L_MASK, + XIP_WRITE_CTRL_INST_L0); + break; + case 1: + ctrl->read |= XIP_CTRL_INST_EN_BIT + | FIELD_PREP(XIP_CTRL_INST_L_MASK, + XIP_CTRL_INST_L8); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_INST_L_MASK, + XIP_WRITE_CTRL_INST_L8); + break; + case 2: + ctrl->read |= XIP_CTRL_INST_EN_BIT + | FIELD_PREP(XIP_CTRL_INST_L_MASK, + XIP_CTRL_INST_L16); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_INST_L_MASK, + XIP_WRITE_CTRL_INST_L16); + break; + default: + LOG_ERR("Command length %d not supported", cmd_length); + return false; + } + + return true; +} + +static bool apply_xip_addr_length(const struct mspi_dw_data *dev_data, + struct xip_ctrl *ctrl) +{ + uint8_t addr_length = dev_data->xip_params_active.addr_length; + + ctrl->read |= FIELD_PREP(XIP_CTRL_ADDR_L_MASK, addr_length * 2); + ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_ADDR_L_MASK, addr_length * 2); + + return true; +} +#endif /* defined(CONFIG_MSPI_XIP) */ + +static int _api_dev_config(const struct device *dev, + const enum mspi_dev_cfg_mask param_mask, + const struct mspi_dev_cfg *cfg) +{ + const struct mspi_dw_config *dev_config = dev->config; + struct mspi_dw_data *dev_data = dev->data; + + if (param_mask & MSPI_DEVICE_CONFIG_ENDIAN) { + if (cfg->endian != MSPI_XFER_BIG_ENDIAN) { + LOG_ERR("Only big endian transfers are supported."); + return -ENOTSUP; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_CE_POL) { + if (cfg->ce_polarity != MSPI_CE_ACTIVE_LOW) { + LOG_ERR("Only active low CE is supported."); + return -ENOTSUP; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_MEM_BOUND) { + if (cfg->mem_boundary) { + LOG_ERR("Auto CE break is not supported."); + return -ENOTSUP; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_BREAK_TIME) { + if (cfg->time_to_break) { + LOG_ERR("Auto CE break is not supported."); + return -ENOTSUP; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_IO_MODE) { +#if defined(CONFIG_MSPI_XIP) + dev_data->xip_params_stored.io_mode = cfg->io_mode; +#endif + + if (!apply_io_mode(dev_data, cfg->io_mode)) { + return -EINVAL; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_CPP) { +#if defined(CONFIG_MSPI_XIP) + /* Make sure the new setting is compatible with the one used + * for XIP if it is enabled. 
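+		 * XIP transfers reuse CTRLR0 and BAUDR, so the clock
+		 * phase/polarity must stay unchanged while any XIP region
+		 * is active.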
+ */ + if (!dev_data->xip_enabled) { + dev_data->xip_cpp = cfg->cpp; + } else if (dev_data->xip_cpp != cfg->cpp) { + LOG_ERR("Conflict with configuration used for XIP."); + return -EINVAL; + } +#endif + + dev_data->ctrlr0 &= ~(CTRLR0_SCPOL_BIT | CTRLR0_SCPH_BIT); + + switch (cfg->cpp) { + default: + case MSPI_CPP_MODE_0: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 0) | + FIELD_PREP(CTRLR0_SCPH_BIT, 0); + break; + case MSPI_CPP_MODE_1: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 0) | + FIELD_PREP(CTRLR0_SCPH_BIT, 1); + break; + case MSPI_CPP_MODE_2: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 1) | + FIELD_PREP(CTRLR0_SCPH_BIT, 0); + break; + case MSPI_CPP_MODE_3: + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 1) | + FIELD_PREP(CTRLR0_SCPH_BIT, 1); + break; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_FREQUENCY) { + if (cfg->freq > dev_config->clock_frequency / 2 || + cfg->freq < dev_config->clock_frequency / 65534) { + LOG_ERR("Invalid frequency: %u, MIN: %u, MAX: %u", + cfg->freq, dev_config->clock_frequency / 65534, + dev_config->clock_frequency / 2); + return -EINVAL; + } + +#if defined(CONFIG_MSPI_XIP) + /* Make sure the new setting is compatible with the one used + * for XIP if it is enabled. + */ + if (!dev_data->xip_enabled) { + dev_data->xip_freq = cfg->freq; + } else if (dev_data->xip_freq != cfg->freq) { + LOG_ERR("Conflict with configuration used for XIP."); + return -EINVAL; + } +#endif + + dev_data->baudr = dev_config->clock_frequency / cfg->freq; + } + + if (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE) { + /* TODO: add support for DDR */ + if (cfg->data_rate != MSPI_DATA_RATE_SINGLE) { + LOG_ERR("Only single data rate is supported."); + return -ENOTSUP; + } + } + + if (param_mask & MSPI_DEVICE_CONFIG_DQS) { + /* TODO: add support for DQS */ + if (cfg->dqs_enable) { + LOG_ERR("DQS line is not supported."); + return -ENOTSUP; + } + } + +#if defined(CONFIG_MSPI_XIP) + if (param_mask & MSPI_DEVICE_CONFIG_READ_CMD) { + dev_data->xip_params_stored.read_cmd = cfg->read_cmd; + } + if (param_mask & MSPI_DEVICE_CONFIG_WRITE_CMD) { + dev_data->xip_params_stored.write_cmd = cfg->write_cmd; + } + if (param_mask & MSPI_DEVICE_CONFIG_RX_DUMMY) { + dev_data->xip_params_stored.rx_dummy = cfg->rx_dummy; + } + if (param_mask & MSPI_DEVICE_CONFIG_TX_DUMMY) { + dev_data->xip_params_stored.tx_dummy = cfg->tx_dummy; + } + if (param_mask & MSPI_DEVICE_CONFIG_CMD_LEN) { + dev_data->xip_params_stored.cmd_length = cfg->cmd_length; + } + if (param_mask & MSPI_DEVICE_CONFIG_ADDR_LEN) { + dev_data->xip_params_stored.addr_length = cfg->addr_length; + } +#endif + + /* Always use Motorola SPI frame format. */ + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_FRF_MASK, CTRLR0_FRF_SPI); + /* Enable clock stretching. 
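+	 * (the controller then stalls the serial clock when the TX FIFO
+	 * runs empty or the RX FIFO fills up, instead of corrupting the
+	 * transfer)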
*/ + dev_data->spi_ctrlr0 |= SPI_CTRLR0_CLK_STRETCH_EN_BIT; + + return 0; +} + +static int api_dev_config(const struct device *dev, + const struct mspi_dev_id *dev_id, + const enum mspi_dev_cfg_mask param_mask, + const struct mspi_dev_cfg *cfg) +{ + struct mspi_dw_data *dev_data = dev->data; + int rc; + + if (dev_id != dev_data->dev_id) { + rc = k_sem_take(&dev_data->cfg_lock, + K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE)); + if (rc < 0) { + LOG_ERR("Failed to switch controller to device"); + return -EBUSY; + } + + dev_data->dev_id = dev_id; + + if (param_mask == MSPI_DEVICE_CONFIG_NONE) { + return 0; + } + } + + (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER); + + rc = _api_dev_config(dev, param_mask, cfg); + + k_sem_give(&dev_data->ctx_lock); + + if (rc < 0) { + dev_data->dev_id = NULL; + k_sem_give(&dev_data->cfg_lock); + } + + return rc; +} + +static int api_get_channel_status(const struct device *dev, uint8_t ch) +{ + ARG_UNUSED(ch); + + struct mspi_dw_data *dev_data = dev->data; + + (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER); + + dev_data->dev_id = NULL; + k_sem_give(&dev_data->cfg_lock); + + k_sem_give(&dev_data->ctx_lock); + + return 0; +} + +static void tx_control_field(const struct device *dev, + uint32_t field, uint8_t len) +{ + uint8_t shift = 8 * len; + + do { + shift -= 8; + write_dr(dev, field >> shift); + } while (shift); +} + +static int start_next_packet(const struct device *dev, k_timeout_t timeout) +{ + const struct mspi_dw_config *dev_config = dev->config; + struct mspi_dw_data *dev_data = dev->data; + const struct mspi_xfer_packet *packet = + &dev_data->xfer.packets[dev_data->packets_done]; + bool xip_enabled = COND_CODE_1(CONFIG_MSPI_XIP, + (dev_data->xip_enabled != 0), + (false)); + unsigned int key; + uint8_t tx_fifo_threshold; + uint32_t packet_frames; + uint32_t imr; + int rc = 0; + + if (packet->num_bytes == 0 && + dev_data->xfer.cmd_length == 0 && + dev_data->xfer.addr_length == 0) { + return 0; + } + + dev_data->dummy_bytes = 0; + + dev_data->ctrlr0 &= ~CTRLR0_TMOD_MASK + & ~CTRLR0_DFS_MASK; + + dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_WAIT_CYCLES_MASK; + + if (dev_data->standard_spi && + (dev_data->xfer.cmd_length != 0 || + dev_data->xfer.addr_length != 0)) { + dev_data->bytes_per_frame_exp = 0; + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 7); + } else { + if ((packet->num_bytes % 4) == 0) { + dev_data->bytes_per_frame_exp = 2; + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 31); + } else if ((packet->num_bytes % 2) == 0) { + dev_data->bytes_per_frame_exp = 1; + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 15); + } else { + dev_data->bytes_per_frame_exp = 0; + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 7); + } + } + + packet_frames = packet->num_bytes >> dev_data->bytes_per_frame_exp; + + if (packet_frames > UINT16_MAX + 1) { + LOG_ERR("Packet length (%u) exceeds supported maximum", + packet->num_bytes); + return -EINVAL; + } + + if (packet->dir == MSPI_TX || packet->num_bytes == 0) { + imr = IMR_TXEIM_BIT; + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, + CTRLR0_TMOD_TX); + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK, + dev_data->xfer.tx_dummy); + + write_rxftlr(dev, 0); + tx_fifo_threshold = dev_config->tx_fifo_threshold; + } else { + uint32_t tmod; + uint8_t rx_fifo_threshold; + + /* In Standard SPI Mode, the controller does not support + * sending the command and address fields separately, they + * need to be sent as data; hence, for RX packets with these + * fields, the TX/RX transfer mode needs to be used and 
+ * consequently, dummy bytes need to be transmitted so that + * clock cycles for the RX part are provided (the controller + * does not do it automatically in the TX/RX mode). + */ + if (dev_data->standard_spi && + (dev_data->xfer.cmd_length != 0 || + dev_data->xfer.addr_length != 0)) { + uint32_t rx_total_bytes; + + dev_data->bytes_to_discard = dev_data->xfer.cmd_length + + dev_data->xfer.addr_length; + rx_total_bytes = dev_data->bytes_to_discard + + packet->num_bytes; + + dev_data->dummy_bytes = packet->num_bytes; + + imr = IMR_TXEIM_BIT | IMR_RXFIM_BIT; + tmod = CTRLR0_TMOD_TX_RX; + tx_fifo_threshold = dev_config->tx_fifo_threshold; + /* For standard SPI, only 1-byte frames are used. */ + rx_fifo_threshold = MIN(rx_total_bytes - 1, + dev_config->rx_fifo_threshold); + } else { + imr = IMR_RXFIM_BIT; + tmod = CTRLR0_TMOD_RX; + tx_fifo_threshold = 0; + rx_fifo_threshold = MIN(packet_frames - 1, + dev_config->rx_fifo_threshold); + } + + dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, tmod); + dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK, + dev_data->xfer.rx_dummy); + + write_rxftlr(dev, FIELD_PREP(RXFTLR_RFT_MASK, + rx_fifo_threshold)); + } + + if (dev_data->dev_id->ce.port) { + rc = gpio_pin_set_dt(&dev_data->dev_id->ce, 1); + if (rc < 0) { + LOG_ERR("Failed to activate CE line (%d)", rc); + return rc; + } + } + + if (xip_enabled) { + key = irq_lock(); + write_ssienr(dev, 0); + } + + /* These registers cannot be written when the controller is enabled, + * that's why it is temporarily disabled above; with locked interrupts, + * to prevent potential XIP transfers during that period. + */ + write_ctrlr0(dev, dev_data->ctrlr0); + write_ctrlr1(dev, packet_frames > 0 + ? FIELD_PREP(CTRLR1_NDF_MASK, packet_frames - 1) + : 0); + write_spi_ctrlr0(dev, dev_data->spi_ctrlr0); + write_baudr(dev, dev_data->baudr); + write_ser(dev, BIT(dev_data->dev_id->dev_idx)); + + if (xip_enabled) { + write_ssienr(dev, SSIENR_SSIC_EN_BIT); + irq_unlock(key); + } + + dev_data->buf_pos = packet->data_buf; + dev_data->buf_end = &packet->data_buf[packet->num_bytes]; + + if ((imr & IMR_TXEIM_BIT) && dev_data->buf_pos < dev_data->buf_end) { + uint32_t start_level = tx_fifo_threshold; + + if (dev_data->dummy_bytes) { + uint32_t tx_total = dev_data->bytes_to_discard + + dev_data->dummy_bytes; + + if (start_level > tx_total - 1) { + start_level = tx_total - 1; + } + } + + write_txftlr(dev, + FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) | + FIELD_PREP(TXFTLR_TFT_MASK, tx_fifo_threshold)); + } else { + write_txftlr(dev, 0); + } + + /* Ensure that there will be no interrupt from the controller yet. */ + write_imr(dev, 0); + /* Enable the controller. This must be done before DR is written. */ + write_ssienr(dev, SSIENR_SSIC_EN_BIT); + + if (dev_data->standard_spi) { + if (dev_data->xfer.cmd_length) { + tx_control_field(dev, packet->cmd, + dev_data->xfer.cmd_length); + } + + if (dev_data->xfer.addr_length) { + tx_control_field(dev, packet->address, + dev_data->xfer.addr_length); + } + } else { + if (dev_data->xfer.cmd_length) { + write_dr(dev, packet->cmd); + } + + if (dev_data->xfer.addr_length) { + write_dr(dev, packet->address); + } + } + + if (dev_data->dummy_bytes) { + if (make_rx_cycles(dev)) { + imr = IMR_RXFIM_BIT; + } + } else if (packet->dir == MSPI_TX && packet->num_bytes) { + tx_data(dev, packet); + } + + /* Enable interrupts now and wait until the packet is done. 
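+	 * The ISR gives the `finished` semaphore once the whole buffer has
+	 * been handled and the controller is no longer busy.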
+	 */
+	write_imr(dev, imr);
+
+	rc = k_sem_take(&dev_data->finished, timeout);
+	if (rc < 0) {
+		rc = -ETIMEDOUT;
+	}
+
+	/* Disable the controller. This will immediately halt the transfer
+	 * if it hasn't finished yet.
+	 */
+	if (xip_enabled) {
+		/* If XIP is enabled, the controller must be kept enabled,
+		 * so disable it only momentarily if there's a need to halt
+		 * a transfer that has timed out.
+		 */
+		if (rc == -ETIMEDOUT) {
+			key = irq_lock();
+
+			write_ssienr(dev, 0);
+			write_ssienr(dev, SSIENR_SSIC_EN_BIT);
+
+			irq_unlock(key);
+		}
+	} else {
+		write_ssienr(dev, 0);
+	}
+
+	if (dev_data->dev_id->ce.port) {
+		int rc2;
+
+		/* Do not use `rc` here, so as not to overwrite a potential
+		 * timeout error.
+		 */
+		rc2 = gpio_pin_set_dt(&dev_data->dev_id->ce, 0);
+		if (rc2 < 0) {
+			LOG_ERR("Failed to deactivate CE line (%d)", rc2);
+			return rc2;
+		}
+	}
+
+	return rc;
+}
+
+static int _api_transceive(const struct device *dev,
+			   const struct mspi_xfer *req)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	int rc;
+
+	dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_WAIT_CYCLES_MASK
+			      & ~SPI_CTRLR0_INST_L_MASK
+			      & ~SPI_CTRLR0_ADDR_L_MASK;
+
+	if (!apply_cmd_length(dev_data, req->cmd_length) ||
+	    !apply_addr_length(dev_data, req->addr_length)) {
+		return -EINVAL;
+	}
+
+	if (dev_data->standard_spi &&
+	    (req->rx_dummy != 0 || req->tx_dummy != 0)) {
+		LOG_ERR("Dummy cycles unsupported in single line mode");
+		return -EINVAL;
+	} else if (req->rx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX ||
+		   req->tx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX) {
+		LOG_ERR("Unsupported RX (%u) or TX (%u) dummy cycles",
+			req->rx_dummy, req->tx_dummy);
+		return -EINVAL;
+	}
+
+	dev_data->xfer = *req;
+
+	for (dev_data->packets_done = 0;
+	     dev_data->packets_done < dev_data->xfer.num_packet;
+	     dev_data->packets_done++) {
+		rc = start_next_packet(dev, K_MSEC(dev_data->xfer.timeout));
+		if (rc < 0) {
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int api_transceive(const struct device *dev,
+			  const struct mspi_dev_id *dev_id,
+			  const struct mspi_xfer *req)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	int rc, rc2;
+
+	if (dev_id != dev_data->dev_id) {
+		LOG_ERR("Controller is not configured for this device");
+		return -EINVAL;
+	}
+
+	/* TODO: add support for asynchronous transfers */
+	if (req->async) {
+		LOG_ERR("Asynchronous transfers are not supported");
+		return -ENOTSUP;
+	}
+
+	rc = pm_device_runtime_get(dev);
+	if (rc < 0) {
+		LOG_ERR("pm_device_runtime_get() failed: %d", rc);
+		return rc;
+	}
+
+	(void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
+
+	rc = _api_transceive(dev, req);
+
+	k_sem_give(&dev_data->ctx_lock);
+
+	rc2 = pm_device_runtime_put(dev);
+	if (rc2 < 0) {
+		LOG_ERR("pm_device_runtime_put() failed: %d", rc2);
+		rc = (rc < 0 ? rc : rc2);
+	}
+
+	return rc;
+}
+
+#if defined(CONFIG_MSPI_XIP)
+static int _api_xip_config(const struct device *dev,
+			   const struct mspi_dev_id *dev_id,
+			   const struct mspi_xip_cfg *cfg)
+{
+	struct mspi_dw_data *dev_data = dev->data;
+	int rc;
+
+	if (!cfg->enable) {
+		rc = vendor_specific_xip_disable(dev, dev_id, cfg);
+		if (rc < 0) {
+			return rc;
+		}
+
+		dev_data->xip_enabled &= ~BIT(dev_id->dev_idx);
+
+		if (!dev_data->xip_enabled) {
+			write_ssienr(dev, 0);
+
+			/* Since XIP is disabled, it is okay for the controller
+			 * to be suspended.
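+			 * This put() balances the additional
+			 * pm_device_runtime_get() performed when XIP was
+			 * enabled.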
+ */ + rc = pm_device_runtime_put(dev); + if (rc < 0) { + LOG_ERR("pm_device_runtime_put() failed: %d", rc); + return rc; + } + } + + return 0; + } + + if (!dev_data->xip_enabled) { + struct xip_params *params = &dev_data->xip_params_active; + struct xip_ctrl ctrl = {0}; + + *params = dev_data->xip_params_stored; + + if (!apply_xip_io_mode(dev_data, &ctrl) || + !apply_xip_cmd_length(dev_data, &ctrl) || + !apply_xip_addr_length(dev_data, &ctrl)) { + return -EINVAL; + } + + if (params->rx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX || + params->tx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX) { + LOG_ERR("Unsupported RX (%u) or TX (%u) dummy cycles", + params->rx_dummy, params->tx_dummy); + return -EINVAL; + } + + /* Increase usage count additionally to prevent the controller + * from being suspended as long as XIP is active. + */ + rc = pm_device_runtime_get(dev); + if (rc < 0) { + LOG_ERR("pm_device_runtime_get() failed: %d", rc); + return rc; + } + + ctrl.read |= FIELD_PREP(XIP_CTRL_WAIT_CYCLES_MASK, + params->rx_dummy); + ctrl.write |= FIELD_PREP(XIP_WRITE_CTRL_WAIT_CYCLES_MASK, + params->tx_dummy); + + /* Make sure the baud rate and serial clock phase/polarity + * registers are configured properly. They may not be if + * non-XIP transfers have not been performed yet. + */ + write_ctrlr0(dev, dev_data->ctrlr0); + write_baudr(dev, dev_data->baudr); + + write_xip_incr_inst(dev, params->read_cmd); + write_xip_wrap_inst(dev, params->read_cmd); + write_xip_ctrl(dev, ctrl.read); + write_xip_write_incr_inst(dev, params->write_cmd); + write_xip_write_wrap_inst(dev, params->write_cmd); + write_xip_write_ctrl(dev, ctrl.write); + } else if (dev_data->xip_params_active.read_cmd != + dev_data->xip_params_stored.read_cmd || + dev_data->xip_params_active.write_cmd != + dev_data->xip_params_stored.write_cmd || + dev_data->xip_params_active.cmd_length != + dev_data->xip_params_stored.cmd_length || + dev_data->xip_params_active.addr_length != + dev_data->xip_params_stored.addr_length || + dev_data->xip_params_active.rx_dummy != + dev_data->xip_params_stored.rx_dummy || + dev_data->xip_params_active.tx_dummy != + dev_data->xip_params_stored.tx_dummy) { + LOG_ERR("Conflict with configuration already used for XIP."); + return -EINVAL; + } + + rc = vendor_specific_xip_enable(dev, dev_id, cfg); + if (rc < 0) { + return rc; + } + + write_ssienr(dev, SSIENR_SSIC_EN_BIT); + + dev_data->xip_enabled |= BIT(dev_id->dev_idx); + + return 0; +} + +static int api_xip_config(const struct device *dev, + const struct mspi_dev_id *dev_id, + const struct mspi_xip_cfg *cfg) +{ + struct mspi_dw_data *dev_data = dev->data; + int rc, rc2; + + if (cfg->enable && dev_id != dev_data->dev_id) { + LOG_ERR("Controller is not configured for this device"); + return -EINVAL; + } + + rc = pm_device_runtime_get(dev); + if (rc < 0) { + LOG_ERR("pm_device_runtime_get() failed: %d", rc); + return rc; + } + + (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER); + + rc = _api_xip_config(dev, dev_id, cfg); + + k_sem_give(&dev_data->ctx_lock); + + rc2 = pm_device_runtime_put(dev); + if (rc2 < 0) { + LOG_ERR("pm_device_runtime_put() failed: %d", rc2); + rc = (rc < 0 ? 
rc : rc2); + } + + return rc; +} +#endif /* defined(CONFIG_MSPI_XIP) */ + +static int dev_pm_action_cb(const struct device *dev, + enum pm_device_action action) +{ + struct mspi_dw_data *dev_data = dev->data; + + if (action == PM_DEVICE_ACTION_RESUME) { +#if defined(CONFIG_PINCTRL) + const struct mspi_dw_config *dev_config = dev->config; + int rc = pinctrl_apply_state(dev_config->pcfg, + PINCTRL_STATE_DEFAULT); + + if (rc < 0) { + LOG_ERR("Cannot apply default pins state (%d)", rc); + return rc; + } +#endif + vendor_specific_resume(dev); + + k_sem_give(&dev_data->ctx_lock); + + return 0; + } + + if (IS_ENABLED(CONFIG_PM_DEVICE) && + action == PM_DEVICE_ACTION_SUSPEND) { + bool xip_enabled = COND_CODE_1(CONFIG_MSPI_XIP, + (dev_data->xip_enabled != 0), + (false)); + +#if defined(CONFIG_PINCTRL) + const struct mspi_dw_config *dev_config = dev->config; + int rc = pinctrl_apply_state(dev_config->pcfg, + PINCTRL_STATE_SLEEP); + + if (rc < 0) { + LOG_ERR("Cannot apply sleep pins state (%d)", rc); + return rc; + } +#endif + if (xip_enabled || + k_sem_take(&dev_data->ctx_lock, K_NO_WAIT) != 0) { + LOG_ERR("Controller in use, cannot be suspended"); + return -EBUSY; + } + + vendor_specific_suspend(dev); + + return 0; + } + + return -ENOTSUP; +} + +static int dev_init(const struct device *dev) +{ + struct mspi_dw_data *dev_data = dev->data; + const struct mspi_dw_config *dev_config = dev->config; + const struct gpio_dt_spec *ce_gpio; + int rc; + + DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); + + vendor_specific_init(dev); + + dev_config->irq_config(); + + k_sem_init(&dev_data->finished, 0, 1); + k_sem_init(&dev_data->cfg_lock, 1, 1); + /* This semaphore will be set to 1 after the device is resumed. */ + k_sem_init(&dev_data->ctx_lock, 0, 1); + + for (ce_gpio = dev_config->ce_gpios; + ce_gpio < &dev_config->ce_gpios[dev_config->ce_gpios_len]; + ce_gpio++) { + if (!device_is_ready(ce_gpio->port)) { + LOG_ERR("CE GPIO port %s is not ready", + ce_gpio->port->name); + return -ENODEV; + } + + rc = gpio_pin_configure_dt(ce_gpio, GPIO_OUTPUT_INACTIVE); + if (rc < 0) { + return rc; + } + } + +#if defined(CONFIG_PINCTRL) + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_SLEEP); + if (rc < 0) { + LOG_ERR("Cannot apply sleep pins state (%d)", rc); + return rc; + } + } +#endif + + return pm_device_driver_init(dev, dev_pm_action_cb); +} + +static const struct mspi_driver_api drv_api = { + .config = api_config, + .dev_config = api_dev_config, + .get_channel_status = api_get_channel_status, + .transceive = api_transceive, +#if defined(CONFIG_MSPI_XIP) + .xip_config = api_xip_config, +#endif +}; + +#define MSPI_DW_INST_IRQ(idx, inst) \ + IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, idx, irq), \ + DT_INST_IRQ_BY_IDX(inst, idx, priority), \ + mspi_dw_isr, DEVICE_DT_INST_GET(inst), 0); \ + irq_enable(DT_INST_IRQ_BY_IDX(inst, idx, irq)) + +#define MSPI_DW_MMIO_ROM_INIT(node_id) \ + COND_CODE_1(DT_REG_HAS_NAME(node_id, core), \ + (Z_DEVICE_MMIO_NAMED_ROM_INITIALIZER(core, node_id)), \ + (DEVICE_MMIO_ROM_INIT(node_id))) + +#define MSPI_DW_CLOCK_FREQUENCY(inst) \ + COND_CODE_1(DT_NODE_HAS_PROP(DT_INST_PHANDLE(inst, clocks), \ + clock_frequency), \ + (DT_INST_PROP_BY_PHANDLE(inst, clocks, \ + clock_frequency)), \ + (DT_INST_PROP(inst, clock_frequency))) + +#define MSPI_DW_DT_INST_PROP(inst, prop) .prop = DT_INST_PROP(inst, prop) + +#define FOREACH_CE_GPIOS_ELEM(inst) \ + DT_INST_FOREACH_PROP_ELEM_SEP(inst, ce_gpios, \ + GPIO_DT_SPEC_GET_BY_IDX, (,)) +#define MSPI_DW_CE_GPIOS(inst) \ + 
.ce_gpios = (const struct gpio_dt_spec []) \ + { FOREACH_CE_GPIOS_ELEM(inst) }, \ + .ce_gpios_len = DT_INST_PROP_LEN(inst, ce_gpios) + +#define TX_FIFO_DEPTH(inst) DT_INST_PROP(inst, fifo_depth) +#define RX_FIFO_DEPTH(inst) DT_INST_PROP_OR(inst, rx_fifo_depth, \ + TX_FIFO_DEPTH(inst)) +#define MSPI_DW_FIFO_PROPS(inst) \ + .tx_fifo_depth_minus_1 = TX_FIFO_DEPTH(inst) - 1, \ + .tx_fifo_threshold = \ + DT_INST_PROP_OR(inst, tx_fifo_threshold, \ + 7 * TX_FIFO_DEPTH(inst) / 8 - 1), \ + .rx_fifo_threshold = \ + DT_INST_PROP_OR(inst, rx_fifo_threshold, \ + 1 * RX_FIFO_DEPTH(inst) / 8 - 1) + +#define MSPI_DW_INST(inst) \ + PM_DEVICE_DT_INST_DEFINE(inst, dev_pm_action_cb); \ + IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);)) \ + static void irq_config##inst(void) \ + { \ + LISTIFY(DT_INST_NUM_IRQS(inst), \ + MSPI_DW_INST_IRQ, (;), inst); \ + } \ + static struct mspi_dw_data dev##inst##_data; \ + static const struct mspi_dw_config dev##inst##_config = { \ + MSPI_DW_MMIO_ROM_INIT(DT_DRV_INST(inst)), \ + .irq_config = irq_config##inst, \ + .clock_frequency = MSPI_DW_CLOCK_FREQUENCY(inst), \ + IF_ENABLED(CONFIG_PINCTRL, \ + (.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \ + IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, ce_gpios), \ + (MSPI_DW_CE_GPIOS(inst),)) \ + MSPI_DW_FIFO_PROPS(inst), \ + DEFINE_REG_ACCESS(inst) \ + }; \ + DEVICE_DT_INST_DEFINE(inst, \ + dev_init, PM_DEVICE_DT_INST_GET(inst), \ + &dev##inst##_data, &dev##inst##_config, \ + POST_KERNEL, CONFIG_MSPI_INIT_PRIORITY, \ + &drv_api); + +DT_INST_FOREACH_STATUS_OKAY(MSPI_DW_INST) diff --git a/drivers/mspi/mspi_dw.h b/drivers/mspi/mspi_dw.h new file mode 100644 index 00000000000..6f4a2c5a981 --- /dev/null +++ b/drivers/mspi/mspi_dw.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2024 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* + * This file is a part of mspi_dw.c extracted only for clarity. + * It is not supposed to be included by any file other than mspi_dw.c. 
+ */
+
+/* CTRLR0 - Control Register 0 */
+#define CTRLR0_SPI_FRF_MASK GENMASK(23, 22)
+#define CTRLR0_SPI_FRF_STANDARD 0UL
+#define CTRLR0_SPI_FRF_DUAL 1UL
+#define CTRLR0_SPI_FRF_QUAD 2UL
+#define CTRLR0_SPI_FRF_OCTAL 3UL
+#define CTRLR0_TMOD_MASK GENMASK(11, 10)
+#define CTRLR0_TMOD_TX_RX 0UL
+#define CTRLR0_TMOD_TX 1UL
+#define CTRLR0_TMOD_RX 2UL
+#define CTRLR0_TMOD_EEPROM 3UL
+#define CTRLR0_SCPOL_BIT BIT(9)
+#define CTRLR0_SCPH_BIT BIT(8)
+#define CTRLR0_FRF_MASK GENMASK(7, 6)
+#define CTRLR0_FRF_SPI 0UL
+#define CTRLR0_FRF_SSP 1UL
+#define CTRLR0_FRF_MICROWIRE 2UL
+#define CTRLR0_DFS_MASK GENMASK(4, 0)
+
+/* CTRLR1 - Control Register 1 */
+#define CTRLR1_NDF_MASK GENMASK(15, 0)
+
+/* SSIENR - SSI Enable Register */
+#define SSIENR_SSIC_EN_BIT BIT(0)
+
+/* TXFTLR - Transmit FIFO Threshold Level */
+#define TXFTLR_TXFTHR_MASK GENMASK(23, 16)
+#define TXFTLR_TFT_MASK GENMASK(7, 0)
+
+/* RXFTLR - Receive FIFO Threshold Level */
+#define RXFTLR_RFT_MASK GENMASK(7, 0)
+
+/* TXFLR - Transmit FIFO Level Register */
+#define TXFLR_TXTFL_MASK GENMASK(7, 0)
+
+/* RXFLR - Receive FIFO Level Register */
+#define RXFLR_RXTFL_MASK GENMASK(7, 0)
+
+/* SR - Status Register */
+#define SR_BUSY_BIT BIT(0)
+
+/* IMR - Interrupt Mask Register */
+#define IMR_TXEIM_BIT BIT(0)
+#define IMR_TXOIM_BIT BIT(1)
+#define IMR_RXUIM_BIT BIT(2)
+#define IMR_RXOIM_BIT BIT(3)
+#define IMR_RXFIM_BIT BIT(4)
+#define IMR_MSTIM_BIT BIT(5)
+
+/* ISR - Interrupt Status Register */
+#define ISR_TXEIS_BIT BIT(0)
+#define ISR_TXOIS_BIT BIT(1)
+#define ISR_RXUIS_BIT BIT(2)
+#define ISR_RXOIS_BIT BIT(3)
+#define ISR_RXFIS_BIT BIT(4)
+#define ISR_MSTIS_BIT BIT(5)
+
+/* SPI_CTRLR0 - SPI Control Register */
+#define SPI_CTRLR0_CLK_STRETCH_EN_BIT BIT(30)
+#define SPI_CTRLR0_XIP_PREFETCH_EN_BIT BIT(29)
+#define SPI_CTRLR0_XIP_MBL_BIT BIT(26)
+#define SPI_CTRLR0_SPI_RXDS_SIG_EN_BIT BIT(25)
+#define SPI_CTRLR0_SPI_DM_EN_BIT BIT(24)
+#define SPI_CTRLR0_RXDS_VL_EN_BIT BIT(23)
+#define SPI_CTRLR0_SSIC_XIP_CONT_XFER_EN_BIT BIT(21)
+#define SPI_CTRLR0_XIP_INST_EN_BIT BIT(20)
+#define SPI_CTRLR0_XIP_DFS_HC_BIT BIT(19)
+#define SPI_CTRLR0_SPI_RXDS_EN_BIT BIT(18)
+#define SPI_CTRLR0_INST_DDR_EN_BIT BIT(17)
+#define SPI_CTRLR0_SPI_DDR_EN_BIT BIT(16)
+#define SPI_CTRLR0_WAIT_CYCLES_MASK GENMASK(15, 11)
+#define SPI_CTRLR0_WAIT_CYCLES_MAX BIT_MASK(5)
+#define SPI_CTRLR0_INST_L_MASK GENMASK(9, 8)
+#define SPI_CTRLR0_INST_L0 0UL
+#define SPI_CTRLR0_INST_L4 1UL
+#define SPI_CTRLR0_INST_L8 2UL
+#define SPI_CTRLR0_INST_L16 3UL
+#define SPI_CTRLR0_XIP_MD_BIT_EN_BIT BIT(7)
+#define SPI_CTRLR0_ADDR_L_MASK GENMASK(5, 2)
+#define SPI_CTRLR0_TRANS_TYPE_MASK GENMASK(1, 0)
+#define SPI_CTRLR0_TRANS_TYPE_TT0 0UL
+#define SPI_CTRLR0_TRANS_TYPE_TT1 1UL
+#define SPI_CTRLR0_TRANS_TYPE_TT2 2UL
+#define SPI_CTRLR0_TRANS_TYPE_TT3 3UL
+
+/* XIP_CTRL - XIP Control Register */
+#define XIP_CTRL_XIP_PREFETCH_EN_BIT BIT(28)
+#define XIP_CTRL_XIP_MBL_MASK GENMASK(27, 26)
+#define XIP_CTRL_XIP_MBL_2 0UL
+#define XIP_CTRL_XIP_MBL_4 1UL
+#define XIP_CTRL_XIP_MBL_8 2UL
+#define XIP_CTRL_XIP_MBL_16 3UL
+#define XIP_CTRL_RXDS_SIG_EN_BIT BIT(25)
+#define XIP_CTRL_XIP_HYBERBUS_EN_BIT BIT(24)
+#define XIP_CTRL_CONT_XFER_EN_BIT BIT(23)
+#define XIP_CTRL_INST_EN_BIT BIT(22)
+#define XIP_CTRL_RXDS_EN_BIT BIT(21)
+#define XIP_CTRL_INST_DDR_EN_BIT BIT(20)
+#define XIP_CTRL_DDR_EN_BIT BIT(19)
+#define XIP_CTRL_DFS_HC_BIT BIT(18)
+#define XIP_CTRL_WAIT_CYCLES_MASK GENMASK(17, 13)
+#define XIP_CTRL_WAIT_CYCLES_MAX BIT_MASK(5)
+#define XIP_CTRL_MD_BITS_EN_BIT BIT(12)
+#define XIP_CTRL_INST_L_MASK GENMASK(10, 9)
+#define XIP_CTRL_INST_L0 0UL
+#define XIP_CTRL_INST_L4 1UL
+#define XIP_CTRL_INST_L8 2UL
+#define XIP_CTRL_INST_L16 3UL
+#define XIP_CTRL_ADDR_L_MASK GENMASK(7, 4)
+#define XIP_CTRL_TRANS_TYPE_MASK GENMASK(3, 2)
+#define XIP_CTRL_TRANS_TYPE_TT0 0UL
+#define XIP_CTRL_TRANS_TYPE_TT1 1UL
+#define XIP_CTRL_TRANS_TYPE_TT2 2UL
+#define XIP_CTRL_FRF_MASK GENMASK(1, 0)
+#define XIP_CTRL_FRF_DUAL 1UL
+#define XIP_CTRL_FRF_QUAD 2UL
+#define XIP_CTRL_FRF_OCTAL 3UL
+
+/* XIP_WRITE_CTRL - XIP Write Control Register */
+#define XIP_WRITE_CTRL_WAIT_CYCLES_MASK GENMASK(20, 16)
+#define XIP_WRITE_CTRL_WAIT_CYCLES_MAX BIT_MASK(5)
+#define XIP_WRITE_CTRL_RXDS_SIG_EN_BIT BIT(13)
+#define XIP_WRITE_CTRL_HYBERBUS_EN_BIT BIT(12)
+#define XIP_WRITE_CTRL_INST_DDR_EN_BIT BIT(11)
+#define XIP_WRITE_CTRL_SPI_DDR_EN_BIT BIT(10)
+#define XIP_WRITE_CTRL_INST_L_MASK GENMASK(9, 8)
+#define XIP_WRITE_CTRL_INST_L0 0UL
+#define XIP_WRITE_CTRL_INST_L4 1UL
+#define XIP_WRITE_CTRL_INST_L8 2UL
+#define XIP_WRITE_CTRL_INST_L16 3UL
+#define XIP_WRITE_CTRL_ADDR_L_MASK GENMASK(7, 4)
+#define XIP_WRITE_CTRL_TRANS_TYPE_MASK GENMASK(3, 2)
+#define XIP_WRITE_CTRL_TRANS_TYPE_TT0 0UL
+#define XIP_WRITE_CTRL_TRANS_TYPE_TT1 1UL
+#define XIP_WRITE_CTRL_TRANS_TYPE_TT2 2UL
+#define XIP_WRITE_CTRL_FRF_MASK GENMASK(1, 0)
+#define XIP_WRITE_CTRL_FRF_DUAL 1UL
+#define XIP_WRITE_CTRL_FRF_QUAD 2UL
+#define XIP_WRITE_CTRL_FRF_OCTAL 3UL
+
+/* Register access helpers. */
+#define USES_AUX_REG(inst) + DT_INST_PROP(inst, aux_reg_enable)
+#define AUX_REG_INSTANCES (0 DT_INST_FOREACH_STATUS_OKAY(USES_AUX_REG))
+#define BASE_ADDR(dev) (mm_reg_t)DEVICE_MMIO_GET(dev)
+
+#if AUX_REG_INSTANCES != 0
+static uint32_t aux_reg_read(const struct device *dev, uint32_t off)
+{
+	return sys_in32(BASE_ADDR(dev) + off/4);
+}
+static void aux_reg_write(uint32_t data, const struct device *dev, uint32_t off)
+{
+	sys_out32(data, BASE_ADDR(dev) + off/4);
+}
+#endif
+
+#if AUX_REG_INSTANCES != DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)
+static uint32_t reg_read(const struct device *dev, uint32_t off)
+{
+	return sys_read32(BASE_ADDR(dev) + off);
+}
+static void reg_write(uint32_t data, const struct device *dev, uint32_t off)
+{
+	sys_write32(data, BASE_ADDR(dev) + off);
+}
+#endif
+
+#if AUX_REG_INSTANCES == 0
+/* If no instance uses aux-reg access. */
+#define DECLARE_REG_ACCESS()
+#define DEFINE_REG_ACCESS(inst)
+#define DEFINE_MM_REG_RD(reg, off) \
+	static inline uint32_t read_##reg(const struct device *dev) \
+	{ return reg_read(dev, off); }
+#define DEFINE_MM_REG_WR(reg, off) \
+	static inline void write_##reg(const struct device *dev, uint32_t data) \
+	{ reg_write(data, dev, off); }
+
+#elif AUX_REG_INSTANCES == DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)
+/* If all instances use aux-reg access. */
+#define DECLARE_REG_ACCESS()
+#define DEFINE_REG_ACCESS(inst)
+#define DEFINE_MM_REG_RD(reg, off) \
+	static inline uint32_t read_##reg(const struct device *dev) \
+	{ return aux_reg_read(dev, off); }
+#define DEFINE_MM_REG_WR(reg, off) \
+	static inline void write_##reg(const struct device *dev, uint32_t data) \
+	{ aux_reg_write(data, dev, off); }
+
+#else
+/* If register access varies by instance. */
+#define DECLARE_REG_ACCESS() \
+	uint32_t (*read)(const struct device *dev, uint32_t off); \
+	void (*write)(uint32_t data, const struct device *dev, uint32_t off)
+#define DEFINE_REG_ACCESS(inst) \
+	COND_CODE_1(DT_INST_PROP(inst, aux_reg_enable), \
+		    (.read = aux_reg_read, \
+		     .write = aux_reg_write,), \
+		    (.read = reg_read, \
+		     .write = reg_write,))
+#define DEFINE_MM_REG_RD(reg, off) \
+	static inline uint32_t read_##reg(const struct device *dev) \
+	{ \
+		const struct mspi_dw_config *dev_config = dev->config; \
+		return dev_config->read(dev, off); \
+	}
+#define DEFINE_MM_REG_WR(reg, off) \
+	static inline void write_##reg(const struct device *dev, uint32_t data) \
+	{ \
+		const struct mspi_dw_config *dev_config = dev->config; \
+		dev_config->write(data, dev, off); \
+	}
+#endif
diff --git a/drivers/mspi/mspi_dw_vendor_specific.h b/drivers/mspi/mspi_dw_vendor_specific.h
new file mode 100644
index 00000000000..7255f926fa8
--- /dev/null
+++ b/drivers/mspi/mspi_dw_vendor_specific.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2024 Nordic Semiconductor ASA
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/*
+ * This file is a part of mspi_dw.c extracted only for clarity.
+ * It is not supposed to be included by any file other than mspi_dw.c.
+ */
+
+#if DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif)
+
+#include <nrfx.h> /* NRF_EXMIF peripheral definition; exact header assumed */
+
+static void vendor_specific_init(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	NRF_EXMIF->EVENTS_CORE = 0;
+	NRF_EXMIF->INTENSET = BIT(EXMIF_INTENSET_CORE_Pos);
+}
+
+static void vendor_specific_suspend(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	NRF_EXMIF->TASKS_STOP = 1;
+}
+
+static void vendor_specific_resume(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	NRF_EXMIF->TASKS_START = 1;
+}
+
+static void vendor_specific_irq_clear(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	NRF_EXMIF->EVENTS_CORE = 0;
+}
+
+#if defined(CONFIG_MSPI_XIP)
+static int vendor_specific_xip_enable(const struct device *dev,
+				      const struct mspi_dev_id *dev_id,
+				      const struct mspi_xip_cfg *cfg)
+{
+	ARG_UNUSED(dev);
+
+	if (dev_id->dev_idx == 0) {
+		NRF_EXMIF->EXTCONF1.OFFSET = cfg->address_offset;
+		NRF_EXMIF->EXTCONF1.SIZE = cfg->address_offset
+					 + cfg->size - 1;
+		NRF_EXMIF->EXTCONF1.ENABLE = 1;
+	} else if (dev_id->dev_idx == 1) {
+		NRF_EXMIF->EXTCONF2.OFFSET = cfg->address_offset;
+		NRF_EXMIF->EXTCONF2.SIZE = cfg->address_offset
+					 + cfg->size - 1;
+		NRF_EXMIF->EXTCONF2.ENABLE = 1;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vendor_specific_xip_disable(const struct device *dev,
+				       const struct mspi_dev_id *dev_id,
+				       const struct mspi_xip_cfg *cfg)
+{
+	ARG_UNUSED(dev);
+
+	if (dev_id->dev_idx == 0) {
+		NRF_EXMIF->EXTCONF1.ENABLE = 0;
+	} else if (dev_id->dev_idx == 1) {
+		NRF_EXMIF->EXTCONF2.ENABLE = 0;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif /* defined(CONFIG_MSPI_XIP) */
+
+#endif /* DT_HAS_COMPAT_STATUS_OKAY(nordic_nrf_exmif) */
diff --git a/dts/bindings/mspi/snps,designware-ssi.yaml b/dts/bindings/mspi/snps,designware-ssi.yaml
new file mode 100644
index 00000000000..fb516cb7835
--- /dev/null
+++ b/dts/bindings/mspi/snps,designware-ssi.yaml
@@ -0,0 +1,45 @@
+# Copyright (c) 2024 Nordic Semiconductor ASA
+# SPDX-License-Identifier: Apache-2.0
+
+description: Synopsys DesignWare Synchronous Serial Interface (SSI) node
+
+compatible: "snps,designware-ssi"
+
+include: [mspi-controller.yaml, pinctrl-device.yaml]
+
+properties:
+  reg:
+    required: true
+
+  interrupts:
+    required: true
+
+  aux-reg-enable:
+    type: boolean
+    description: |
+      Activates auxiliary register access that is needed on some platforms.
+
+  fifo-depth:
+    required: true
+    type: int
+    description: |
+      Number of items that can be stored in the TX FIFO. Range: 8-256.
+      If the RX FIFO depth is not specified separately in the rx-fifo-depth
+      property, this value specifies the depth of both the TX and RX FIFOs.
+
+  rx-fifo-depth:
+    type: int
+    description: |
+      Number of items that can be stored in the RX FIFO. Range: 8-256.
+
+  tx-fifo-threshold:
+    type: int
+    description: |
+      Number of entries in the TX FIFO above which the TX transfer is started.
+      Maximum value is the TX FIFO depth - 1.
+
+  rx-fifo-threshold:
+    type: int
+    description: |
+      Number of entries in the RX FIFO above which the controller gets an RX
+      interrupt. Maximum value is the RX FIFO depth - 1.
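
Usage note (illustration only, not part of the patch): the sketch below shows how client code could drive a device behind this controller through the generic MSPI API that the driver implements. It is a minimal sketch under assumptions: the device index (0), the absence of a dedicated CE GPIO, and the 0x9F JEDEC-ID command are illustrative, and read_jedec_id() is a hypothetical helper.

#include <zephyr/drivers/mspi.h>

/* Assumed wiring: the target device is entry 0 on this controller
 * (drives SER bit 0 in the driver); no dedicated CE GPIO is used here,
 * so dev_id.ce is left zeroed and the driver skips the GPIO toggling.
 */
static const struct mspi_dev_id dev_id = {
	.dev_idx = 0,
};

static int read_jedec_id(const struct device *mspi_ctrl, uint8_t *id3)
{
	const struct mspi_dev_cfg cfg = {
		.freq = 8000000,		/* 8 MHz, illustrative */
		.io_mode = MSPI_IO_MODE_SINGLE,
		.cpp = MSPI_CPP_MODE_0,
		.endian = MSPI_XFER_BIG_ENDIAN,
		.ce_polarity = MSPI_CE_ACTIVE_LOW,
	};
	struct mspi_xfer_packet packet = {
		.dir = MSPI_RX,
		.cmd = 0x9F,			/* JEDEC ID opcode, illustrative */
		.num_bytes = 3,
		.data_buf = id3,
	};
	const struct mspi_xfer xfer = {
		.cmd_length = 1,		/* one command byte, no address */
		.addr_length = 0,
		.packets = &packet,
		.num_packet = 1,
		.timeout = 100,			/* ms, applied via K_MSEC() */
	};
	int rc;

	/* Bind the controller to this device and apply only the fields
	 * selected in the mask; the driver rejects unsupported settings.
	 */
	rc = mspi_dev_config(mspi_ctrl, &dev_id,
			     MSPI_DEVICE_CONFIG_FREQUENCY |
			     MSPI_DEVICE_CONFIG_IO_MODE |
			     MSPI_DEVICE_CONFIG_CPP |
			     MSPI_DEVICE_CONFIG_ENDIAN |
			     MSPI_DEVICE_CONFIG_CE_POL,
			     &cfg);
	if (rc < 0) {
		return rc;
	}

	/* Synchronous transfer; the driver does not support async yet. */
	return mspi_transceive(mspi_ctrl, &dev_id, &xfer);
}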