diff --git a/drivers/flash/flash_mspi_atxp032.c b/drivers/flash/flash_mspi_atxp032.c index b69c796e7ab8..b981cc163508 100644 --- a/drivers/flash/flash_mspi_atxp032.c +++ b/drivers/flash/flash_mspi_atxp032.c @@ -124,6 +124,7 @@ static int flash_mspi_atxp032_command_write(const struct device *flash, uint8_t data->trans.async = false; data->trans.xfer_mode = MSPI_PIO; data->trans.tx_dummy = tx_dummy; + data->trans.rx_dummy = data->dev_cfg.rx_dummy; data->trans.cmd_length = 1; data->trans.addr_length = addr_len; data->trans.hold_ce = false; @@ -155,6 +156,7 @@ static int flash_mspi_atxp032_command_read(const struct device *flash, uint8_t c data->trans.async = false; data->trans.xfer_mode = MSPI_PIO; + data->trans.tx_dummy = data->dev_cfg.tx_dummy; data->trans.rx_dummy = rx_dummy; data->trans.cmd_length = 1; data->trans.addr_length = addr_len; @@ -261,7 +263,7 @@ static int flash_mspi_atxp032_get_vendor_id(const struct device *flash, uint8_t ret = flash_mspi_atxp032_command_read(flash, SPI_NOR_CMD_RDID, 0, 0, 0, buffer, 11); *vendor_id = buffer[7]; - data->jedec_id = (buffer[7] << 16) | (buffer[8] << 8) | buffer[9]; + memcpy(&data->jedec_id, buffer + 7, 3); return ret; } @@ -326,10 +328,11 @@ static int flash_mspi_atxp032_page_program(const struct device *flash, off_t off data->trans.async = false; data->trans.xfer_mode = MSPI_DMA; data->trans.tx_dummy = data->dev_cfg.tx_dummy; + data->trans.rx_dummy = data->dev_cfg.rx_dummy; data->trans.cmd_length = data->dev_cfg.cmd_length; data->trans.addr_length = data->dev_cfg.addr_length; data->trans.hold_ce = false; - data->trans.priority = 1; + data->trans.priority = MSPI_XFER_PRIORITY_MEDIUM; data->trans.packets = &data->packet; data->trans.num_packet = 1; data->trans.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; @@ -375,6 +378,7 @@ static int flash_mspi_atxp032_busy_wait(const struct device *flash) return ret; } LOG_DBG("status: 0x%x", status); + k_sleep(K_MSEC(1)); } while (status & SPI_NOR_WIP_BIT); if (data->dev_cfg.io_mode != MSPI_IO_MODE_SINGLE) { @@ -407,11 +411,12 @@ static int flash_mspi_atxp032_read(const struct device *flash, off_t offset, voi data->trans.async = false; data->trans.xfer_mode = MSPI_DMA; + data->trans.tx_dummy = data->dev_cfg.tx_dummy; data->trans.rx_dummy = data->dev_cfg.rx_dummy; data->trans.cmd_length = data->dev_cfg.cmd_length; data->trans.addr_length = data->dev_cfg.addr_length; data->trans.hold_ce = false; - data->trans.priority = 1; + data->trans.priority = MSPI_XFER_PRIORITY_MEDIUM; data->trans.packets = &data->packet; data->trans.num_packet = 1; data->trans.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; @@ -681,6 +686,7 @@ static int flash_mspi_atxp032_init(const struct device *flash) } data->timing_cfg = cfg->tar_timing_cfg; +#if CONFIG_MSPI_XIP if (cfg->tar_xip_cfg.enable) { if (mspi_xip_config(cfg->bus, &cfg->dev_id, &cfg->tar_xip_cfg)) { LOG_ERR("Failed to enable XIP/%u", __LINE__); @@ -688,7 +694,9 @@ static int flash_mspi_atxp032_init(const struct device *flash) } data->xip_cfg = cfg->tar_xip_cfg; } +#endif /* CONFIG_MSPI_XIP */ +#if CONFIG_MSPI_SCRAMBLE if (cfg->tar_scramble_cfg.enable) { if (mspi_scramble_config(cfg->bus, &cfg->dev_id, &cfg->tar_scramble_cfg)) { LOG_ERR("Failed to enable scrambling/%u", __LINE__); @@ -696,6 +704,7 @@ static int flash_mspi_atxp032_init(const struct device *flash) } data->scramble_cfg = cfg->tar_scramble_cfg; } +#endif /* MSPI_SCRAMBLE */ release(flash); @@ -720,11 +729,12 @@ static int flash_mspi_atxp032_read_sfdp(const struct device *flash, off_t addr, 
data->trans.async = false; data->trans.xfer_mode = MSPI_DMA; + data->trans.tx_dummy = data->dev_cfg.tx_dummy; data->trans.rx_dummy = 8; data->trans.cmd_length = 1; data->trans.addr_length = 3; data->trans.hold_ce = false; - data->trans.priority = 1; + data->trans.priority = MSPI_XFER_PRIORITY_MEDIUM; data->trans.packets = &data->packet; data->trans.num_packet = 1; data->trans.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; @@ -745,7 +755,7 @@ static int flash_mspi_atxp032_read_jedec_id(const struct device *flash, uint8_t { struct flash_mspi_atxp032_data *data = flash->data; - id = &data->jedec_id; + memcpy(id, &data->jedec_id, 3); return 0; } #endif /* CONFIG_FLASH_JESD216_API */ @@ -808,23 +818,17 @@ static const struct flash_driver_api flash_mspi_atxp032_api = { .time_to_break = 0, \ } -#if CONFIG_SOC_FAMILY_AMBIQ #define MSPI_TIMING_CONFIG(n) \ - { \ - .ui8WriteLatency = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 0), \ - .ui8TurnAround = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 1), \ - .bTxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 2), \ - .bRxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 3), \ - .bRxCap = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 4), \ - .ui32TxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 5), \ - .ui32RxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 6), \ - .ui32RXDQSDelayEXT = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 7), \ - } -#define MSPI_TIMING_CONFIG_MASK(n) DT_INST_PROP(n, ambiq_timing_config_mask) -#else -#define MSPI_TIMING_CONFIG(n) -#define MSPI_TIMING_CONFIG_MASK(n) -#endif + COND_CODE_1(CONFIG_SOC_FAMILY_AMBIQ, \ + (MSPI_AMBIQ_TIMING_CONFIG(n)), ({})) \ + +#define MSPI_TIMING_CONFIG_MASK(n) \ + COND_CODE_1(CONFIG_SOC_FAMILY_AMBIQ, \ + (MSPI_AMBIQ_TIMING_CONFIG_MASK(n)), (MSPI_TIMING_PARAM_DUMMY)) \ + +#define MSPI_PORT(n) \ + COND_CODE_1(CONFIG_SOC_FAMILY_AMBIQ, \ + (MSPI_AMBIQ_PORT(n)), (0)) \ #define FLASH_MSPI_ATXP032(n) \ static const struct flash_mspi_atxp032_config flash_mspi_atxp032_config_##n = { \ diff --git a/drivers/flash/flash_mspi_emul_device.c b/drivers/flash/flash_mspi_emul_device.c index 1bfefd33dd2a..5117e838d3a2 100644 --- a/drivers/flash/flash_mspi_emul_device.c +++ b/drivers/flash/flash_mspi_emul_device.c @@ -215,7 +215,7 @@ static int flash_mspi_emul_write(const struct device *flash, off_t offset, data->xfer.cmd_length = data->dev_cfg.cmd_length; data->xfer.addr_length = data->dev_cfg.addr_length; data->xfer.hold_ce = false; - data->xfer.priority = 1; + data->xfer.priority = MSPI_XFER_PRIORITY_MEDIUM; data->xfer.packets = &data->packet; data->xfer.num_packet = 1; data->xfer.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; @@ -288,7 +288,7 @@ static int flash_mspi_emul_read(const struct device *flash, off_t offset, data->xfer.cmd_length = data->dev_cfg.cmd_length; data->xfer.addr_length = data->dev_cfg.addr_length; data->xfer.hold_ce = false; - data->xfer.priority = 1; + data->xfer.priority = MSPI_XFER_PRIORITY_MEDIUM; data->xfer.packets = &data->packet; data->xfer.num_packet = 1; data->xfer.timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE; diff --git a/drivers/memc/memc_mspi_aps6404l.c b/drivers/memc/memc_mspi_aps6404l.c index a3bf64cb4500..b3db096fa4f9 100644 --- a/drivers/memc/memc_mspi_aps6404l.c +++ b/drivers/memc/memc_mspi_aps6404l.c @@ -82,6 +82,7 @@ static int memc_mspi_aps6404l_command_write(const struct device *psram, uint8_t data->trans.async = false; data->trans.xfer_mode = MSPI_PIO; data->trans.tx_dummy = 0; + data->trans.rx_dummy = data->dev_cfg.rx_dummy; data->trans.cmd_length 
= 1; data->trans.addr_length = 0; data->trans.hold_ce = false; @@ -118,6 +119,7 @@ static int memc_mspi_aps6404l_command_read(const struct device *psram, uint8_t c data->trans.async = false; data->trans.xfer_mode = MSPI_PIO; + data->trans.tx_dummy = data->dev_cfg.tx_dummy; data->trans.rx_dummy = 0; data->trans.cmd_length = 1; data->trans.addr_length = 3; @@ -393,8 +395,8 @@ static int memc_mspi_aps6404l_init(const struct device *psram) .write_cmd = APS6404L_WRITE, \ .cmd_length = 1, \ .addr_length = 3, \ - .mem_boundary = 1024, \ - .time_to_break = 8, \ + .mem_boundary = 1024, \ + .time_to_break = 8, \ } #define MSPI_DEVICE_CONFIG_QUAD(n) \ @@ -413,28 +415,21 @@ static int memc_mspi_aps6404l_init(const struct device *psram) .write_cmd = APS6404L_QUAD_WRITE, \ .cmd_length = 1, \ .addr_length = 3, \ - .mem_boundary = 1024, \ - .time_to_break = 4, \ + .mem_boundary = 1024, \ + .time_to_break = 4, \ } -#if CONFIG_SOC_FAMILY_AMBIQ #define MSPI_TIMING_CONFIG(n) \ - { \ - .ui8WriteLatency = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 0), \ - .ui8TurnAround = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 1), \ - .bTxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 2), \ - .bRxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 3), \ - .bRxCap = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 4), \ - .ui32TxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 5), \ - .ui32RxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 6), \ - .ui32RXDQSDelayEXT = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 7), \ - } -#define MSPI_TIMING_CONFIG_MASK(n) DT_INST_PROP(n, ambiq_timing_config_mask) -#else -#define MSPI_TIMING_CONFIG(n) {} -#define MSPI_TIMING_CONFIG_MASK(n) MSPI_TIMING_PARAM_DUMMY -#define MSPI_PORT(n) 0 -#endif + COND_CODE_1(CONFIG_SOC_FAMILY_AMBIQ, \ + (MSPI_AMBIQ_TIMING_CONFIG(n)), ({})) \ + +#define MSPI_TIMING_CONFIG_MASK(n) \ + COND_CODE_1(CONFIG_SOC_FAMILY_AMBIQ, \ + (MSPI_AMBIQ_TIMING_CONFIG_MASK(n)), (MSPI_TIMING_PARAM_DUMMY)) \ + +#define MSPI_PORT(n) \ + COND_CODE_1(CONFIG_SOC_FAMILY_AMBIQ, \ + (MSPI_AMBIQ_PORT(n)), (0)) \ #define MEMC_MSPI_APS6404L(n) \ static const struct memc_mspi_aps6404l_config \ diff --git a/drivers/mspi/mspi_ambiq.h b/drivers/mspi/mspi_ambiq.h index b289f190f7da..d7025511f95c 100644 --- a/drivers/mspi/mspi_ambiq.h +++ b/drivers/mspi/mspi_ambiq.h @@ -25,6 +25,38 @@ }, \ } +#define MSPI_CQ_MAX_ENTRY MSPI0_CQCURIDX_CQCURIDX_Msk + +#define TIMING_CFG_GET_RX_DUMMY(cfg) \ + { \ + mspi_timing_cfg *timing = (mspi_timing_cfg *)cfg; \ + timing->ui8TurnAround; \ + } + +#define TIMING_CFG_SET_RX_DUMMY(cfg, num) \ + { \ + mspi_timing_cfg *timing = (mspi_timing_cfg *)cfg; \ + timing->ui8TurnAround = num; \ + } + +#define MSPI_AMBIQ_TIMING_CONFIG(n) \ + { \ + .ui8WriteLatency = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 0), \ + .ui8TurnAround = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 1), \ + .bTxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 2), \ + .bRxNeg = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 3), \ + .bRxCap = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 4), \ + .ui32TxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 5), \ + .ui32RxDQSDelay = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 6), \ + .ui32RXDQSDelayEXT = DT_INST_PROP_BY_IDX(n, ambiq_timing_config, 7), \ + } + +#define MSPI_AMBIQ_TIMING_CONFIG_MASK(n) DT_INST_PROP(n, ambiq_timing_config_mask) + +#define MSPI_AMBIQ_PORT(n) \ + ((DT_REG_ADDR(DT_INST_BUS(n)) - MSPI0_BASE) / (DT_REG_SIZE(DT_INST_BUS(n)) * 4)) + + struct mspi_ambiq_timing_cfg { uint8_t ui8WriteLatency; uint8_t ui8TurnAround; @@ -47,19 
+79,4 @@ enum mspi_ambiq_timing_param { MSPI_AMBIQ_SET_RXDQSDLYEXT = BIT(7), }; -#define MSPI_PORT(n) ((DT_REG_ADDR(DT_INST_BUS(n)) - MSPI0_BASE) / \ - (DT_REG_SIZE(DT_INST_BUS(n)) * 4)) - -#define TIMING_CFG_GET_RX_DUMMY(cfg) \ - { \ - mspi_timing_cfg *timing = (mspi_timing_cfg *)cfg; \ - timing->ui8TurnAround; \ - } - -#define TIMING_CFG_SET_RX_DUMMY(cfg, num) \ - { \ - mspi_timing_cfg *timing = (mspi_timing_cfg *)cfg; \ - timing->ui8TurnAround = num; \ - } - #endif diff --git a/drivers/mspi/mspi_ambiq_ap3.c b/drivers/mspi/mspi_ambiq_ap3.c index 04fbea5c679b..174e6a5762c1 100644 --- a/drivers/mspi/mspi_ambiq_ap3.c +++ b/drivers/mspi/mspi_ambiq_ap3.c @@ -35,13 +35,14 @@ struct mspi_context { struct mspi_xfer xfer; int packets_left; - int packets_done; + volatile int packets_done; mspi_callback_handler_t callback; struct mspi_callback_context *callback_ctx; - bool asynchronous; - struct k_sem lock; + struct k_sem lock; + struct k_sem sync; + volatile int sync_status; }; struct mspi_ambiq_config { @@ -53,6 +54,8 @@ struct mspi_ambiq_config { const struct pinctrl_dev_config *pcfg; irq_config_func_t irq_cfg_func; + am_hal_mspi_seq_mode_e seq_mode; + LOG_INSTANCE_PTR_DECLARE(log); }; @@ -222,7 +225,7 @@ static inline int mspi_context_lock(struct mspi_context *ctx, struct mspi_callback_context *callback_ctx, bool lockon) { - int ret = 1; + int ret = 0; if ((k_sem_count_get(&ctx->lock) == 0) && !lockon && (ctx->owner == req)) { @@ -232,27 +235,7 @@ static inline int mspi_context_lock(struct mspi_context *ctx, if (k_sem_take(&ctx->lock, K_MSEC(xfer->timeout))) { return -EBUSY; } - if (ctx->xfer.async) { - if ((xfer->tx_dummy == ctx->xfer.tx_dummy) && - (xfer->rx_dummy == ctx->xfer.rx_dummy) && - (xfer->cmd_length == ctx->xfer.cmd_length) && - (xfer->addr_length == ctx->xfer.addr_length)) { - ret = 0; - } else if (ctx->packets_left == 0) { - if (ctx->callback_ctx) { - volatile struct mspi_event_data *evt_data; - - evt_data = &ctx->callback_ctx->mspi_evt.evt_data; - while (evt_data->status != 0) { - } - ret = 1; - } else { - ret = 0; - } - } else { - return -EIO; - } - } + ctx->owner = req; ctx->xfer = *xfer; ctx->packets_done = 0; @@ -295,7 +278,9 @@ static inline int mspi_verify_device(const struct device *controller, static int mspi_ambiq_deinit(const struct device *controller) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; int ret = 0; @@ -355,35 +340,16 @@ static int mspi_ambiq_deinit(const struct device *controller) } /** DMA specific config */ -static int mspi_xfer_config(const struct device *controller, - const struct mspi_xfer *xfer) +static int mspi_xfer_config_update(const struct device *controller, + const struct mspi_xfer *xfer) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg; - am_hal_mspi_request_e eRequest; int ret = 0; - if (data->scramble_cfg.enable) { - eRequest = AM_HAL_MSPI_REQ_SCRAMB_EN; - } else { - eRequest = AM_HAL_MSPI_REQ_SCRAMB_DIS; - } - - ret = am_hal_mspi_disable(data->mspiHandle); - if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.", - __LINE__, ret); - return -EHOSTDOWN; - } - - ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL); - if (ret) { - LOG_INST_ERR(cfg->log, "%u,Unable to complete scramble config:%d.", - __LINE__, data->scramble_cfg.enable); - return -EHOSTDOWN; - } - if (xfer->cmd_length > 
AM_HAL_MSPI_INSTR_2_BYTE + 1) { LOG_INST_ERR(cfg->log, "%u, cmd_length is too large.", __LINE__); return -ENOTSUP; @@ -411,20 +377,6 @@ static int mspi_xfer_config(const struct device *controller, hal_dev_cfg.bEnWriteLatency = (xfer->tx_dummy != 0); hal_dev_cfg.ui8WriteLatency = (uint8_t)xfer->tx_dummy; - ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg); - if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.", - __LINE__, ret); - return -EHOSTDOWN; - } - - ret = am_hal_mspi_enable(data->mspiHandle); - if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.", - __LINE__, ret); - return -EHOSTDOWN; - } - data->hal_dev_cfg = hal_dev_cfg; return ret; } @@ -563,7 +515,9 @@ static int mspi_ambiq_dev_config(const struct device *controller, MSPI_DEVICE_CONFIG_CE_NUM | MSPI_DEVICE_CONFIG_DATA_RATE | MSPI_DEVICE_CONFIG_CMD_LEN | - MSPI_DEVICE_CONFIG_ADDR_LEN)))) { + MSPI_DEVICE_CONFIG_ADDR_LEN | + MSPI_DEVICE_CONFIG_RX_DUMMY | + MSPI_DEVICE_CONFIG_TX_DUMMY)))) { LOG_INST_ERR(cfg->log, "%u, config type not supported.", __LINE__); ret = -ENOTSUP; goto e_return; @@ -590,6 +544,28 @@ static int mspi_ambiq_dev_config(const struct device *controller, if ((param_mask & MSPI_DEVICE_CONFIG_IO_MODE) || (param_mask & MSPI_DEVICE_CONFIG_CE_NUM) || (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE)) { + enum mspi_io_mode io_mode; + enum mspi_data_rate data_rate; + uint8_t ce_num; + + if (param_mask & MSPI_DEVICE_CONFIG_IO_MODE) { + io_mode = dev_cfg->io_mode; + } else { + io_mode = data->dev_cfg.io_mode; + } + + if (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE) { + data_rate = dev_cfg->data_rate; + } else { + data_rate = data->dev_cfg.data_rate; + } + + if (param_mask & MSPI_DEVICE_CONFIG_CE_NUM) { + ce_num = dev_cfg->ce_num; + } else { + ce_num = data->dev_cfg.ce_num; + } + hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode, dev_cfg->data_rate, dev_cfg->ce_num); @@ -605,7 +581,7 @@ static int mspi_ambiq_dev_config(const struct device *controller, ret = -EHOSTDOWN; goto e_return; } - data->dev_cfg.freq = dev_cfg->io_mode; + data->dev_cfg.io_mode = dev_cfg->io_mode; data->dev_cfg.data_rate = dev_cfg->data_rate; data->dev_cfg.ce_num = dev_cfg->ce_num; } @@ -650,6 +626,48 @@ static int mspi_ambiq_dev_config(const struct device *controller, data->dev_cfg.addr_length = dev_cfg->addr_length; } + if (param_mask & MSPI_DEVICE_CONFIG_TX_DUMMY) { + am_hal_mspi_timing_scan_t timing; + + if (dev_cfg->tx_dummy) { + hal_dev_cfg.bEnWriteLatency = true; + } else { + hal_dev_cfg.bEnWriteLatency = false; + } + hal_dev_cfg.ui8WriteLatency = dev_cfg->tx_dummy; + timing.ui8WriteLatency = hal_dev_cfg.ui8WriteLatency; + timing.ui8Turnaround = hal_dev_cfg.ui8TurnAround; + ret = am_hal_mspi_control(data->mspiHandle, + AM_HAL_MSPI_REQ_TIMING_SET, &timing); + if (ret) { + LOG_INST_ERR(cfg->log, "%u, fail to configure timing.", + __LINE__); + return -EHOSTDOWN; + } + data->dev_cfg.tx_dummy = dev_cfg->tx_dummy; + } + + if (param_mask & MSPI_DEVICE_CONFIG_RX_DUMMY) { + am_hal_mspi_timing_scan_t timing; + + if (dev_cfg->rx_dummy) { + hal_dev_cfg.bTurnaround = true; + } else { + hal_dev_cfg.bTurnaround = false; + } + hal_dev_cfg.ui8TurnAround = dev_cfg->rx_dummy; + timing.ui8WriteLatency = hal_dev_cfg.ui8WriteLatency; + timing.ui8Turnaround = hal_dev_cfg.ui8TurnAround; + ret = am_hal_mspi_control(data->mspiHandle, + AM_HAL_MSPI_REQ_TIMING_SET, &timing); + if (ret) { + LOG_INST_ERR(cfg->log, "%u, fail to configure timing.", + __LINE__); + return -EHOSTDOWN; + } + data->dev_cfg.rx_dummy = 
dev_cfg->rx_dummy; + } + } else { if (data->dev_id != dev_id) { @@ -690,7 +708,8 @@ static int mspi_ambiq_dev_config(const struct device *controller, goto e_return; } - hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode, dev_cfg->data_rate, + hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode, + dev_cfg->data_rate, dev_cfg->ce_num); if (hal_dev_cfg.eDeviceConfig == AM_HAL_MSPI_FLASH_MAX) { ret = -ENOTSUP; @@ -736,22 +755,24 @@ static int mspi_ambiq_dev_config(const struct device *controller, ret = am_hal_mspi_disable(data->mspiHandle); if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.", __LINE__, ret); + LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.", + __LINE__, ret); ret = -EHOSTDOWN; goto e_return; } ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg); if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.", __LINE__, - ret); + LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.", + __LINE__, ret); ret = -EHOSTDOWN; goto e_return; } ret = am_hal_mspi_enable(data->mspiHandle); if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.", __LINE__, ret); + LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.", + __LINE__, ret); ret = -EHOSTDOWN; goto e_return; } @@ -771,7 +792,9 @@ static int mspi_ambiq_xip_config(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_xip_cfg *xip_cfg) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; am_hal_mspi_request_e eRequest; int ret = 0; @@ -802,10 +825,12 @@ static int mspi_ambiq_scramble_config(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_scramble_cfg *scramble_cfg) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg; - am_hal_mspi_request_e eRequest; + am_hal_mspi_scramble_config_t hal_scramble_cfg; int ret = 0; if (mspi_is_inp(controller)) { @@ -817,39 +842,23 @@ static int mspi_ambiq_scramble_config(const struct device *controller, return -ESTALE; } - if (scramble_cfg->enable) { - eRequest = AM_HAL_MSPI_REQ_SCRAMB_EN; - } else { - eRequest = AM_HAL_MSPI_REQ_SCRAMB_DIS; - } + hal_scramble_cfg.bEnable = scramble_cfg->enable; + hal_scramble_cfg.scramblingStartAddr = 0 + scramble_cfg->address_offset; + hal_scramble_cfg.scramblingEndAddr = hal_dev_cfg.scramblingStartAddr + + scramble_cfg->size; - ret = am_hal_mspi_disable(data->mspiHandle); + ret = am_hal_mspi_control(data->mspiHandle, + AM_HAL_MSPI_REQ_SCRAMB_CONFIG, + &hal_scramble_cfg); if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.", __LINE__, ret); - return -EHOSTDOWN; - } - - ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL); - if (ret) { - LOG_INST_ERR(cfg->log, "%u,Unable to complete scramble config:%d.", __LINE__, - scramble_cfg->enable); + LOG_INST_ERR(cfg->log, "%u, Unable to complete scramble config.", + __LINE__); return -EHOSTDOWN; } hal_dev_cfg.scramblingStartAddr = 0 + scramble_cfg->address_offset; - hal_dev_cfg.scramblingEndAddr = hal_dev_cfg.scramblingStartAddr + scramble_cfg->size; - - ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg); - if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.", __LINE__, ret); - return -EHOSTDOWN; - } - - ret = am_hal_mspi_enable(data->mspiHandle); - if (ret) { - 
LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.", __LINE__, ret); - return -EHOSTDOWN; - } + hal_dev_cfg.scramblingEndAddr = hal_dev_cfg.scramblingStartAddr + + scramble_cfg->size; data->scramble_cfg = *scramble_cfg; data->hal_dev_cfg = hal_dev_cfg; @@ -861,7 +870,9 @@ static int mspi_ambiq_timing_config(const struct device *controller, const uint32_t param_mask, void *timing_cfg) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg; struct mspi_ambiq_timing_cfg *time_cfg = timing_cfg; @@ -903,7 +914,8 @@ static int mspi_ambiq_timing_config(const struct device *controller, timing.ui8Turnaround = hal_dev_cfg.ui8TurnAround; timing.ui8WriteLatency = hal_dev_cfg.ui8WriteLatency; - ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_TIMING_SCAN, &timing); + ret = am_hal_mspi_control(data->mspiHandle, + AM_HAL_MSPI_REQ_TIMING_SET, &timing); if (ret) { LOG_INST_ERR(cfg->log, "%u, fail to configure timing.", __LINE__); return -EHOSTDOWN; @@ -919,12 +931,22 @@ static int mspi_ambiq_get_channel_status(const struct device *controller, uint8_ const struct mspi_ambiq_config *cfg = controller->config; struct mspi_ambiq_data *data = controller->data; + am_hal_mspi_status_t dma_stat; int ret = 0; if (sys_read32(cfg->reg_base) & MSPI_BUSY) { ret = -EBUSY; } + if (am_hal_mspi_status_get(data->mspiHandle, &dma_stat)) { + LOG_INST_ERR(cfg->log, "%u, fail to get mspi status.", __LINE__); + return -EHOSTDOWN; + } + + if (dma_stat.bTIP) { + return -EBUSY; + } + if (mspi_is_inp(controller)) { return -EBUSY; } @@ -938,26 +960,60 @@ static int mspi_ambiq_get_channel_status(const struct device *controller, uint8_ static void mspi_ambiq_isr(const struct device *dev) { struct mspi_ambiq_data *data = dev->data; - uint32_t ui32Status; + uint32_t status; - am_hal_mspi_interrupt_status_get(data->mspiHandle, &ui32Status, false); - am_hal_mspi_interrupt_clear(data->mspiHandle, ui32Status); - am_hal_mspi_interrupt_service(data->mspiHandle, ui32Status); + am_hal_mspi_interrupt_status_get(data->mspiHandle, &status, false); + am_hal_mspi_interrupt_clear(data->mspiHandle, status); + am_hal_mspi_interrupt_service(data->mspiHandle, status); } /** Manage sync dma transceive */ -static void hal_mspi_callback(void *pCallbackCtxt, uint32_t status) +static void hal_mspi_callback(void *callback_ctx, uint32_t status) { - const struct device *controller = pCallbackCtxt; + const struct device *controller = callback_ctx; struct mspi_ambiq_data *data = controller->data; + struct mspi_context *ctx = &data->ctx; - data->ctx.packets_done++; + if (!ctx->xfer.async) { + ctx->sync_status = status; + if (ctx->packets_done == ctx->xfer.num_packet - 1) { + k_sem_give(&ctx->sync); + } + ctx->packets_done++; + } +} + +/** Manage overall dma transceive */ +void am_hal_mspi_zephyr_callback(void *user_cb, void *user_cb_ctx, uint32_t status) +{ + if (user_cb == 0) { + return; + } + if (user_cb != hal_mspi_callback) { + mspi_callback_handler_t cb = user_cb; + struct mspi_callback_context *cb_ctx = user_cb_ctx; + struct mspi_event *evt = &cb_ctx->mspi_evt; + + if (evt->evt_data.packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { + evt->evt_type = MSPI_BUS_XFER_COMPLETE; + cb(cb_ctx); + } + evt->evt_data.status = status; + evt->evt_data.packet++; + evt->evt_data.packet_idx++; + } else { + am_hal_mspi_callback_t cb = user_cb; + + cb(user_cb_ctx, status); + } } static int mspi_pio_prepare(const struct device 
*controller, am_hal_mspi_pio_transfer_t *trans) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; const struct mspi_xfer *xfer = &data->ctx.xfer; int ret = 0; @@ -978,7 +1034,8 @@ static int mspi_pio_prepare(const struct device *controller, if (xfer->cmd_length != 0) { am_hal_mspi_instr_e eInstrCfg = xfer->cmd_length - 1; - ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ISIZE_SET, &eInstrCfg); + ret = am_hal_mspi_control(data->mspiHandle, + AM_HAL_MSPI_REQ_ISIZE_SET, &eInstrCfg); if (ret) { LOG_INST_ERR(cfg->log, "%u, failed to configure cmd_length.", __LINE__); @@ -995,9 +1052,11 @@ static int mspi_pio_prepare(const struct device *controller, if (xfer->addr_length != 0) { am_hal_mspi_addr_e eAddrCfg = xfer->addr_length - 1; - ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ASIZE_SET, &eAddrCfg); + ret = am_hal_mspi_control(data->mspiHandle, + AM_HAL_MSPI_REQ_ASIZE_SET, &eAddrCfg); if (ret) { - LOG_INST_ERR(cfg->log, "%u, failed to configure addr_length.", __LINE__); + LOG_INST_ERR(cfg->log, "%u, failed to configure addr_length.", + __LINE__); return -EHOSTDOWN; } data->hal_dev_cfg.eAddrCfg = eAddrCfg; @@ -1012,14 +1071,15 @@ static int mspi_pio_transceive(const struct device *controller, mspi_callback_handler_t cb, struct mspi_callback_context *cb_ctx) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; struct mspi_context *ctx = &data->ctx; const struct mspi_xfer_packet *packet; uint32_t packet_idx; am_hal_mspi_pio_transfer_t trans; int ret = 0; - int cfg_flag = 0; if (xfer->num_packet == 0 || !xfer->packets || @@ -1027,87 +1087,34 @@ static int mspi_pio_transceive(const struct device *controller, return -EFAULT; } - cfg_flag = mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true); - /** For async, user must make sure when cfg_flag = 0 the dummy and instr addr length - * in mspi_xfer of the two calls are the same if the first one has not finished yet. 
- */ - if (cfg_flag) { - if (cfg_flag == 1) { - ret = mspi_pio_prepare(controller, &trans); - if (ret) { - goto pio_err; - } - } else { - ret = cfg_flag; - goto pio_err; - } + if (xfer->async) { + LOG_INST_ERR(cfg->log, "%u, async PIO not supported.", __LINE__); + return -ENOTSUP; } - if (!ctx->xfer.async) { - - while (ctx->packets_left > 0) { - packet_idx = ctx->xfer.num_packet - ctx->packets_left; - packet = &ctx->xfer.packets[packet_idx]; - trans.eDirection = packet->dir; - trans.ui16DeviceInstr = (uint16_t)packet->cmd; - trans.ui32DeviceAddr = packet->address; - trans.ui32NumBytes = packet->num_bytes; - trans.pui32Buffer = (uint32_t *)packet->data_buf; - - ret = am_hal_mspi_blocking_transfer(data->mspiHandle, &trans, - MSPI_TIMEOUT_US); - ctx->packets_left--; - if (ret) { - ret = -EIO; - goto pio_err; - } - } + mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true); - } else { + ret = mspi_pio_prepare(controller, &trans); + if (ret) { + goto pio_err; + } - ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_DMACMP); + while (ctx->packets_left > 0) { + packet_idx = ctx->xfer.num_packet - ctx->packets_left; + packet = &ctx->xfer.packets[packet_idx]; + trans.eDirection = packet->dir; + trans.ui16DeviceInstr = (uint16_t)packet->cmd; + trans.ui32DeviceAddr = packet->address; + trans.ui32NumBytes = packet->num_bytes; + trans.pui32Buffer = (uint32_t *)packet->data_buf; + + ret = am_hal_mspi_blocking_transfer(data->mspiHandle, &trans, + MSPI_TIMEOUT_US); + ctx->packets_left--; if (ret) { - LOG_INST_ERR(cfg->log, "%u, failed to enable interrupt.", __LINE__); - ret = -EHOSTDOWN; + ret = -EIO; goto pio_err; } - - while (ctx->packets_left > 0) { - packet_idx = ctx->xfer.num_packet - ctx->packets_left; - packet = &ctx->xfer.packets[packet_idx]; - trans.eDirection = packet->dir; - trans.ui16DeviceInstr = (uint16_t)packet->cmd; - trans.ui32DeviceAddr = packet->address; - trans.ui32NumBytes = packet->num_bytes; - trans.pui32Buffer = (uint32_t *)packet->data_buf; - - if (ctx->callback && packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { - ctx->callback_ctx->mspi_evt.evt_type = MSPI_BUS_XFER_COMPLETE; - ctx->callback_ctx->mspi_evt.evt_data.controller = controller; - ctx->callback_ctx->mspi_evt.evt_data.dev_id = data->ctx.owner; - ctx->callback_ctx->mspi_evt.evt_data.packet = packet; - ctx->callback_ctx->mspi_evt.evt_data.packet_idx = packet_idx; - ctx->callback_ctx->mspi_evt.evt_data.status = ~0; - } - - am_hal_mspi_callback_t callback = NULL; - - if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { - callback = (am_hal_mspi_callback_t)ctx->callback; - } - - ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_PIO, - callback, (void *)ctx->callback_ctx); - ctx->packets_left--; - if (ret) { - if (ret == AM_HAL_STATUS_OUT_OF_RANGE) { - ret = -ENOMEM; - } else { - ret = -EIO; - } - goto pio_err; - } - } } pio_err: @@ -1123,9 +1130,9 @@ static int mspi_dma_transceive(const struct device *controller, const struct mspi_ambiq_config *cfg = controller->config; struct mspi_ambiq_data *data = controller->data; struct mspi_context *ctx = &data->ctx; - am_hal_mspi_dma_transfer_t trans; + am_hal_mspi_seq_device_cfg_t seq_dev_cfg; + am_hal_mspi_cq_scatter_xfer_t trans; int ret = 0; - int cfg_flag = 0; if (xfer->num_packet == 0 || !xfer->packets || @@ -1133,20 +1140,17 @@ static int mspi_dma_transceive(const struct device *controller, return -EFAULT; } - cfg_flag = mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true); - /** For async, user must make sure when cfg_flag = 0 
the dummy and instr addr length - * in mspi_xfer of the two calls are the same if the first one has not finished yet. - */ - if (cfg_flag) { - if (cfg_flag == 1) { - ret = mspi_xfer_config(controller, xfer); - if (ret) { - goto dma_err; - } - } else { - ret = cfg_flag; - goto dma_err; - } + if (xfer->num_packet > MSPI_CQ_MAX_ENTRY) { + LOG_INST_ERR(cfg->log, "%u, Number of packets exceed %ld", + __LINE__, MSPI_CQ_MAX_ENTRY); + return -ENOTSUP; + } + + mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true); + + ret = mspi_xfer_config_update(controller, xfer); + if (ret) { + goto dma_err; } ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_DMACMP); @@ -1156,57 +1160,92 @@ static int mspi_dma_transceive(const struct device *controller, goto dma_err; } + seq_dev_cfg.ui8InstrLen = ctx->xfer.cmd_length; + seq_dev_cfg.ui8AddrLen = ctx->xfer.addr_length; + seq_dev_cfg.ui8Turnaround = ctx->xfer.rx_dummy; + seq_dev_cfg.ui8WriteLatency = ctx->xfer.tx_dummy; + seq_dev_cfg.ui16TotalPackets = ctx->xfer.num_packet; + seq_dev_cfg.eSeqMode = ctx->xfer.num_packet == 1 ? AM_HAL_MSPI_SEQ_NORM_MODE : + cfg->seq_mode; + if (ctx->callback_ctx) { + struct mspi_event_data *evt_data = &ctx->callback_ctx->mspi_evt.evt_data; + + evt_data->controller = controller; + evt_data->dev_id = ctx->owner; + evt_data->packet = &ctx->xfer.packets[0]; + evt_data->packet_idx = 0; + } + + ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_SET_SEQMODE, &seq_dev_cfg); + if (ret) { + LOG_INST_ERR(cfg->log, "%u, failed to set sequence mode.", __LINE__); + ret = -EHOSTDOWN; + goto dma_err; + } + while (ctx->packets_left > 0) { uint32_t packet_idx = ctx->xfer.num_packet - ctx->packets_left; - const struct mspi_xfer_packet *packet; + const struct mspi_xfer_packet *packet = &ctx->xfer.packets[packet_idx]; - packet = &ctx->xfer.packets[packet_idx]; - trans.ui8Priority = ctx->xfer.priority; trans.eDirection = packet->dir; - trans.ui32TransferCount = packet->num_bytes; + trans.ui16DeviceInstr = packet->cmd; trans.ui32DeviceAddress = packet->address; trans.ui32SRAMAddress = (uint32_t)packet->data_buf; - trans.ui32PauseCondition = 0; - trans.ui32StatusSetClr = 0; + trans.ui32TransferCount = packet->num_bytes; + trans.ui8Priority = ctx->xfer.priority; + trans.ui16PacketIndex = packet_idx; if (ctx->xfer.async) { - - if (ctx->callback && packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { - ctx->callback_ctx->mspi_evt.evt_type = MSPI_BUS_XFER_COMPLETE; - ctx->callback_ctx->mspi_evt.evt_data.controller = controller; - ctx->callback_ctx->mspi_evt.evt_data.dev_id = data->ctx.owner; - ctx->callback_ctx->mspi_evt.evt_data.packet = packet; - ctx->callback_ctx->mspi_evt.evt_data.packet_idx = packet_idx; - ctx->callback_ctx->mspi_evt.evt_data.status = ~0; - } - - am_hal_mspi_callback_t callback = NULL; - - if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) { - callback = (am_hal_mspi_callback_t)ctx->callback; - } - - ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_DMA, - callback, (void *)ctx->callback_ctx); + ret = am_hal_mspi_cq_scatter_xfer(data->mspiHandle, &trans, + (am_hal_mspi_callback_t)ctx->callback, + (void *)ctx->callback_ctx); } else { - ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_DMA, - hal_mspi_callback, - (void *)controller); + ret = am_hal_mspi_cq_scatter_xfer(data->mspiHandle, &trans, + hal_mspi_callback, (void *)controller); } - ctx->packets_left--; + if (ret) { if (ret == AM_HAL_STATUS_OUT_OF_RANGE) { + LOG_INST_ERR(cfg->log, "%u, failed to transfer, cq out of 
memory.", + __LINE__); ret = -ENOMEM; } else { + LOG_INST_ERR(cfg->log, "%u, failed to transfer, error %d", + __LINE__, ret); ret = -EIO; + am_hal_mspi_disable(data->mspiHandle); + am_hal_mspi_enable(data->mspiHandle); } goto dma_err; } + + ctx->packets_left--; } if (!ctx->xfer.async) { - while (ctx->packets_done < ctx->xfer.num_packet) { - k_busy_wait(10); + k_timeout_t timeout = K_MSEC(ctx->xfer.timeout); + + if (k_sem_take(&ctx->sync, timeout)) { + LOG_INST_ERR(cfg->log, "%u, failed to transfer, timeout.", __LINE__); + ret = -ETIMEDOUT; + } + + if (ret) { + /* No transfer should exceed this timeout */ + if (ctx->xfer.timeout == CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE || + ctx->packets_done != ctx->xfer.num_packet) { + LOG_INST_ERR(cfg->log, "%u, unknown error, attempting recovery.", + __LINE__); + ret = -EIO; + am_hal_mspi_disable(data->mspiHandle); + am_hal_mspi_enable(data->mspiHandle); + } + } + + if (ctx->sync_status) { + LOG_INST_ERR(cfg->log, "%u, failed to transfer, code:%d.", + __LINE__, ctx->sync_status); + ret = -EIO; } } @@ -1219,7 +1258,9 @@ static int mspi_ambiq_transceive(const struct device *controller, const struct mspi_dev_id *dev_id, const struct mspi_xfer *xfer) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; mspi_callback_handler_t cb = NULL; struct mspi_callback_context *cb_ctx = NULL; @@ -1249,7 +1290,9 @@ static int mspi_ambiq_register_callback(const struct device *controller, mspi_callback_handler_t cb, struct mspi_callback_context *ctx) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; if (mspi_is_inp(controller)) { @@ -1274,7 +1317,9 @@ static int mspi_ambiq_register_callback(const struct device *controller, #if CONFIG_PM_DEVICE static int mspi_ambiq_pm_action(const struct device *controller, enum pm_device_action action) { +#if CONFIG_LOG const struct mspi_ambiq_config *cfg = controller->config; +#endif struct mspi_ambiq_data *data = controller->data; int ret = 0; @@ -1286,8 +1331,8 @@ static int mspi_ambiq_pm_action(const struct device *controller, enum pm_device_ case PM_DEVICE_ACTION_TURN_ON: ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_WAKE, true); if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to power on MSPI, code:%d.", __LINE__, - ret); + LOG_INST_ERR(cfg->log, "%u, fail to power on MSPI, code:%d.", + __LINE__, ret); return -EHOSTDOWN; } break; @@ -1295,8 +1340,8 @@ static int mspi_ambiq_pm_action(const struct device *controller, enum pm_device_ case PM_DEVICE_ACTION_TURN_OFF: ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_DEEPSLEEP, true); if (ret) { - LOG_INST_ERR(cfg->log, "%u, fail to power off MSPI, code:%d.", __LINE__, - ret); + LOG_INST_ERR(cfg->log, "%u, fail to power off MSPI, code:%d.", + __LINE__, ret); return -EHOSTDOWN; } break; @@ -1416,6 +1461,7 @@ static struct mspi_driver_api mspi_ambiq_driver_api = { .cbs = {0}, \ .cb_ctxs = {0}, \ .ctx.lock = Z_SEM_INITIALIZER(mspi_ambiq_data##n.ctx.lock, 0, 1), \ + .ctx.sync = Z_SEM_INITIALIZER(mspi_ambiq_data##n.ctx.sync, 0, 1), \ .ctx.callback = 0, \ .ctx.callback_ctx = 0, \ }; \ @@ -1428,6 +1474,7 @@ static struct mspi_driver_api mspi_ambiq_driver_api = { .mspicfg.re_init = false, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .irq_cfg_func = mspi_ambiq_irq_cfg_func_##n, \ + .seq_mode = AM_HAL_MSPI_SEQ_STREAM_MODE, \ LOG_INSTANCE_PTR_INIT(log, DT_DRV_INST(n), mspi##n) \ }; \ 
PM_DEVICE_DT_INST_DEFINE(n, mspi_ambiq_pm_action); \ diff --git a/include/zephyr/drivers/mspi.h b/include/zephyr/drivers/mspi.h index 3c8a535ab3ed..811c3360a8e8 100644 --- a/include/zephyr/drivers/mspi.h +++ b/include/zephyr/drivers/mspi.h @@ -129,7 +129,7 @@ enum mspi_bus_event { /** * @brief MSPI bus event callback mask - * This is a preliminary list same as mspi_bus_event. I encourage the + * This is a preliminary list same as mspi_bus_event. I encourage the * community to fill it up. */ enum mspi_bus_event_cb_mask { @@ -147,6 +147,16 @@ enum mspi_xfer_mode { MSPI_DMA, }; +/** + * @brief MSPI transfer priority + * This is a preliminary list of priorities that are typically used with DMA + */ +enum mspi_xfer_priority { + MSPI_XFER_PRIORITY_LOW, + MSPI_XFER_PRIORITY_MEDIUM, + MSPI_XFER_PRIORITY_HIGH, +}; + /** * @brief MSPI transfer directions */ @@ -401,10 +411,8 @@ struct mspi_xfer { bool hold_ce; /** @brief Software CE control */ struct mspi_ce_control ce_sw_ctrl; - /** @brief Priority 0 = Low (best effort) - * 1 = High (service immediately) - */ - uint8_t priority; + /** @brief MSPI transfer priority */ + enum mspi_xfer_priority priority; /** @brief Transfer packets */ const struct mspi_xfer_packet *packets; /** @brief Number of transfer packets */ diff --git a/samples/drivers/jesd216/boards/apollo3p_evb.conf b/samples/drivers/jesd216/boards/apollo3p_evb.conf new file mode 100644 index 000000000000..e3c0311ec6f4 --- /dev/null +++ b/samples/drivers/jesd216/boards/apollo3p_evb.conf @@ -0,0 +1,9 @@ +# Copyright (c) 2024 Ambiq Micro Inc. +# SPDX-License-Identifier: Apache-2.0 + +CONFIG_FLASH_MSPI_ATXP032=y +CONFIG_MSPI_INIT_PRIORITY=40 +CONFIG_FLASH_INIT_PRIORITY=50 +CONFIG_PM_DEVICE=y +CONFIG_SPI=n +CONFIG_SPI_NOR=n diff --git a/samples/drivers/jesd216/boards/apollo3p_evb.overlay b/samples/drivers/jesd216/boards/apollo3p_evb.overlay new file mode 100644 index 000000000000..cef3bf322ed6 --- /dev/null +++ b/samples/drivers/jesd216/boards/apollo3p_evb.overlay @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2024 Ambiq Micro Inc. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + aliases { + mspi0 = &mspi1; + }; +}; + +&gpio32_63 { + status = "okay"; +}; + +&mspi1 { + + compatible = "ambiq,mspi-controller"; + pinctrl-0 = <&mspi1_default>; + pinctrl-1 = <&mspi1_sleep>; + pinctrl-2 = <&mspi1_flash>; + pinctrl-names = "default","sleep","flash"; + status = "okay"; + + ce-gpios = <&gpio32_63 18 GPIO_ACTIVE_LOW>; + + cmdq-buffer-location = ".mspi_buff"; + cmdq-buffer-size = <256>; + + atxp032: atxp032@0 { + compatible = "ambiq,mspi-device", "mspi-atxp032"; + size = ; + reg = <0>; + status = "okay"; + mspi-max-frequency = <48000000>; + mspi-io-mode = "MSPI_IO_MODE_OCTAL"; + mspi-data-rate = "MSPI_DATA_RATE_SINGLE"; + mspi-hardware-ce-num = <0>; + read-command = <0x0B>; + write-command = <0x02>; + command-length = "INSTR_1_BYTE"; + address-length = "ADDR_4_BYTE"; + rx-dummy = <8>; + tx-dummy = <0>; + xip-config = <1 0 0 0>; + ce-break-config = <0 0>; + ambiq,timing-config-mask = <3>; + ambiq,timing-config = <0 8 0 0 0 0 0 0>; + }; + +}; + +&pinctrl { + + mspi1_sleep: mspi1_sleep{ + group1 { + pinmux = , + , + , + , + , + , + , + , + , + , + ; + }; + }; + + mspi1_flash: mspi1_flash{ + + group1 { + pinmux = , + , + , + , + , + , + , + ; + drive-strength = "0.75"; + ambiq,iom-mspi = <0>; + ambiq,iom-num = <1>; + }; + + group2 { + pinmux = ; + drive-strength = "0.75"; + ambiq,iom-mspi = <0>; + ambiq,iom-num = <2>; + }; + + group3 { + pinmux = ; + drive-strength = "1.0"; + ambiq,iom-mspi = <0>; + ambiq,iom-num = <1>; + }; + + group4 { + pinmux = ; + }; + + }; + +}; diff --git a/samples/drivers/jesd216/sample.yaml b/samples/drivers/jesd216/sample.yaml index 5dcdc6678dc4..301738a8e9ea 100644 --- a/samples/drivers/jesd216/sample.yaml +++ b/samples/drivers/jesd216/sample.yaml @@ -35,3 +35,12 @@ tests: or dt_compat_enabled("st,stm32-ospi-nor") or dt_compat_enabled("st,stm32-qspi-nor") depends_on: spi + sample.drivers.jesd216.atxp032: + tags: + - mspi + filter: dt_compat_enabled("mspi-atxp032") + platform_allow: + - apollo3p_evb + integration_platforms: + - apollo3p_evb + depends_on: mspi diff --git a/samples/drivers/jesd216/src/main.c b/samples/drivers/jesd216/src/main.c index 480649397b6b..bcf3c779ff83 100644 --- a/samples/drivers/jesd216/src/main.c +++ b/samples/drivers/jesd216/src/main.c @@ -26,6 +26,8 @@ #define FLASH_NODE DT_COMPAT_GET_ANY_STATUS_OKAY(nxp_s32_qspi_nor) #elif DT_HAS_COMPAT_STATUS_OKAY(nxp_imx_flexspi_nor) #define FLASH_NODE DT_COMPAT_GET_ANY_STATUS_OKAY(nxp_imx_flexspi_nor) +#elif DT_HAS_COMPAT_STATUS_OKAY(mspi_atxp032) +#define FLASH_NODE DT_COMPAT_GET_ANY_STATUS_OKAY(mspi_atxp032) #else #error Unsupported flash driver #define FLASH_NODE DT_INVALID_NODE diff --git a/samples/drivers/mspi/mspi_async/README.rst b/samples/drivers/mspi/mspi_async/README.rst index a3c52013cc53..b026b8e9c650 100644 --- a/samples/drivers/mspi/mspi_async/README.rst +++ b/samples/drivers/mspi/mspi_async/README.rst @@ -32,5 +32,8 @@ Sample Output .. code-block:: console *** Booting Zephyr OS build zephyr-v3.5.0-8581-gc80b243c7598 *** - w:3,r:3 + + Starting the mspi async example.. 
+ Waiting for complete..., xfer1 completed:3, xfer2 completed:2 + xfer1 completed:4, xfer2 completed:4 Read data matches written data diff --git a/samples/drivers/mspi/mspi_async/src/main.c b/samples/drivers/mspi/mspi_async/src/main.c index 7c2f17a9e378..cc081203d164 100644 --- a/samples/drivers/mspi/mspi_async/src/main.c +++ b/samples/drivers/mspi/mspi_async/src/main.c @@ -14,13 +14,15 @@ #define MSPI_BUS DT_BUS(DT_ALIAS(dev0)) #define MSPI_TARGET DT_ALIAS(dev0) +/* This size is arbitrary and should be modified based on how fast the controller is */ +#define BUF_SIZE 16*1024 -#define BUF_SIZE 1024 - -#if CONFIG_MEMC_MSPI_APS6404L -#define DEVICE_MEM_WRITE_INSTR 0x38 -#define DEVICE_MEM_READ_INSTR 0xEB -#endif +#define DEVICE_MEM_WRITE_INSTR DT_PROP(DT_ALIAS(dev0), write_command) +#define DEVICE_MEM_READ_INSTR DT_PROP(DT_ALIAS(dev0), read_command) +#define DEVICE_MEM_TX_DUMMY DT_PROP(DT_ALIAS(dev0), tx_dummy) +#define DEVICE_MEM_RX_DUMMY DT_PROP(DT_ALIAS(dev0), rx_dummy) +#define DEVICE_MEM_CMD_LENGTH DT_ENUM_IDX(DT_ALIAS(dev0), command_length) +#define DEVICE_MEM_ADDR_LENGTH DT_ENUM_IDX(DT_ALIAS(dev0), address_length) uint8_t memc_write_buffer[BUF_SIZE]; uint8_t memc_read_buffer[BUF_SIZE]; @@ -30,82 +32,82 @@ struct user_context { uint32_t total_packets; }; -void async_cb(struct mspi_callback_context *mspi_cb_ctx, uint32_t status) +void async_cb(struct mspi_callback_context *mspi_cb_ctx) { - volatile struct user_context *usr_ctx = mspi_cb_ctx->ctx; + struct user_context *usr_ctx = mspi_cb_ctx->ctx; + struct mspi_event *evt = &mspi_cb_ctx->mspi_evt; - mspi_cb_ctx->mspi_evt.evt_data.status = status; - if (mspi_cb_ctx->mspi_evt.evt_data.packet_idx == usr_ctx->total_packets - 1) { + if (evt->evt_data.packet_idx == usr_ctx->total_packets - 1) { usr_ctx->status = 0; } } - +/* The packets doesn't have to have equal size or consecutive address or same transfer direction */ struct mspi_xfer_packet packet1[] = { { .dir = MSPI_TX, .cmd = DEVICE_MEM_WRITE_INSTR, .address = 0, - .num_bytes = 256, + .num_bytes = BUF_SIZE / 4, .data_buf = memc_write_buffer, .cb_mask = MSPI_BUS_NO_CB, }, { - .dir = MSPI_TX, - .cmd = DEVICE_MEM_WRITE_INSTR, - .address = 256, - .num_bytes = 256, - .data_buf = memc_write_buffer + 256, + .dir = MSPI_RX, + .cmd = DEVICE_MEM_READ_INSTR, + .address = 0, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_read_buffer, .cb_mask = MSPI_BUS_NO_CB, }, { .dir = MSPI_TX, .cmd = DEVICE_MEM_WRITE_INSTR, - .address = 512, - .num_bytes = 256, - .data_buf = memc_write_buffer + 512, + .address = BUF_SIZE / 4, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_write_buffer + BUF_SIZE / 4, .cb_mask = MSPI_BUS_NO_CB, }, { - .dir = MSPI_TX, - .cmd = DEVICE_MEM_WRITE_INSTR, - .address = 512 + 256, - .num_bytes = 256, - .data_buf = memc_write_buffer + 512 + 256, + .dir = MSPI_RX, + .cmd = DEVICE_MEM_READ_INSTR, + .address = BUF_SIZE / 4, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_read_buffer + BUF_SIZE / 4, .cb_mask = MSPI_BUS_XFER_COMPLETE_CB, }, }; struct mspi_xfer_packet packet2[] = { { - .dir = MSPI_RX, - .cmd = DEVICE_MEM_READ_INSTR, - .address = 0, - .num_bytes = 256, - .data_buf = memc_read_buffer, + .dir = MSPI_TX, + .cmd = DEVICE_MEM_WRITE_INSTR, + .address = BUF_SIZE / 2, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_write_buffer + BUF_SIZE / 2, .cb_mask = MSPI_BUS_NO_CB, }, { - .dir = MSPI_RX, - .cmd = DEVICE_MEM_READ_INSTR, - .address = 256, - .num_bytes = 256, - .data_buf = memc_read_buffer + 256, + .dir = MSPI_TX, + .cmd = DEVICE_MEM_WRITE_INSTR, + .address = BUF_SIZE / 2 + BUF_SIZE 
/ 4, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_write_buffer + BUF_SIZE / 2 + BUF_SIZE / 4, .cb_mask = MSPI_BUS_NO_CB, }, { .dir = MSPI_RX, .cmd = DEVICE_MEM_READ_INSTR, - .address = 512, - .num_bytes = 256, - .data_buf = memc_read_buffer + 512, + .address = BUF_SIZE / 2 + BUF_SIZE / 4, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_read_buffer + BUF_SIZE / 2 + BUF_SIZE / 4, .cb_mask = MSPI_BUS_NO_CB, }, { .dir = MSPI_RX, .cmd = DEVICE_MEM_READ_INSTR, - .address = 512 + 256, - .num_bytes = 256, - .data_buf = memc_read_buffer + 512 + 256, + .address = BUF_SIZE / 2, + .num_bytes = BUF_SIZE / 4, + .data_buf = memc_read_buffer + BUF_SIZE / 2, .cb_mask = MSPI_BUS_XFER_COMPLETE_CB, }, }; @@ -113,66 +115,77 @@ struct mspi_xfer_packet packet2[] = { struct mspi_xfer xfer1 = { .async = true, .xfer_mode = MSPI_DMA, - .tx_dummy = 0, - .cmd_length = 1, - .addr_length = 3, - .priority = 1, + .tx_dummy = DEVICE_MEM_TX_DUMMY, + .rx_dummy = DEVICE_MEM_RX_DUMMY, + .cmd_length = DEVICE_MEM_CMD_LENGTH, + .addr_length = DEVICE_MEM_ADDR_LENGTH, + .priority = MSPI_XFER_PRIORITY_MEDIUM, .packets = (struct mspi_xfer_packet *)&packet1, .num_packet = sizeof(packet1) / sizeof(struct mspi_xfer_packet), + .timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE, }; struct mspi_xfer xfer2 = { .async = true, .xfer_mode = MSPI_DMA, - .rx_dummy = 6, - .cmd_length = 1, - .addr_length = 3, - .priority = 1, + .tx_dummy = DEVICE_MEM_TX_DUMMY, + .rx_dummy = DEVICE_MEM_RX_DUMMY, + .cmd_length = DEVICE_MEM_CMD_LENGTH, + .addr_length = DEVICE_MEM_ADDR_LENGTH, + .priority = MSPI_XFER_PRIORITY_MEDIUM, .packets = (struct mspi_xfer_packet *)&packet2, .num_packet = sizeof(packet2) / sizeof(struct mspi_xfer_packet), + .timeout = CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE, }; int main(void) { const struct device *controller = DEVICE_DT_GET(MSPI_BUS); struct mspi_dev_id dev_id = MSPI_DEVICE_ID_DT(MSPI_TARGET); - struct mspi_callback_context cb_ctx1, cb_ctx2; - volatile struct user_context write_ctx, read_ctx; + volatile struct mspi_callback_context cb_ctx1, cb_ctx2; + volatile struct user_context usr_ctx1, usr_ctx2; int i, j; int ret; + printk("\nStarting the mspi async example..\n"); + /* Initialize write buffer */ for (i = 0; i < BUF_SIZE; i++) { memc_write_buffer[i] = (uint8_t)i; } + memset((void *)&cb_ctx1.mspi_evt, 0, sizeof(struct mspi_event)); + memset((void *)&cb_ctx2.mspi_evt, 0, sizeof(struct mspi_event)); + ret = mspi_dev_config(controller, &dev_id, MSPI_DEVICE_CONFIG_NONE, NULL); if (ret) { printk("Failed to get controller access\n"); return 1; } - write_ctx.total_packets = xfer1.num_packet; - write_ctx.status = ~0; - cb_ctx1.ctx = (void *)&write_ctx; + usr_ctx1.total_packets = xfer1.num_packet; + usr_ctx1.status = ~0; + cb_ctx1.ctx = (void *)&usr_ctx1; ret = mspi_register_callback(controller, &dev_id, MSPI_BUS_XFER_COMPLETE, - (mspi_callback_handler_t)async_cb, &cb_ctx1); + (mspi_callback_handler_t)async_cb, + (struct mspi_callback_context *)&cb_ctx1); if (ret) { - printk("Failed to register callback\n"); + printk("Failed to register callback and context for xfer1\n"); return 1; } ret = mspi_transceive(controller, &dev_id, &xfer1); if (ret) { - printk("Failed to send transceive\n"); + printk("Failed to send transceive xfer1\n"); return 1; } - read_ctx.total_packets = xfer2.num_packet; - read_ctx.status = ~0; - cb_ctx2.ctx = (void *)&read_ctx; + usr_ctx2.total_packets = xfer2.num_packet; + usr_ctx2.status = ~0; + cb_ctx2.ctx = (void *)&usr_ctx2; ret = mspi_register_callback(controller, &dev_id, MSPI_BUS_XFER_COMPLETE, - 
(mspi_callback_handler_t)async_cb, &cb_ctx2); + (mspi_callback_handler_t)async_cb, + (struct mspi_callback_context *)&cb_ctx2); if (ret) { printk("Failed to register callback\n"); return 1; } @@ -180,17 +193,21 @@ int main(void) ret = mspi_transceive(controller, &dev_id, &xfer2); if (ret) { - printk("Failed to send transceive\n"); + printk("Failed to send transceive xfer2\n"); return 1; } - while (write_ctx.status != 0 || read_ctx.status != 0) { - printk("Waiting for complete..., write completed:%d, read completed:%d\n", + while (usr_ctx1.status != 0 || usr_ctx2.status != 0) { + printk("Waiting for complete..., xfer1 completed:%d, xfer2 completed:%d\n", cb_ctx1.mspi_evt.evt_data.packet_idx, cb_ctx2.mspi_evt.evt_data.packet_idx); k_busy_wait(100000); } + printk("xfer1 completed:%d, xfer2 completed:%d\n", + cb_ctx1.mspi_evt.evt_data.packet_idx, + cb_ctx2.mspi_evt.evt_data.packet_idx); + for (j = 0; j < BUF_SIZE; j++) { if (memc_write_buffer[j] != memc_read_buffer[j]) { printk("Error: data differs at offset %d\n", j); diff --git a/west.yml b/west.yml index 1a1c8d28bdbe..dc57469552a6 100644 --- a/west.yml +++ b/west.yml @@ -147,7 +147,7 @@ manifest: groups: - hal - name: hal_ambiq - revision: 87a188b91aca22ce3ce7deb4a1cbf7780d784673 + revision: 1f1c8f72d686cbc626b9b82e75dc32d3148c2e2e path: modules/hal/ambiq groups: - hal