From c9213886d25257dae221b005ad09e8dc4673d760 Mon Sep 17 00:00:00 2001
From: Nikodem Kastelik
Date: Thu, 31 Jul 2025 08:32:18 +0200
Subject: [PATCH 1/2] Revert "[nrf fromlist] drivers: spi: nrfx_spim: use dmm"

This reverts commit 88454f25331307067f2267ceeac34e6d27537ddc.

Signed-off-by: Nikodem Kastelik
---
 drivers/spi/spi_nrfx_spim.c | 75 +++++++++++++++++++------------------
 1 file changed, 38 insertions(+), 37 deletions(-)

diff --git a/drivers/spi/spi_nrfx_spim.c b/drivers/spi/spi_nrfx_spim.c
index 5d5d0b4ad95..438a1b22999 100644
--- a/drivers/spi/spi_nrfx_spim.c
+++ b/drivers/spi/spi_nrfx_spim.c
@@ -16,7 +16,6 @@
 #ifdef CONFIG_SOC_NRF54H20_GPD
 #include 
 #endif
-#include 
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 #include 
 #endif
@@ -124,6 +123,9 @@ struct spi_nrfx_config {
 #endif
 	uint32_t wake_pin;
 	nrfx_gpiote_t wake_gpiote;
+#ifdef CONFIG_DCACHE
+	uint32_t mem_attr;
+#endif
 #ifdef USE_CLOCK_REQUESTS
 	const struct device *clk_dev;
 	struct nrf_clock_spec clk_spec;
@@ -132,7 +134,6 @@ struct spi_nrfx_config {
 	bool cross_domain;
 	int8_t default_port;
 #endif
-	void *mem_reg;
 };
 
 static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context);
@@ -503,6 +504,11 @@ static void transfer_next_chunk(const struct device *dev)
 		}
 
 		memcpy(dev_data->tx_buffer, tx_buf, chunk_len);
+#ifdef CONFIG_DCACHE
+		if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
+			sys_cache_data_flush_range(dev_data->tx_buffer, chunk_len);
+		}
+#endif
 		tx_buf = dev_data->tx_buffer;
 	}
 
@@ -519,20 +525,10 @@ static void transfer_next_chunk(const struct device *dev)
 
 		dev_data->chunk_len = chunk_len;
 
-		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
-		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
-
-		error = dmm_buffer_out_prepare(dev_config->mem_reg, tx_buf, xfer.tx_length,
-					       (void **)&xfer.p_tx_buffer);
-		if (error != 0) {
-			goto out_alloc_failed;
-		}
-
-		error = dmm_buffer_in_prepare(dev_config->mem_reg, rx_buf, xfer.rx_length,
-					      (void **)&xfer.p_rx_buffer);
-		if (error != 0) {
-			goto in_alloc_failed;
-		}
+		xfer.p_tx_buffer = tx_buf;
+		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
+		xfer.p_rx_buffer = rx_buf;
+		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
 
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 		if (xfer.rx_length == 1 && xfer.tx_length <= 1) {
@@ -555,13 +551,6 @@ static void transfer_next_chunk(const struct device *dev)
 			anomaly_58_workaround_clear(dev_data);
 #endif
 		}
-
-		/* On nrfx_spim_xfer() error */
-		dmm_buffer_in_release(dev_config->mem_reg, rx_buf, xfer.rx_length,
-				      (void **)&xfer.p_rx_buffer);
-in_alloc_failed:
-		dmm_buffer_out_release(dev_config->mem_reg, (void **)&xfer.p_tx_buffer);
-out_alloc_failed:
 	}
 
 	finish_transaction(dev, error);
@@ -571,7 +560,9 @@ static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
 {
 	const struct device *dev = p_context;
 	struct spi_nrfx_data *dev_data = dev->data;
+#ifdef CONFIG_DCACHE
 	const struct spi_nrfx_config *dev_config = dev->config;
+#endif
 
 	if (p_event->type == NRFX_SPIM_EVENT_DONE) {
 		/* Chunk length is set to 0 when a transaction is aborted
@@ -585,21 +576,15 @@ static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 		anomaly_58_workaround_clear(dev_data);
 #endif
-
-		if (spi_context_tx_buf_on(&dev_data->ctx)) {
-			dmm_buffer_out_release(dev_config->mem_reg,
-					       (void **)p_event->xfer_desc.p_tx_buffer);
-		}
-
-		if (spi_context_rx_buf_on(&dev_data->ctx)) {
-			dmm_buffer_in_release(dev_config->mem_reg, dev_data->ctx.rx_buf,
-					      dev_data->chunk_len, p_event->xfer_desc.p_rx_buffer);
-		}
-
 #ifdef SPI_BUFFER_IN_RAM
 		if (spi_context_rx_buf_on(&dev_data->ctx) &&
 		    p_event->xfer_desc.p_rx_buffer != NULL &&
 		    p_event->xfer_desc.p_rx_buffer != dev_data->ctx.rx_buf) {
+#ifdef CONFIG_DCACHE
+			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
+				sys_cache_data_invd_range(dev_data->rx_buffer, dev_data->chunk_len);
+			}
+#endif
 			(void)memcpy(dev_data->ctx.rx_buf,
 				     dev_data->rx_buffer,
 				     dev_data->chunk_len);
@@ -893,6 +878,8 @@ static int spi_nrfx_deinit(const struct device *dev)
 	return 0;
 }
 
+#define SPIM_MEM_REGION(idx) DT_PHANDLE(SPIM(idx), memory_regions)
+
 #define SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)				\
 	IF_ENABLED(NRFX_SPIM_EXTENDED_ENABLED,				\
 		(.dcx_pin = NRF_SPIM_PIN_NOT_CONNECTED,			\
@@ -901,6 +888,13 @@ static int spi_nrfx_deinit(const struct device *dev)
 			())						\
 	))
 
+#define SPIM_GET_MEM_ATTR(idx)						\
+	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			\
+		(COND_CODE_1(DT_NODE_HAS_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr), \
+			(DT_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr)), \
+			(0))),						\
+		(0))
+
 /* Fast instances depend on the global HSFLL clock controller (as they need
  * to request the highest frequency from it to operate correctly), so they
  * must be initialized after that controller driver, hence the default SPI
@@ -927,10 +921,10 @@ static int spi_nrfx_deinit(const struct device *dev)
 	IF_ENABLED(SPI_BUFFER_IN_RAM,					\
 		(static uint8_t spim_##idx##_tx_buffer			\
 			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		\
-			DMM_MEMORY_SECTION(SPIM(idx));			\
+			SPIM_MEMORY_SECTION(idx);			\
		 static uint8_t spim_##idx##_rx_buffer			\
 			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		\
-			DMM_MEMORY_SECTION(SPIM(idx));))		\
+			SPIM_MEMORY_SECTION(idx);))			\
 	static struct spi_nrfx_data spi_##idx##_data = {		\
 		IF_ENABLED(CONFIG_MULTITHREADING,			\
 		(SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),))	\
@@ -967,6 +961,8 @@ static int spi_nrfx_deinit(const struct device *dev)
 		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPIM(idx), wake_gpios, \
 						    WAKE_PIN_NOT_USED),	\
 		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPIM(idx)),		\
+		IF_ENABLED(CONFIG_DCACHE,				\
+			(.mem_attr = SPIM_GET_MEM_ATTR(idx),))		\
 		IF_ENABLED(USE_CLOCK_REQUESTS,				\
 			(.clk_dev = SPIM_REQUESTS_CLOCK(SPIM(idx))	\
 				  ? DEVICE_DT_GET(DT_CLOCKS_CTLR(SPIM(idx))) \
@@ -979,7 +975,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 			 .default_port =				\
 				DT_PROP_OR(DT_PHANDLE(SPIM(idx),	\
 					default_gpio_port), port, -1),)) \
-		.mem_reg = DMM_DEV_TO_REG(SPIM(idx)),			\
 	};								\
 	BUILD_ASSERT(!SPIM_HAS_PROP(idx, wake_gpios) ||			\
 		     !(DT_GPIO_FLAGS(SPIM(idx), wake_gpios) & GPIO_ACTIVE_LOW),\
@@ -994,6 +989,12 @@ static int spi_nrfx_deinit(const struct device *dev)
 			 POST_KERNEL, SPIM_INIT_PRIORITY(idx),		\
 			 &spi_nrfx_driver_api)
 
+#define SPIM_MEMORY_SECTION(idx)					\
+	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			\
+		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \
+			SPIM_MEM_REGION(idx)))))),			\
+		())
+
 #define COND_NRF_SPIM_DEVICE(unused, prefix, i, _)			\
 	IF_ENABLED(CONFIG_HAS_HW_NRF_SPIM##prefix##i, (SPI_NRFX_SPIM_DEFINE(prefix##i);))

From 9a43aff1fbe16fa9c22dd8a1d73f2d58253ad2f6 Mon Sep 17 00:00:00 2001
From: Nikodem Kastelik
Date: Wed, 12 Jun 2024 12:33:35 +0200
Subject: [PATCH 2/2] [nrf fromlist] drivers: spi: nrfx_spim: use dmm

Some nRF SoCs (e.g. nRF54H20) can perform DMA transfers only from
specific memory regions - `dmm` facilitates that.

Upstream PR #: 93487

Signed-off-by: Nikodem Kastelik
---
 drivers/spi/spi_nrfx_spim.c | 75 ++++++++++++++++++-------------------
 1 file changed, 37 insertions(+), 38 deletions(-)

diff --git a/drivers/spi/spi_nrfx_spim.c b/drivers/spi/spi_nrfx_spim.c
index 438a1b22999..c8a0566b68a 100644
--- a/drivers/spi/spi_nrfx_spim.c
+++ b/drivers/spi/spi_nrfx_spim.c
@@ -16,6 +16,7 @@
 #ifdef CONFIG_SOC_NRF54H20_GPD
 #include 
 #endif
+#include 
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 #include 
 #endif
@@ -123,9 +124,6 @@ struct spi_nrfx_config {
 #endif
 	uint32_t wake_pin;
 	nrfx_gpiote_t wake_gpiote;
-#ifdef CONFIG_DCACHE
-	uint32_t mem_attr;
-#endif
 #ifdef USE_CLOCK_REQUESTS
 	const struct device *clk_dev;
 	struct nrf_clock_spec clk_spec;
@@ -134,6 +132,7 @@ struct spi_nrfx_config {
 	bool cross_domain;
 	int8_t default_port;
 #endif
+	void *mem_reg;
 };
 
 static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context);
@@ -504,11 +503,6 @@ static void transfer_next_chunk(const struct device *dev)
 		}
 
 		memcpy(dev_data->tx_buffer, tx_buf, chunk_len);
-#ifdef CONFIG_DCACHE
-		if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
-			sys_cache_data_flush_range(dev_data->tx_buffer, chunk_len);
-		}
-#endif
 		tx_buf = dev_data->tx_buffer;
 	}
 
@@ -525,10 +519,20 @@ static void transfer_next_chunk(const struct device *dev)
 
 		dev_data->chunk_len = chunk_len;
 
-		xfer.p_tx_buffer = tx_buf;
-		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
-		xfer.p_rx_buffer = rx_buf;
-		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
+		xfer.tx_length = spi_context_tx_buf_on(ctx) ? chunk_len : 0;
+		xfer.rx_length = spi_context_rx_buf_on(ctx) ? chunk_len : 0;
+
+		error = dmm_buffer_out_prepare(dev_config->mem_reg, tx_buf, xfer.tx_length,
+					       (void **)&xfer.p_tx_buffer);
+		if (error != 0) {
+			goto out_alloc_failed;
+		}
+
+		error = dmm_buffer_in_prepare(dev_config->mem_reg, rx_buf, xfer.rx_length,
+					      (void **)&xfer.p_rx_buffer);
+		if (error != 0) {
+			goto in_alloc_failed;
+		}
 
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 		if (xfer.rx_length == 1 && xfer.tx_length <= 1) {
@@ -551,8 +555,15 @@ static void transfer_next_chunk(const struct device *dev)
 			anomaly_58_workaround_clear(dev_data);
 #endif
 		}
+
+		/* On nrfx_spim_xfer() error */
+		dmm_buffer_in_release(dev_config->mem_reg, rx_buf, xfer.rx_length,
+				      (void **)&xfer.p_rx_buffer);
+in_alloc_failed:
+		dmm_buffer_out_release(dev_config->mem_reg, (void **)&xfer.p_tx_buffer);
 	}
+out_alloc_failed:
 
 	finish_transaction(dev, error);
 }
 
@@ -560,9 +571,7 @@ static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
 {
 	const struct device *dev = p_context;
 	struct spi_nrfx_data *dev_data = dev->data;
-#ifdef CONFIG_DCACHE
 	const struct spi_nrfx_config *dev_config = dev->config;
-#endif
 
 	if (p_event->type == NRFX_SPIM_EVENT_DONE) {
 		/* Chunk length is set to 0 when a transaction is aborted
@@ -576,15 +585,21 @@ static void event_handler(const nrfx_spim_evt_t *p_event, void *p_context)
 #ifdef CONFIG_SOC_NRF52832_ALLOW_SPIM_DESPITE_PAN_58
 		anomaly_58_workaround_clear(dev_data);
 #endif
+
+		if (spi_context_tx_buf_on(&dev_data->ctx)) {
+			dmm_buffer_out_release(dev_config->mem_reg,
+					       (void **)p_event->xfer_desc.p_tx_buffer);
+		}
+
+		if (spi_context_rx_buf_on(&dev_data->ctx)) {
+			dmm_buffer_in_release(dev_config->mem_reg, dev_data->ctx.rx_buf,
+					      dev_data->chunk_len, p_event->xfer_desc.p_rx_buffer);
+		}
+
 #ifdef SPI_BUFFER_IN_RAM
 		if (spi_context_rx_buf_on(&dev_data->ctx) &&
 		    p_event->xfer_desc.p_rx_buffer != NULL &&
 		    p_event->xfer_desc.p_rx_buffer != dev_data->ctx.rx_buf) {
-#ifdef CONFIG_DCACHE
-			if (dev_config->mem_attr & DT_MEM_CACHEABLE) {
-				sys_cache_data_invd_range(dev_data->rx_buffer, dev_data->chunk_len);
-			}
-#endif
 			(void)memcpy(dev_data->ctx.rx_buf,
 				     dev_data->rx_buffer,
 				     dev_data->chunk_len);
@@ -878,8 +893,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 	return 0;
 }
 
-#define SPIM_MEM_REGION(idx) DT_PHANDLE(SPIM(idx), memory_regions)
-
 #define SPI_NRFX_SPIM_EXTENDED_CONFIG(idx)				\
 	IF_ENABLED(NRFX_SPIM_EXTENDED_ENABLED,				\
 		(.dcx_pin = NRF_SPIM_PIN_NOT_CONNECTED,			\
@@ -888,13 +901,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 			())						\
 	))
 
-#define SPIM_GET_MEM_ATTR(idx)						\
-	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			\
-		(COND_CODE_1(DT_NODE_HAS_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr), \
-			(DT_PROP(SPIM_MEM_REGION(idx), zephyr_memory_attr)), \
-			(0))),						\
-		(0))
-
 /* Fast instances depend on the global HSFLL clock controller (as they need
  * to request the highest frequency from it to operate correctly), so they
  * must be initialized after that controller driver, hence the default SPI
@@ -921,10 +927,10 @@ static int spi_nrfx_deinit(const struct device *dev)
 	IF_ENABLED(SPI_BUFFER_IN_RAM,					\
 		(static uint8_t spim_##idx##_tx_buffer			\
 			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		\
-			SPIM_MEMORY_SECTION(idx);			\
+			DMM_MEMORY_SECTION(SPIM(idx));			\
		 static uint8_t spim_##idx##_rx_buffer			\
 			[CONFIG_SPI_NRFX_RAM_BUFFER_SIZE]		\
-			SPIM_MEMORY_SECTION(idx);))			\
+			DMM_MEMORY_SECTION(SPIM(idx));))		\
 	static struct spi_nrfx_data spi_##idx##_data = {		\
 		IF_ENABLED(CONFIG_MULTITHREADING,			\
 		(SPI_CONTEXT_INIT_LOCK(spi_##idx##_data, ctx),))	\
@@ -961,8 +967,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 		.wake_pin = NRF_DT_GPIOS_TO_PSEL_OR(SPIM(idx), wake_gpios, \
 						    WAKE_PIN_NOT_USED),	\
 		.wake_gpiote = WAKE_GPIOTE_INSTANCE(SPIM(idx)),		\
-		IF_ENABLED(CONFIG_DCACHE,				\
-			(.mem_attr = SPIM_GET_MEM_ATTR(idx),))		\
 		IF_ENABLED(USE_CLOCK_REQUESTS,				\
 			(.clk_dev = SPIM_REQUESTS_CLOCK(SPIM(idx))	\
 				  ? DEVICE_DT_GET(DT_CLOCKS_CTLR(SPIM(idx))) \
@@ -975,6 +979,7 @@ static int spi_nrfx_deinit(const struct device *dev)
 			 .default_port =				\
 				DT_PROP_OR(DT_PHANDLE(SPIM(idx),	\
 					default_gpio_port), port, -1),)) \
+		.mem_reg = DMM_DEV_TO_REG(SPIM(idx)),			\
 	};								\
 	BUILD_ASSERT(!SPIM_HAS_PROP(idx, wake_gpios) ||			\
 		     !(DT_GPIO_FLAGS(SPIM(idx), wake_gpios) & GPIO_ACTIVE_LOW),\
@@ -989,12 +994,6 @@ static int spi_nrfx_deinit(const struct device *dev)
 			 POST_KERNEL, SPIM_INIT_PRIORITY(idx),		\
 			 &spi_nrfx_driver_api)
 
-#define SPIM_MEMORY_SECTION(idx)					\
-	COND_CODE_1(SPIM_HAS_PROP(idx, memory_regions),			\
-		(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \
-			SPIM_MEM_REGION(idx)))))),			\
-		())
-
 #define COND_NRF_SPIM_DEVICE(unused, prefix, i, _)			\
 	IF_ENABLED(CONFIG_HAS_HW_NRF_SPIM##prefix##i, (SPI_NRFX_SPIM_DEFINE(prefix##i);))
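
Editor's note: for readers unfamiliar with dmm, the sketch below condenses the buffer-management
pattern that PATCH 2/2 adds to transfer_next_chunk() and event_handler(): prepare DMA-capable
buffers before starting the transfer, release them when the transfer completes or fails to start.
It is a minimal illustration under assumptions, not part of the patches: the function name
spim_dmm_xfer_sketch(), its parameters, and the simplified error handling are invented for
readability, while the dmm_buffer_*_prepare()/..._release() calls and the DMM_DEV_TO_REG()
region handle mirror the call shapes visible in the diff above.

/*
 * Editor's sketch (not driver code): dmm usage pattern from PATCH 2/2,
 * condensed from transfer_next_chunk() and event_handler().
 * Assumes the dmm and nrfx_spim headers are included as in the driver
 * (their include lines are elided in the patch text above).
 */
static int spim_dmm_xfer_sketch(void *mem_reg, /* e.g. DMM_DEV_TO_REG(<spim node>) */
				const uint8_t *tx_buf, uint8_t *rx_buf, size_t len)
{
	nrfx_spim_xfer_desc_t xfer = {
		.tx_length = len,
		.rx_length = len,
	};
	int error;

	/* Obtain buffers the SPIM EasyDMA can reach; dmm may hand back the
	 * original pointers when the caller's memory is already suitable.
	 */
	error = dmm_buffer_out_prepare(mem_reg, tx_buf, xfer.tx_length,
				       (void **)&xfer.p_tx_buffer);
	if (error != 0) {
		return error;
	}

	error = dmm_buffer_in_prepare(mem_reg, rx_buf, xfer.rx_length,
				      (void **)&xfer.p_rx_buffer);
	if (error != 0) {
		dmm_buffer_out_release(mem_reg, (void **)&xfer.p_tx_buffer);
		return error;
	}

	/* ... start the SPIM transfer with &xfer and wait for completion ... */

	/* Release as in the done-event path of the patch: the RX release hands
	 * received data back to rx_buf, the TX release only drops the copy.
	 */
	dmm_buffer_in_release(mem_reg, rx_buf, xfer.rx_length, xfer.p_rx_buffer);
	dmm_buffer_out_release(mem_reg, (void **)&xfer.p_tx_buffer);

	return 0;
}

In the driver itself the prepare calls happen in transfer_next_chunk() and the release calls in
the NRFX_SPIM_EVENT_DONE branch of event_handler(), so the dmm buffers stay allocated for the
duration of the asynchronous transfer.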