 #include <zephyr/pm/policy.h>
 #include <zephyr/logging/log.h>
 #include <zephyr/irq.h>
+#include <zephyr/cache.h>
 #include <soc.h>
 #include <stm32_ll_rcc.h>
 
@@ -518,6 +519,12 @@ static int stm32_sdmmc_read_blocks(HandleTypeDef *hsd, uint8_t *data_buf,
 
 #if STM32_SDMMC_USE_DMA || IS_ENABLED(DT_PROP(DT_DRV_INST(0), idma))
 
+	/* A flush is performed before the DMA operation to prevent accidental data
+	 * loss when the buffer is not properly aligned to the cache line size
+	 * (e.g. 32 bytes for STM32H7).
+	 */
+	sys_cache_data_flush_and_invd_range(data_buf, BLOCKSIZE * num_sector);
+
 #ifdef CONFIG_SDMMC_STM32_EMMC
 	hal_ret = HAL_MMC_ReadBlocks_DMA(hsd, data_buf, start_sector, num_sector);
 #else
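The flush-and-invalidate added above guards against a buffer that shares a cache line with unrelated data: dirty neighbouring bytes are written back before the lines are dropped, so nothing is lost when the DMA'd data is later read through the cache. A caller can sidestep the partial-line hazard entirely by keeping disk I/O buffers on their own cache lines; a minimal sketch (not part of this patch, names and sizes illustrative, assuming the 32-byte STM32H7 line size mentioned in the comment above):

#include <stdint.h>
#include <zephyr/kernel.h>

#define SECTOR_SIZE  512U /* SD/MMC block size, matches the driver's BLOCKSIZE */
#define DCACHE_LINE  32U  /* STM32H7 data cache line size */

/* Aligned to a cache-line boundary and sized to a whole number of lines
 * (512 is a multiple of 32), so flush/invalidate on this range can never
 * touch unrelated data.
 */
static uint8_t sector_buf[SECTOR_SIZE] __aligned(DCACHE_LINE);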
@@ -568,6 +575,13 @@ static int stm32_sdmmc_access_read(struct disk_info *disk, uint8_t *data_buf,
 
 	k_sem_take(&priv->sync, K_FOREVER);
 
+#if STM32_SDMMC_USE_DMA || IS_ENABLED(DT_PROP(DT_DRV_INST(0), idma))
+	/* Invalidate after the operation is complete, to protect against
+	 * speculative / spurious reads.
+	 */
+	sys_cache_data_invd_range(data_buf, BLOCKSIZE * num_sector);
+#endif
+
 #if STM32_SDMMC_USE_DMA_SHARED
 	if (HAL_DMA_DeInit(&priv->dma_txrx_handle) != HAL_OK) {
 		err = -EIO;
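Together with the pre-transfer flush-and-invalidate in stm32_sdmmc_read_blocks(), the read path now follows the standard sequence for DMA into a cacheable buffer. A condensed, hedged sketch of that sequence, with a hypothetical start_read_dma() helper and completion semaphore standing in for the HAL call and the driver's priv->sync:

#include <zephyr/cache.h>
#include <zephyr/kernel.h>

extern struct k_sem dma_done;                        /* signalled from the DMA ISR */
extern int start_read_dma(uint8_t *buf, size_t len); /* hypothetical helper */

int read_into_cached_buffer(uint8_t *buf, size_t len)
{
	/* Write back and drop any cache lines overlapping the buffer before
	 * the transfer starts, so a later eviction cannot clobber DMA'd data.
	 */
	sys_cache_data_flush_and_invd_range(buf, len);

	int err = start_read_dma(buf, len);

	if (err != 0) {
		return err;
	}

	k_sem_take(&dma_done, K_FOREVER);

	/* Drop anything the CPU speculatively fetched while the DMA was in
	 * flight, so subsequent reads see the data the controller wrote.
	 */
	sys_cache_data_invd_range(buf, len);

	return 0;
}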
@@ -598,6 +612,8 @@ static int stm32_sdmmc_write_blocks(HandleTypeDef *hsd,
 
 #if STM32_SDMMC_USE_DMA || IS_ENABLED(DT_PROP(DT_DRV_INST(0), idma))
 
+	sys_cache_data_flush_range(data_buf, BLOCKSIZE * num_sector);
+
 #ifdef CONFIG_SDMMC_STM32_EMMC
 	hal_ret = HAL_MMC_WriteBlocks_DMA(hsd, data_buf, start_sector, num_sector);
 #else
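On the write side only a flush is needed: the CPU's dirty lines must reach memory before the controller reads them, and the buffer contents are not modified behind the cache afterwards, so no post-transfer invalidate is required. A minimal sketch of that half of the pattern, again with a hypothetical start_write_dma() helper:

#include <zephyr/cache.h>
#include <zephyr/kernel.h>

extern struct k_sem dma_done;                               /* signalled from the DMA ISR */
extern int start_write_dma(const uint8_t *buf, size_t len); /* hypothetical helper */

int write_from_cached_buffer(const uint8_t *buf, size_t len)
{
	/* Push pending CPU writes out to memory so the DMA engine reads the
	 * up-to-date data rather than stale RAM contents.
	 */
	sys_cache_data_flush_range((void *)buf, len);

	int err = start_write_dma(buf, len);

	if (err != 0) {
		return err;
	}

	k_sem_take(&dma_done, K_FOREVER);

	return 0;
}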