|
33 | 33 | #include "systick.h" |
34 | 34 | #include "dma.h" |
35 | 35 | #include "irq.h" |
| 36 | +#include "mpu.h" |
36 | 37 |
|
37 | 38 | // When this option is enabled, the DMA will turn off automatically after |
38 | 39 | // a period of inactivity. |
@@ -1852,3 +1853,55 @@ void dma_external_acquire(uint32_t controller, uint32_t stream) { |
// Release a DMA stream previously taken with dma_external_acquire(),
// dropping the clock reference for that controller/stream so the DMA
// peripheral can be powered down when idle.
void dma_external_release(uint32_t controller, uint32_t stream) {
    dma_disable_clock(DMA_ID_FROM_CONTROLLER_STREAM(controller, stream));
}
| 1856 | + |
| 1857 | +#if __DCACHE_PRESENT |
| 1858 | + |
| 1859 | +void dma_protect_rx_region(void *dest, size_t len) { |
| 1860 | + #if __DCACHE_PRESENT |
| 1861 | + uint32_t start_addr = (uint32_t)dest; |
| 1862 | + uint32_t start_aligned = start_addr & ~(__SCB_DCACHE_LINE_SIZE - 1U); |
| 1863 | + uint32_t end_addr = start_addr + len - 1; // Address of last byte in the buffer |
| 1864 | + uint32_t end_aligned = end_addr & ~(__SCB_DCACHE_LINE_SIZE - 1U); |
| 1865 | + |
| 1866 | + uint32_t irq_state = mpu_config_start(); |
| 1867 | + |
| 1868 | + // Clean (write back) any cached memory in this region, so there's no dirty |
| 1869 | + // cache entries that might be written back later after DMA RX is done. |
| 1870 | + MP_HAL_CLEAN_DCACHE(dest, len); |
| 1871 | + |
| 1872 | + // The way we protect the whole region is to mark the first and last cache |
| 1873 | + // line as UNCACHED using the MPU. This means any unrelated reads/writes in |
| 1874 | + // these cache lines will bypass the cache, and can coexist with DMA also |
| 1875 | + // writing to parts of these cache lines. |
| 1876 | + // |
| 1877 | + // This is redundant sometimes (because the DMA region fills the entire cache line, or because |
| 1878 | + // the region fits in a single cache line.) However, the implementation is only 3 register writes so |
| 1879 | + // it's more efficient to call it every time. |
| 1880 | + mpu_config_region(MPU_REGION_DMA_UNCACHED_1, start_aligned, MPU_CONFIG_UNCACHED(MPU_REGION_SIZE_32B)); |
| 1881 | + mpu_config_region(MPU_REGION_DMA_UNCACHED_2, end_aligned, MPU_CONFIG_UNCACHED(MPU_REGION_SIZE_32B)); |
| 1882 | + |
| 1883 | + mpu_config_end(irq_state); |
| 1884 | + #endif |
| 1885 | +} |
| 1886 | + |
| 1887 | +void dma_unprotect_rx_region(void *dest, size_t len) { |
| 1888 | + #if __DCACHE_PRESENT |
| 1889 | + uint32_t irq_state = mpu_config_start(); |
| 1890 | + |
| 1891 | + // Disabling these regions removes them from the MPU |
| 1892 | + mpu_config_region(MPU_REGION_DMA_UNCACHED_1, 0, MPU_CONFIG_DISABLE); |
| 1893 | + mpu_config_region(MPU_REGION_DMA_UNCACHED_2, 0, MPU_CONFIG_DISABLE); |
| 1894 | + |
| 1895 | + // Invalidate the whole region in the cache. This may seem redundant, but it |
| 1896 | + // is possible that during the DMA operation the CPU read inside this region |
| 1897 | + // (excluding the first & last cache lines), and cache lines were filled. |
| 1898 | + // |
| 1899 | + // (This can happen in SPI if src==dest, for example, possibly due to speculative |
| 1900 | + // cache line fills.) |
| 1901 | + MP_HAL_CLEANINVALIDATE_DCACHE(dest, len); |
| 1902 | + |
| 1903 | + mpu_config_end(irq_state); |
| 1904 | + #endif |
| 1905 | +} |
| 1906 | + |
| 1907 | +#endif |
0 commit comments