From 06db791093f0d1ed146772ee9df067b28e1a0945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20Buga?= Date: Fri, 15 Nov 2024 10:39:25 +0100 Subject: [PATCH] Implement burst configuration --- esp-hal/src/dma/buffers.rs | 558 ++++++++++++------ esp-hal/src/dma/gdma.rs | 10 + esp-hal/src/dma/mod.rs | 158 +++-- esp-hal/src/dma/pdma.rs | 48 ++ esp-hal/src/soc/esp32s2/mod.rs | 30 + esp-metadata/devices/esp32s2.toml | 3 + esp-metadata/devices/esp32s3.toml | 4 + examples/src/bin/spi_loopback_dma_psram.rs | 4 +- hil-test/tests/spi_half_duplex_write_psram.rs | 28 +- 9 files changed, 580 insertions(+), 263 deletions(-) diff --git a/esp-hal/src/dma/buffers.rs b/esp-hal/src/dma/buffers.rs index 3da7c1e2451..3ba7233f2a6 100644 --- a/esp-hal/src/dma/buffers.rs +++ b/esp-hal/src/dma/buffers.rs @@ -1,25 +1,252 @@ use core::ptr::null_mut; use super::*; -use crate::soc::is_slice_in_dram; -#[cfg(esp32s3)] -use crate::soc::is_slice_in_psram; +#[cfg(psram_dma)] +use crate::soc::is_valid_psram_address; +use crate::soc::{is_slice_in_dram, is_slice_in_psram}; + +cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + /// PSRAM access burst size. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub enum ExternalBurstSize { + /// 16 bytes + Size16 = 16, + + /// 32 bytes + Size32 = 32, + + /// 64 bytes + Size64 = 64, + } -/// Burst transfer configuration. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub enum BurstTransfer { - /// Burst mode is disabled. - Disabled, + impl ExternalBurstSize { + /// The default external memory burst length. + pub const DEFAULT: Self = Self::Size16; + } + + impl Default for ExternalBurstSize { + fn default() -> Self { + Self::DEFAULT + } + } + + /// Internal memory access burst mode. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub enum InternalBurstTransfer { + /// Burst mode is disabled. 
+ Disabled, + + /// Burst mode is enabled. + Enabled, + } + + impl InternalBurstTransfer { + /// The default internal burst mode configuration. + pub const DEFAULT: Self = Self::Disabled; + } + + impl Default for InternalBurstTransfer { + fn default() -> Self { + Self::DEFAULT + } + } + + /// Burst transfer configuration. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub struct BurstTransfer { + /// Configures the burst size for PSRAM transfers. + /// + /// Burst mode is always enabled for PSRAM transfers. + pub external: ExternalBurstSize, + + /// Enables or disables the burst mode for internal memory transfers. + /// + /// The burst size is not configurable. + pub internal: InternalBurstTransfer, + } + + impl BurstTransfer { + /// The default burst mode configuration. + pub const DEFAULT: Self = Self { + external: ExternalBurstSize::DEFAULT, + internal: InternalBurstTransfer::DEFAULT, + }; + } + + impl Default for BurstTransfer { + fn default() -> Self { + Self::DEFAULT + } + } + } else { + /// Burst transfer configuration. + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + #[cfg_attr(feature = "defmt", derive(defmt::Format))] + pub enum BurstTransfer { + /// Burst mode is disabled. + Disabled, + + /// Burst mode is enabled. + Enabled, + } - /// Burst mode is enabled. - Enabled, + impl BurstTransfer { + /// The default burst mode configuration. + pub const DEFAULT: Self = Self::Disabled; + } + + impl Default for BurstTransfer { + fn default() -> Self { + Self::DEFAULT + } + } + + type InternalBurstTransfer = BurstTransfer; + } } -impl BurstTransfer { +#[cfg(psram_dma)] +impl ExternalBurstSize { + const fn min_psram_alignment(self, direction: TransferDirection) -> usize { + // S2: Specifically, size and buffer address pointer in receive descriptors + // should be 16-byte, 32-byte or 64-byte aligned. 
For data frame whose + // length is not a multiple of 16 bytes, 32 bytes, or 64 bytes, EDMA adds + // padding bytes to the end. + + // S3: Size and Address for IN transfers must be block aligned. For receive + // descriptors, if the data length received are not aligned with block size, + // GDMA will pad the data received with 0 until they are aligned to + // initiate burst transfer. You can read the length field in receive descriptors + // to obtain the length of valid data received + if matches!(direction, TransferDirection::In) { + self as usize + } else { + // S2: Size, length and buffer address pointer in transmit descriptors are not + // necessarily aligned with block size. + + // S3: Size, length, and buffer address pointer in transmit descriptors do not + // need to be aligned. + 1 + } + } +} + +impl InternalBurstTransfer { pub(super) fn is_burst_enabled(self) -> bool { !matches!(self, Self::Disabled) } + + const fn min_dram_alignment(self, direction: TransferDirection) -> usize { + // IN transfers must be word aligned + if matches!(direction, TransferDirection::In) { + 4 + } else { + // OUT transfers have no alignment requirements, except for ESP32, which is + // described below. + if cfg!(esp32) { + // SPI DMA: Burst transmission is supported. The data size for + // a single transfer must be four bytes aligned. + // I2S DMA: Burst transfer is supported. However, unlike the + // SPI DMA channels, the data size for a single transfer is + // one word, or four bytes. + 4 + } else { + 1 + } + } + } +} + +const fn max(a: usize, b: usize) -> usize { + if a > b { + a + } else { + b + } +} + +impl BurstTransfer { + delegate::delegate! { + #[cfg(psram_dma)] + to self.internal { + pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize; + pub(super) fn is_burst_enabled(self) -> bool; + } + } + + /// Calculates an alignment that is compatible with the current burst + /// configuration. 
+ /// + /// This is an over-estimation so that Descriptors can be safely used with + /// any DMA channel in any direction. + pub const fn min_compatible_alignment(self) -> usize { + let in_alignment = self.min_dram_alignment(TransferDirection::In); + let out_alignment = self.min_dram_alignment(TransferDirection::Out); + let alignment = max(in_alignment, out_alignment); + + #[cfg(psram_dma)] + let alignment = max(alignment, self.external as usize); + + alignment + } + + /// Calculates a chunk size that is compatible with the current burst + /// configuration's alignment requirements. + /// + /// This is an over-estimation so that Descriptors can be safely used with + /// any DMA channel in any direction. + pub const fn max_compatible_chunk_size(self) -> usize { + 4096 - self.min_compatible_alignment() + } + + fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize { + let alignment = self.min_dram_alignment(direction); + + cfg_if::cfg_if! { + if #[cfg(psram_dma)] { + let mut alignment = alignment; + if is_slice_in_psram(_buffer) { + alignment = max(alignment, self.external.min_psram_alignment(direction)); + } + } + } + + alignment + } + + // Note: this function ignores address alignment as we assume the buffers are + // aligned. 
+ fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize { + 4096 - self.min_alignment(buffer, direction) + } + + fn is_buffer_aligned(self, buffer: &[u8], direction: TransferDirection) -> bool { + let alignment = self.min_alignment(buffer, direction); + buffer.as_ptr() as usize % alignment == 0 + } + + fn ensure_buffer_compatible( + self, + buffer: &[u8], + direction: TransferDirection, + ) -> Result<(), DmaBufError> { + // buffer can be either DRAM or PSRAM (if supported) + let is_in_dram = is_slice_in_dram(buffer); + let is_in_psram = cfg!(psram_dma) && is_slice_in_psram(buffer); + if !(is_in_dram || is_in_psram) { + return Err(DmaBufError::UnsupportedMemoryRegion); + } + + if !self.is_buffer_aligned(buffer, direction) { + return Err(DmaBufError::InvalidAlignment); + } + + Ok(()) + } } /// The direction of the DMA transfer. @@ -40,29 +267,14 @@ pub struct Preparation { /// The direction of the DMA transfer. pub direction: TransferDirection, - /// Block size for PSRAM transfers. - /// - /// If the buffer is in PSRAM, the implementation must ensure the following: - /// - /// - The implementation of the buffer must provide a non-`None` block size. - /// - For [`TransferDirection::In`] transfers, the implementation of the - /// buffer must invalidate the cache that contains the buffer before the - /// DMA starts. - /// - For [`TransferDirection::Out`] transfers, the implementation of the - /// buffer must write back the cache that contains the buffer before the - /// DMA starts. - #[cfg(esp32s3)] - pub external_memory_block_size: Option, - /// Configures the DMA to transfer data in bursts. /// - /// The implementation of the buffer must ensure that burst mode is only - /// enabled when alignment requirements are met. + /// The implementation of the buffer must ensure that buffer size + /// and alignment in each descriptor is compatible with the burst + /// transfer configuration. 
/// - /// There are no additional alignment requirements for - /// [`TransferDirection::Out`] burst transfers, but - /// [`TransferDirection::In`] transfers require all descriptors to have - /// buffer pointers and sizes that are a multiple of 4 (word aligned). + /// For details on alignment requirements, refer to your chip's + #[doc = crate::trm_markdown_link!()] pub burst_transfer: BurstTransfer, /// Configures the "check owner" feature of the DMA channel. @@ -181,18 +393,6 @@ pub enum DmaBufError { InvalidChunkSize, } -/// DMA buffer alignments -#[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "defmt", derive(defmt::Format))] -pub enum DmaBufBlkSize { - /// 16 bytes - Size16 = 16, - /// 32 bytes - Size32 = 32, - /// 64 bytes - Size64 = 64, -} - /// DMA transmit buffer /// /// This is a contiguous buffer linked together by DMA descriptors of length @@ -203,14 +403,15 @@ pub enum DmaBufBlkSize { pub struct DmaTxBuf { descriptors: DescriptorSet<'static>, buffer: &'static mut [u8], - block_size: Option, + burst: BurstTransfer, } impl DmaTxBuf { /// Creates a new [DmaTxBuf] from some descriptors and a buffer. /// /// There must be enough descriptors for the provided buffer. - /// Each descriptor can handle 4092 bytes worth of buffer. + /// Depending on alignment requirements, each descriptor can handle at most + /// 4095 bytes worth of buffer. /// /// Both the descriptors and buffer must be in DMA-capable memory. /// Only DRAM is supported for descriptors. 
@@ -218,81 +419,57 @@ impl DmaTxBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result { - Self::new_with_block_size(descriptors, buffer, None) - } - - /// Compute max chunk size based on block size - pub const fn compute_chunk_size(block_size: Option) -> usize { - max_chunk_size(block_size) - } - - /// Compute the number of descriptors required for a given block size and - /// buffer size - pub const fn compute_descriptor_count( - buffer_size: usize, - block_size: Option, - ) -> usize { - descriptor_count(buffer_size, Self::compute_chunk_size(block_size), false) + Self::new_with_config(descriptors, buffer, BurstTransfer::default()) } /// Creates a new [DmaTxBuf] from some descriptors and a buffer. /// /// There must be enough descriptors for the provided buffer. - /// Each descriptor can handle at most 4095 bytes worth of buffer. - /// Optionally, a block size can be provided for PSRAM & Burst transfers. + /// Depending on alignment requirements, each descriptor can handle at most + /// 4095 bytes worth of buffer. /// /// Both the descriptors and buffer must be in DMA-capable memory. /// Only DRAM is supported for descriptors. - pub fn new_with_block_size( + pub fn new_with_config( descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], - block_size: Option, + config: BurstTransfer, ) -> Result { - cfg_if::cfg_if! { - if #[cfg(esp32s3)] { - // buffer can be either DRAM or PSRAM (if supported) - if !is_slice_in_dram(buffer) && !is_slice_in_psram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - // if its PSRAM, the block_size/alignment must be specified - if is_slice_in_psram(buffer) && block_size.is_none() { - return Err(DmaBufError::InvalidAlignment); - } - } else { - #[cfg(any(esp32,esp32s2))] - if buffer.len() % 4 != 0 && buffer.as_ptr() as usize % 4 != 0 { - // ESP32 requires word alignment for DMA buffers. 
- // ESP32-S2 technically supports byte-aligned DMA buffers, but the - // transfer ends up writing out of bounds if the buffer's length - // is 2 or 3 (mod 4). - return Err(DmaBufError::InvalidAlignment); - } - // buffer can only be DRAM - if !is_slice_in_dram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - } - } - - let block_size = if is_slice_in_dram(buffer) { - // no need for block size if the buffer is in DRAM - None - } else { - block_size - }; let mut buf = Self { descriptors: DescriptorSet::new(descriptors)?, buffer, - block_size, + burst: config, }; - buf.descriptors - .link_with_buffer(buf.buffer, max_chunk_size(block_size))?; - buf.set_length(buf.capacity()); + let capacity = buf.capacity(); + buf.configure(config, capacity)?; Ok(buf) } + fn configure(&mut self, burst: BurstTransfer, length: usize) -> Result<(), DmaBufError> { + burst.ensure_buffer_compatible(self.buffer, TransferDirection::Out)?; + + self.descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + )?; + self.set_length_fallible(length, burst)?; + + self.burst = burst; + Ok(()) + } + + /// Configures the DMA to use burst transfers to access this buffer. + /// + /// Note that the hardware is allowed to ignore this setting. If you attempt + /// to use burst transfers with improperly aligned buffers, starting the + /// transfer will result in [`DmaError::InvalidAlignment`]. + pub fn set_burst_transfer(&mut self, burst: BurstTransfer) -> Result<(), DmaBufError> { + let len = self.len(); + self.configure(burst, len) + } + /// Consume the buf, returning the descriptors and buffer. 
pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { (self.descriptors.into_inner(), self.buffer) @@ -312,17 +489,22 @@ impl DmaTxBuf { .sum::() } + fn set_length_fallible(&mut self, len: usize, burst: BurstTransfer) -> Result<(), DmaBufError> { + assert!(len <= self.buffer.len()); + + self.descriptors.set_tx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + ) + } + /// Reset the descriptors to only transmit `len` amount of bytes from this /// buf. /// /// The number of bytes in data must be less than or equal to the buffer /// size. pub fn set_length(&mut self, len: usize) { - assert!(len <= self.buffer.len()); - - unwrap!(self - .descriptors - .set_tx_length(len, max_chunk_size(self.block_size))); + unwrap!(self.set_length_fallible(len, self.burst)) } /// Fills the TX buffer with the bytes provided in `data` and reset the @@ -356,8 +538,8 @@ unsafe impl DmaTxBuffer for DmaTxBuf { desc.reset_for_tx(desc.next.is_null()); } - #[cfg(esp32s3)] - if crate::soc::is_valid_psram_address(self.buffer.as_ptr() as usize) { + #[cfg(psram_dma)] + if is_valid_psram_address(self.buffer.as_ptr() as usize) { unsafe { crate::soc::cache_writeback_addr( self.buffer.as_ptr() as u32, @@ -369,10 +551,7 @@ unsafe impl DmaTxBuffer for DmaTxBuf { Preparation { start: self.descriptors.head(), direction: TransferDirection::Out, - #[cfg(esp32s3)] - external_memory_block_size: self.block_size, - // TODO: support burst transfers. 
- burst_transfer: BurstTransfer::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -398,6 +577,7 @@ unsafe impl DmaTxBuffer for DmaTxBuf { pub struct DmaRxBuf { descriptors: DescriptorSet<'static>, buffer: &'static mut [u8], + burst: BurstTransfer, } impl DmaRxBuf { @@ -412,22 +592,40 @@ impl DmaRxBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result { - if !is_slice_in_dram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - let mut buf = Self { descriptors: DescriptorSet::new(descriptors)?, buffer, + burst: BurstTransfer::default(), }; - buf.descriptors - .link_with_buffer(buf.buffer, max_chunk_size(None))?; - buf.set_length(buf.capacity()); + buf.configure(buf.burst, buf.capacity())?; Ok(buf) } + fn configure(&mut self, burst: BurstTransfer, length: usize) -> Result<(), DmaBufError> { + burst.ensure_buffer_compatible(self.buffer, TransferDirection::In)?; + + self.descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + )?; + self.set_length_fallible(length, burst)?; + + self.burst = burst; + Ok(()) + } + + /// Configures the DMA to use burst transfers to access this buffer. + /// + /// Note that the hardware is allowed to ignore this setting. If you attempt + /// to use burst transfers with improperly aligned buffers, starting the + /// transfer will result in [`DmaError::InvalidAlignment`]. + pub fn set_burst_transfer(&mut self, burst: BurstTransfer) -> Result<(), DmaBufError> { + let len = self.len(); + self.configure(burst, len) + } + /// Consume the buf, returning the descriptors and buffer. 
pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) { (self.descriptors.into_inner(), self.buffer) @@ -448,15 +646,22 @@ impl DmaRxBuf { .sum::() } + fn set_length_fallible(&mut self, len: usize, burst: BurstTransfer) -> Result<(), DmaBufError> { + assert!(len <= self.buffer.len()); + + self.descriptors.set_rx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + ) + } + /// Reset the descriptors to only receive `len` amount of bytes into this /// buf. /// /// The number of bytes in data must be less than or equal to the buffer /// size. pub fn set_length(&mut self, len: usize) { - assert!(len <= self.buffer.len()); - - unwrap!(self.descriptors.set_rx_length(len, max_chunk_size(None))); + unwrap!(self.set_length_fallible(len, self.burst)); } /// Returns the entire underlying buffer as a slice than can be read. @@ -520,15 +725,7 @@ unsafe impl DmaRxBuffer for DmaRxBuf { Preparation { start: self.descriptors.head(), direction: TransferDirection::In, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: DmaRxBuf doesn't currently enforce the alignment requirements required for - // bursting. In the future, it could either enforce the alignment or - // calculate if the alignment requirements happen to be met. 
- burst_transfer: BurstTransfer::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -556,6 +753,7 @@ pub struct DmaRxTxBuf { rx_descriptors: DescriptorSet<'static>, tx_descriptors: DescriptorSet<'static>, buffer: &'static mut [u8], + burst: BurstTransfer, } impl DmaRxTxBuf { @@ -571,24 +769,48 @@ impl DmaRxTxBuf { tx_descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], ) -> Result { - if !is_slice_in_dram(buffer) { - return Err(DmaBufError::UnsupportedMemoryRegion); - } - let mut buf = Self { rx_descriptors: DescriptorSet::new(rx_descriptors)?, tx_descriptors: DescriptorSet::new(tx_descriptors)?, buffer, + burst: BurstTransfer::default(), }; - buf.rx_descriptors - .link_with_buffer(buf.buffer, max_chunk_size(None))?; - buf.tx_descriptors - .link_with_buffer(buf.buffer, max_chunk_size(None))?; - buf.set_length(buf.capacity()); + + let capacity = buf.capacity(); + buf.configure(buf.burst, capacity)?; Ok(buf) } + fn configure(&mut self, burst: BurstTransfer, length: usize) -> Result<(), DmaBufError> { + burst.ensure_buffer_compatible(self.buffer, TransferDirection::In)?; + burst.ensure_buffer_compatible(self.buffer, TransferDirection::Out)?; + + self.rx_descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + )?; + self.tx_descriptors.link_with_buffer( + self.buffer, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + )?; + + self.set_length_fallible(length, burst)?; + self.burst = burst; + + Ok(()) + } + + /// Configures the DMA to use burst transfers to access this buffer. + /// + /// Note that the hardware is allowed to ignore this setting. If you attempt + /// to use burst transfers with improperly aligned buffers, starting the + /// transfer will result in [`DmaError::InvalidAlignment`]. 
+ pub fn set_burst_transfer(&mut self, burst: BurstTransfer) -> Result<(), DmaBufError> { + let len = self.len(); + self.configure(burst, len) + } + /// Consume the buf, returning the rx descriptors, tx descriptors and /// buffer. pub fn split( @@ -629,15 +851,27 @@ impl DmaRxTxBuf { self.buffer } + fn set_length_fallible(&mut self, len: usize, burst: BurstTransfer) -> Result<(), DmaBufError> { + assert!(len <= self.buffer.len()); + + self.rx_descriptors.set_rx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::In), + )?; + self.tx_descriptors.set_tx_length( + len, + burst.max_chunk_size_for(self.buffer, TransferDirection::Out), + )?; + + Ok(()) + } + /// Reset the descriptors to only transmit/receive `len` amount of bytes /// with this buf. /// /// `len` must be less than or equal to the buffer size. pub fn set_length(&mut self, len: usize) { - assert!(len <= self.buffer.len()); - - unwrap!(self.rx_descriptors.set_rx_length(len, max_chunk_size(None))); - unwrap!(self.tx_descriptors.set_tx_length(len, max_chunk_size(None))); + unwrap!(self.set_length_fallible(len, self.burst)); } } @@ -654,13 +888,7 @@ unsafe impl DmaTxBuffer for DmaRxTxBuf { Preparation { start: self.tx_descriptors.head(), direction: TransferDirection::Out, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: This is TX, the DMA channel is free to do a burst transfer. - burst_transfer: BurstTransfer::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -689,14 +917,7 @@ unsafe impl DmaRxBuffer for DmaRxTxBuf { Preparation { start: self.rx_descriptors.head(), direction: TransferDirection::In, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: DmaRxTxBuf doesn't currently enforce the alignment requirements required for - // bursting. 
- burst_transfer: BurstTransfer::Disabled, + burst_transfer: self.burst, check_owner: None, } } @@ -757,6 +978,7 @@ unsafe impl DmaRxBuffer for DmaRxTxBuf { pub struct DmaRxStreamBuf { descriptors: &'static mut [DmaDescriptor], buffer: &'static mut [u8], + burst: BurstTransfer, } impl DmaRxStreamBuf { @@ -815,6 +1037,7 @@ impl DmaRxStreamBuf { Ok(Self { descriptors, buffer, + burst: BurstTransfer::default(), }) } @@ -834,14 +1057,7 @@ unsafe impl DmaRxBuffer for DmaRxStreamBuf { Preparation { start: self.descriptors.as_mut_ptr(), direction: TransferDirection::In, - - // TODO: support external memory access. - #[cfg(esp32s3)] - external_memory_block_size: None, - - // TODO: DmaRxStreamBuf doesn't currently enforce the alignment requirements required - // for bursting. - burst_transfer: BurstTransfer::Disabled, + burst_transfer: self.burst, // Whilst we give ownership of the descriptors the DMA, the correctness of this buffer // implementation doesn't rely on the DMA checking for descriptor ownership. @@ -1052,9 +1268,7 @@ unsafe impl DmaTxBuffer for EmptyBuf { Preparation { start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() }, direction: TransferDirection::Out, - #[cfg(esp32s3)] - external_memory_block_size: None, - burst_transfer: BurstTransfer::Disabled, + burst_transfer: BurstTransfer::default(), // As we don't give ownership of the descriptor to the DMA, it's important that the DMA // channel does *NOT* check for ownership, otherwise the channel will return an error. @@ -1083,9 +1297,7 @@ unsafe impl DmaRxBuffer for EmptyBuf { Preparation { start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() }, direction: TransferDirection::In, - #[cfg(esp32s3)] - external_memory_block_size: None, - burst_transfer: BurstTransfer::Disabled, + burst_transfer: BurstTransfer::default(), // As we don't give ownership of the descriptor to the DMA, it's important that the DMA // channel does *NOT* check for ownership, otherwise the channel will return an error. 
diff --git a/esp-hal/src/dma/gdma.rs b/esp-hal/src/dma/gdma.rs index feec817c2e9..ed1c6951b1b 100644 --- a/esp-hal/src/dma/gdma.rs +++ b/esp-hal/src/dma/gdma.rs @@ -179,6 +179,11 @@ impl RegisterAccess for AnyGdmaTxChannel { .out_conf1() .modify(|_, w| unsafe { w.out_ext_mem_bk_size().bits(size as u8) }); } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + true + } } impl TxRegisterAccess for AnyGdmaTxChannel { @@ -416,6 +421,11 @@ impl RegisterAccess for AnyGdmaRxChannel { .in_conf1() .modify(|_, w| unsafe { w.in_ext_mem_bk_size().bits(size as u8) }); } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + true + } } impl RxRegisterAccess for AnyGdmaRxChannel { diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 3bdb3bdd9ed..2aa5b18fb7a 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -65,11 +65,13 @@ pub use self::gdma::*; pub use self::m2m::*; #[cfg(pdma)] pub use self::pdma::*; +#[cfg(psram_dma)] +use crate::soc::is_valid_psram_address; use crate::{ interrupt::InterruptHandler, peripheral::{Peripheral, PeripheralRef}, peripherals::Interrupt, - soc::is_slice_in_dram, + soc::{is_slice_in_dram, is_valid_memory_address, is_valid_ram_address}, Async, Blocking, Cpu, @@ -367,6 +369,26 @@ impl DmaDescriptor { true => Owner::Dma, } } + + fn iter(&self) -> impl Iterator { + core::iter::successors(Some(self), |d| { + if d.next.is_null() { + None + } else { + Some(unsafe { &*d.next }) + } + }) + } + + fn iter_mut(&mut self) -> impl Iterator { + core::iter::successors(Some(self), |d| { + if d.next.is_null() { + None + } else { + Some(unsafe { &mut *d.next }) + } + }) + } } // The pointers in the descriptor can be Sent. @@ -679,6 +701,14 @@ macro_rules! dma_buffers_impl { ) } }}; + + ($size:expr, is_circular = $circular:tt) => { + $crate::dma_buffers_impl!( + $size, + $crate::dma::BurstTransfer::DEFAULT.max_compatible_chunk_size(), + is_circular = $circular + ); + }; } #[doc(hidden)] @@ -726,7 +756,6 @@ macro_rules! 
dma_descriptor_count { /// ```rust,no_run #[doc = crate::before_snippet!()] /// use esp_hal::dma_tx_buffer; -/// use esp_hal::dma::DmaBufBlkSize; /// /// let tx_buf = dma_tx_buffer!(32000); /// # } @@ -734,11 +763,7 @@ macro_rules! dma_descriptor_count { #[macro_export] macro_rules! dma_tx_buffer { ($tx_size:expr) => {{ - let (tx_buffer, tx_descriptors) = $crate::dma_buffers_impl!( - $tx_size, - $crate::dma::DmaTxBuf::compute_chunk_size(None), - is_circular = false - ); + let (tx_buffer, tx_descriptors) = $crate::dma_buffers_impl!($tx_size, is_circular = false); $crate::dma::DmaTxBuf::new(tx_descriptors, tx_buffer) }}; @@ -1028,10 +1053,10 @@ impl DescriptorChain { len: usize, prepare_descriptor: impl Fn(&mut DmaDescriptor, usize), ) -> Result<(), DmaError> { - if !crate::soc::is_valid_ram_address(self.first() as usize) - || !crate::soc::is_valid_ram_address(self.last() as usize) - || !crate::soc::is_valid_memory_address(data as usize) - || !crate::soc::is_valid_memory_address(unsafe { data.add(len) } as usize) + if !is_valid_ram_address(self.first() as usize) + || !is_valid_ram_address(self.last() as usize) + || !is_valid_memory_address(data as usize) + || !is_valid_memory_address(unsafe { data.add(len) } as usize) { return Err(DmaError::UnsupportedMemoryRegion); } @@ -1078,17 +1103,6 @@ pub const fn descriptor_count(buffer_size: usize, chunk_size: usize, is_circular buffer_size.div_ceil(chunk_size) } -/// Compute max chunk size based on block size. -const fn max_chunk_size(block_size: Option) -> usize { - match block_size { - Some(size) => 4096 - size as usize, - #[cfg(esp32)] - None => 4092, // esp32 requires 4 byte alignment - #[cfg(not(esp32))] - None => 4095, - } -} - #[derive(Debug)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] struct DescriptorSet<'a> { @@ -1131,28 +1145,15 @@ impl<'a> DescriptorSet<'a> { /// Returns an iterator over the linked descriptors. 
fn linked_iter(&self) -> impl Iterator { - let mut was_last = false; - self.descriptors.iter().take_while(move |d| { - if was_last { - false - } else { - was_last = d.next.is_null(); - true - } - }) + self.descriptors.first().into_iter().flat_map(|d| d.iter()) } /// Returns an iterator over the linked descriptors. fn linked_iter_mut(&mut self) -> impl Iterator { - let mut was_last = false; - self.descriptors.iter_mut().take_while(move |d| { - if was_last { - false - } else { - was_last = d.next.is_null(); - true - } - }) + self.descriptors + .first_mut() + .into_iter() + .flat_map(|d| d.iter_mut()) } /// Associate each descriptor with a chunk of the buffer. @@ -1284,6 +1285,7 @@ impl<'a> DescriptorSet<'a> { } /// Block size for transfers to/from PSRAM +#[cfg(psram_dma)] #[derive(Copy, Clone, Debug, PartialEq)] pub enum DmaExtMemBKSize { /// External memory block size of 16 bytes. @@ -1294,12 +1296,13 @@ pub enum DmaExtMemBKSize { Size64 = 2, } -impl From for DmaExtMemBKSize { - fn from(size: DmaBufBlkSize) -> Self { +#[cfg(psram_dma)] +impl From for DmaExtMemBKSize { + fn from(size: ExternalBurstSize) -> Self { match size { - DmaBufBlkSize::Size16 => DmaExtMemBKSize::Size16, - DmaBufBlkSize::Size32 => DmaExtMemBKSize::Size32, - DmaBufBlkSize::Size64 => DmaExtMemBKSize::Size64, + ExternalBurstSize::Size16 => DmaExtMemBKSize::Size16, + ExternalBurstSize::Size32 => DmaExtMemBKSize::Size32, + ExternalBurstSize::Size64 => DmaExtMemBKSize::Size64, } } } @@ -1715,7 +1718,7 @@ pub trait Rx: crate::private::Sealed { fn stop_transfer(&mut self); - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); #[cfg(gdma)] @@ -1885,17 +1888,17 @@ where peri: DmaPeripheral, chain: &DescriptorChain, ) -> Result<(), DmaError> { - // For ESP32-S3 we check each descriptor buffer that points to PSRAM for + // We check each descriptor buffer that points to PSRAM for // alignment and invalidate the cache for that buffer. 
// NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does // not. TRM section 3.4.9 // Note that DmaBuffer implementations are required to do this for us. - #[cfg(esp32s3)] + #[cfg(psram_dma)] for des in chain.descriptors.iter() { // we are forcing the DMA alignment to the cache line size // required when we are using dcache let alignment = crate::soc::cache_get_dcache_line_size() as usize; - if crate::soc::is_valid_psram_address(des.buffer as usize) { + if is_valid_psram_address(des.buffer as usize) { // both the size and address of the buffer must be aligned if des.buffer as usize % alignment != 0 && des.size() % alignment != 0 { return Err(DmaError::InvalidAlignment); @@ -1904,17 +1907,13 @@ where } } - self.do_prepare( - Preparation { - start: chain.first().cast_mut(), - #[cfg(esp32s3)] - external_memory_block_size: None, - direction: TransferDirection::In, - burst_transfer: BurstTransfer::Disabled, - check_owner: Some(false), - }, - peri, - ) + let preparation = Preparation { + start: chain.first().cast_mut(), + direction: TransferDirection::In, + burst_transfer: BurstTransfer::default(), + check_owner: Some(false), + }; + self.do_prepare(preparation, peri) } unsafe fn prepare_transfer( @@ -1944,7 +1943,7 @@ where self.rx_impl.stop() } - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { self.rx_impl.set_ext_mem_block_size(size); } @@ -2017,7 +2016,7 @@ pub trait Tx: crate::private::Sealed { fn stop_transfer(&mut self); - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); fn is_done(&self) -> bool { @@ -2128,10 +2127,8 @@ where ) -> Result<(), DmaError> { debug_assert_eq!(preparation.direction, TransferDirection::Out); - #[cfg(esp32s3)] - if let Some(block_size) = preparation.external_memory_block_size { - self.set_ext_mem_block_size(block_size.into()); - } + #[cfg(psram_dma)] + self.set_ext_mem_block_size(preparation.burst_transfer.external.into()); 
self.tx_impl.set_burst_mode(preparation.burst_transfer); self.tx_impl.set_descr_burst_mode(true); @@ -2169,10 +2166,10 @@ where ) -> Result<(), DmaError> { // Based on the ESP32-S3 TRM the alignment check is not needed for TX - // For esp32s3 we check each descriptor buffer that points to PSRAM for + // We check each descriptor buffer that points to PSRAM for // alignment and writeback the cache for that buffer. // Note that DmaBuffer implementations are required to do this for us. - #[cfg(esp32s3)] + #[cfg(psram_dma)] for des in chain.descriptors.iter() { // we are forcing the DMA alignment to the cache line size // required when we are using dcache @@ -2186,17 +2183,13 @@ where } } - self.do_prepare( - Preparation { - start: chain.first().cast_mut(), - #[cfg(esp32s3)] - external_memory_block_size: None, - direction: TransferDirection::Out, - burst_transfer: BurstTransfer::Disabled, - check_owner: Some(false), - }, - peri, - )?; + let preparation = Preparation { + start: chain.first().cast_mut(), + direction: TransferDirection::Out, + burst_transfer: BurstTransfer::default(), + check_owner: Some(false), + }; + self.do_prepare(preparation, peri)?; // enable descriptor write back in circular mode self.tx_impl @@ -2232,7 +2225,7 @@ where self.tx_impl.stop() } - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { self.tx_impl.set_ext_mem_block_size(size); } @@ -2307,11 +2300,14 @@ pub trait RegisterAccess: crate::private::Sealed { /// descriptor. 
fn set_check_owner(&self, check_owner: Option); - #[cfg(esp32s3)] + #[cfg(psram_dma)] fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize); #[cfg(pdma)] fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool; + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool; } #[doc(hidden)] diff --git a/esp-hal/src/dma/pdma.rs b/esp-hal/src/dma/pdma.rs index e0baa8f78c9..bbf0bd5ea5b 100644 --- a/esp-hal/src/dma/pdma.rs +++ b/esp-hal/src/dma/pdma.rs @@ -120,6 +120,18 @@ impl RegisterAccess for AnySpiDmaTxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let spi = self.0.register_block(); + spi.dma_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnySpiDmaChannel(AnySpiDmaChannelInner::Spi2(_))) + } } impl TxRegisterAccess for AnySpiDmaTxChannel { @@ -278,6 +290,18 @@ impl RegisterAccess for AnySpiDmaRxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let spi = self.0.register_block(); + spi.dma_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnySpiDmaChannel(AnySpiDmaChannelInner::Spi2(_))) + } } impl RxRegisterAccess for AnySpiDmaRxChannel { @@ -473,6 +497,18 @@ impl RegisterAccess for AnyI2sDmaTxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let i2s = self.0.register_block(); + i2s.lc_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn
can_access_psram(&self) -> bool { + matches!(self.0, AnyI2sDmaChannel(AnyI2sDmaChannelInner::I2s0(_))) + } } impl TxRegisterAccess for AnyI2sDmaTxChannel { @@ -643,6 +679,18 @@ impl RegisterAccess for AnyI2sDmaRxChannel { fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool { self.0.is_compatible_with(peripheral) } + + #[cfg(psram_dma)] + fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize) { + let i2s = self.0.register_block(); + i2s.lc_conf() + .modify(|_, w| unsafe { w.ext_mem_bk_size().bits(size as u8) }); + } + + #[cfg(psram_dma)] + fn can_access_psram(&self) -> bool { + matches!(self.0, AnyI2sDmaChannel(AnyI2sDmaChannelInner::I2s0(_))) + } } impl RxRegisterAccess for AnyI2sDmaRxChannel { diff --git a/esp-hal/src/soc/esp32s2/mod.rs b/esp-hal/src/soc/esp32s2/mod.rs index c6fdd8db12e..b85203c4d59 100644 --- a/esp-hal/src/soc/esp32s2/mod.rs +++ b/esp-hal/src/soc/esp32s2/mod.rs @@ -133,3 +133,33 @@ pub unsafe extern "C" fn ESP32Reset() -> ! { pub extern "Rust" fn __init_data() -> bool { false } + +/// Write back a specific range of data in the cache. +#[doc(hidden)] +#[link_section = ".rwtext"] +pub unsafe fn cache_writeback_addr(addr: u32, size: u32) { + extern "C" { + fn Cache_WriteBack_Addr(addr: u32, size: u32); + } + Cache_WriteBack_Addr(addr, size); +} + +/// Invalidate a specific range of addresses in the cache. +#[doc(hidden)] +#[link_section = ".rwtext"] +pub unsafe fn cache_invalidate_addr(addr: u32, size: u32) { + extern "C" { + fn Cache_Invalidate_Addr(addr: u32, size: u32); + } + Cache_Invalidate_Addr(addr, size); +} + +/// Get the size of a cache line in the DCache.
+#[doc(hidden)] +#[link_section = ".rwtext"] +pub unsafe fn cache_get_dcache_line_size() -> u32 { + extern "C" { + fn Cache_Get_DCache_Line_Size() -> u32; + } + Cache_Get_DCache_Line_Size() +} diff --git a/esp-metadata/devices/esp32s2.toml b/esp-metadata/devices/esp32s2.toml index 2b556cd00cf..121b1e139f0 100644 --- a/esp-metadata/devices/esp32s2.toml +++ b/esp-metadata/devices/esp32s2.toml @@ -71,4 +71,7 @@ symbols = [ "uart_support_wakeup_int", "ulp_supported", "riscv_coproc_supported", + + # Other capabilities + "psram_dma", ] diff --git a/esp-metadata/devices/esp32s3.toml b/esp-metadata/devices/esp32s3.toml index 96fbe57278a..900c3602dfe 100644 --- a/esp-metadata/devices/esp32s3.toml +++ b/esp-metadata/devices/esp32s3.toml @@ -68,6 +68,7 @@ symbols = [ "bt", "wifi", "psram", + "psram_dma", "octal_psram", "ulp_riscv_core", "timg_timer1", diff --git a/examples/src/bin/spi_loopback_dma_psram.rs b/examples/src/bin/spi_loopback_dma_psram.rs index 5aeb74b8621..504a19763a4 100644 --- a/examples/src/bin/spi_loopback_dma_psram.rs +++ b/examples/src/bin/spi_loopback_dma_psram.rs @@ -25,7 +25,7 @@ use esp_backtrace as _; use esp_hal::{ delay::Delay, - dma::{Dma, DmaBufBlkSize, DmaRxBuf, DmaTxBuf}, + dma::{Dma, DmaRxBuf, DmaTxBuf, ExternalBurstSize}, peripheral::Peripheral, prelude::*, spi::{ @@ -51,7 +51,7 @@ macro_rules!
dma_alloc_buffer { } const DMA_BUFFER_SIZE: usize = 8192; -const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size64; +const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size64; const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; #[entry] diff --git a/hil-test/tests/spi_half_duplex_write_psram.rs b/hil-test/tests/spi_half_duplex_write_psram.rs index e0eead9bde9..d8f25f70d94 100644 --- a/hil-test/tests/spi_half_duplex_write_psram.rs +++ b/hil-test/tests/spi_half_duplex_write_psram.rs @@ -7,7 +7,7 @@ use defmt::error; use esp_alloc as _; use esp_hal::{ - dma::{Dma, DmaBufBlkSize, DmaRxBuf, DmaTxBuf}, + dma::{BurstTransfer, Dma, DmaRxBuf, DmaTxBuf, ExternalBurstSize, InternalBurstTransfer}, dma_buffers, dma_descriptors_chunk_size, gpio::interconnect::InputSignal, @@ -86,13 +86,20 @@ mod tests { #[timeout(3)] fn test_spi_writes_are_correctly_by_pcnt(ctx: Context) { const DMA_BUFFER_SIZE: usize = 4; - const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size32; + const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size32; const DMA_CHUNK_SIZE: usize = 4096 - DMA_ALIGNMENT as usize; let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); - let mut dma_tx_buf = - DmaTxBuf::new_with_block_size(descriptors, buffer, Some(DMA_ALIGNMENT)).unwrap(); + let mut dma_tx_buf = DmaTxBuf::new_with_config( + descriptors, + buffer, + BurstTransfer { + internal: InternalBurstTransfer::default(), + external: ExternalBurstSize::Size32, + }, + ) + .unwrap(); let unit = ctx.pcnt_unit; let mut spi = ctx.spi; @@ -136,13 +143,20 @@ mod tests { #[timeout(3)] fn test_spidmabus_writes_are_correctly_by_pcnt(ctx: Context) { const DMA_BUFFER_SIZE: usize = 4; - const DMA_ALIGNMENT: DmaBufBlkSize = DmaBufBlkSize::Size32; // matches dcache line size + const DMA_ALIGNMENT: ExternalBurstSize = ExternalBurstSize::Size32; // matches dcache line size const DMA_CHUNK_SIZE: usize = 
4096 - DMA_ALIGNMENT as usize; // 64 byte aligned let (_, descriptors) = dma_descriptors_chunk_size!(0, DMA_BUFFER_SIZE, DMA_CHUNK_SIZE); let buffer = dma_alloc_buffer!(DMA_BUFFER_SIZE, DMA_ALIGNMENT as usize); - let dma_tx_buf = - DmaTxBuf::new_with_block_size(descriptors, buffer, Some(DMA_ALIGNMENT)).unwrap(); + let dma_tx_buf = DmaTxBuf::new_with_config( + descriptors, + buffer, + BurstTransfer { + internal: InternalBurstTransfer::default(), + external: ExternalBurstSize::Size32, + }, + ) + .unwrap(); let (rx, rxd, _, _) = dma_buffers!(1, 0); let dma_rx_buf = DmaRxBuf::new(rxd, rx).unwrap();