diff --git a/Cargo.toml b/Cargo.toml index da813f8..5e8726e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,6 +66,7 @@ defmt = [ cortex-m = { version = "^0.7.7", features = ["critical-section-single-core"] } stm32h5 = { package = "stm32h5", version = "0.16.0" } fugit = "0.3.7" +embedded-dma = "0.2" embedded-hal = "1.0.0" defmt = { version = "1.0.0", optional = true } paste = "1.0.15" diff --git a/examples/dma.rs b/examples/dma.rs new file mode 100644 index 0000000..5562e52 --- /dev/null +++ b/examples/dma.rs @@ -0,0 +1,124 @@ +// #![deny(warnings)] +#![no_main] +#![no_std] + +mod utilities; + +use cortex_m::singleton; +use cortex_m_rt::entry; +use cortex_m_semihosting::debug; +use stm32h5xx_hal::{ + gpdma::{config::transform::*, DmaConfig, DmaTransfer}, + pac, + prelude::*, +}; + +#[entry] +fn main() -> ! { + utilities::logger::init(); + + let dp = pac::Peripherals::take().unwrap(); + + let pwr = dp.PWR.constrain(); + let pwrcfg = pwr.vos0().freeze(); + + // Constrain and Freeze clock + let rcc = dp.RCC.constrain(); + let ccdr = rcc.sys_ck(250.MHz()).freeze(pwrcfg, &dp.SBS); + + let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); + + log::info!("u8 to u8"); + let src = + singleton!(: [u8; 40] = core::array::from_fn(|i| i as u8)).unwrap(); + + let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + + let mut channel = channels.0; + let config = DmaConfig::new(); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (src, dest) = transfer.free(); + assert_eq!(src, dest); + + log::info!("u32 to u32 with data transform"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + + let config = DmaConfig::new().with_data_transform( + DataTransform::builder() + .swap_destination_half_words() + .swap_destination_half_word_byte_order(), + ); + + let mut transfer = + 
DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + + let expected = [0x78563412; 10]; + assert_eq!(expected, *dest); + + log::info!("u32 to u16 with truncate"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u16; 20] = [0u16; 20]).unwrap(); + + let config = DmaConfig::new().with_data_transform( + DataTransform::builder().left_align_right_truncate(), + ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + + let expected = [0x1234; 10]; + assert_eq!(expected, (*dest)[0..10]); + + log::info!("u32 to u8 with unpack"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + + let config = + DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + let expected = [0x78, 0x56, 0x34, 0x12]; + assert_eq!(expected, (*dest)[0..4]); + assert_eq!(expected, (*dest)[36..40]); + + log::info!("u8 to u32 with pack"); + let src = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + + for chunk in src.chunks_mut(4) { + chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); + } + + let config = + DmaConfig::new().with_data_transform(DataTransform::builder().pack()); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + + let expected = [0x12345678; 10]; + assert_eq!(expected, 
*dest); + assert_eq!(expected, *dest); + + log::info!("All tests passed!"); + loop { + debug::exit(debug::EXIT_SUCCESS) + } +} diff --git a/src/gpdma.rs b/src/gpdma.rs new file mode 100644 index 0000000..554b069 --- /dev/null +++ b/src/gpdma.rs @@ -0,0 +1,562 @@ +//! The GPDMA is the general purpose DMA engine in use on the STM32H5 family of processors. It is +//! used to perform programmable data transfers that are offloaded from the CPU to the DMA engine. +//! +//! The GPDMA can perform the following transfers from a *source* address to a *destination* +//! address: +//! - Memory to memory +//! - Memory to peripheral +//! - Peripheral to memory +//! - Peripheral to peripheral +//! +//! Each GPDMA has 8 channels. Each channel can service any hardware request (or memory to memory +//! transfer) that is supported by the processor (ie. they're not tied to specific channels). All +//! channels support direct and linked-buffer transfers. However, the channels do have different +//! capabilities (see RM0492 Rev 3 section 15.3.2 for full details), notably that channels 0-5 can +//! only service transfers in a linear address space, while channels 6 & 7 can also service transfers +//! using a 2D addressing scheme. Both GPDMA peripherals support the same requests/channel +//! capabilities. +//! +//! # Usage +//! At the most basic level transfers take a *source* address and a *destination* address and +//! transfers the data from the source to the destination. The [embedded-dma] traits `ReadBuffer` +//! and `WriteBuffer` represent a source and destination, respectively. +//! +//! ## Memory to memory transfers +//! As long as the buffers satisfy the constraints of embedded-dma's `ReadBuffer` and `WriteBuffer` +//! traits, they can be used directly with the Transfer API: +//! ``` +//! use stm32h5xx_hal::{pac, gpdma::{DmaConfig, DmaTransfer}; +//! +//! let source_buf = ... // source buffer +//! let dest_buf = ... // destination buffer +//! +//! 
let dp = pac::Peripherals::take().unwrap(); +//! let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); +//! let channel = channels.0 +//! let config = DmaConfig::default(); +//! let mut transfer = DmaTransfer::memory_to_memory(config, channel, source_buf, dest_buf); +//! transfer.start().unwrap(); +//! transfer.wait_for_transfer_complete().unwrap(); +//! ``` +//! +//! ## Memory to peripheral transfers +//! +//! The peripheral must provide a `WriteBuffer` implementation for its data register to which the +//! DMA will write. Then it can be used similarly to the memory to memory transfer. The `Transfer` +//! API does provide for performing an operation immediately after enabling the DMA channel, via the +//! Transfer::start_with method, which allows for a closure to be provided. Additionally, a +//! hardware request line must be specified to the Config in order to connect the peripheral to the +//! DMA channel. Another additional option for these transfers is to perform block requests or burst +//! requests. +//! +//! ## Peripheral to memory transfers +//! +//! The peripheral must provide a `ReadBuffer` implementation for its data register from which the +//! DMA will read. Otherwise it is used similarly to Peripheral to memory transfers, including the +//! additional configuration requirements/options specified above. In addition, peripheral flow +//! control mode can be used to enable the peripheral to early terminate a transaction. Per RM0492 +//! Rev 3 section 15.3.6, this is only used by the I3C peripheral, and only on channels 0 and 7. +//! +//! ## Peripheral to peripheral transfers +//! +//! These work similarly to the peripheral to memory transfers, but the peripheral driving the +//! request must be identified via the typing of the TransferType implementation. +//! +//! ## Data transforms +//! +//! The GPDMA provides a data transformation pipeline which facilitates transforms for transfers +//! 
between peripherals or memory that have different source and destination data widths or byte +//! representations (e.g. little endian vs big endian) with zero CPU overhead. See +//! `config::DataTransformBuilder` for more information on it. +//! +//! # Channel/transfer arbitration +//! +//! Every transfer is assigned a priority and a AHB port assignments for each of it its source and +//! destination. The transfer priority is used by the GPDMA controller to arbitrate between requests +//! that are both ready to transfer data via one of the AHB ports. + +use crate::{ + pac::{gpdma1, GPDMA1, GPDMA2}, + rcc::{rec, ResetEnable}, + Sealed, +}; +use core::{ + marker::PhantomData, + mem, + ops::Deref, + sync::atomic::{fence, Ordering}, +}; +use embedded_dma::{ReadBuffer, Word as DmaWord, WriteBuffer}; + +mod ch; +pub mod config; +pub mod periph; + +pub use ch::{ + DmaChannel, DmaChannel0, DmaChannel1, DmaChannel2, DmaChannel3, + DmaChannel4, DmaChannel5, DmaChannel6, DmaChannel7, +}; +pub use config::DmaConfig; +use config::{ + HardwareRequest, MemoryToMemory, MemoryToPeripheral, PeripheralRequest, + PeripheralSource, PeripheralToMemory, PeripheralToPeripheral, + PeripheralToPeripheralDirection, TransferDirection, TransferType, +}; + +/// Supported word types for the STM32H5 GPDMA implementation. +/// +/// Currently only u8, u16, and u32 word types are supported. Signed types are currently not +/// supported because they would add a fair bit of complexity/redundancy to the DataTransform +/// implementation. This is easy to work around by having buffers of signed types implement Deref +/// to an unsigned type of the same width. 
+pub trait Word: DmaWord + Default + Copy {} + +impl Word for u32 {} +impl Word for u16 {} +impl Word for u8 {} + +/// Errors that can occur during operation +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Error { + /// The DMA determined that a user setting was invalid while starting a transfer. + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + UserSettingError, + /// An error occurred transferring data during a transfer + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + DataTransferError, + /// An error occurred loading a linked transfer configuration + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + LinkTransferError, + + /// Resume was called on a channel that was not previously suspended + NotSuspendedError, +} + +pub trait GpdmaExt { + fn channels(self, rec: DMA::Rec) -> DmaChannels; +} + +impl GpdmaExt for DMA { + fn channels(self, rec: DMA::Rec) -> DmaChannels { + DmaChannels::new(self, rec) + } +} + +pub trait Instance: Sealed + Deref { + type Rec: ResetEnable; + + fn ptr() -> *const gpdma1::RegisterBlock; + + /// Access channel registers. Valid for channels 0-5 only. + /// # Safety + /// This function is unsafe because it allows access to the DMA channel registers + /// without enforcing exclusive access or checking that the channel index is valid. + /// The caller must ensure that the channel index is within bounds and that no data races occur. + unsafe fn ch(channel: usize) -> &'static gpdma1::CH { + (*Self::ptr()).ch(channel) + } + + /// Access 2D channel registers. Valid for channels 6 and 7 only. + /// # Safety + /// This function is unsafe because it allows access to the DMA channel registers + /// without enforcing exclusive access or checking that the channel index is valid. + /// The caller must ensure that the channel index is within bounds and that no data races occur. 
+ unsafe fn ch2d(channel: usize) -> &'static gpdma1::CH2D { + // Note (unsafe): only accessing registers belonging to specific channel + (*Self::ptr()).ch2d(channel - 6) + } + + fn rec() -> Self::Rec; +} + +impl Sealed for GPDMA1 {} +impl Sealed for GPDMA2 {} + +impl Instance for GPDMA1 { + type Rec = rec::Gpdma1; + + fn ptr() -> *const gpdma1::RegisterBlock { + GPDMA1::ptr() + } + + fn rec() -> Self::Rec { + Self::Rec { + _marker: PhantomData, + } + } +} + +impl Instance for GPDMA2 { + type Rec = rec::Gpdma2; + + fn ptr() -> *const gpdma1::RegisterBlock { + GPDMA2::ptr() + } + + fn rec() -> Self::Rec { + Self::Rec { + _marker: PhantomData, + } + } +} + +/// DmaChannels represents the set of channels on each GPDMA peripheral. To use, simply move the +/// desired channel out of the tuple: +/// +/// ``` +/// let dp = pac::Peripherals::take().unwrap(); +/// let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); +/// let channel = channels.0; +/// ``` +#[allow(private_interfaces)] +pub struct DmaChannels( + pub DmaChannel0, + pub DmaChannel1, + pub DmaChannel2, + pub DmaChannel3, + pub DmaChannel4, + pub DmaChannel5, + pub DmaChannel6, + pub DmaChannel7, +); + +impl DmaChannels { + /// Splits the DMA peripheral into channels. + pub(super) fn new(_regs: DMA, rec: DMA::Rec) -> Self { + let _ = rec.reset().enable(); + Self( + DmaChannel0::new(), + DmaChannel1::new(), + DmaChannel2::new(), + DmaChannel3::new(), + DmaChannel4::new(), + DmaChannel5::new(), + DmaChannel6::new(), + DmaChannel7::new(), + ) + } +} + +/// DmaTransfer represents a single transfer operation on a GPDMA channel. It is created using the +/// [`DmaTransfer::memory_to_memory`], [`DmaTransfer::memory_to_peripheral`], +/// [`DmaTransfer::peripheral_to_memory`], or [`DmaTransfer::peripheral_to_peripheral`] +/// methods, which take a channel and the source and destination buffers. The transfer can then be +/// started using the [`DmaTransfer::start`] or [`DmaTransfer::start_nonblocking`] methods. 
+pub struct DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + channel: &'a mut CH, + source: S, + destination: D, +} + +impl<'a, CH, S, D> DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + fn new( + channel: &'a mut CH, + config: DmaConfig, + source: S, + mut destination: D, + size: usize, + ) -> Self + where + T: TransferType, + { + assert!(size <= u16::MAX as usize, "Max block size is {}", u16::MAX); + + let (src_ptr, _) = unsafe { source.read_buffer() }; + let (dest_ptr, _) = unsafe { destination.write_buffer() }; + + channel.reset_channel(); + channel.set_source(src_ptr); + channel.set_destination(dest_ptr); + channel.set_transfer_size_bytes(size); + channel.apply_config(config); + + Self { + channel, + source, + destination, + } + } + + /// Create a new memory-to-memory transfer with the channel, source and destination buffers + /// provided. + pub fn memory_to_memory( + config: DmaConfig, + channel: &'a mut CH, + source: S, + mut destination: D, + ) -> Self { + let src_width = core::mem::size_of::(); + let dest_width = core::mem::size_of::(); + + let (_, src_words) = unsafe { source.read_buffer() }; + let src_size = src_width * src_words; + let (_, dest_words) = unsafe { destination.write_buffer() }; + let dest_size = dest_width * dest_words; + + // Size must be aligned with destination width if source width is greater than destination + // width and packing mode is used, therefore the maximum size must be dictated by + // destination size (width * count). 
When not in packing mode, this still holds true as + // the destination size must not be exceeded (so only read the same number of words from + // the source as there is room in the destination) + let size = if src_width > dest_width { + dest_size + } else { + // When the source data width is less than or equal to the destination data width, we + // just need to ensure that the destination buffer is large enough to hold all of the + // source data. + assert!(src_size <= dest_size, "Transfer size ({src_size} bytes) will overflow the destination buffer ({dest_size} bytes)!"); + src_size + }; + + Self::new::(channel, config, source, destination, size) + } + + /// Create a new memory-to-peripheral transfer with the channel, source buffer and destination + /// peripheral provided. + pub fn memory_to_peripheral( + config: DmaConfig, + channel: &'a mut CH, + source: S, + destination: D, + ) -> Self { + let (_, src_words) = unsafe { source.read_buffer() }; + let src_size = core::mem::size_of::() * src_words; + + Self::new::( + channel, + config, + source, + destination, + src_size, + ) + .apply_hardware_request_config(config) + } + + /// Create a new peripheral-to-memory transfer with the channel, source peripheral and + /// destination buffer provided. + pub fn peripheral_to_memory( + config: DmaConfig, + channel: &'a mut CH, + source: S, + mut destination: D, + ) -> Self { + let (_, dest_words) = unsafe { destination.write_buffer() }; + let dest_size = core::mem::size_of::() * dest_words; + + Self::new::( + channel, + config, + source, + destination, + dest_size, + ) + .apply_hardware_request_config(config) + .apply_peripheral_source_config(config) + } + + /// Create a new peripheral-to-peripheral transfer with source and destination peripherals + /// provided. 
+ pub fn peripheral_to_peripheral( + config: DmaConfig, S::Word, D::Word>, + channel: &'a mut CH, + source: S, + mut destination: D, + ) -> Self + where + T: PeripheralToPeripheralDirection, + { + let (_, src_words) = unsafe { source.read_buffer() }; + let (_, dest_words) = unsafe { destination.write_buffer() }; + + let size = match T::DIRECTION { + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ) => src_words * core::mem::size_of::(), + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ) => dest_words * core::mem::size_of::(), + _ => unreachable!(), + }; + + Self::new::>( + channel, + config, + source, + destination, + size, + ) + .apply_hardware_request_config(config) + .apply_peripheral_source_config(config) + } + + fn apply_hardware_request_config( + self, + config: DmaConfig, + ) -> Self { + self.channel.configure_hardware_request(config); + self + } + + fn apply_peripheral_source_config( + self, + config: DmaConfig, + ) -> Self { + self.channel.configure_peripheral_flow_control(config); + self + } + + fn start_transfer_internal(&mut self) { + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.enable(); + } + + /// Start a transfer. Does not block waiting for the transfer to start and does not check for + /// errors starting the transfer + pub fn start_nonblocking(&mut self) { + self.start_transfer_internal(); + } + + /// Start a transfer and block waiting for it to start. Returns an error if one occurred + /// starting the transfer. + pub fn start(&mut self) -> Result<(), Error> { + self.start_nonblocking(); + self.channel.wait_for_transfer_started() + } + + /// Suspend a transfer. Does not wait for channel transfer to be suspended and does not report + /// any errors that occur doing so. 
+ pub fn suspend_nonblocking(&mut self) { + if self.channel.is_suspended() { + return; + } + self.channel.initiate_suspend(); + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + /// Suspend a transfer and block waiting for it to be suspended. Returns an error if one + /// occurred during the transfer or while suspending the transfer. + pub fn suspend(&mut self) { + if !self.channel.is_suspended() { + self.channel.suspend_transfer(); + } + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + /// Resume a transfer. Does not wait for channel transfer to be suspended and does not report + /// any errors that occur doing so. + pub fn resume_nonblocking(&mut self) -> Result<(), Error> { + if !self.channel.is_suspended() { + return Err(Error::NotSuspendedError); + } + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.initiate_resume(); + Ok(()) + } + + /// Resume a transfer and block waiting for it to be resumed. Returns an error if one occurred + /// resuming the transfer. + pub fn resume(&mut self) -> Result<(), Error> { + if !self.channel.is_suspended() { + return Err(Error::NotSuspendedError); + } + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.resume_transfer() + } + + pub fn is_running(&self) -> bool { + self.channel.is_running() + } + + /// Blocks waiting for a transfer to complete. Returns an error if one occurred during the + /// transfer. 
+ pub fn wait_for_transfer_complete(&mut self) -> Result<(), Error> { + let result = self.channel.wait_for_transfer_complete(); + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. + fence(Ordering::SeqCst); + + result + } + + /// Blocks waiting for the half transfer complete event. Returns an error if one occurred during + /// the transfer. + pub fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error> { + self.channel.wait_for_half_transfer_complete() + } + + /// Enable interrupts for this transfer. This will enable the transfer complete and half + /// transfer complete interrupts, as well as error interrupts. + pub fn enable_interrupts(&mut self) { + self.channel.enable_transfer_interrupts(); + } + + /// Disable interrupts for this transfer. + pub fn disable_interrupts(&mut self) { + self.channel.disable_transfer_interrupts(); + } + + /// Abort a transaction and wait for it to suspend the transfer before resetting the channel + pub fn abort(&mut self) { + if self.is_running() { + self.channel.abort(); + } + + self.disable_interrupts(); + + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. 
+ fence(Ordering::SeqCst); + } + + pub fn free(mut self) -> (S, D) { + self.abort(); + let (src, dest) = unsafe { + ( + core::ptr::read(&self.source), + core::ptr::read(&self.destination), + ) + }; + mem::forget(self); + (src, dest) + } +} + +impl<'a, CH, S, D> Drop for DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + fn drop(&mut self) { + self.abort(); + } +} diff --git a/src/gpdma/ch.rs b/src/gpdma/ch.rs new file mode 100644 index 0000000..a7c67b4 --- /dev/null +++ b/src/gpdma/ch.rs @@ -0,0 +1,772 @@ +use core::{marker::PhantomData, ops::Deref}; + +use crate::stm32::gpdma1::{ + self, + ch::{CR, DAR, FCR, LBAR, SAR, SR, TR1, TR2}, +}; +use crate::Sealed; + +use super::{ + config::{ + transform::{DataTransform, PaddingAlignmentMode}, + AddressingMode, AhbPort, HardwareRequest, PeripheralRequest, + PeripheralSource, Priority, TransferDirection, TransferType, + }, + DmaConfig, Error, Instance, Word, +}; + +trait ChannelRegs: Sealed { + #[allow(unused)] // TODO: this will be used for linked-list transfers + fn lbar(&self) -> &LBAR; + fn fcr(&self) -> &FCR; + fn sr(&self) -> &SR; + fn cr(&self) -> &CR; + fn tr1(&self) -> &TR1; + fn tr2(&self) -> &TR2; + fn sar(&self) -> &SAR; + fn dar(&self) -> &DAR; + fn set_block_size(&self, size: u16); +} + +impl Sealed for gpdma1::CH {} +impl Sealed for gpdma1::CH2D {} + +impl ChannelRegs for gpdma1::CH { + fn lbar(&self) -> &LBAR { + self.lbar() + } + fn fcr(&self) -> &FCR { + self.fcr() + } + fn sr(&self) -> &SR { + self.sr() + } + fn cr(&self) -> &CR { + self.cr() + } + fn tr1(&self) -> &TR1 { + self.tr1() + } + fn tr2(&self) -> &TR2 { + self.tr2() + } + fn sar(&self) -> &SAR { + self.sar() + } + fn dar(&self) -> &DAR { + self.dar() + } + fn set_block_size(&self, size: u16) { + self.br1().modify(|_, w| w.bndt().set(size)); + } +} + +impl ChannelRegs for gpdma1::CH2D { + fn lbar(&self) -> &LBAR { + self.lbar() + } + fn fcr(&self) -> &FCR { + self.fcr() + } + fn sr(&self) -> &SR { + 
self.sr() + } + fn cr(&self) -> &CR { + self.cr() + } + fn tr1(&self) -> &TR1 { + self.tr1() + } + fn tr2(&self) -> &TR2 { + self.tr2() + } + fn sar(&self) -> &SAR { + self.sar() + } + fn dar(&self) -> &DAR { + self.dar() + } + fn set_block_size(&self, size: u16) { + self.br1().modify(|_, w| w.bndt().set(size)); + } +} + +/// DmaChannelRef provides access to individual channels of the GPDMA instance via Deref. +/// It implements the Channel and DmaChannel traits, and is exposed to user code via the DmaChannels +/// struct. It does not expose a public API to allow user code to use it directly, but should rather +/// be assigned to a DmaTransfer that manages a single transfer on a channel. +#[doc(hidden)] +pub struct DmaChannelRef { + _dma: PhantomData, + _ch: PhantomData, +} + +impl Deref + for DmaChannelRef +{ + type Target = gpdma1::CH; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // Note (unsafe): only accessing registers belonging to Channel N + unsafe { DMA::ch(N) } + } +} + +impl Deref + for DmaChannelRef +{ + type Target = gpdma1::CH2D; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // Note (unsafe): only accessing registers belonging to Channel N + unsafe { DMA::ch2d(N) } + } +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, +{ + pub(super) fn new() -> Self { + DmaChannelRef { + _dma: PhantomData, + _ch: PhantomData, + } + } +} + +impl Sealed for DmaChannelRef {} + +/// Non-error transfer event, including transfer complete and half-transfer events. Half-transfer +/// events can be used for double-buffering/linked buffer transfers. +pub enum TransferEvent { + /// Transfer complete event has occurred + TransferComplete, + /// Half transfer event has occurred + HalfTransferComplete, +} + +// Checks for errors in the captured status register provided, and returns a Result<(), Error> +macro_rules! 
check_error { + ($sr:expr) => { + if $sr.usef().is_trigger() { + Err(Error::UserSettingError) + } else if $sr.dtef().is_trigger() { + Err(Error::DataTransferError) + } else if $sr.ulef().is_trigger() { + Err(Error::LinkTransferError) + } else { + Ok(()) + } + }; +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn reset(&self) { + self.cr().modify(|_, w| w.reset().reset()); + } + + // TODO: remove clippy allow when used. This will likely be useful in the future + #[allow(unused)] + #[inline(always)] + pub(super) fn is_enabled(&self) -> bool { + self.cr().read().en().is_enabled() + } + + /// Initiates the suspension of a transfer + #[inline(always)] + pub(super) fn suspend(&self) { + self.cr().modify(|_, w| w.susp().suspended()); + } + + /// Resume transfer + #[inline(always)] + fn resume(&self) { + self.cr().modify(|_, w| w.susp().not_suspended()); + } + + /// Clear all event flags in the FCR register. + fn clear_all_event_flags(&self) { + self.fcr().write(|w| { + w.tcf() + .clear() + .htf() + .clear() + .dtef() + .clear() + .usef() + .clear() + .ulef() + .clear() + .suspf() + .clear() + .tof() + .clear() + }); + } + + #[inline(always)] + /// Checks if the specified transfer event has triggered or if an error has occurred. If an + /// error has occurred, it is returned. If the event has triggered, `Ok(true)` is returned. + /// Otherwise, if the event has not triggered, `Ok(false)` is returned. 
+ fn check_transfer_event( + &self, + event: TransferEvent, + ) -> Result { + let sr = self.sr().read(); + check_error!(sr)?; + let triggered = match event { + TransferEvent::TransferComplete => sr.tcf().is_trigger(), + TransferEvent::HalfTransferComplete => sr.htf().is_trigger(), + }; + + Ok(triggered) + } + + fn clear_transfer_event_flag(&self, event: TransferEvent) { + self.fcr().write(|w| match event { + TransferEvent::TransferComplete => w.tcf().clear(), + TransferEvent::HalfTransferComplete => w.htf().clear(), + }); + } + + // TODO: Remove clippy allow when FIFO use is implemented + #[allow(unused)] + #[inline(always)] + fn fifo_level(&self) -> u8 { + self.sr().read().fifol().bits() + } + + /// Checks if the channel is idle. Ignores error conditions. + #[inline(always)] + fn is_idle(&self) -> bool { + self.sr().read().idlef().is_trigger() + } + + #[inline(always)] + fn check_idle(&self) -> Result { + let sr = self.sr().read(); + check_error!(sr)?; + Ok(sr.idlef().is_trigger()) + } + + #[inline(always)] + fn set_source_address(&self, addr: u32) { + self.sar().write(|w| w.sa().set(addr)); + } + + #[inline(always)] + fn set_destination_address(&self, addr: u32) { + self.dar().write(|w| w.da().set(addr)); + } + + #[inline(always)] + fn set_source_addressing_mode(&self, mode: AddressingMode) { + self.tr1().modify(|_, w| match mode { + AddressingMode::ContiguouslyIncremented => w.sinc().contiguous(), + AddressingMode::Fixed => w.sinc().fixed_burst(), + }); + } + + #[inline(always)] + fn set_destination_addressing_mode(&self, mode: AddressingMode) { + self.tr1().modify(|_, w| match mode { + AddressingMode::ContiguouslyIncremented => w.dinc().contiguous(), + AddressingMode::Fixed => w.dinc().fixed_burst(), + }); + } + + #[inline(always)] + fn set_source_burst_length(&self, burst_length: u8) { + self.tr1().modify(|_, w| w.dbl_1().set(burst_length)); + } + + #[inline(always)] + fn set_destination_burst_length(&self, burst_length: u8) { + self.tr1().modify(|_, w| 
w.sbl_1().set(burst_length)); + } + + #[inline(always)] + fn set_source_ahb_port(&self, port: AhbPort) { + self.tr1().modify(|_, w| match port { + AhbPort::Port0 => w.sap().port0(), + AhbPort::Port1 => w.sap().port1(), + }); + } + + #[inline(always)] + fn set_destination_ahb_port(&self, port: AhbPort) { + self.tr1().modify(|_, w| match port { + AhbPort::Port0 => w.dap().port0(), + AhbPort::Port1 => w.dap().port1(), + }); + } + + #[inline(always)] + fn set_source_data_width(&self, width: usize) { + self.tr1().modify(|_, w| match width { + 1 => w.sdw_log2().byte(), + 2 => w.sdw_log2().half_word(), + 4 => w.sdw_log2().word(), + _ => unreachable!(), + }); + } + + #[inline(always)] + fn set_destination_data_width(&self, width: usize) { + self.tr1().modify(|_, w| match width { + 1 => w.ddw_log2().byte(), + 2 => w.ddw_log2().half_word(), + 4 => w.ddw_log2().word(), + _ => unreachable!(), + }); + } + + #[inline(always)] + fn set_source_byte_exchange(&self, source_byte_exchange: bool) { + self.tr1().modify(|_, w| { + if source_byte_exchange { + w.sbx().exchanged() + } else { + w.sbx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_padding_alignment_mode(&self, pam: PaddingAlignmentMode) { + self.tr1().modify(|_, w| match pam { + PaddingAlignmentMode::None => w, + _ => w.pam().set(pam.bits()), + }); + } + + #[inline(always)] + fn set_destination_half_word_exchange(&self, half_word_exchange: bool) { + self.tr1().modify(|_, w| { + if half_word_exchange { + w.dhx().exchanged() + } else { + w.dhx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_destination_byte_exchange(&self, destination_byte_exchange: bool) { + self.tr1().modify(|_, w| { + if destination_byte_exchange { + w.dbx().exchanged() + } else { + w.dbx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_priority(&self, priority: Priority) { + self.cr().modify(|_, w| match priority { + Priority::LowPriorityLowWeight => w.prio().low_prio_low_weight(), + 
Priority::LowPriorityMedWeight => w.prio().low_prio_mid_weight(), + Priority::LowPriorityHighWeight => w.prio().low_prio_high_weight(), + Priority::HighPriority => w.prio().high_prio(), + }); + } + + #[inline(always)] + fn set_transfer_type(&self, transfer_dir: TransferDirection) { + self.tr2().modify(|_, w| match transfer_dir { + TransferDirection::MemoryToMemory => w.swreq().software(), + TransferDirection::MemoryToPeripheral => { + w.swreq().hardware().dreq().destination() + } + TransferDirection::PeripheralToMemory => { + w.swreq().hardware().dreq().source() + } + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ) => w.swreq().hardware().dreq().source(), + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ) => w.swreq().hardware().dreq().destination(), + }); + } + + // TODO: Use enum? + #[inline(always)] + fn set_request_line(&self, request: u8) { + self.tr2() + .modify(|_, w| unsafe { w.reqsel().bits(request) }); + } + + #[inline(always)] + fn set_block_request_mode(&self, block_requests_enabled: bool) { + self.tr2().modify(|_, w| { + if block_requests_enabled { + w.breq().block() + } else { + w.breq().burst() + } + }); + } + + #[inline(always)] + fn set_peripheral_flow_control_mode( + &self, + peripheral_control_enabled: bool, + ) { + self.tr2().modify(|_, w| { + if peripheral_control_enabled { + w.pfreq().peripheral_control_mode() + } else { + w.pfreq().gpdma_control_mode() + } + }); + } +} + +/// The Channel trait is a private trait that abstracts over control of the linear and 2D channels. +/// It exposes to the DmaTransfer struct all the methods needed to control transfers on a particular +/// channel. It is private in order to not expose the low level functionality beyond the gpdma +/// module. 
+#[doc(hidden)] +pub(super) trait Channel { + fn enable(&mut self); + + fn is_suspended(&self) -> bool; + + /// Initiates the suspension of a transfer + fn initiate_suspend(&mut self); + + /// Resume transfer + fn initiate_resume(&self); + + /// Checks whether the channel transfer is complete. If the channel indicates an error occurred, + /// during the transaction an `Error`` is returned. + fn check_transfer_complete(&self) -> Result; + + /// Checks whether the channel half transfer complete event has triggered. If the channel + /// indicates an error occurred, during the transaction an `Error`` is returned. + fn check_half_transfer_complete(&self) -> Result; + + /// Checks whether the channel transfer has started (has transitioned out of the idle state, or + /// the transfer complete event has already triggered if it is idle) + fn check_transfer_started(&self) -> Result; + + fn is_running(&self) -> bool; + + /// Reset the channel registers so it can be reused. + fn reset_channel(&mut self); + + /// Suspend the transfer and blocks until it has been suspended. Reports any that occur while + /// waiting for the transfer to suspend. + fn suspend_transfer(&mut self); + + /// Resumes a suspended transfer and blocks until the channel transitions out of the idle state + /// Reports any errors that occur resuming the transfer. + fn resume_transfer(&mut self) -> Result<(), Error>; + + /// Aborts an operation by suspending the transfer and resetting the channel. + fn abort(&mut self); + + /// Blocks waiting for a transfer to be started (or for it to be idle and complete). Reports any + /// errors that occur while waiting for the transfer to start. + fn wait_for_transfer_started(&mut self) -> Result<(), Error>; + + /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. + fn wait_for_transfer_complete(&mut self) -> Result<(), Error>; + + /// Blocks waiting for a half transfer event to trigger. 
Reports any errors that occur during a + /// transfer. + fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error>; + + /// Apply a transfer configuration to the channel + fn apply_config( + &mut self, + config: DmaConfig, + ); + + /// Apply hardware request configuration to the channel. Not relevant to memory-to-memory + /// transfers. + fn configure_hardware_request( + &mut self, + config: DmaConfig, + ); + + /// Apply peripheral flow control configuration for transactions where a peripheral is the + /// source + fn configure_peripheral_flow_control< + T: PeripheralSource, + S: Word, + D: Word, + >( + &mut self, + config: DmaConfig, + ); + + /// Apply a data transform to the channel transfer + fn apply_data_transform(&mut self, data_transform: DataTransform); + /// Set the source address. This sets the source address and data width. + fn set_source(&mut self, ptr: *const W); + + /// Set the destination address. This sets the destination address and data width + fn set_destination(&mut self, ptr: *mut W); + + /// Set the transfer size in bytes (not words!). Size must be aligned with destination width if + /// source width is greater than destination width and packing mode is used. Otherwise the size + /// must be aligned with the source data width. + fn set_transfer_size_bytes(&mut self, size: usize); + + /// Enable transfer interrupts for the channel. This enables the transfer complete, + /// half-transfer complete, data transfer error and user setting error interrupts. This is + /// useful for starting a transfer that will be monitored by an interrupt handler. + fn enable_transfer_interrupts(&mut self); + + /// Disable transfer interrupts for the channel. It is expected that this will be called from + /// an interrupt handler after a transfer is completed. 
+ fn disable_transfer_interrupts(&mut self); +} + +impl Channel for DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn enable(&mut self) { + self.cr().modify(|_, w| w.en().enabled()); + } + + #[inline(always)] + fn is_suspended(&self) -> bool { + self.sr().read().suspf().bit_is_set() + } + + fn initiate_suspend(&mut self) { + if self.is_suspended() { + return; + } + self.suspend(); + } + + #[inline(always)] + fn initiate_resume(&self) { + self.resume(); + } + + fn check_transfer_complete(&self) -> Result { + self.check_transfer_event(TransferEvent::TransferComplete) + } + + fn check_half_transfer_complete(&self) -> Result { + self.check_transfer_event(TransferEvent::HalfTransferComplete) + } + + fn check_transfer_started(&self) -> Result { + // TODO: Resolve multiple status register reads + match self.check_idle() { + // If we're idle we might have finished the transaction already, so also check if the + // transfer complete flag is set + Ok(true) => self.check_transfer_complete(), + Ok(false) => Ok(false), + Err(error) => Err(error), + } + } + + #[inline(always)] + fn is_running(&self) -> bool { + !self.is_idle() + } + + fn reset_channel(&mut self) { + self.reset(); + self.clear_all_event_flags(); + } + + fn suspend_transfer(&mut self) { + self.initiate_suspend(); + while !self.is_suspended() {} + } + + fn resume_transfer(&mut self) -> Result<(), Error> { + self.initiate_resume(); + while !self.check_transfer_started()? {} + Ok(()) + } + + fn abort(&mut self) { + if !self.is_idle() { + self.suspend_transfer(); + } + + self.reset_channel(); + } + + fn wait_for_transfer_started(&mut self) -> Result<(), Error> { + while !self.check_transfer_started().inspect_err(|_| { + self.clear_all_event_flags(); + })? 
{} + Ok(()) + } + + fn wait_for_transfer_complete(&mut self) -> Result<(), Error> { + loop { + match self.check_transfer_complete() { + Ok(true) => { + self.clear_transfer_event_flag( + TransferEvent::TransferComplete, + ); + return Ok(()); + } + Ok(false) => continue, + Err(error) => { + self.clear_all_event_flags(); + return Err(error); + } + } + } + } + + fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error> { + loop { + match self.check_half_transfer_complete() { + Ok(true) => { + self.clear_transfer_event_flag( + TransferEvent::HalfTransferComplete, + ); + return Ok(()); + } + Ok(false) => continue, + Err(error) => { + self.clear_all_event_flags(); + return Err(error); + } + } + } + } + + fn apply_config( + &mut self, + config: DmaConfig, + ) { + self.set_source_addressing_mode( + config.transfer_type.source_addressing_mode(), + ); + self.set_destination_addressing_mode( + config.transfer_type.destination_addressing_mode(), + ); + self.set_source_burst_length(config.source_burst_length); + self.set_destination_burst_length(config.destination_burst_length); + self.set_source_ahb_port(config.source_ahb_port); + self.set_destination_ahb_port(config.destination_ahb_port); + + self.set_transfer_type(T::DIRECTION); + self.set_priority(config.priority); + if config.enable_interrupts { + self.enable_transfer_interrupts(); + } + if let Some(data_transform) = config.data_transform { + self.apply_data_transform(data_transform); + } + } + + fn configure_hardware_request( + &mut self, + config: DmaConfig, + ) { + self.set_block_request_mode(config.transfer_type.block_request()); + self.set_request_line(config.transfer_type.request()); + } + + fn configure_peripheral_flow_control< + T: PeripheralSource, + S: Word, + D: Word, + >( + &mut self, + config: DmaConfig, + ) { + self.set_peripheral_flow_control_mode( + config.transfer_type.peripheral_flow_control(), + ); + } + + fn apply_data_transform(&mut self, data_transform: DataTransform) { + 
self.set_source_byte_exchange(data_transform.source_byte_exchange); + self.set_padding_alignment_mode(data_transform.padding_alignment); + self.set_destination_half_word_exchange( + data_transform.dest_half_word_exchange, + ); + self.set_destination_byte_exchange(data_transform.dest_byte_exchange); + } + + fn set_source(&mut self, ptr: *const W) { + self.set_source_address(ptr as u32); + self.set_source_data_width(core::mem::size_of::()); + } + + fn set_destination(&mut self, ptr: *mut W) { + self.set_destination_address(ptr as u32); + self.set_destination_data_width(core::mem::size_of::()); + } + + fn set_transfer_size_bytes(&mut self, size: usize) { + self.set_block_size(size as u16); + } + + #[inline(always)] + fn enable_transfer_interrupts(&mut self) { + self.cr().modify(|_, w| { + w.tcie().enabled().dteie().enabled().useie().enabled() + }); + } + + #[inline(always)] + fn disable_transfer_interrupts(&mut self) { + self.cr().modify(|_, w| { + w.tcie().disabled().dteie().disabled().useie().disabled() + }); + } +} + +/// DmaChannel trait provides the API contract that all GPDMA channels exposed to the user +/// implement. 
+#[allow(private_bounds)] +pub trait DmaChannel: Channel {} + +impl DmaChannel for DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ +} + +/// Channel 0 on GPDMA controller +pub type DmaChannel0 = DmaChannelRef; +/// Channel 1 on GPDMA controller +pub type DmaChannel1 = DmaChannelRef; +/// Channel 2 on GPDMA controller +pub type DmaChannel2 = DmaChannelRef; +/// Channel 3 on GPDMA controller +pub type DmaChannel3 = DmaChannelRef; +/// Channel 4 on GPDMA controller +pub type DmaChannel4 = DmaChannelRef; +/// Channel 5 on GPDMA controller +pub type DmaChannel5 = DmaChannelRef; +/// Channel 6 on GPDMA controller +pub type DmaChannel6 = DmaChannelRef; +/// Channel 7 on GPDMA controller +pub type DmaChannel7 = DmaChannelRef; diff --git a/src/gpdma/config.rs b/src/gpdma/config.rs new file mode 100644 index 0000000..0be6299 --- /dev/null +++ b/src/gpdma/config.rs @@ -0,0 +1,425 @@ +use core::marker::PhantomData; + +use super::Word; + +pub mod transform; +use transform::*; + +/// PeripheralRequests is used for peripheral-to-peripheral transfers to indicate which side of the +/// transfer is driving the request (ie. 
which has the hardware request assigned) +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum PeripheralRequest { + SourceRequest, + DestinationRequest, +} + +/// The TransferDirection represents the available options for transfer types +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum TransferDirection { + MemoryToMemory, + MemoryToPeripheral, + PeripheralToMemory, + PeripheralToPeripheral(PeripheralRequest), +} + +/// Addressing mode represents whether the source or destination address is contiguously incremented +/// or fixed during a transfer +#[derive(Clone, Copy, Default, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum AddressingMode { + #[default] + ContiguouslyIncremented, + Fixed, +} + +/// Transfer type encapsulates the transfer direction and the addressing mode for both the source +/// and destination of a transfer. +pub trait TransferType: crate::Sealed + Default { + const DIRECTION: TransferDirection; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::ContiguouslyIncremented + } + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::ContiguouslyIncremented + } +} + +/// Transfers to or from a peripheral have these additional options +pub trait HardwareRequest { + fn block_request(&self) -> bool; + fn enable_block_request(&mut self); + fn request(&self) -> u8; + fn set_request(&mut self, request: u8); +} + +/// When a peripheral is the source of the transfer it can optionally be configured in peripheral +/// flow control mode, when the peripheral supports it (currently just the I3C peripheral) +pub trait PeripheralSource { + fn peripheral_flow_control(&self) -> bool; + fn enable_peripheral_flow_control(&mut self); +} + +/// Represents the options specifically available for peripheral-to-memory transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] 
+#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct PeripheralToMemory { + request: u8, + block_request: bool, + peripheral_flow_control: bool, +} + +impl crate::Sealed for PeripheralToMemory {} + +impl TransferType for PeripheralToMemory { + const DIRECTION: TransferDirection = TransferDirection::PeripheralToMemory; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for PeripheralToMemory { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +impl PeripheralSource for PeripheralToMemory { + fn peripheral_flow_control(&self) -> bool { + self.peripheral_flow_control + } + + fn enable_peripheral_flow_control(&mut self) { + self.peripheral_flow_control = true; + } +} + +/// Represents the options specifically available for memory-to-peripheral transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct MemoryToPeripheral { + request: u8, + block_request: bool, +} + +impl crate::Sealed for MemoryToPeripheral {} + +impl TransferType for MemoryToPeripheral { + const DIRECTION: TransferDirection = TransferDirection::MemoryToPeripheral; + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for MemoryToPeripheral { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +/// Marker struct to indicate that the source peripheral drives the request via its request line. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct SourceRequest; + +/// Marker struct to indicate that the destination peripheral drives the request via its request +/// line. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DestinationRequest; + +/// Indicates which peripheral in a peripheral-to-peripheral transfer is driving the request line +pub trait PeripheralToPeripheralDirection: Default + Clone + Copy { + const DIRECTION: TransferDirection; +} + +impl PeripheralToPeripheralDirection for SourceRequest { + const DIRECTION: TransferDirection = + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ); +} + +impl PeripheralToPeripheralDirection for DestinationRequest { + const DIRECTION: TransferDirection = + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ); +} + +/// Represents the options specifically available for peripheral-to-peripheral transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct PeripheralToPeripheral { + _peripheral_request: PhantomData, + request: u8, + block_request: bool, + peripheral_flow_control: bool, +} + +impl crate::Sealed for PeripheralToPeripheral {} + +impl TransferType + for PeripheralToPeripheral +{ + const DIRECTION: TransferDirection = T::DIRECTION; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for PeripheralToPeripheral { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +impl PeripheralSource 
for PeripheralToPeripheral { + fn peripheral_flow_control(&self) -> bool { + self.peripheral_flow_control + } + + fn enable_peripheral_flow_control(&mut self) { + self.peripheral_flow_control = true; + } +} + +/// Marker struct for memory-to-memory transfers (no special options) +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct MemoryToMemory; + +impl crate::Sealed for MemoryToMemory {} + +impl TransferType for MemoryToMemory { + const DIRECTION: TransferDirection = TransferDirection::MemoryToMemory; +} + +/// Priority of the transfer. Used by the GPDMA channel arbitration to determine which transfer +/// to service. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Priority { + LowPriorityLowWeight = 0, + #[default] + LowPriorityMedWeight = 1, + LowPriorityHighWeight = 2, + HighPriority = 3, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Continuation { + #[default] + Direct, + LinkedList, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum AhbPort { + #[default] + Port0 = 0, + Port1 = 1, +} + +const MAX_BURST_LEN: u8 = 64; + +/// Configuration options for a DMA transfer +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DmaConfig { + _src_word: PhantomData, + _dest_word: PhantomData, + pub(super) transfer_type: T, + pub(super) priority: Priority, + pub(super) source_ahb_port: AhbPort, + pub(super) destination_ahb_port: AhbPort, + pub(super) source_burst_length: u8, + pub(super) destination_burst_length: u8, + pub(super) enable_interrupts: bool, + pub(super) data_transform: Option, +} + +impl DmaConfig { + /// Create a config with default settings + pub fn new() -> Self { + Self::default() + } + + /// Set the priority of the 
transfer. Default: Low Priority, Medium Weight + pub fn priority(mut self, priority: Priority) -> Self { + self.priority = priority; + self + } + + /// Set the source AHB port (0 or 1). Default: 0. + pub fn source_ahb_port(mut self, port: AhbPort) -> Self { + self.source_ahb_port = port; + self + } + + /// Set the destination AHB port (0 or 1). Default 0. + pub fn destination_ahb_port(mut self, port: AhbPort) -> Self { + self.destination_ahb_port = port; + self + } + + /// Set the source burst length in words (1 - 64 incl.). Default 1. + pub fn source_burst_length(mut self, len: u8) -> Self { + assert!( + (1..=MAX_BURST_LEN).contains(&len), + "Must specify a burst length between 1 and 64" + ); + self.source_burst_length = len - 1; + self + } + + /// Set the destination burst length in words (1 - 64 incl.). Default 1. + pub fn destination_burst_length(mut self, len: u8) -> Self { + assert!( + (1..=MAX_BURST_LEN).contains(&len), + "Must specify a burst length between 1 and 64" + ); + self.destination_burst_length = len - 1; + self + } + + pub fn enable_interrupts(mut self) -> Self { + self.enable_interrupts = true; + self + } + + /// Apply a data transform via a closure that takes a DataTransformBuilder that provides APIs + /// relevant to the source and destination data widths. 
+ pub fn with_data_transform( + mut self, + builder: DataTransformBuilder, + ) -> Self { + self.data_transform = Some(builder.transform); + self + } +} + +impl DmaConfig { + /// Enable peripheral flow control (only supported by I3C) + pub fn enable_peripheral_flow_control(mut self) -> Self { + self.transfer_type.enable_peripheral_flow_control(); + self + } +} + +impl DmaConfig { + /// Enable block requests for peripherals that support it + pub fn enable_hardware_block_requests(mut self) -> Self { + self.transfer_type.enable_block_request(); + self + } + + /// Select the hardware request line + pub fn with_request(mut self, request: u8) -> Self { + self.transfer_type.set_request(request); + self + } +} + +#[cfg(test)] +mod test { + use crate::gpdma::{ + config::{self, MemoryToMemory}, + DmaConfig, + }; + + use super::*; + + impl DataTransform { + fn new( + source_byte_exchange: bool, + padding_alignment: PaddingAlignmentMode, + dest_half_word_exchange: bool, + dest_byte_exchange: bool, + ) -> Self { + Self { + source_byte_exchange, + padding_alignment, + dest_half_word_exchange, + dest_byte_exchange, + } + } + } + + #[test] + fn test_data_transform() { + let builder: DataTransformBuilder = + DataTransform::builder().swap_source_middle_bytes(); + assert_eq!( + builder.transform, + DataTransform::new(true, Default::default(), false, false) + ); + } + + #[test] + fn test_with_data_transform() { + let config: DmaConfig = DmaConfig::new(); + let transform = DataTransform::builder() + .swap_source_middle_bytes() + .left_align_right_truncate() + .swap_destination_half_word_byte_order(); + let config = config.with_data_transform(transform); + assert_eq!( + config.data_transform, + Some(DataTransform::new( + true, + PaddingAlignmentMode::LeftAlignedRightTruncated, + false, + true + )) + ); + } +} diff --git a/src/gpdma/config/transform.rs b/src/gpdma/config/transform.rs new file mode 100644 index 0000000..aeade1f --- /dev/null +++ b/src/gpdma/config/transform.rs @@ -0,0 
+1,322 @@ +//! The transform module provides a configuration builder to set up the data transformations +//! supported by the GPDMA peripheral. +//! +//! # Usage +//! use stm32h5xx_hal::gpdma::DmaConfig; +//! use stm32h5xx_hal::gpdma::config::transform::*; // This ensures relevant traits are in scope +//! +//! let config: DmaConfig = DmaConfig::new().with_data_transform( +//! DataTransform::builder() +//! .swap_source_middle_bytes() +//! .right_align_left_truncate() +//! .swap_destination_half_word_byte_order() +//! ); +use core::marker::PhantomData; + +use super::Word; + +/// Represents the options available for the padding and alignment step in the data transformation +/// pipeline +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub(crate) enum PaddingAlignmentMode { + #[default] + None, + + // PAM1 - Source data width < Destination data width + ZeroPadded, + SignExtended, + Packed, + + // PAM2 - Source data width > Destination data width + RightAlignedLeftTruncated, + LeftAlignedRightTruncated, + Unpacked, +} + +impl PaddingAlignmentMode { + pub fn bits(&self) -> u8 { + match self { + PaddingAlignmentMode::None => { + panic!("Do not set PAM bits if no PAM mode was chosen") + } + PaddingAlignmentMode::ZeroPadded => 0, + PaddingAlignmentMode::SignExtended => 1, + PaddingAlignmentMode::Packed => 2, + PaddingAlignmentMode::RightAlignedLeftTruncated => 0, + PaddingAlignmentMode::LeftAlignedRightTruncated => 1, + PaddingAlignmentMode::Unpacked => 2, + } + } +} + +pub trait SourceByteExchange { + fn swap_source_middle_bytes(self) -> Self; +} + +pub trait PaddingAlignment { + fn right_align_zero_pad(self) -> Self; + fn right_align_sign_extend(self) -> Self; + fn pack(self) -> Self; +} + +pub trait TruncationAlignment { + fn right_align_left_truncate(self) -> Self; + fn left_align_right_truncate(self) -> Self; + fn unpack(self) -> Self; +} + +pub trait DestinationHalfWordExchange { + fn 
swap_destination_half_words(self) -> Self; +} + +pub trait DestinationByteExchange { + fn swap_destination_half_word_byte_order(self) -> Self; +} + +/// The DataTransformBuilder is used to configure the data transformation pipeline that the GPDMA +/// peripheral implements. +/// +/// Depending upon what word sizes are used for transfers, different pipeline steps are applicable: +/// +/// - The first possible step in the pipeline, the source byte exchange step is applicable to 32-bit +/// sources only and swaps the middle 2 bytes of the 32-bit word +/// - The next step is applicable when the source data width is not equal to the destination data +/// width: +/// - If the destination width is less than the source width, the data can be truncated (left or +/// right aligned) or unpacked into a FIFO to output all the data to subsequent destination +/// words (destination buffer size must be large enough to accomodate the size in bytes of the +/// unpacked source data) +/// - If the destination width is greater than the source width, the data can be zero- or +/// sign-extended, or it can be packed into the destination words. +/// - After the padding/alignment step, the order of the destination 16-bit half-words in a 32-bit +/// destination word can be swapped (only applicable if the destination word is 32-bit) +/// - Finally, the order of the bytes in each 16-bit destination (half-) word can be swapped (only +/// applicable for 32- and 16-bit destination word sizes) +/// +/// This builder allows each step to be specified, only when relevant to the source and destination +/// data-widths. +/// +/// To get a builder use [`DataTransform::builder()`]. Type inference is used to determine the +/// source and destination word sizes, so the builder can be created without specifying the types +/// explicitly. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DataTransformBuilder { + _source_type: PhantomData, + _destination_type: PhantomData, + pub(super) transform: DataTransform, +} + +impl DataTransformBuilder { + pub fn new() -> Self { + Self::default() + } +} + +impl SourceByteExchange for DataTransformBuilder { + /// The order of the unaligned middle bytes of a 32-bit source word is exchanged + /// ie. B3B2B1B0 -> B3B1B2B0 + fn swap_source_middle_bytes(mut self) -> Self { + self.transform.source_byte_exchange = true; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 16 bits of the 32-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 16 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 16-bit words into the 32-bit destination words + /// ie: B3B2,B1B0 -> B3B2B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 24 bits of the 32-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 24 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 8-bit words into the 32-bit destination words + /// ie: B3,B2,B1,B0 -> B3B2B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; 
+ self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 8 bits of the 16-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 8 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 8-bit words into the 16-bit destination words + /// ie: B1,B0 -> B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 16-bits and truncate the rest (default) + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B5B4,B1B0 (see RM0492, Table 92) + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 16-bits and truncate the rest + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7B6,B3B2 (see RM0492, Table 92) + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each 32-bit word into separate 16-bit half-words. + /// Note that the destination buffer must have sufficient room for n*2 16-bit values where n is + /// the number of 32-bit words in the source buffer. 
+ /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7B6,B5B4,B3B2,B1B0 (see RM0492, Table 92) + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 8-bits and truncate the rest (default) + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B4,B0 + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 8-bits and truncate the rest + /// + /// i.e: B7B6B5B4,B3B2B1B0 -> B7,B3 + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each word or half-word into separate 8-bit bytes. + /// Note that the destination buffer must have sufficient room for n*2 8-bit values where n is + /// the number of word or half-words in the source buffer. + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7,B6,B5,B4,B3,B2,B1,B0 + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 8 bits and truncate the rest (default) + /// + /// ie: B3B2,B1B0 -> B2,B0 (see RM0492, Table 92) + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 8 bits and truncate the rest + /// + /// ie: B3B2,B1B0 -> B3,B1 (see RM0492, Table 92) + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each 16-bit word into separate 8-bit half-words. + /// Note that the destination buffer must have sufficient room for n*2 16-bit values where n is + /// the number of 32-bit words in the source buffer. 
+ /// + /// ie: B3B2,B1B0 -> B3,B2,B1,B0 (see RM0492, Table 92) + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl DestinationHalfWordExchange for DataTransformBuilder { + /// Swap the order of the 16-bit half-words in the 32-bit destination word + fn swap_destination_half_words(mut self) -> Self { + self.transform.dest_half_word_exchange = true; + self + } +} + +impl DestinationByteExchange for DataTransformBuilder { + /// Swap the order of bytes in each 16-bit destination word + fn swap_destination_half_word_byte_order(mut self) -> Self { + self.transform.dest_byte_exchange = true; + self + } +} + +impl DestinationByteExchange for DataTransformBuilder { + /// Swap the order of bytes in each 16-bit destination half-word + fn swap_destination_half_word_byte_order(mut self) -> Self { + self.transform.dest_byte_exchange = true; + self + } +} + +/// DataTransform represents the configuration of the data transformation pipeline as produced +/// by the above builder structs. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DataTransform { + pub(crate) source_byte_exchange: bool, + pub(crate) padding_alignment: PaddingAlignmentMode, + pub(crate) dest_half_word_exchange: bool, + pub(crate) dest_byte_exchange: bool, +} + +impl DataTransform { + pub fn builder() -> DataTransformBuilder { + DataTransformBuilder { + _source_type: PhantomData, + _destination_type: PhantomData, + transform: DataTransform::default(), + } + } +} diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs new file mode 100644 index 0000000..0baaca9 --- /dev/null +++ b/src/gpdma/periph.rs @@ -0,0 +1,280 @@ +//! This module provides traits and structs for managing DMA transactions for peripherals. +//! - By implementing the [`TxAddr`] and [`RxAddr`] traits, peripherals can be used with DMA for +//! 
memory-to-peripheral and peripheral-to-memory transfers, respectively.
+//! - The [`Tx`] and [`Rx`] traits define the interface for initiating DMA transfers for
+//! TX and RX operations, respectively.
+//! - The [`DmaTx`], [`DmaRx`] structs implement the [`Tx`] and [`Rx`] traits, respectively, and
+//! encapsulate the logic for initializing these transfers.
+//! - The [`DmaDuplex`] struct combines both TX and RX capabilities, allowing for full-duplex
+//! operations.
+use core::marker::PhantomData;
+
+use crate::Sealed;
+
+use super::{
+    DmaChannel, DmaConfig, DmaTransfer, MemoryToPeripheral, PeripheralToMemory,
+    ReadBuffer, Word, WriteBuffer,
+};
+
+/// `PeriphTxBuffer` is a wrapper around a peripheral's transmit data register address, used to
+/// provide a WriteBuffer implementation for initiating memory-to-peripheral DMA transfers.
+pub struct PeriphTxBuffer<A: TxAddr<W>, W: Word> {
+    _addr: PhantomData<A>,
+    _word: PhantomData<W>,
+}
+
+unsafe impl<A: TxAddr<W>, W: Word> WriteBuffer for PeriphTxBuffer<A, W> {
+    type Word = W;
+
+    unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) {
+        (A::tx_addr(), 1)
+    }
+}
+
+/// `PeriphRxBuffer` is a wrapper around a peripheral's receive data register address, used to
+/// provide a ReadBuffer implementation for initiating peripheral-to-memory DMA transfers.
+pub struct PeriphRxBuffer<A: RxAddr<W>, W: Word> {
+    _addr: PhantomData<A>,
+    _word: PhantomData<W>,
+}
+
+unsafe impl<A: RxAddr<W>, W: Word> ReadBuffer for PeriphRxBuffer<A, W> {
+    type Word = W;
+
+    unsafe fn read_buffer(&self) -> (*const Self::Word, usize) {
+        (A::rx_addr(), 1)
+    }
+}
+
+/// `TxAddr` is a trait that provides a method to obtain the address of the transmit data register
+/// of a peripheral. This is used to facilitate memory-to-peripheral DMA transactions. The
+/// peripheral must implement this trait.
+pub trait TxAddr<W: Word> {
+    /// Returns a pointer to the peripheral's transmit data register.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the returned pointer is only used when it is valid to access
+    /// the peripheral's transmit data register, and that no data races or invalid memory accesses
+    /// occur.
+    unsafe fn tx_addr() -> *mut W;
+}
+
+/// `RxAddr` is a trait that provides a method to obtain the address of the receive data register
+/// of a peripheral. This is used to facilitate peripheral-to-memory DMA transactions. The
+/// peripheral must implement this trait.
+pub trait RxAddr<W: Word> {
+    /// Returns a pointer to the peripheral's receive data register.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the returned pointer is only used when it is valid to access
+    /// the peripheral's receive data register, and that no data races or invalid memory accesses
+    /// occur.
+    unsafe fn rx_addr() -> *const W;
+}
+
+trait TxBuffer<W: Word> {
+    /// Returns a `PeriphTxBuffer` that provides a write buffer for the peripheral's transmit data
+    /// register. This is used to initiate memory-to-peripheral DMA transfers. Implemented
+    /// automatically for any implementer of `TxAddr`.
+    ///
+    /// # Safety
+    /// TxAddr already requires the caller to ensure that the returned pointer is valid and as such
+    /// is marked unsafe, so marking this method as unsafe is redundant.
+    fn tx_buffer() -> PeriphTxBuffer<Self, W>
+    where
+        Self: TxAddr<W> + Sized,
+    {
+        PeriphTxBuffer {
+            _addr: PhantomData,
+            _word: PhantomData,
+        }
+    }
+}
+
+impl<W: Word, T: TxAddr<W>> TxBuffer<W> for T {}
+
+trait RxBuffer<W: Word> {
+    /// Returns a `PeriphRxBuffer` that provides a read buffer for the peripheral's receive data
+    /// register. This is used to initiate peripheral-to-memory DMA transfers. Implemented
+    /// automatically for any implementer of `RxAddr`.
+    ///
+    /// # Safety
+    /// RxAddr already requires the caller to ensure that the returned pointer is valid and as such
+    /// is marked unsafe, so marking this method as unsafe is redundant.
+    fn rx_buffer() -> PeriphRxBuffer<Self, W>
+    where
+        Self: RxAddr<W> + Sized,
+    {
+        PeriphRxBuffer {
+            _addr: PhantomData,
+            _word: PhantomData,
+        }
+    }
+}
+
+impl<W: Word, T: RxAddr<W>> RxBuffer<W> for T {}
+
+/// `DmaRx` encapsulates the initialization of a peripheral-to-memory DMA transaction for receiving
+/// data. Used by peripheral DMA implementations.
+pub struct DmaRx<PERIPH, CH, W> {
+    _periph: PhantomData<PERIPH>,
+    _word: PhantomData<W>,
+    channel: CH,
+}
+
+impl<PERIPH, CH, W> DmaRx<PERIPH, CH, W> {
+    fn new(channel: CH) -> Self {
+        Self {
+            _periph: PhantomData,
+            _word: PhantomData,
+            channel,
+        }
+    }
+
+    pub fn free(self) -> CH {
+        self.channel
+    }
+}
+
+impl<PERIPH, CH, W> From<CH> for DmaRx<PERIPH, CH, W> {
+    fn from(channel: CH) -> Self {
+        Self::new(channel)
+    }
+}
+
+impl<PERIPH, CH, W> Sealed for DmaRx<PERIPH, CH, W> {}
+
+impl<PERIPH, CH, W> DmaRx<PERIPH, CH, W>
+where
+    PERIPH: RxAddr<W>,
+    CH: DmaChannel,
+    W: Word,
+{
+    pub fn init_rx_transfer<'a, D>(
+        &'a mut self,
+        config: DmaConfig,
+        destination: D,
+    ) -> DmaTransfer<'a, CH, PeriphRxBuffer<PERIPH, W>, D>
+    where
+        D: WriteBuffer,
+    {
+        DmaTransfer::peripheral_to_memory(
+            config,
+            &mut self.channel,
+            PERIPH::rx_buffer(),
+            destination,
+        )
+    }
+}
+
+/// `DmaTx` encapsulates the initialization of a memory-to-peripheral DMA transaction for
+/// transmitting data. Used by peripheral DMA implementations.
+pub struct DmaTx<PERIPH, CH, W> {
+    _periph: PhantomData<PERIPH>,
+    _word: PhantomData<W>,
+    channel: CH,
+}
+
+impl<PERIPH, CH, W> DmaTx<PERIPH, CH, W> {
+    fn new(channel: CH) -> Self {
+        Self {
+            _periph: PhantomData,
+            _word: PhantomData,
+            channel,
+        }
+    }
+
+    pub fn free(self) -> CH {
+        self.channel
+    }
+}
+
+impl<PERIPH, CH, W> From<CH> for DmaTx<PERIPH, CH, W> {
+    fn from(channel: CH) -> Self {
+        Self::new(channel)
+    }
+}
+
+impl<PERIPH, CH, W> Sealed for DmaTx<PERIPH, CH, W> {}
+
+impl<PERIPH, CH, W> DmaTx<PERIPH, CH, W>
+where
+    PERIPH: TxAddr<W>,
+    CH: DmaChannel,
+    W: Word,
+{
+    pub fn init_tx_transfer<'a, S>(
+        &'a mut self,
+        config: DmaConfig,
+        source: S,
+    ) -> DmaTransfer<'a, CH, S, PeriphTxBuffer<PERIPH, W>>
+    where
+        S: ReadBuffer,
+    {
+        DmaTransfer::memory_to_peripheral(
+            config,
+            &mut self.channel,
+            source,
+            PERIPH::tx_buffer(),
+        )
+    }
+}
+
+/// `DmaDuplex` encapsulates the initialization of both memory-to-peripheral and
+/// peripheral-to-memory DMA transactions to enable setting up of full-duplex transmission and
+/// reception of data. Used by peripheral DMA implementations.
+pub struct DmaDuplex<PERIPH, W, TX, RX> {
+    tx: DmaTx<PERIPH, TX, W>,
+    rx: DmaRx<PERIPH, RX, W>,
+}
+
+impl<PERIPH, W, TX, RX> DmaDuplex<PERIPH, W, TX, RX>
+where
+    PERIPH: TxAddr<W> + RxAddr<W>,
+    W: Word,
+    TX: DmaChannel,
+    RX: DmaChannel,
+{
+    pub fn new(tx: TX, rx: RX) -> Self {
+        Self {
+            tx: DmaTx::from(tx),
+            rx: DmaRx::from(rx),
+        }
+    }
+
+    pub fn free(self) -> (TX, RX) {
+        (self.tx.free(), self.rx.free())
+    }
+}
+
+impl<PERIPH, W, TX, RX> Sealed for DmaDuplex<PERIPH, W, TX, RX> {}
+
+impl<PERIPH, W, TX, RX> DmaDuplex<PERIPH, W, TX, RX>
+where
+    PERIPH: TxAddr<W> + RxAddr<W>,
+    W: Word,
+    TX: DmaChannel,
+    RX: DmaChannel,
+{
+    #[allow(clippy::type_complexity)]
+    pub fn init_duplex_transfer<'a, S, D>(
+        &'a mut self,
+        tx_config: DmaConfig,
+        rx_config: DmaConfig,
+        source: S,
+        destination: D,
+    ) -> (
+        DmaTransfer<'a, TX, S, PeriphTxBuffer<PERIPH, W>>,
+        DmaTransfer<'a, RX, PeriphRxBuffer<PERIPH, W>, D>,
+    )
+    where
+        S: ReadBuffer,
+        D: WriteBuffer,
+    {
+        let tx = self.tx.init_tx_transfer(tx_config, source);
+        let rx = self.rx.init_rx_transfer(rx_config, destination);
+        (tx, rx)
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 3c27058..cc3c317 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -82,6 +82,9 @@
pub mod dwt; #[cfg(feature = "device-selected")] pub mod usb; +#[cfg(feature = "device-selected")] +pub mod gpdma; + #[cfg(feature = "device-selected")] mod sealed { pub trait Sealed {} diff --git a/src/prelude.rs b/src/prelude.rs index becb02a..a73e785 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -2,6 +2,7 @@ pub use crate::delay::DelayExt as _stm32h5xx_hal_delay_DelayExt; pub use crate::dwt::DwtExt as _stm32h5xx_hal_delay_DwtExt; +pub use crate::gpdma::GpdmaExt as _stm32h5xx_hal_gpdma_GpdmaExt; pub use crate::gpio::GpioExt as _stm32h5xx_hal_gpio_GpioExt; pub use crate::i2c::I2cExt as _stm32h5xx_hal_i2c_I2cExt; pub use crate::icache::ICacheExt as _stm32h5xx_hal_icache_ICacheExt;