diff --git a/Cargo.toml b/Cargo.toml index da813f8..73ef5da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,6 +50,11 @@ stm32h562 = ["stm32h5/stm32h562", "device-selected", "rm0481", "h56x_h573"] stm32h563 = ["stm32h5/stm32h563", "device-selected", "rm0481", "h56x_h573", "sdmmc2", "ethernet"] stm32h573 = ["stm32h5/stm32h573", "device-selected", "rm0481", "h56x_h573", "otfdec", "sdmmc2", "ethernet"] +# Flags for async APIs +futures = ["dep:futures-util"] +gpdma-futures = ["futures"] +async = ["gpdma-futures"] + # Flags for examples log = ["dep:log"] log-itm = ["log"] @@ -66,10 +71,13 @@ defmt = [ cortex-m = { version = "^0.7.7", features = ["critical-section-single-core"] } stm32h5 = { package = "stm32h5", version = "0.16.0" } fugit = "0.3.7" +embedded-dma = "0.2" embedded-hal = "1.0.0" +embedded-hal-async = "1.0.0" defmt = { version = "1.0.0", optional = true } paste = "1.0.15" log = { version = "0.4.20", optional = true} +futures-util = { version = "0.3", default-features = false, features = ["async-await-macro"], optional = true} stm32-usbd = "0.8.0" [dev-dependencies] @@ -86,6 +94,8 @@ cortex-m-semihosting = "0.5.0" panic-itm = { version = "~0.4.1" } panic-probe = "0.3.2" panic-semihosting = "0.6" +rtic = { version = "2.2", features = ["thumbv8main-backend"] } +rtic-monotonics = { version = "2.0", features = ["cortex-m-systick"]} usbd-serial = "0.2.2" usb-device = { version = "0.3.2", features = ["defmt", "log"] } diff --git a/examples/dma.rs b/examples/dma.rs new file mode 100644 index 0000000..5562e52 --- /dev/null +++ b/examples/dma.rs @@ -0,0 +1,124 @@ +// #![deny(warnings)] +#![no_main] +#![no_std] + +mod utilities; + +use cortex_m::singleton; +use cortex_m_rt::entry; +use cortex_m_semihosting::debug; +use stm32h5xx_hal::{ + gpdma::{config::transform::*, DmaConfig, DmaTransfer}, + pac, + prelude::*, +}; + +#[entry] +fn main() -> ! 
{ + utilities::logger::init(); + + let dp = pac::Peripherals::take().unwrap(); + + let pwr = dp.PWR.constrain(); + let pwrcfg = pwr.vos0().freeze(); + + // Constrain and Freeze clock + let rcc = dp.RCC.constrain(); + let ccdr = rcc.sys_ck(250.MHz()).freeze(pwrcfg, &dp.SBS); + + let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); + + log::info!("u8 to u8"); + let src = + singleton!(: [u8; 40] = core::array::from_fn(|i| i as u8)).unwrap(); + + let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + + let mut channel = channels.0; + let config = DmaConfig::new(); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (src, dest) = transfer.free(); + assert_eq!(src, dest); + + log::info!("u32 to u32 with data transform"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + + let config = DmaConfig::new().with_data_transform( + DataTransform::builder() + .swap_destination_half_words() + .swap_destination_half_word_byte_order(), + ); + + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + + let expected = [0x78563412; 10]; + assert_eq!(expected, *dest); + + log::info!("u32 to u16 with truncate"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u16; 20] = [0u16; 20]).unwrap(); + + let config = DmaConfig::new().with_data_transform( + DataTransform::builder().left_align_right_truncate(), + ); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + + let expected = [0x1234; 10]; + assert_eq!(expected, (*dest)[0..10]); + + 
log::info!("u32 to u8 with unpack"); + let src = singleton!(: [u32; 10] = [0x12345678u32; 10]).unwrap(); + let dest = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + + let config = + DmaConfig::new().with_data_transform(DataTransform::builder().unpack()); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + let expected = [0x78, 0x56, 0x34, 0x12]; + assert_eq!(expected, (*dest)[0..4]); + assert_eq!(expected, (*dest)[36..40]); + + log::info!("u8 to u32 with pack"); + let src = singleton!(: [u8; 40] = [0u8; 40]).unwrap(); + let dest = singleton!(: [u32; 10] = [0u32; 10]).unwrap(); + + for chunk in src.chunks_mut(4) { + chunk.copy_from_slice(&[0x78, 0x56, 0x34, 0x12]); + } + + let config = + DmaConfig::new().with_data_transform(DataTransform::builder().pack()); + let mut transfer = + DmaTransfer::memory_to_memory(config, &mut channel, src, dest); + + transfer.start().unwrap(); + transfer.wait_for_transfer_complete().unwrap(); + let (_, dest) = transfer.free(); + + let expected = [0x12345678; 10]; + assert_eq!(expected, *dest); + assert_eq!(expected, *dest); + + log::info!("All tests passed!"); + loop { + debug::exit(debug::EXIT_SUCCESS) + } +} diff --git a/examples/spi-async-rtic.rs b/examples/spi-async-rtic.rs new file mode 100644 index 0000000..5b1e10b --- /dev/null +++ b/examples/spi-async-rtic.rs @@ -0,0 +1,177 @@ +#![no_main] +#![no_std] + +mod utilities; + +use embedded_dma::{ReadBuffer, WriteBuffer}; +use rtic::app; +use stm32h5xx_hal::{ + gpdma::{periph::DmaDuplex, DmaChannel0, DmaChannel1, Word}, + gpio::{Output, PA5}, + pac, + prelude::*, + spi::{self, dma::SpiDma, Config as SpiConfig}, +}; + +use rtic_monotonics::systick::prelude::*; +systick_monotonic!(Mono, 1000); + +// Buffer is used to manage a reference to a static buffer returned by the cortex_m::singleton! 
+// macro and which can be with the DmaTransfer API (which requires passing ReadBuffer and +// WriteBuffer implementations by value) and then used to access the buffer after the transfer has +// completed. +struct Buffer { + data: &'static mut [T; N], +} + +impl Buffer +where + T: Word + 'static, +{ + fn new(data: &'static mut [T; N]) -> Self { + Self { data } + } +} + +unsafe impl ReadBuffer for &mut Buffer +where + T: Word + 'static, +{ + type Word = T; + + unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { + (self.data.as_ptr(), N) + } +} + +unsafe impl WriteBuffer for &mut Buffer +where + T: Word + 'static, +{ + type Word = T; + + unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { + (self.data.as_mut_ptr(), N) + } +} + +#[app(device = pac, dispatchers = [USART1, USART2], peripherals = true)] +mod app { + + use core::cell::Cell; + + use cortex_m::singleton; + use stm32h5::stm32h503::{GPDMA1, NVIC}; + + use super::*; + + #[shared] + struct Shared {} + + #[local] + struct Local { + led: PA5, + spi: SpiDma< + pac::SPI2, + DmaDuplex, DmaChannel1>, + >, + source: Cell>, + dest: Cell>, + } + + #[init] + fn init(ctx: init::Context) -> (Shared, Local) { + utilities::logger::init(); + + let pwr = ctx.device.PWR.constrain(); + let pwrcfg = pwr.vos0().freeze(); + + let rcc = ctx.device.RCC.constrain(); + let ccdr = rcc + .sys_ck(192.MHz()) + .pll1_q_ck(64.MHz()) + .freeze(pwrcfg, &ctx.device.SBS); + + log::info!("Starting RTIC SPI example..."); + + // Uncomment if use SysTick as monotonic timer + Mono::start(ctx.core.SYST, ccdr.clocks.sysclk().raw()); + + let gpioa = ctx.device.GPIOA.split(ccdr.peripheral.GPIOA); + let gpiob = ctx.device.GPIOB.split(ccdr.peripheral.GPIOB); + + let led = gpioa.pa5.into_push_pull_output(); + + let sck = gpiob.pb13.into_alternate(); + let miso = gpiob.pb14.into_alternate(); + let mosi = gpiob.pb15.into_alternate(); + + let channels = ctx.device.GPDMA1.channels(ccdr.peripheral.GPDMA1); + let tx_ch = channels.0; + let 
rx_ch = channels.1; + + let spi = ctx + .device + .SPI2 + .spi( + (sck, miso, mosi), + SpiConfig::new(spi::MODE_0), + 1.MHz(), + ccdr.peripheral.SPI2, + &ccdr.clocks, + ) + .use_dma_duplex(tx_ch, rx_ch); + + unsafe { + NVIC::unmask(pac::interrupt::GPDMA1_CH0); + NVIC::unmask(pac::interrupt::GPDMA1_CH1); + }; + + let src = singleton!(: [u8; 40] = [0; 40]).unwrap(); + let dest = singleton!(: [u8; 40] = [0; 40]).unwrap(); + + tick::spawn().unwrap(); + spi_transfer::spawn().unwrap(); + ( + Shared {}, + Local { + led, + spi, + source: Cell::new(Buffer::new(src)), + dest: Cell::new(Buffer::new(dest)), + }, + ) + } + + #[task(local = [led, count: u32 = 0], priority = 1)] + async fn tick(ctx: tick::Context) { + loop { + ctx.local.led.toggle(); + *ctx.local.count += 1; + log::info!("Tick {}", *ctx.local.count); + Mono::delay(1000.millis()).await; + } + } + + #[task(local = [spi, count: u32 = 0, source, dest], priority = 2)] + async fn spi_transfer(ctx: spi_transfer::Context) { + loop { + log::info!("Starting SPI transfer"); + + let src = ctx.local.source.get_mut(); + let dest = ctx.local.dest.get_mut(); + src.data.fill(*ctx.local.count as u8); + dest.data.fill(0); + + *ctx.local.count += 1; + ctx.local.spi.transfer_dma(src, dest).await.unwrap(); + + assert_eq!( + *ctx.local.source.get_mut().data, + *ctx.local.dest.get_mut().data + ); + log::info!("Success!"); + Mono::delay(1000.millis()).await; + } + } +} diff --git a/examples/spi-dma.rs b/examples/spi-dma.rs new file mode 100644 index 0000000..113ec73 --- /dev/null +++ b/examples/spi-dma.rs @@ -0,0 +1,113 @@ +//! This example shows off the using the SPI with the DMA engine +//! +//! For more docs, see https://docs.rs/stm32h7xx-hal/latest/stm32h5xx_hal/spi/index.html +//! 
+// #![deny(warnings)] +#![no_main] +#![no_std] + +mod utilities; + +use core::mem::MaybeUninit; + +use cortex_m_rt::entry; +use cortex_m_semihosting::debug; +use embedded_hal::delay::DelayNs; +use stm32h5xx_hal::{ + delay::Delay, + pac, + prelude::*, + spi::{self, Config as SpiConfig}, + time::MilliSeconds, +}; + +static mut SOURCE_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::uninit(); +static mut DEST_BYTES: MaybeUninit<[u8; 40]> = MaybeUninit::zeroed(); + +fn u8_buf_pair() -> (&'static [u8; 40], &'static mut [u8; 40]) { + let buf: &mut [MaybeUninit; 40] = unsafe { + &mut *(core::ptr::addr_of_mut!(SOURCE_BYTES) + as *mut [MaybeUninit; 40]) + }; + + for (i, value) in buf.iter_mut().enumerate() { + unsafe { + value.as_mut_ptr().write(i as u8); + } + } + #[allow(static_mut_refs)] // TODO: Fix this + let src = unsafe { SOURCE_BYTES.assume_init_ref() }; + + let dest = + unsafe { (*core::ptr::addr_of_mut!(DEST_BYTES)).assume_init_mut() }; + + dest.fill(0); + + (src, dest) +} + +#[entry] +fn main() -> ! { + utilities::logger::init(); + + let cp = cortex_m::Peripherals::take().unwrap(); + let dp = pac::Peripherals::take().unwrap(); + + // Select highest power mode for max possible clock frequency + let pwr = dp.PWR.constrain(); + let pwrcfg = pwr.vos0().freeze(); + + // Configure system PLLs and clocks - choose a PLL1 output so 1MHz SPI clock can be exactly + // derived from it + let rcc = dp.RCC.constrain(); + let ccdr = rcc + .sys_ck(192.MHz()) + .pll1_q_ck(64.MHz()) + .freeze(pwrcfg, &dp.SBS); + + // Acquire the GPIOB peripheral. This also enables the clock for + // GPIOB in the RCC register. 
+ let gpiob = dp.GPIOB.split(ccdr.peripheral.GPIOB); + + // This example requires that MISO is connected to MOSI via a jumper (pins 28 and 26 on CN10 + // header on NUCLEO-H503RB) + let sck = gpiob.pb13.into_alternate(); + let miso = gpiob.pb14.into_alternate(); + let mosi = gpiob.pb15.into_alternate(); + + log::info!("stm32h5xx-hal example - SPI DMA"); + + // Initialise the SPI peripheral. + let spi = dp.SPI2.spi( + (sck, miso, mosi), + SpiConfig::new(spi::MODE_0), + 1.MHz(), + ccdr.peripheral.SPI2, + &ccdr.clocks, + ); + + let (source_buf, dest_buf) = u8_buf_pair(); + + let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); + let tx_ch = channels.0; + let rx_ch = channels.1; + + let mut spi = spi.use_dma_duplex(tx_ch, rx_ch); + + let mut delay = Delay::new(cp.SYST, &ccdr.clocks); + let duration = MilliSeconds::secs(1).to_millis(); + + loop { + let (tx, rx) = + spi.start_dma_duplex_transfer(dest_buf, source_buf).unwrap(); + + tx.wait_for_transfer_complete().unwrap(); + rx.wait_for_transfer_complete().unwrap(); + + spi.finish_transfer(Ok(())).unwrap(); + assert_eq!(source_buf, dest_buf); + + log::info!("Success!"); + delay.delay_ms(duration); + } +} diff --git a/src/gpdma.rs b/src/gpdma.rs new file mode 100644 index 0000000..fcd04b2 --- /dev/null +++ b/src/gpdma.rs @@ -0,0 +1,565 @@ +//! The GPDMA is the general purpose DMA engine in use on the STM32H5 family of processors. It is +//! used to perform programmable data transfers that are offloaded from the CPU to the DMA engine. +//! +//! The GPDMA can perform the following transfers from a *source* address to a *destination* +//! address: +//! - Memory to memory +//! - Memory to peripheral +//! - Peripheral to memory +//! - Peripheral to peripheral +//! +//! Each GPDMA has 8 channels. Each channel can service any hardware request (or memory to memory +//! transfer) that is supported by the processor (ie. they're not tied to specific channels). All +//! channels support direct and linked-buffer transfers. 
However, the channels do have different +//! capabilities (see RM0492 Rev 3 section 15.3.2 for full details), notably that channels 0-5 can +//! only service transfers in a linear address space, while channels 6 & 7 can also service transfers +//! using a 2D addressing scheme. Both GPDMA peripherals support the same requests/channel +//! capabilities. +//! +//! # Usage +//! At the most basic level transfers take a *source* address and a *destination* address and +//! transfers the data from the source to the destination. The [embedded-dma] traits `ReadBuffer` +//! and `WriteBuffer` represent a source and destination, respectively. +//! +//! ## Memory to memory transfers +//! As long as the buffers satisfy the constraints of embedded-dma's `ReadBuffer` and `WriteBuffer` +//! traits, they can be used directly with the Transfer API: +//! ``` +//! use stm32h5xx_hal::{pac, gpdma::{DmaConfig, DmaTransfer}; +//! +//! let source_buf = ... // source buffer +//! let dest_buf = ... // destination buffer +//! +//! let dp = pac::Peripherals::take().unwrap(); +//! let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); +//! let channel = channels.0 +//! let config = DmaConfig::default(); +//! let mut transfer = DmaTransfer::memory_to_memory(config, channel, source_buf, dest_buf); +//! transfer.start().unwrap(); +//! transfer.wait_for_transfer_complete().unwrap(); +//! ``` +//! +//! ## Memory to peripheral transfers +//! +//! The peripheral must provide a `WriteBuffer` implementation for its data register to which the +//! DMA will write. Then it can be used similarly to the memory to memory transfer. The `Transfer` +//! API does provide for performing an operation immediately after enabling the DMA channel, via the +//! Transfer::start_with method, which allows for a closure to be provided. Additionally, a +//! hardware request line must be specified to the Config in order to connect the peripheral to the +//! DMA channel. 
Another additional option for these transfers is to perform block requests or burst +//! requests. +//! +//! ## Peripheral to memory transfers +//! +//! The peripheral must provide a `ReadBuffer` implementation for its data register from which the +//! DMA will read. Otherwise it is used similarly to Memory to peripheral transfers, including the +//! additional configuration requirements/options specified above. In addition, peripheral flow +//! control mode can be used to enable the peripheral to early terminate a transaction. Per RM0492 +//! Rev 3 section 15.3.6, this is only used by the I3C peripheral, and only on channels 0 and 7. +//! +//! ## Peripheral to peripheral transfers +//! +//! These work similarly to the peripheral to memory transfers, but the peripheral driving the +//! request must be identified via the typing of the TransferType implementation. +//! +//! ## Data transforms +//! +//! The GPDMA provides a data transformation pipeline which facilitates transforms for transfers +//! between peripherals or memory that have different source and destination data widths or byte +//! representations (e.g. little endian vs big endian) with zero CPU overhead. See +//! `config::DataTransformBuilder` for more information on it. +//! +//! # Channel/transfer arbitration +//! +//! Every transfer is assigned a priority and AHB port assignments for each of its source and +//! destination. The transfer priority is used by the GPDMA controller to arbitrate between requests +//! that are both ready to transfer data via one of the AHB ports. 
+ +use crate::{ + pac::{gpdma1, GPDMA1, GPDMA2}, + rcc::{rec, ResetEnable}, + Sealed, +}; +use core::{ + marker::PhantomData, + mem, + ops::Deref, + sync::atomic::{fence, Ordering}, +}; +use embedded_dma::{ReadBuffer, Word as DmaWord, WriteBuffer}; + +mod ch; +pub mod config; +#[cfg(feature = "gpdma-futures")] +mod future; +pub mod periph; + +pub use ch::{ + DmaChannel, DmaChannel0, DmaChannel1, DmaChannel2, DmaChannel3, + DmaChannel4, DmaChannel5, DmaChannel6, DmaChannel7, +}; +pub use config::DmaConfig; +use config::{ + HardwareRequest, MemoryToMemory, MemoryToPeripheral, PeripheralRequest, + PeripheralSource, PeripheralToMemory, PeripheralToPeripheral, + PeripheralToPeripheralDirection, TransferDirection, TransferType, +}; + +/// Supported word types for the STM32H5 GPDMA implementation. +/// +/// Currently only u8, u16, and u32 word types are supported. Signed types are currently not +/// supported because they would add a fair bit of complexity/redundancy to the DataTransform +/// implementation. This is easy to work around by having buffers of signed types implement Deref +/// to an unsigned type of the same width. +pub trait Word: DmaWord + Default + Copy {} + +impl Word for u32 {} +impl Word for u16 {} +impl Word for u8 {} + +/// Errors that can occur during operation +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Error { + /// The DMA determined that a user setting was invalid while starting a transfer. 
+ /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + UserSettingError, + /// An error occurred transferring data during a transfer + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + DataTransferError, + /// An error occurred loading a linked transfer configuration + /// + /// See RM0492 Rev 3 Section 15.4.16 for details on how to debug + LinkTransferError, + + /// Resume was called on a channel that was not previously suspended + NotSuspendedError, +} + +pub trait GpdmaExt { + fn channels(self, rec: DMA::Rec) -> DmaChannels; +} + +impl GpdmaExt for DMA { + fn channels(self, rec: DMA::Rec) -> DmaChannels { + DmaChannels::new(self, rec) + } +} + +#[allow(private_bounds)] +pub trait Instance: Sealed + Deref { + type Rec: ResetEnable; + + fn ptr() -> *const gpdma1::RegisterBlock; + + /// Access channel registers. Valid for channels 0-5 only. + /// # Safety + /// This function is unsafe because it allows access to the DMA channel registers + /// without enforcing exclusive access or checking that the channel index is valid. + /// The caller must ensure that the channel index is within bounds and that no data races occur. + unsafe fn ch(channel: usize) -> &'static gpdma1::CH { + (*Self::ptr()).ch(channel) + } + + /// Access 2D channel registers. Valid for channels 6 and 7 only. + /// # Safety + /// This function is unsafe because it allows access to the DMA channel registers + /// without enforcing exclusive access or checking that the channel index is valid. + /// The caller must ensure that the channel index is within bounds and that no data races occur. 
+ unsafe fn ch2d(channel: usize) -> &'static gpdma1::CH2D { + // Note (unsafe): only accessing registers belonging to specific channel + (*Self::ptr()).ch2d(channel - 6) + } + + fn rec() -> Self::Rec; +} + +impl Sealed for GPDMA1 {} +impl Sealed for GPDMA2 {} + +impl Instance for GPDMA1 { + type Rec = rec::Gpdma1; + + fn ptr() -> *const gpdma1::RegisterBlock { + GPDMA1::ptr() + } + + fn rec() -> Self::Rec { + Self::Rec { + _marker: PhantomData, + } + } +} + +impl Instance for GPDMA2 { + type Rec = rec::Gpdma2; + + fn ptr() -> *const gpdma1::RegisterBlock { + GPDMA2::ptr() + } + + fn rec() -> Self::Rec { + Self::Rec { + _marker: PhantomData, + } + } +} + +/// DmaChannels represents the set of channels on each GPDMA peripheral. To use, simply move the +/// desired channel out of the tuple: +/// +/// ``` +/// let dp = pac::Peripherals::take().unwrap(); +/// let channels = dp.GPDMA1.channels(ccdr.peripheral.GPDMA1); +/// let channel = channels.0; +/// ``` +#[allow(private_interfaces)] +pub struct DmaChannels( + pub DmaChannel0, + pub DmaChannel1, + pub DmaChannel2, + pub DmaChannel3, + pub DmaChannel4, + pub DmaChannel5, + pub DmaChannel6, + pub DmaChannel7, +); + +impl DmaChannels { + /// Splits the DMA peripheral into channels. + pub(super) fn new(_regs: DMA, rec: DMA::Rec) -> Self { + let _ = rec.reset().enable(); + Self( + DmaChannel0::new(), + DmaChannel1::new(), + DmaChannel2::new(), + DmaChannel3::new(), + DmaChannel4::new(), + DmaChannel5::new(), + DmaChannel6::new(), + DmaChannel7::new(), + ) + } +} + +/// DmaTransfer represents a single transfer operation on a GPDMA channel. It is created using the +/// [`DmaTransfer::memory_to_memory`], [`DmaTransfer::memory_to_peripheral`], +/// [`DmaTransfer::peripheral_to_memory`], or [`DmaTransfer::peripheral_to_peripheral`] +/// methods, which take a channel and the source and destination buffers. The transfer can then be +/// started using the [`DmaTransfer::start`] or [`DmaTransfer::start_nonblocking`] methods. 
+pub struct DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + channel: &'a mut CH, + source: S, + destination: D, +} + +impl<'a, CH, S, D> DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + fn new( + channel: &'a mut CH, + config: DmaConfig, + source: S, + mut destination: D, + size: usize, + ) -> Self + where + T: TransferType, + { + assert!(size <= u16::MAX as usize, "Max block size is {}", u16::MAX); + + let (src_ptr, _) = unsafe { source.read_buffer() }; + let (dest_ptr, _) = unsafe { destination.write_buffer() }; + + channel.reset_channel(); + channel.set_source(src_ptr); + channel.set_destination(dest_ptr); + channel.set_transfer_size_bytes(size); + channel.apply_config(config); + + Self { + channel, + source, + destination, + } + } + + /// Create a new memory-to-memory transfer with the channel, source and destination buffers + /// provided. + pub fn memory_to_memory( + config: DmaConfig, + channel: &'a mut CH, + source: S, + mut destination: D, + ) -> Self { + let src_width = core::mem::size_of::(); + let dest_width = core::mem::size_of::(); + + let (_, src_words) = unsafe { source.read_buffer() }; + let src_size = src_width * src_words; + let (_, dest_words) = unsafe { destination.write_buffer() }; + let dest_size = dest_width * dest_words; + + // Size must be aligned with destination width if source width is greater than destination + // width and packing mode is used, therefore the maximum size must be dictated by + // destination size (width * count). 
When not in packing mode, this still holds true as + // the destination size must not be exceeded (so only read the same number of words from + // the source as there is room in the destination) + let size = if src_width > dest_width { + dest_size + } else { + // When the source data width is less than or equal to the destination data width, we + // just need to ensure that the destination buffer is large enough to hold all of the + // source data. + assert!(src_size <= dest_size, "Transfer size ({src_size} bytes) will overflow the destination buffer ({dest_size} bytes)!"); + src_size + }; + + Self::new::(channel, config, source, destination, size) + } + + /// Create a new memory-to-peripheral transfer with the channel, source buffer and destination + /// peripheral provided. + pub fn memory_to_peripheral( + config: DmaConfig, + channel: &'a mut CH, + source: S, + destination: D, + ) -> Self { + let (_, src_words) = unsafe { source.read_buffer() }; + let src_size = core::mem::size_of::() * src_words; + + Self::new::( + channel, + config, + source, + destination, + src_size, + ) + .apply_hardware_request_config(config) + } + + /// Create a new peripheral-to-memory transfer with the channel, source peripheral and + /// destination buffer provided. + pub fn peripheral_to_memory( + config: DmaConfig, + channel: &'a mut CH, + source: S, + mut destination: D, + ) -> Self { + let (_, dest_words) = unsafe { destination.write_buffer() }; + let dest_size = core::mem::size_of::() * dest_words; + + Self::new::( + channel, + config, + source, + destination, + dest_size, + ) + .apply_hardware_request_config(config) + .apply_peripheral_source_config(config) + } + + /// Create a new peripheral-to-peripheral transfer with source and destination peripherals + /// provided. 
+ pub fn peripheral_to_peripheral( + config: DmaConfig, S::Word, D::Word>, + channel: &'a mut CH, + source: S, + mut destination: D, + ) -> Self + where + T: PeripheralToPeripheralDirection, + { + let (_, src_words) = unsafe { source.read_buffer() }; + let (_, dest_words) = unsafe { destination.write_buffer() }; + + let size = match T::DIRECTION { + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ) => src_words * core::mem::size_of::(), + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ) => dest_words * core::mem::size_of::(), + _ => unreachable!(), + }; + + Self::new::>( + channel, + config, + source, + destination, + size, + ) + .apply_hardware_request_config(config) + .apply_peripheral_source_config(config) + } + + fn apply_hardware_request_config( + self, + config: DmaConfig, + ) -> Self { + self.channel.configure_hardware_request(config); + self + } + + fn apply_peripheral_source_config( + self, + config: DmaConfig, + ) -> Self { + self.channel.configure_peripheral_flow_control(config); + self + } + + fn start_transfer_internal(&mut self) { + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.enable(); + } + + /// Start a transfer. Does not block waiting for the transfer to start and does not check for + /// errors starting the transfer + pub fn start_nonblocking(&mut self) { + self.start_transfer_internal(); + } + + /// Start a transfer and block waiting for it to start. Returns an error if one occurred + /// starting the transfer. + pub fn start(&mut self) -> Result<(), Error> { + self.start_nonblocking(); + self.channel.wait_for_transfer_started() + } + + /// Suspend a transfer. Does not wait for channel transfer to be suspended and does not report + /// any errors that occur doing so. 
+ pub fn suspend_nonblocking(&mut self) { + if self.channel.is_suspended() { + return; + } + self.channel.initiate_suspend(); + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + /// Suspend a transfer and block waiting for it to be suspended. + pub fn suspend(&mut self) { + if !self.channel.is_suspended() { + self.channel.suspend_transfer(); + } + + // Preserve the instruction and bus sequence of the preceding disable and + // the subsequent buffer access. + fence(Ordering::SeqCst); + } + + /// Resume a transfer. Does not wait for the channel transfer to be resumed and does not report + /// any errors that occur doing so. + pub fn resume_nonblocking(&mut self) -> Result<(), Error> { + if !self.channel.is_suspended() { + return Err(Error::NotSuspendedError); + } + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.initiate_resume(); + Ok(()) + } + + /// Resume a transfer and block waiting for it to be resumed. Returns an error if one occurred + /// resuming the transfer. + pub fn resume(&mut self) -> Result<(), Error> { + if !self.channel.is_suspended() { + return Err(Error::NotSuspendedError); + } + // Preserve the instruction and bus ordering of preceding buffer access + // to the subsequent access by the DMA peripheral due to enabling it. + fence(Ordering::SeqCst); + + self.channel.resume_transfer() + } + + pub fn is_running(&self) -> bool { + self.channel.is_running() + } + + /// Blocks waiting for a transfer to complete. Returns an error if one occurred during the + /// transfer. 
+ pub fn wait_for_transfer_complete(&mut self) -> Result<(), Error> { + let result = self.channel.wait_for_transfer_complete(); + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. + fence(Ordering::SeqCst); + + result + } + + /// Blocks waiting for the half transfer complete event. Returns an error if one occurred during + /// the transfer. + pub fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error> { + self.channel.wait_for_half_transfer_complete() + } + + /// Enable interrupts for this transfer. This will enable the transfer complete and half + /// transfer complete interrupts, as well as error interrupts. + pub fn enable_interrupts(&mut self) { + self.channel.enable_transfer_interrupts(); + } + + /// Disable interrupts for this transfer. + pub fn disable_interrupts(&mut self) { + self.channel.disable_transfer_interrupts(); + } + + /// Abort a transaction and wait for it to suspend the transfer before resetting the channel + pub fn abort(&mut self) { + if self.is_running() { + self.channel.abort(); + } + + self.disable_interrupts(); + + // Preserve the instruction and bus sequence of the preceding operation and + // the subsequent buffer access. 
+ fence(Ordering::SeqCst); + } + + pub fn free(mut self) -> (S, D) { + self.abort(); + let (src, dest) = unsafe { + ( + core::ptr::read(&self.source), + core::ptr::read(&self.destination), + ) + }; + mem::forget(self); + (src, dest) + } +} + +impl<'a, CH, S, D> Drop for DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + fn drop(&mut self) { + self.abort(); + } +} diff --git a/src/gpdma/ch.rs b/src/gpdma/ch.rs new file mode 100644 index 0000000..b2c0966 --- /dev/null +++ b/src/gpdma/ch.rs @@ -0,0 +1,778 @@ +use core::{marker::PhantomData, ops::Deref}; + +use crate::stm32::gpdma1::{ + self, + ch::{CR, DAR, FCR, LBAR, SAR, SR, TR1, TR2}, +}; +use crate::Sealed; + +use super::{ + config::{ + transform::{DataTransform, PaddingAlignmentMode}, + AddressingMode, AhbPort, HardwareRequest, PeripheralRequest, + PeripheralSource, Priority, TransferDirection, TransferType, + }, + DmaConfig, Error, Instance, Word, +}; + +pub(super) trait ChannelRegs: Sealed { + #[allow(unused)] // TODO: this will be used for linked-list transfers + fn lbar(&self) -> &LBAR; + fn fcr(&self) -> &FCR; + fn sr(&self) -> &SR; + fn cr(&self) -> &CR; + fn tr1(&self) -> &TR1; + fn tr2(&self) -> &TR2; + fn sar(&self) -> &SAR; + fn dar(&self) -> &DAR; + fn set_block_size(&self, size: u16); +} + +impl Sealed for gpdma1::CH {} +impl Sealed for gpdma1::CH2D {} + +impl ChannelRegs for gpdma1::CH { + fn lbar(&self) -> &LBAR { + self.lbar() + } + fn fcr(&self) -> &FCR { + self.fcr() + } + fn sr(&self) -> &SR { + self.sr() + } + fn cr(&self) -> &CR { + self.cr() + } + fn tr1(&self) -> &TR1 { + self.tr1() + } + fn tr2(&self) -> &TR2 { + self.tr2() + } + fn sar(&self) -> &SAR { + self.sar() + } + fn dar(&self) -> &DAR { + self.dar() + } + fn set_block_size(&self, size: u16) { + self.br1().modify(|_, w| w.bndt().set(size)); + } +} + +impl ChannelRegs for gpdma1::CH2D { + fn lbar(&self) -> &LBAR { + self.lbar() + } + fn fcr(&self) -> &FCR { + self.fcr() + } + fn sr(&self) -> 
&SR { + self.sr() + } + fn cr(&self) -> &CR { + self.cr() + } + fn tr1(&self) -> &TR1 { + self.tr1() + } + fn tr2(&self) -> &TR2 { + self.tr2() + } + fn sar(&self) -> &SAR { + self.sar() + } + fn dar(&self) -> &DAR { + self.dar() + } + fn set_block_size(&self, size: u16) { + self.br1().modify(|_, w| w.bndt().set(size)); + } +} + +/// DmaChannelRef provides access to individual channels of the GPDMA instance via Deref. +/// It implements the Channel and DmaChannel traits, and is exposed to user code via the DmaChannels +/// struct. It does not expose a public API to allow user code to use it directly, but should rather +/// be assigned to a DmaTransfer that manages a single transfer on a channel. +#[doc(hidden)] +pub struct DmaChannelRef { + _dma: PhantomData, + _ch: PhantomData, +} + +impl Deref + for DmaChannelRef +{ + type Target = gpdma1::CH; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // Note (unsafe): only accessing registers belonging to Channel N + unsafe { DMA::ch(N) } + } +} + +impl Deref + for DmaChannelRef +{ + type Target = gpdma1::CH2D; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // Note (unsafe): only accessing registers belonging to Channel N + unsafe { DMA::ch2d(N) } + } +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, +{ + pub(super) fn new() -> Self { + DmaChannelRef { + _dma: PhantomData, + _ch: PhantomData, + } + } +} + +impl Sealed for DmaChannelRef {} + +/// Non-error transfer event, including transfer complete and half-transfer events. Half-transfer +/// events can be used for double-buffering/linked buffer transfers. +pub enum TransferEvent { + /// Transfer complete event has occurred + TransferComplete, + /// Half transfer event has occurred + HalfTransferComplete, +} + +// Checks for errors in the captured status register provided, and returns a Result<(), Error> +macro_rules! 
check_error { + ($sr:expr) => { + if $sr.usef().is_trigger() { + Err(Error::UserSettingError) + } else if $sr.dtef().is_trigger() { + Err(Error::DataTransferError) + } else if $sr.ulef().is_trigger() { + Err(Error::LinkTransferError) + } else { + Ok(()) + } + }; +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn reset(&self) { + self.cr().modify(|_, w| w.reset().reset()); + } + + // TODO: remove clippy allow when used. This will likely be useful in the future + #[allow(unused)] + #[inline(always)] + pub(super) fn is_enabled(&self) -> bool { + self.cr().read().en().is_enabled() + } + + /// Initiates the suspension of a transfer + #[inline(always)] + pub(super) fn suspend(&self) { + self.cr().modify(|_, w| w.susp().suspended()); + } + + /// Resume transfer + #[inline(always)] + fn resume(&self) { + self.cr().modify(|_, w| w.susp().not_suspended()); + } + + /// Clear all event flags in the FCR register. + fn clear_all_event_flags(&self) { + self.fcr().write(|w| { + w.tcf() + .clear() + .htf() + .clear() + .dtef() + .clear() + .usef() + .clear() + .ulef() + .clear() + .suspf() + .clear() + .tof() + .clear() + }); + } + + #[inline(always)] + /// Checks if the specified transfer event has triggered or if an error has occurred. If an + /// error has occurred, it is returned. If the event has triggered, `Ok(true)` is returned. + /// Otherwise, if the event has not triggered, `Ok(false)` is returned. 
+ fn check_transfer_event( + &self, + event: TransferEvent, + ) -> Result { + let sr = self.sr().read(); + check_error!(sr)?; + let triggered = match event { + TransferEvent::TransferComplete => sr.tcf().is_trigger(), + TransferEvent::HalfTransferComplete => sr.htf().is_trigger(), + }; + + Ok(triggered) + } + + fn clear_transfer_event_flag(&self, event: TransferEvent) { + self.fcr().write(|w| match event { + TransferEvent::TransferComplete => w.tcf().clear(), + TransferEvent::HalfTransferComplete => w.htf().clear(), + }); + } + + // TODO: Remove clippy allow when FIFO use is implemented + #[allow(unused)] + #[inline(always)] + fn fifo_level(&self) -> u8 { + self.sr().read().fifol().bits() + } + + /// Checks if the channel is idle. Ignores error conditions. + #[inline(always)] + fn is_idle(&self) -> bool { + self.sr().read().idlef().is_trigger() + } + + #[inline(always)] + fn check_idle(&self) -> Result { + let sr = self.sr().read(); + check_error!(sr)?; + Ok(sr.idlef().is_trigger()) + } + + #[inline(always)] + fn set_source_address(&self, addr: u32) { + self.sar().write(|w| w.sa().set(addr)); + } + + #[inline(always)] + fn set_destination_address(&self, addr: u32) { + self.dar().write(|w| w.da().set(addr)); + } + + #[inline(always)] + fn set_source_addressing_mode(&self, mode: AddressingMode) { + self.tr1().modify(|_, w| match mode { + AddressingMode::ContiguouslyIncremented => w.sinc().contiguous(), + AddressingMode::Fixed => w.sinc().fixed_burst(), + }); + } + + #[inline(always)] + fn set_destination_addressing_mode(&self, mode: AddressingMode) { + self.tr1().modify(|_, w| match mode { + AddressingMode::ContiguouslyIncremented => w.dinc().contiguous(), + AddressingMode::Fixed => w.dinc().fixed_burst(), + }); + } + + #[inline(always)] + fn set_source_burst_length(&self, burst_length: u8) { + self.tr1().modify(|_, w| w.dbl_1().set(burst_length)); + } + + #[inline(always)] + fn set_destination_burst_length(&self, burst_length: u8) { + self.tr1().modify(|_, w| 
w.sbl_1().set(burst_length)); + } + + #[inline(always)] + fn set_source_ahb_port(&self, port: AhbPort) { + self.tr1().modify(|_, w| match port { + AhbPort::Port0 => w.sap().port0(), + AhbPort::Port1 => w.sap().port1(), + }); + } + + #[inline(always)] + fn set_destination_ahb_port(&self, port: AhbPort) { + self.tr1().modify(|_, w| match port { + AhbPort::Port0 => w.dap().port0(), + AhbPort::Port1 => w.dap().port1(), + }); + } + + #[inline(always)] + fn set_source_data_width(&self, width: usize) { + self.tr1().modify(|_, w| match width { + 1 => w.sdw_log2().byte(), + 2 => w.sdw_log2().half_word(), + 4 => w.sdw_log2().word(), + _ => unreachable!(), + }); + } + + #[inline(always)] + fn set_destination_data_width(&self, width: usize) { + self.tr1().modify(|_, w| match width { + 1 => w.ddw_log2().byte(), + 2 => w.ddw_log2().half_word(), + 4 => w.ddw_log2().word(), + _ => unreachable!(), + }); + } + + #[inline(always)] + fn set_source_byte_exchange(&self, source_byte_exchange: bool) { + self.tr1().modify(|_, w| { + if source_byte_exchange { + w.sbx().exchanged() + } else { + w.sbx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_padding_alignment_mode(&self, pam: PaddingAlignmentMode) { + self.tr1().modify(|_, w| match pam { + PaddingAlignmentMode::None => w, + _ => w.pam().set(pam.bits()), + }); + } + + #[inline(always)] + fn set_destination_half_word_exchange(&self, half_word_exchange: bool) { + self.tr1().modify(|_, w| { + if half_word_exchange { + w.dhx().exchanged() + } else { + w.dhx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_destination_byte_exchange(&self, destination_byte_exchange: bool) { + self.tr1().modify(|_, w| { + if destination_byte_exchange { + w.dbx().exchanged() + } else { + w.dbx().not_exchanged() + } + }); + } + + #[inline(always)] + fn set_priority(&self, priority: Priority) { + self.cr().modify(|_, w| match priority { + Priority::LowPriorityLowWeight => w.prio().low_prio_low_weight(), + 
Priority::LowPriorityMedWeight => w.prio().low_prio_mid_weight(), + Priority::LowPriorityHighWeight => w.prio().low_prio_high_weight(), + Priority::HighPriority => w.prio().high_prio(), + }); + } + + #[inline(always)] + fn set_transfer_type(&self, transfer_dir: TransferDirection) { + self.tr2().modify(|_, w| match transfer_dir { + TransferDirection::MemoryToMemory => w.swreq().software(), + TransferDirection::MemoryToPeripheral => { + w.swreq().hardware().dreq().destination() + } + TransferDirection::PeripheralToMemory => { + w.swreq().hardware().dreq().source() + } + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ) => w.swreq().hardware().dreq().source(), + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ) => w.swreq().hardware().dreq().destination(), + }); + } + + // TODO: Use enum? + #[inline(always)] + fn set_request_line(&self, request: u8) { + self.tr2() + .modify(|_, w| unsafe { w.reqsel().bits(request) }); + } + + #[inline(always)] + fn set_block_request_mode(&self, block_requests_enabled: bool) { + self.tr2().modify(|_, w| { + if block_requests_enabled { + w.breq().block() + } else { + w.breq().burst() + } + }); + } + + #[inline(always)] + fn set_peripheral_flow_control_mode( + &self, + peripheral_control_enabled: bool, + ) { + self.tr2().modify(|_, w| { + if peripheral_control_enabled { + w.pfreq().peripheral_control_mode() + } else { + w.pfreq().gpdma_control_mode() + } + }); + } +} + +/// The Channel trait is a private trait that abstracts over control of the linear and 2D channels. +/// It exposes to the DmaTransfer struct all the methods needed to control transfers on a particular +/// channel. It is private in order to not expose the low level functionality beyond the gpdma +/// module. 
+#[doc(hidden)] +pub(super) trait Channel { + fn enable(&mut self); + + fn is_suspended(&self) -> bool; + + /// Initiates the suspension of a transfer + fn initiate_suspend(&mut self); + + /// Resume transfer + fn initiate_resume(&self); + + /// Checks whether the channel transfer is complete. If the channel indicates an error occurred, + /// during the transaction an `Error`` is returned. + fn check_transfer_complete(&self) -> Result; + + /// Checks whether the channel half transfer complete event has triggered. If the channel + /// indicates an error occurred, during the transaction an `Error`` is returned. + fn check_half_transfer_complete(&self) -> Result; + + /// Checks whether the channel transfer has started (has transitioned out of the idle state, or + /// the transfer complete event has already triggered if it is idle) + fn check_transfer_started(&self) -> Result; + + fn is_running(&self) -> bool; + + /// Reset the channel registers so it can be reused. + fn reset_channel(&mut self); + + /// Suspend the transfer and blocks until it has been suspended. Reports any that occur while + /// waiting for the transfer to suspend. + fn suspend_transfer(&mut self); + + /// Resumes a suspended transfer and blocks until the channel transitions out of the idle state + /// Reports any errors that occur resuming the transfer. + fn resume_transfer(&mut self) -> Result<(), Error>; + + /// Aborts an operation by suspending the transfer and resetting the channel. + fn abort(&mut self); + + /// Blocks waiting for a transfer to be started (or for it to be idle and complete). Reports any + /// errors that occur while waiting for the transfer to start. + fn wait_for_transfer_started(&mut self) -> Result<(), Error>; + + /// Blocks waiting for a transfer to complete. Reports any errors that occur during a transfer. + fn wait_for_transfer_complete(&mut self) -> Result<(), Error>; + + /// Blocks waiting for a half transfer event to trigger. 
Reports any errors that occur during a + /// transfer. + fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error>; + + /// Apply a transfer configuration to the channel + fn apply_config( + &mut self, + config: DmaConfig, + ); + + /// Apply hardware request configuration to the channel. Not relevant to memory-to-memory + /// transfers. + fn configure_hardware_request( + &mut self, + config: DmaConfig, + ); + + /// Apply peripheral flow control configuration for transactions where a peripheral is the + /// source + fn configure_peripheral_flow_control< + T: PeripheralSource, + S: Word, + D: Word, + >( + &mut self, + config: DmaConfig, + ); + + /// Apply a data transform to the channel transfer + fn apply_data_transform(&mut self, data_transform: DataTransform); + /// Set the source address. This sets the source address and data width. + fn set_source(&mut self, ptr: *const W); + + /// Set the destination address. This sets the destination address and data width + fn set_destination(&mut self, ptr: *mut W); + + /// Set the transfer size in bytes (not words!). Size must be aligned with destination width if + /// source width is greater than destination width and packing mode is used. Otherwise the size + /// must be aligned with the source data width. + fn set_transfer_size_bytes(&mut self, size: usize); + + /// Enable transfer interrupts for the channel. This enables the transfer complete, + /// half-transfer complete, data transfer error and user setting error interrupts. This is + /// useful for starting a transfer that will be monitored by an interrupt handler. + fn enable_transfer_interrupts(&mut self); + + /// Disable transfer interrupts for the channel. It is expected that this will be called from + /// an interrupt handler after a transfer is completed. 
+ fn disable_transfer_interrupts(&mut self); +} + +impl Channel for DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn enable(&mut self) { + self.cr().modify(|_, w| w.en().enabled()); + } + + #[inline(always)] + fn is_suspended(&self) -> bool { + self.sr().read().suspf().bit_is_set() + } + + fn initiate_suspend(&mut self) { + if self.is_suspended() { + return; + } + self.suspend(); + } + + #[inline(always)] + fn initiate_resume(&self) { + self.resume(); + } + + fn check_transfer_complete(&self) -> Result { + self.check_transfer_event(TransferEvent::TransferComplete) + } + + fn check_half_transfer_complete(&self) -> Result { + self.check_transfer_event(TransferEvent::HalfTransferComplete) + } + + fn check_transfer_started(&self) -> Result { + // TODO: Resolve multiple status register reads + match self.check_idle() { + // If we're idle we might have finished the transaction already, so also check if the + // transfer complete flag is set + Ok(true) => self.check_transfer_complete(), + Ok(false) => Ok(false), + Err(error) => Err(error), + } + } + + #[inline(always)] + fn is_running(&self) -> bool { + !self.is_idle() + } + + fn reset_channel(&mut self) { + self.reset(); + self.clear_all_event_flags(); + } + + fn suspend_transfer(&mut self) { + self.initiate_suspend(); + while !self.is_suspended() {} + } + + fn resume_transfer(&mut self) -> Result<(), Error> { + self.initiate_resume(); + while !self.check_transfer_started()? {} + Ok(()) + } + + fn abort(&mut self) { + if !self.is_idle() { + self.suspend_transfer(); + } + + self.reset_channel(); + } + + fn wait_for_transfer_started(&mut self) -> Result<(), Error> { + while !self.check_transfer_started().inspect_err(|_| { + self.clear_all_event_flags(); + })? 
{} + Ok(()) + } + + fn wait_for_transfer_complete(&mut self) -> Result<(), Error> { + loop { + match self.check_transfer_complete() { + Ok(true) => { + self.clear_transfer_event_flag( + TransferEvent::TransferComplete, + ); + return Ok(()); + } + Ok(false) => continue, + Err(error) => { + self.clear_all_event_flags(); + return Err(error); + } + } + } + } + + fn wait_for_half_transfer_complete(&mut self) -> Result<(), Error> { + loop { + match self.check_half_transfer_complete() { + Ok(true) => { + self.clear_transfer_event_flag( + TransferEvent::HalfTransferComplete, + ); + return Ok(()); + } + Ok(false) => continue, + Err(error) => { + self.clear_all_event_flags(); + return Err(error); + } + } + } + } + + fn apply_config( + &mut self, + config: DmaConfig, + ) { + self.set_source_addressing_mode( + config.transfer_type.source_addressing_mode(), + ); + self.set_destination_addressing_mode( + config.transfer_type.destination_addressing_mode(), + ); + self.set_source_burst_length(config.source_burst_length); + self.set_destination_burst_length(config.destination_burst_length); + self.set_source_ahb_port(config.source_ahb_port); + self.set_destination_ahb_port(config.destination_ahb_port); + + self.set_transfer_type(T::DIRECTION); + self.set_priority(config.priority); + if config.enable_interrupts { + self.enable_transfer_interrupts(); + } + if let Some(data_transform) = config.data_transform { + self.apply_data_transform(data_transform); + } + } + + fn configure_hardware_request( + &mut self, + config: DmaConfig, + ) { + self.set_block_request_mode(config.transfer_type.block_request()); + self.set_request_line(config.transfer_type.request()); + } + + fn configure_peripheral_flow_control< + T: PeripheralSource, + S: Word, + D: Word, + >( + &mut self, + config: DmaConfig, + ) { + self.set_peripheral_flow_control_mode( + config.transfer_type.peripheral_flow_control(), + ); + } + + fn apply_data_transform(&mut self, data_transform: DataTransform) { + 
self.set_source_byte_exchange(data_transform.source_byte_exchange); + self.set_padding_alignment_mode(data_transform.padding_alignment); + self.set_destination_half_word_exchange( + data_transform.dest_half_word_exchange, + ); + self.set_destination_byte_exchange(data_transform.dest_byte_exchange); + } + + fn set_source(&mut self, ptr: *const W) { + self.set_source_address(ptr as u32); + self.set_source_data_width(core::mem::size_of::()); + } + + fn set_destination(&mut self, ptr: *mut W) { + self.set_destination_address(ptr as u32); + self.set_destination_data_width(core::mem::size_of::()); + } + + fn set_transfer_size_bytes(&mut self, size: usize) { + self.set_block_size(size as u16); + } + + #[inline(always)] + fn enable_transfer_interrupts(&mut self) { + self.cr().modify(|_, w| { + w.tcie().enabled().dteie().enabled().useie().enabled() + }); + } + + #[inline(always)] + fn disable_transfer_interrupts(&mut self) { + self.cr().modify(|_, w| { + w.tcie().disabled().dteie().disabled().useie().disabled() + }); + } +} + +#[cfg(feature = "gpdma-futures")] +pub use super::future::DmaChannel; + +/// DmaChannel trait provides the API contract that all GPDMA channels exposed to the user +/// implement. 
+#[cfg(not(feature = "gpdma-futures"))] +#[allow(private_bounds)] +pub trait DmaChannel: Channel {} + +#[cfg(not(feature = "gpdma-futures"))] +#[allow(private_bounds)] +impl DmaChannel for DmaChannelRef +where + DMA: Instance, + CH: ChannelRegs, + Self: Deref, +{ +} + +/// Channel 0 on GPDMA controller +pub type DmaChannel0 = DmaChannelRef; +/// Channel 1 on GPDMA controller +pub type DmaChannel1 = DmaChannelRef; +/// Channel 2 on GPDMA controller +pub type DmaChannel2 = DmaChannelRef; +/// Channel 3 on GPDMA controller +pub type DmaChannel3 = DmaChannelRef; +/// Channel 4 on GPDMA controller +pub type DmaChannel4 = DmaChannelRef; +/// Channel 5 on GPDMA controller +pub type DmaChannel5 = DmaChannelRef; +/// Channel 6 on GPDMA controller +pub type DmaChannel6 = DmaChannelRef; +/// Channel 7 on GPDMA controller +pub type DmaChannel7 = DmaChannelRef; diff --git a/src/gpdma/config.rs b/src/gpdma/config.rs new file mode 100644 index 0000000..0be6299 --- /dev/null +++ b/src/gpdma/config.rs @@ -0,0 +1,425 @@ +use core::marker::PhantomData; + +use super::Word; + +pub mod transform; +use transform::*; + +/// PeripheralRequests is used for peripheral-to-peripheral transfers to indicate which side of the +/// transfer is driving the request (ie. 
which has the hardware request assigned) +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum PeripheralRequest { + SourceRequest, + DestinationRequest, +} + +/// The TransferDirection represents the available options for transfer types +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum TransferDirection { + MemoryToMemory, + MemoryToPeripheral, + PeripheralToMemory, + PeripheralToPeripheral(PeripheralRequest), +} + +/// Addressing mode represents whether the source or destination address is contiguously incremented +/// or fixed during a transfer +#[derive(Clone, Copy, Default, Debug, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum AddressingMode { + #[default] + ContiguouslyIncremented, + Fixed, +} + +/// Transfer type encapsulates the transfer direction and the addressing mode for both the source +/// and destination of a transfer. +pub trait TransferType: crate::Sealed + Default { + const DIRECTION: TransferDirection; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::ContiguouslyIncremented + } + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::ContiguouslyIncremented + } +} + +/// Transfers to or from a peripheral have these additional options +pub trait HardwareRequest { + fn block_request(&self) -> bool; + fn enable_block_request(&mut self); + fn request(&self) -> u8; + fn set_request(&mut self, request: u8); +} + +/// When a peripheral is the source of the transfer it can optionally be configured in peripheral +/// flow control mode, when the peripheral supports it (currently just the I3C peripheral) +pub trait PeripheralSource { + fn peripheral_flow_control(&self) -> bool; + fn enable_peripheral_flow_control(&mut self); +} + +/// Represents the options specifically available for peripheral-to-memory transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] 
+#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct PeripheralToMemory { + request: u8, + block_request: bool, + peripheral_flow_control: bool, +} + +impl crate::Sealed for PeripheralToMemory {} + +impl TransferType for PeripheralToMemory { + const DIRECTION: TransferDirection = TransferDirection::PeripheralToMemory; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for PeripheralToMemory { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +impl PeripheralSource for PeripheralToMemory { + fn peripheral_flow_control(&self) -> bool { + self.peripheral_flow_control + } + + fn enable_peripheral_flow_control(&mut self) { + self.peripheral_flow_control = true; + } +} + +/// Represents the options specifically available for memory-to-peripheral transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct MemoryToPeripheral { + request: u8, + block_request: bool, +} + +impl crate::Sealed for MemoryToPeripheral {} + +impl TransferType for MemoryToPeripheral { + const DIRECTION: TransferDirection = TransferDirection::MemoryToPeripheral; + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for MemoryToPeripheral { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +/// Marker struct to indicate that the source peripheral drives the request via its request line. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct SourceRequest; + +/// Marker struct to indicate that the destination peripheral drives the request via its request +/// line. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DestinationRequest; + +/// Indicates which peripheral in a peripheral-to-peripheral transfer is driving the request line +pub trait PeripheralToPeripheralDirection: Default + Clone + Copy { + const DIRECTION: TransferDirection; +} + +impl PeripheralToPeripheralDirection for SourceRequest { + const DIRECTION: TransferDirection = + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::SourceRequest, + ); +} + +impl PeripheralToPeripheralDirection for DestinationRequest { + const DIRECTION: TransferDirection = + TransferDirection::PeripheralToPeripheral( + PeripheralRequest::DestinationRequest, + ); +} + +/// Represents the options specifically available for peripheral-to-peripheral transfers +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct PeripheralToPeripheral { + _peripheral_request: PhantomData, + request: u8, + block_request: bool, + peripheral_flow_control: bool, +} + +impl crate::Sealed for PeripheralToPeripheral {} + +impl TransferType + for PeripheralToPeripheral +{ + const DIRECTION: TransferDirection = T::DIRECTION; + + fn source_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } + + fn destination_addressing_mode(&self) -> AddressingMode { + AddressingMode::Fixed + } +} + +impl HardwareRequest for PeripheralToPeripheral { + fn block_request(&self) -> bool { + self.block_request + } + + fn enable_block_request(&mut self) { + self.block_request = true; + } + + fn request(&self) -> u8 { + self.request + } + + fn set_request(&mut self, request: u8) { + self.request = request; + } +} + +impl PeripheralSource 
for PeripheralToPeripheral { + fn peripheral_flow_control(&self) -> bool { + self.peripheral_flow_control + } + + fn enable_peripheral_flow_control(&mut self) { + self.peripheral_flow_control = true; + } +} + +/// Marker struct for memory-to-memory transfers (no special options) +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct MemoryToMemory; + +impl crate::Sealed for MemoryToMemory {} + +impl TransferType for MemoryToMemory { + const DIRECTION: TransferDirection = TransferDirection::MemoryToMemory; +} + +/// Priority of the transfer. Used by the GPDMA channel arbitration to determine which transfer +/// to service. +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Priority { + LowPriorityLowWeight = 0, + #[default] + LowPriorityMedWeight = 1, + LowPriorityHighWeight = 2, + HighPriority = 3, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum Continuation { + #[default] + Direct, + LinkedList, +} + +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub enum AhbPort { + #[default] + Port0 = 0, + Port1 = 1, +} + +const MAX_BURST_LEN: u8 = 64; + +/// Configuration options for a DMA transfer +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DmaConfig { + _src_word: PhantomData, + _dest_word: PhantomData, + pub(super) transfer_type: T, + pub(super) priority: Priority, + pub(super) source_ahb_port: AhbPort, + pub(super) destination_ahb_port: AhbPort, + pub(super) source_burst_length: u8, + pub(super) destination_burst_length: u8, + pub(super) enable_interrupts: bool, + pub(super) data_transform: Option, +} + +impl DmaConfig { + /// Create a config with default settings + pub fn new() -> Self { + Self::default() + } + + /// Set the priority of the 
transfer. Default: Low Priority, Medium Weight + pub fn priority(mut self, priority: Priority) -> Self { + self.priority = priority; + self + } + + /// Set the source AHB port (0 or 1). Default: 0. + pub fn source_ahb_port(mut self, port: AhbPort) -> Self { + self.source_ahb_port = port; + self + } + + /// Set the destination AHB port (0 or 1). Default 0. + pub fn destination_ahb_port(mut self, port: AhbPort) -> Self { + self.destination_ahb_port = port; + self + } + + /// Set the source burst length in words (1 - 64 incl.). Default 1. + pub fn source_burst_length(mut self, len: u8) -> Self { + assert!( + (1..=MAX_BURST_LEN).contains(&len), + "Must specify a burst length between 1 and 64" + ); + self.source_burst_length = len - 1; + self + } + + /// Set the destination burst length in words (1 - 64 incl.). Default 1. + pub fn destination_burst_length(mut self, len: u8) -> Self { + assert!( + (1..=MAX_BURST_LEN).contains(&len), + "Must specify a burst length between 1 and 64" + ); + self.destination_burst_length = len - 1; + self + } + + pub fn enable_interrupts(mut self) -> Self { + self.enable_interrupts = true; + self + } + + /// Apply a data transform via a closure that takes a DataTransformBuilder that provides APIs + /// relevant to the source and destination data widths. 
+ pub fn with_data_transform( + mut self, + builder: DataTransformBuilder, + ) -> Self { + self.data_transform = Some(builder.transform); + self + } +} + +impl DmaConfig { + /// Enable peripheral flow control (only supported by I3C) + pub fn enable_peripheral_flow_control(mut self) -> Self { + self.transfer_type.enable_peripheral_flow_control(); + self + } +} + +impl DmaConfig { + /// Enable block requests for peripherals that support it + pub fn enable_hardware_block_requests(mut self) -> Self { + self.transfer_type.enable_block_request(); + self + } + + /// Select the hardware request line + pub fn with_request(mut self, request: u8) -> Self { + self.transfer_type.set_request(request); + self + } +} + +#[cfg(test)] +mod test { + use crate::gpdma::{ + config::{self, MemoryToMemory}, + DmaConfig, + }; + + use super::*; + + impl DataTransform { + fn new( + source_byte_exchange: bool, + padding_alignment: PaddingAlignmentMode, + dest_half_word_exchange: bool, + dest_byte_exchange: bool, + ) -> Self { + Self { + source_byte_exchange, + padding_alignment, + dest_half_word_exchange, + dest_byte_exchange, + } + } + } + + #[test] + fn test_data_transform() { + let builder: DataTransformBuilder = + DataTransform::builder().swap_source_middle_bytes(); + assert_eq!( + builder.transform, + DataTransform::new(true, Default::default(), false, false) + ); + } + + #[test] + fn test_with_data_transform() { + let config: DmaConfig = DmaConfig::new(); + let transform = DataTransform::builder() + .swap_source_middle_bytes() + .left_align_right_truncate() + .swap_destination_half_word_byte_order(); + let config = config.with_data_transform(transform); + assert_eq!( + config.data_transform, + Some(DataTransform::new( + true, + PaddingAlignmentMode::LeftAlignedRightTruncated, + false, + true + )) + ); + } +} diff --git a/src/gpdma/config/transform.rs b/src/gpdma/config/transform.rs new file mode 100644 index 0000000..aeade1f --- /dev/null +++ b/src/gpdma/config/transform.rs @@ -0,0 
+1,322 @@ +//! The transform module provides a configuration builder to set up the data transformations +//! supported by the GPDMA peripheral. +//! +//! # Usage +//! use stm32h5xx_hal::gpdma::DmaConfig; +//! use stm32h5xx_hal::gpdma::config::transform::*; // This ensures relevant traits are in scope +//! +//! let config: DmaConfig = DmaConfig::new().with_data_transform( +//! DataTransform::builder() +//! .swap_source_middle_bytes() +//! .right_align_left_truncate() +//! .swap_destination_half_word_byte_order() +//! ); +use core::marker::PhantomData; + +use super::Word; + +/// Represents the options available for the padding and alignment step in the data transformation +/// pipeline +#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub(crate) enum PaddingAlignmentMode { + #[default] + None, + + // PAM1 - Source data width < Destination data width + ZeroPadded, + SignExtended, + Packed, + + // PAM2 - Source data width > Destination data width + RightAlignedLeftTruncated, + LeftAlignedRightTruncated, + Unpacked, +} + +impl PaddingAlignmentMode { + pub fn bits(&self) -> u8 { + match self { + PaddingAlignmentMode::None => { + panic!("Do not set PAM bits if no PAM mode was chosen") + } + PaddingAlignmentMode::ZeroPadded => 0, + PaddingAlignmentMode::SignExtended => 1, + PaddingAlignmentMode::Packed => 2, + PaddingAlignmentMode::RightAlignedLeftTruncated => 0, + PaddingAlignmentMode::LeftAlignedRightTruncated => 1, + PaddingAlignmentMode::Unpacked => 2, + } + } +} + +pub trait SourceByteExchange { + fn swap_source_middle_bytes(self) -> Self; +} + +pub trait PaddingAlignment { + fn right_align_zero_pad(self) -> Self; + fn right_align_sign_extend(self) -> Self; + fn pack(self) -> Self; +} + +pub trait TruncationAlignment { + fn right_align_left_truncate(self) -> Self; + fn left_align_right_truncate(self) -> Self; + fn unpack(self) -> Self; +} + +pub trait DestinationHalfWordExchange { + fn 
swap_destination_half_words(self) -> Self; +} + +pub trait DestinationByteExchange { + fn swap_destination_half_word_byte_order(self) -> Self; +} + +/// The DataTransformBuilder is used to configure the data transformation pipeline that the GPDMA +/// peripheral implements. +/// +/// Depending upon what word sizes are used for transfers, different pipeline steps are applicable: +/// +/// - The first possible step in the pipeline, the source byte exchange step is applicable to 32-bit +/// sources only and swaps the middle 2 bytes of the 32-bit word +/// - The next step is applicable when the source data width is not equal to the destination data +/// width: +/// - If the destination width is less than the source width, the data can be truncated (left or +/// right aligned) or unpacked into a FIFO to output all the data to subsequent destination +/// words (destination buffer size must be large enough to accomodate the size in bytes of the +/// unpacked source data) +/// - If the destination width is greater than the source width, the data can be zero- or +/// sign-extended, or it can be packed into the destination words. +/// - After the padding/alignment step, the order of the destination 16-bit half-words in a 32-bit +/// destination word can be swapped (only applicable if the destination word is 32-bit) +/// - Finally, the order of the bytes in each 16-bit destination (half-) word can be swapped (only +/// applicable for 32- and 16-bit destination word sizes) +/// +/// This builder allows each step to be specified, only when relevant to the source and destination +/// data-widths. +/// +/// To get a builder use [`DataTransform::builder()`]. Type inference is used to determine the +/// source and destination word sizes, so the builder can be created without specifying the types +/// explicitly. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DataTransformBuilder { + _source_type: PhantomData, + _destination_type: PhantomData, + pub(super) transform: DataTransform, +} + +impl DataTransformBuilder { + pub fn new() -> Self { + Self::default() + } +} + +impl SourceByteExchange for DataTransformBuilder { + /// The order of the unaligned middle bytes of a 32-bit source word is exchanged + /// ie. B3B2B1B0 -> B3B1B2B0 + fn swap_source_middle_bytes(mut self) -> Self { + self.transform.source_byte_exchange = true; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 16 bits of the 32-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 16 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 16-bit words into the 32-bit destination words + /// ie: B3B2,B1B0 -> B3B2B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 24 bits of the 32-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 24 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 8-bit words into the 32-bit destination words + /// ie: B3,B2,B1,B0 -> B3B2B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; 
+ self + } +} + +impl PaddingAlignment for DataTransformBuilder { + /// Pad out the upper 8 bits of the 16-bit destination word with zeroes (default) + fn right_align_zero_pad(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::ZeroPadded; + self + } + + /// Sign extend the upper 8 bits of the 32-bit destination word + fn right_align_sign_extend(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::SignExtended; + self + } + + /// Pack subsequent 8-bit words into the 16-bit destination words + /// ie: B1,B0 -> B1B0 (see RM0492, Table 92) + fn pack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Packed; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 16-bits and truncate the rest (default) + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B5B4,B1B0 (see RM0492, Table 92) + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 16-bits and truncate the rest + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7B6,B3B2 (see RM0492, Table 92) + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each 32-bit word into separate 16-bit half-words. + /// Note that the destination buffer must have sufficient room for n*2 16-bit values where n is + /// the number of 32-bit words in the source buffer. 
+ /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7B6,B5B4,B3B2,B1B0 (see RM0492, Table 92) + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 8-bits and truncate the rest (default) + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B4,B0 + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 8-bits and truncate the rest + /// + /// i.e: B7B6B5B4,B3B2B1B0 -> B7,B3 + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each word or half-word into separate 8-bit bytes. + /// Note that the destination buffer must have sufficient room for n*2 8-bit values where n is + /// the number of word or half-words in the source buffer. + /// + /// ie: B7B6B5B4,B3B2B1B0 -> B7,B6,B5,B4,B3,B2,B1,B0 + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl TruncationAlignment for DataTransformBuilder { + /// Keep the least significant 8 bits and truncate the rest (default) + /// + /// ie: B3B2,B1B0 -> B2,B0 (see RM0492, Table 92) + fn right_align_left_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::RightAlignedLeftTruncated; + self + } + + /// Keep the most significant 8 bits and truncate the rest + /// + /// ie: B3B2,B1B0 -> B3,B1 (see RM0492, Table 92) + fn left_align_right_truncate(mut self) -> Self { + self.transform.padding_alignment = + PaddingAlignmentMode::LeftAlignedRightTruncated; + self + } + + /// Unpack each 16-bit word into separate 8-bit half-words. + /// Note that the destination buffer must have sufficient room for n*2 16-bit values where n is + /// the number of 32-bit words in the source buffer. 
+ /// + /// ie: B3B2,B1B0 -> B3,B2,B1,B0 (see RM0492, Table 92) + fn unpack(mut self) -> Self { + self.transform.padding_alignment = PaddingAlignmentMode::Unpacked; + self + } +} + +impl DestinationHalfWordExchange for DataTransformBuilder { + /// Swap the order of the 16-bit half-words in the 32-bit destination word + fn swap_destination_half_words(mut self) -> Self { + self.transform.dest_half_word_exchange = true; + self + } +} + +impl DestinationByteExchange for DataTransformBuilder { + /// Swap the order of bytes in each 16-bit destination word + fn swap_destination_half_word_byte_order(mut self) -> Self { + self.transform.dest_byte_exchange = true; + self + } +} + +impl DestinationByteExchange for DataTransformBuilder { + /// Swap the order of bytes in each 16-bit destination half-word + fn swap_destination_half_word_byte_order(mut self) -> Self { + self.transform.dest_byte_exchange = true; + self + } +} + +/// DataTransform represents the configuration of the data transformation pipeline as produced +/// by the above builder structs. 
+#[derive(Clone, Copy, Debug, Default, PartialEq)] +#[cfg_attr(feature = "defmt", derive(defmt::Format))] +pub struct DataTransform { + pub(crate) source_byte_exchange: bool, + pub(crate) padding_alignment: PaddingAlignmentMode, + pub(crate) dest_half_word_exchange: bool, + pub(crate) dest_byte_exchange: bool, +} + +impl DataTransform { + pub fn builder() -> DataTransformBuilder { + DataTransformBuilder { + _source_type: PhantomData, + _destination_type: PhantomData, + transform: DataTransform::default(), + } + } +} diff --git a/src/gpdma/future.rs b/src/gpdma/future.rs new file mode 100644 index 0000000..2178b50 --- /dev/null +++ b/src/gpdma/future.rs @@ -0,0 +1,200 @@ +use core::{ + future::{Future, IntoFuture}, + ops::{Deref, DerefMut}, + pin::Pin, + task::{Context, Poll}, +}; + +use embedded_dma::{ReadBuffer, WriteBuffer}; +use futures_util::task::AtomicWaker; + +use crate::interrupt; +use crate::stm32::{GPDMA1, GPDMA2}; + +use super::{ + ch::{ + Channel, ChannelRegs, DmaChannel0, DmaChannel1, DmaChannel2, + DmaChannel3, DmaChannel4, DmaChannel5, DmaChannel6, DmaChannel7, + DmaChannelRef, + }, + DmaTransfer, Error, Instance, Word, +}; + +#[allow(private_bounds)] +pub trait DmaChannel: Channel + ChannelWaker {} + +impl DmaChannel for DmaChannelRef +where + DMA: Instance + InstanceWaker, + CH: ChannelRegs, + Self: Deref, +{ +} + +#[allow(private_bounds)] +impl<'a, CH, S, D> IntoFuture for DmaTransfer<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + type Output = Result<(), Error>; + type IntoFuture = DmaTransferFuture<'a, CH, S, D>; + + fn into_future(mut self) -> DmaTransferFuture<'a, CH, S, D> { + self.enable_interrupts(); + DmaTransferFuture { transfer: self } + } +} + +pub struct DmaTransferFuture<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + transfer: DmaTransfer<'a, CH, S, D>, +} + +impl<'a, CH, S, D> Deref for DmaTransferFuture<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: 
WriteBuffer, +{ + type Target = DmaTransfer<'a, CH, S, D>; + + fn deref(&self) -> &Self::Target { + &self.transfer + } +} + +impl<'a, CH, S, D> DerefMut for DmaTransferFuture<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.transfer + } +} + +impl<'a, CH, S, D> Unpin for DmaTransferFuture<'a, CH, S, D> +where + CH: DmaChannel, + S: ReadBuffer, + D: WriteBuffer, +{ +} + +impl<'a, CH, S, D> Future for DmaTransferFuture<'a, CH, S, D> +where + CH: DmaChannel + ChannelWaker, + S: ReadBuffer, + D: WriteBuffer, +{ + type Output = Result<(), Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.channel.waker().register(cx.waker()); + if self.channel.check_transfer_complete()? { + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + } +} + +#[allow(private_bounds)] +impl DmaChannelRef +where + DMA: Instance + InstanceWaker, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn handle_interrupt() { + let mut ch = Self::new(); + ch.disable_transfer_interrupts(); + ch.waker().wake(); + } +} + +impl ChannelWaker for DmaChannelRef +where + DMA: Instance + InstanceWaker, + CH: ChannelRegs, + Self: Deref, +{ + #[inline(always)] + fn waker(&self) -> &'static AtomicWaker { + DMA::waker(N) + } +} + +macro_rules! gpdma_irq { + ($GPDMA:ident, $CH:literal) => { + paste::item! { + #[interrupt] + fn [<$GPDMA _CH $CH>]() { + [< DmaChannel $CH>]::<$GPDMA>::handle_interrupt(); + } + } + }; +} + +pub(super) trait InstanceWaker { + fn waker(idx: usize) -> &'static AtomicWaker; +} + +pub(super) trait ChannelWaker { + /// Returns a reference to the AtomicWaker for the channel. 
+ fn waker(&self) -> &'static AtomicWaker; +} + +mod gpdma1 { + use super::*; + + static WAKERS_GPDMA1: [AtomicWaker; 8] = [const { AtomicWaker::new() }; 8]; + + #[allow(private_bounds)] + impl InstanceWaker for GPDMA1 { + #[inline(always)] + fn waker(idx: usize) -> &'static AtomicWaker { + &WAKERS_GPDMA1[idx] + } + } + + gpdma_irq!(GPDMA1, 0); + gpdma_irq!(GPDMA1, 1); + gpdma_irq!(GPDMA1, 2); + gpdma_irq!(GPDMA1, 3); + gpdma_irq!(GPDMA1, 4); + gpdma_irq!(GPDMA1, 5); + gpdma_irq!(GPDMA1, 6); + gpdma_irq!(GPDMA1, 7); +} + +mod gpdma2 { + use super::*; + + static WAKERS_GPDMA2: [AtomicWaker; 8] = [const { AtomicWaker::new() }; 8]; + + #[allow(private_bounds)] + impl InstanceWaker for GPDMA2 { + #[inline(always)] + fn waker(idx: usize) -> &'static AtomicWaker { + &WAKERS_GPDMA2[idx] + } + } + + gpdma_irq!(GPDMA2, 0); + gpdma_irq!(GPDMA2, 1); + gpdma_irq!(GPDMA2, 2); + gpdma_irq!(GPDMA2, 3); + gpdma_irq!(GPDMA2, 4); + gpdma_irq!(GPDMA2, 5); + gpdma_irq!(GPDMA2, 6); + gpdma_irq!(GPDMA2, 7); +} diff --git a/src/gpdma/periph.rs b/src/gpdma/periph.rs new file mode 100644 index 0000000..0baaca9 --- /dev/null +++ b/src/gpdma/periph.rs @@ -0,0 +1,280 @@ +//! This module provides traits and structs for managing DMA transactions for peripherals. +//! - By implementing the [`TxAddr`] and [`RxAddr`] traits, peripherals can be used with DMA for +//! memory-to-peripheral and peripheral-to-memory transfers, respectively. +//! - The [`Tx`] and [`Rx`] traits provide a define the interface for initiating DMA transfers for +//! TX and RX operations, respectively. +//! - The [`DmaTx`], [`DmaRx`] structs implement the [`Tx`] and [`Rx`] traits, respectively, and +//! encapsulate the logic for initializing these transfers. +//! - The [`DmaDuplex`] struct combines both TX and RX capabilities, allowing for full-duplex +//! operations. 
+use core::marker::PhantomData; + +use crate::Sealed; + +use super::{ + DmaChannel, DmaConfig, DmaTransfer, MemoryToPeripheral, PeripheralToMemory, + ReadBuffer, Word, WriteBuffer, +}; + +/// `PeriphTxBuffer` is a wrapper around a peripheral's transmit data register address, used to +/// provide a WriteBuffer implementation for initiating memory-to-peripheral DMA transfers. +pub struct PeriphTxBuffer, W: Word> { + _addr: PhantomData, + _word: PhantomData, +} + +unsafe impl, W: Word> WriteBuffer for PeriphTxBuffer { + type Word = W; + + unsafe fn write_buffer(&mut self) -> (*mut Self::Word, usize) { + (A::tx_addr(), 1) + } +} + +/// `PeriphRxBuffer` is a wrapper around a peripheral's receive data register address, used to +/// provide a ReadBuffer implementation for initiating peripheral-to-memory DMA transfers. +pub struct PeriphRxBuffer, W: Word> { + _addr: PhantomData, + _word: PhantomData, +} + +unsafe impl, W: Word> ReadBuffer for PeriphRxBuffer { + type Word = W; + + unsafe fn read_buffer(&self) -> (*const Self::Word, usize) { + (A::rx_addr(), 1) + } +} + +/// `TxAddr` is a trait that provides a method to obtain the address of the transmit data register +/// of a peripheral. This is used to facilitate memory-to-peripheral DMA transactions. The +/// peripheral must implement this trait. +pub trait TxAddr { + /// Returns a pointer to the peripheral's transmit data register. + /// + /// # Safety + /// + /// The caller must ensure that the returned pointer is only used when it is valid to access + /// the peripheral's transmit data register, and that no data races or invalid memory accesses + /// occur. + unsafe fn tx_addr() -> *mut W; +} + +/// `RxAddr` is a trait that provides a method to obtain the address of the receive data register +/// of a peripheral. This is used to facilitate peripheral-to-memory DMA transactions. The +/// peripheral must implement this trait. +pub trait RxAddr { + /// Returns a pointer to the peripheral's receive data register. 
+ /// + /// # Safety + /// + /// The caller must ensure that the returned pointer is only used when it is valid to access + /// the peripheral's receive data register, and that no data races or invalid memory accesses + /// occur. + unsafe fn rx_addr() -> *const W; +} + +trait TxBuffer { + /// Returns a `PeriphTxBuffer` that provides a write buffer for the peripheral's transmit data + /// register. This is used to initiate memory-to-peripheral DMA transfers. Implemented + /// automatically for any implementer of `TxAddr`. + /// + /// # Safety + /// TxAddr already requires the caller to ensure that the returned pointer is valid and as such + /// is marked unsafe, so marking this method as unsafe is redundant. + fn tx_buffer() -> PeriphTxBuffer + where + Self: TxAddr + Sized, + { + PeriphTxBuffer { + _addr: PhantomData, + _word: PhantomData, + } + } +} + +impl> TxBuffer for T {} + +trait RxBuffer { + /// Returns a `PeriphRxBuffer` that provides a read buffer for the peripheral's receive data + /// register. This is used to initiate peripheral-to-memory DMA transfers. Implemented + /// automatically for any implementer of `RxAddr`. + /// + /// # Safety + /// RxAddr already requires the caller to ensure that the returned pointer is valid and as such + /// is marked unsafe, so marking this method as unsafe is redundant. + fn rx_buffer() -> PeriphRxBuffer + where + Self: RxAddr + Sized, + { + PeriphRxBuffer { + _addr: PhantomData, + _word: PhantomData, + } + } +} + +impl> RxBuffer for T {} + +/// `DmaRx` encapsulates the initialization of a peripheral-to-memory DMA transaction for receiving +/// data. Used by peripheral DMA implementations. 
+pub struct DmaRx { + _periph: PhantomData, + _word: PhantomData, + channel: CH, +} + +impl DmaRx { + fn new(channel: CH) -> Self { + Self { + _periph: PhantomData, + _word: PhantomData, + channel, + } + } + + pub fn free(self) -> CH { + self.channel + } +} + +impl From for DmaRx { + fn from(channel: CH) -> Self { + Self::new(channel) + } +} + +impl Sealed for DmaRx {} + +impl DmaRx +where + PERIPH: RxAddr, + CH: DmaChannel, + W: Word, +{ + pub fn init_rx_transfer<'a, D>( + &'a mut self, + config: DmaConfig, + destination: D, + ) -> DmaTransfer<'a, CH, PeriphRxBuffer, D> + where + D: WriteBuffer, + { + DmaTransfer::peripheral_to_memory( + config, + &mut self.channel, + PERIPH::rx_buffer(), + destination, + ) + } +} + +/// `DmaTx` encapsulates the initialization of a memory-to-peripheral DMA transaction for +/// transmitting data. Used by peripheral DMA implementations. +pub struct DmaTx { + _periph: PhantomData, + _word: PhantomData, + channel: CH, +} + +impl DmaTx { + fn new(channel: CH) -> Self { + Self { + _periph: PhantomData, + _word: PhantomData, + channel, + } + } + + pub fn free(self) -> CH { + self.channel + } +} + +impl From for DmaTx { + fn from(channel: CH) -> Self { + Self::new(channel) + } +} + +impl Sealed for DmaTx {} + +impl DmaTx +where + PERIPH: TxAddr, + CH: DmaChannel, + W: Word, +{ + pub fn init_tx_transfer<'a, S>( + &'a mut self, + config: DmaConfig, + source: S, + ) -> DmaTransfer<'a, CH, S, PeriphTxBuffer> + where + S: ReadBuffer, + { + DmaTransfer::memory_to_peripheral( + config, + &mut self.channel, + source, + PERIPH::tx_buffer(), + ) + } +} + +/// `DmaDuplex` encapsulates the initialization of both memory-to-peripheral and +/// peripheral-to-memory DMA transaction for to enable setting up of full-duplex transmission and +/// reception of data. Used by peripheral DMA implementations. 
+pub struct DmaDuplex { + tx: DmaTx, + rx: DmaRx, +} + +impl DmaDuplex +where + PERIPH: TxAddr + RxAddr, + W: Word, + TX: DmaChannel, + RX: DmaChannel, +{ + pub fn new(tx: TX, rx: RX) -> Self { + Self { + tx: DmaTx::from(tx), + rx: DmaRx::from(rx), + } + } + + pub fn free(self) -> (TX, RX) { + (self.tx.free(), self.rx.free()) + } +} + +impl Sealed for DmaDuplex {} + +impl DmaDuplex +where + PERIPH: TxAddr + RxAddr, + W: Word, + TX: DmaChannel, + RX: DmaChannel, +{ + #[allow(clippy::type_complexity)] + pub fn init_duplex_transfer<'a, S, D>( + &'a mut self, + tx_config: DmaConfig, + rx_config: DmaConfig, + source: S, + destination: D, + ) -> ( + DmaTransfer<'a, TX, S, PeriphTxBuffer>, + DmaTransfer<'a, RX, PeriphRxBuffer, D>, + ) + where + S: ReadBuffer, + D: WriteBuffer, + { + let tx = self.tx.init_tx_transfer(tx_config, source); + let rx = self.rx.init_rx_transfer(rx_config, destination); + (tx, rx) + } +} diff --git a/src/lib.rs b/src/lib.rs index 3c27058..cc3c317 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -82,6 +82,9 @@ pub mod dwt; #[cfg(feature = "device-selected")] pub mod usb; +#[cfg(feature = "device-selected")] +pub mod gpdma; + #[cfg(feature = "device-selected")] mod sealed { pub trait Sealed {} diff --git a/src/prelude.rs b/src/prelude.rs index becb02a..a73e785 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -2,6 +2,7 @@ pub use crate::delay::DelayExt as _stm32h5xx_hal_delay_DelayExt; pub use crate::dwt::DwtExt as _stm32h5xx_hal_delay_DwtExt; +pub use crate::gpdma::GpdmaExt as _stm32h5xx_hal_gpdma_GpdmaExt; pub use crate::gpio::GpioExt as _stm32h5xx_hal_gpio_GpioExt; pub use crate::i2c::I2cExt as _stm32h5xx_hal_i2c_I2cExt; pub use crate::icache::ICacheExt as _stm32h5xx_hal_icache_ICacheExt; diff --git a/src/spi.rs b/src/spi.rs index 3e3fa15..a97bb5d 100644 --- a/src/spi.rs +++ b/src/spi.rs @@ -141,6 +141,7 @@ pub use embedded_hal::spi::{ Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3, }; +use crate::gpdma::Error as DmaError; use 
crate::rcc::{CoreClocks, ResetEnable}; use crate::stm32::spi1; @@ -148,6 +149,7 @@ use crate::time::Hertz; use spi1::{cfg1::MBR, cfg2::LSBFRST, cfg2::SSIOP}; mod config; +pub mod dma; mod hal; pub mod nonblocking; mod spi_def; @@ -182,6 +184,14 @@ pub enum Error { /// Caller makes invalid call (e.g. write in SimplexReceiver mode, or read in /// SimplexTransmitter) InvalidOperation, + /// A DMA error occurred during processing + DmaError(DmaError), +} + +impl From for Error { + fn from(error: DmaError) -> Self { + Error::DmaError(error) + } } pub trait Pins { @@ -252,6 +262,12 @@ pub trait Instance: #[doc(hidden)] fn rec() -> Self::Rec; + + #[doc(hidden)] + fn tx_dma_request() -> u8; + + #[doc(hidden)] + fn rx_dma_request() -> u8; } pub trait Word: Copy + Default + 'static + crate::Sealed { @@ -487,7 +503,37 @@ impl Inner { self.spi.cr1().modify(|_, w| w.spe().enabled()); } - /// Enable SPI + fn is_enabled(&self) -> bool { + self.spi.cr1().read().spe().is_enabled() + } + + #[inline] + pub fn start_transfer(&self) { + self.spi.cr1().modify(|_, w| w.cstart().started()); + } + + fn set_transfer_word_count(&self, words: u16) { + self.spi.cr2().modify(|_, w| w.tsize().set(words)); + } + + fn reset_transfer_word_count(&self) { + self.spi.cr2().modify(|_, w| w.tsize().set(0)); + } + + fn enable_dma_transfer_interrupts(&self) { + self.spi.ier().modify(|_, w| { + w.eotie() + .enabled() + .udrie() // Underrun + .enabled() + .ovrie() // Overrun + .enabled() + .modfie() // Mode fault + .enabled() + }); + } + + /// Disable SPI fn disable(&mut self) { self.spi.cr1().modify(|_, w| w.spe().disabled()); } @@ -564,6 +610,26 @@ impl Inner { let _ = self.spi.sr().read(); } + /// Disable DMA for both Rx and Tx + #[inline] + pub fn enable_tx_dma(&self) { + self.spi.cfg1().modify(|_, w| w.txdmaen().enabled()); + } + + /// Disable DMA for both Rx and Tx + #[inline] + pub fn enable_rx_dma(&self) { + self.spi.cfg1().modify(|_, w| w.rxdmaen().enabled()); + } + + /// Disable DMA for both Rx 
and Tx + #[inline] + pub fn disable_dma(&self) { + self.spi + .cfg1() + .modify(|_, w| w.rxdmaen().disabled().txdmaen().disabled()); + } + /// Read a single word from the receive data register #[inline(always)] fn read_data_reg(&mut self) -> W { @@ -789,9 +855,6 @@ impl Spi { } /// Sets up a frame transaction with the given amount of data words. - /// - /// If this is called when a transaction has already started, - /// then an error is returned with [Error::TransactionAlreadyStarted]. fn setup_transaction(&mut self, length: usize) { assert!( length <= u16::MAX as usize, @@ -799,25 +862,40 @@ impl Spi { u16::MAX ); - self.spi().cr2().write(|w| w.tsize().set(length as u16)); + self.inner.set_transfer_word_count(length as u16); + self.start_transaction(); + } + + fn start_transaction(&mut self) { // Re-enable self.inner.clear_modf(); self.inner.enable(); - self.spi().cr1().modify(|_, w| w.cstart().started()); + self.inner.start_transfer(); } - /// Checks if the current transaction is complete and disables the - /// peripheral if it is, returning true. If it isn't, returns false. - fn end_transaction_if_done(&mut self) -> bool { + fn is_transaction_complete(&mut self) -> bool { let sr = self.spi().sr().read(); - let is_complete = if self.inner.is_transmitter() { + if self.inner.is_transmitter() { sr.eot().is_completed() && sr.txc().is_completed() } else { sr.eot().is_completed() - }; + } + } - if !is_complete { + fn disable(&mut self) { + self.spi() + .ifcr() + .write(|w| w.txtfc().clear().eotc().clear().suspc().clear()); + + self.inner.disable(); + self.inner.reset_transfer_word_count(); + } + + /// Checks if the current transaction is complete and disables the + /// peripheral if it is, returning true. If it isn't, returns false. 
+ fn end_transaction_if_done(&mut self) -> bool { + if !self.is_transaction_complete() { return false; } @@ -828,11 +906,7 @@ impl Spi { cortex_m::asm::nop() } - self.spi() - .ifcr() - .write(|w| w.txtfc().clear().eotc().clear().suspc().clear()); - - self.inner.disable(); + self.disable(); true } @@ -840,12 +914,11 @@ impl Spi { /// This must always be called when all data has been sent to /// properly terminate the transaction and reset the SPI peripheral. fn end_transaction(&mut self) { - // Result is only () or WouldBlock. Discard result. while !self.end_transaction_if_done() {} } fn abort_transaction(&mut self) { - self.inner.disable(); + self.disable(); } /// Deconstructs the SPI peripheral and returns the component parts. @@ -879,14 +952,7 @@ impl Spi { } } - fn start_write<'a>( - &mut self, - words: &'a [W], - ) -> Result, W>, Error> { - assert!( - !words.is_empty(), - "Write buffer should not be non-zero length" - ); + fn setup_write_mode(&mut self) -> Result<(), Error> { let communication_mode = self.inner.communication_mode(); if communication_mode == CommunicationMode::SimplexReceiver { return Err(Error::InvalidOperation); @@ -896,16 +962,25 @@ impl Spi { self.inner.set_dir_transmitter(); } + Ok(()) + } + + fn start_write<'a>( + &mut self, + words: &'a [W], + ) -> Result, W>, Error> { + assert!( + !words.is_empty(), + "Write buffer should not be non-zero length" + ); + + self.setup_write_mode()?; self.setup_transaction(words.len()); Ok(Transaction::, W>::write(words)) } - fn start_read<'a>( - &mut self, - buf: &'a mut [W], - ) -> Result, W>, Error> { - assert!(!buf.is_empty(), "Read buffer should not be non-zero length"); + fn setup_read_mode(&self) -> Result<(), Error> { let communication_mode = self.inner.communication_mode(); if communication_mode == CommunicationMode::SimplexTransmitter { return Err(Error::InvalidOperation); @@ -915,11 +990,29 @@ impl Spi { self.inner.set_dir_receiver(); } + Ok(()) + } + + fn start_read<'a>( + &mut self, + buf: &'a 
mut [W], + ) -> Result, W>, Error> { + assert!(!buf.is_empty(), "Read buffer should not be non-zero length"); + + self.setup_read_mode()?; self.setup_transaction(buf.len()); Ok(Transaction::, W>::read(buf)) } + fn check_transfer_mode(&mut self) -> Result<(), Error> { + if self.inner.communication_mode() != CommunicationMode::FullDuplex { + Err(Error::InvalidOperation) + } else { + Ok(()) + } + } + fn start_transfer<'a>( &mut self, read: &'a mut [W], @@ -929,10 +1022,8 @@ impl Spi { !read.is_empty() && !write.is_empty(), "Transfer buffers should not be of zero length" ); - if self.inner.communication_mode() != CommunicationMode::FullDuplex { - return Err(Error::InvalidOperation); - } + self.check_transfer_mode()?; self.setup_transaction(core::cmp::max(read.len(), write.len())); Ok(Transaction::, W>::transfer(write, read)) @@ -946,9 +1037,7 @@ impl Spi { !words.is_empty(), "Transfer buffer should not be of zero length" ); - if self.inner.communication_mode() != CommunicationMode::FullDuplex { - return Err(Error::InvalidOperation); - } + self.check_transfer_mode()?; self.setup_transaction(words.len()); diff --git a/src/spi/dma.rs b/src/spi/dma.rs new file mode 100644 index 0000000..72c1627 --- /dev/null +++ b/src/spi/dma.rs @@ -0,0 +1,482 @@ +use core::{ + future::{Future, IntoFuture}, + ops::{Deref, DerefMut}, + pin::Pin, + task::{Context, Poll}, +}; + +use embedded_dma::{ReadBuffer, WriteBuffer}; +use embedded_hal::spi::ErrorType; +use futures_util::join; +use futures_util::task::AtomicWaker; + +use crate::{ + gpdma::{ + config::DmaConfig, + periph::{DmaDuplex, DmaRx, DmaTx, PeriphRxBuffer, PeriphTxBuffer, RxAddr, TxAddr}, + DmaChannel, DmaTransfer, Error as DmaError, Word as DmaWord, + }, + interrupt, + spi::CommunicationMode, +}; + +use super::{Error, Instance, Spi, Word}; + +impl TxAddr for SPI { + unsafe fn tx_addr() -> *mut W { + (*SPI::ptr()).txdr().as_ptr() as *mut W + } +} + +impl RxAddr for SPI { + unsafe fn rx_addr() -> *const W { + 
(*SPI::ptr()).rxdr().as_ptr() as *const W + } +} + +impl Spi +where + SPI: Instance, + W: Word + DmaWord, +{ + pub fn use_dma_tx( + self, + channel: CH, + ) -> SpiDma, W> + where + CH: DmaChannel, + { + assert!(self.inner.is_transmitter()); + SpiDma::new_simplex_transmitter(self, channel) + } + + /// Use DMA for receiving data only in simplex receiver mode, or in half duplex mode as a + /// receiver + pub fn use_dma_rx( + self, + channel: CH, + ) -> SpiDma, W> + where + CH: DmaChannel, + { + // Using DMA for receiving data requires that the SPI is configured as a simplex receiver or + // in half duplex mode when receiving data only + // otherwise no data will be received because no clock pulses are generated + assert!( + self.inner.communication_mode() + == CommunicationMode::SimplexReceiver + || (self.inner.communication_mode() + == CommunicationMode::HalfDuplex + && !self.inner.is_half_duplex_transmitter()) + ); + SpiDma::new_simplex_receiver(self, channel) + } + + /// Use DMA for full duplex transfers + pub fn use_dma_duplex( + self, + tx_channel: TX, + rx_channel: RX, + ) -> SpiDma, W> + where + TX: DmaChannel, + RX: DmaChannel, + { + assert!( + self.inner.communication_mode() == CommunicationMode::FullDuplex + ); + SpiDma::new_duplex(self, tx_channel, rx_channel) + } +} + +pub struct SpiDma { + spi: Spi, + mode: MODE, +} + +#[allow(private_bounds)] +impl SpiDma +where + SPI: Instance + Waker, + W: Word, +{ + pub fn new(spi: Spi, mode: MODE) -> Self { + Self { spi, mode } + } + + pub fn finish_transfer( + &mut self, + result: Result<(), DmaError>, + ) -> Result<(), Error> { + let result = match result { + Ok(_) => { + self.end_transaction(); + Ok(()) + } + Err(error) => { + self.abort_transaction(); + Err(Error::DmaError(error)) + } + }; + self.inner.disable_dma(); + result + } + + async fn finish_transfer_async( + &mut self, + result: Result<(), DmaError>, + ) -> Result<(), Error> { + let result = match result { + Ok(_) => SpiDmaFuture::new(self).await, + 
Err(error) => { + self.abort_transaction(); + Err(Error::DmaError(error)) + } + }; + result + } +} + +impl SpiDma, W> +where + SPI: Instance, + W: Word, + CH: DmaChannel, +{ + pub fn new_simplex_transmitter(spi: Spi, channel: CH) -> Self { + Self { + spi, + mode: DmaTx::from(channel), + } + } + + pub fn free(self) -> (Spi, CH) { + let spi = self.spi; + let channel = self.mode.free(); + (spi, channel) + } +} + +impl SpiDma, W> +where + SPI: Instance, + W: Word, + CH: DmaChannel, +{ + pub fn new_simplex_receiver(spi: Spi, channel: CH) -> Self { + Self { + spi, + mode: DmaRx::from(channel), + } + } + + pub fn free(self) -> (Spi, CH) { + (self.spi, self.mode.free()) + } +} + +impl SpiDma, W> +where + SPI: Instance + TxAddr + RxAddr, + W: Word + DmaWord, + TX: DmaChannel, + RX: DmaChannel, +{ + pub fn new_duplex( + spi: Spi, + tx_channel: TX, + rx_channel: RX, + ) -> Self { + Self { + spi, + mode: DmaDuplex::new(tx_channel, rx_channel), + } + } + + pub fn free(self) -> (Spi, TX, RX) { + let (tx, rx) = self.mode.free(); + (self.spi, tx, rx) + } +} + +impl Deref for SpiDma +where + SPI: Instance, + W: Word, +{ + type Target = Spi; + + fn deref(&self) -> &Self::Target { + &self.spi + } +} + +impl DerefMut for SpiDma +where + SPI: Instance, + W: Word, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.spi + } +} + +#[allow(private_bounds)] +impl SpiDma, W> +where + SPI: Instance + Waker, + W: Word + DmaWord, + CH: DmaChannel, +{ + pub fn start_dma_read<'a, D>( + &'a mut self, + mut destination: D, + ) -> Result, D>, Error> + where + D: WriteBuffer, + { + let config = DmaConfig::new().with_request(SPI::rx_dma_request()); + let (_, len) = unsafe { destination.write_buffer() }; + + self.spi.inner.set_transfer_word_count(len as u16); + // Make sure to handle any errors before initializing a transfer + self.setup_read_mode()?; + + let spi = &mut self.spi; + let mut transfer = self.mode.init_rx_transfer(config, destination); + + spi.inner.enable_rx_dma(); + + // 
Start DMA before starting the transaction to avoid receieve buffer overruns + transfer.start_nonblocking(); + spi.start_transaction(); + + Ok(transfer) + } + + pub async fn read_dma(&mut self, destination: D) -> Result<(), Error> + where + D: WriteBuffer, + { + let result = self.start_dma_read(destination)?.await; + self.finish_transfer_async(result).await + } +} + +#[allow(private_bounds)] +impl SpiDma, W> +where + SPI: Instance + Waker, + W: Word + DmaWord, + CH: DmaChannel, +{ + pub fn start_dma_write<'a, S>( + &'a mut self, + source: S, + ) -> Result>, Error> + where + S: ReadBuffer, + { + let config = DmaConfig::new().with_request(SPI::tx_dma_request()); + + let (_, len) = unsafe { source.read_buffer() }; + + self.inner.set_transfer_word_count(len as u16); + + // Make sure to handle any errors before initializing a transfer + self.setup_write_mode()?; + + let spi = &mut self.spi; + let mut transfer = self.mode.init_tx_transfer(config, source); + + transfer.start_nonblocking(); + spi.inner.enable_tx_dma(); + spi.start_transaction(); + + Ok(transfer) + } + + pub async fn write_dma(&mut self, source: S) -> Result<(), Error> + where + S: ReadBuffer, + { + let result = self.start_dma_write(source)?.await; + self.finish_transfer_async(result).await + } +} + +#[allow(private_bounds)] +impl SpiDma, W> +where + SPI: Instance + Waker, + W: Word + DmaWord, + TX: DmaChannel, + RX: DmaChannel, +{ + #[allow(clippy::type_complexity)] + pub fn start_dma_duplex_transfer<'a, S, D>( + &'a mut self, + source: S, + mut destination: D, + ) -> Result<(DmaTransfer<'a, TX, S, PeriphTxBuffer>, + DmaTransfer<'a, RX, PeriphRxBuffer, D>), Error> + where + S: ReadBuffer, + D: WriteBuffer, + { + let (_, read_len) = unsafe { source.read_buffer() }; + let (_, write_len) = unsafe { destination.write_buffer() }; + + assert_eq!( + read_len, write_len, + "Read and write buffers must have the same length" + ); + + let tx_config = DmaConfig::new().with_request(SPI::tx_dma_request()); + let 
rx_config = DmaConfig::new().with_request(SPI::rx_dma_request()); + + self.inner.set_transfer_word_count(read_len as u16); + + self.check_transfer_mode()?; + + let spi = &mut self.spi; + let (mut tx_transfer, mut rx_transfer) = self + .mode + .init_duplex_transfer(tx_config, rx_config, source, destination); + + spi.inner.enable_rx_dma(); + rx_transfer.start_nonblocking(); + tx_transfer.start_nonblocking(); + spi.inner.enable_tx_dma(); + spi.start_transaction(); + + Ok((tx_transfer, rx_transfer)) + } + + pub async fn transfer_dma( + &mut self, + source: S, + destination: D, + ) -> Result<(), Error> + where + S: ReadBuffer, + D: WriteBuffer, + { + let (tx, rx) = self.start_dma_duplex_transfer(source, destination)?; + let (tx, rx) = (tx.into_future(), rx.into_future()); + let results = join!(tx, rx); + + let result = results.0.and(results.1); + + self.finish_transfer_async(result).await + } + + pub async fn transfer_inplace_dma( + &mut self, + mut buffer: B, + ) -> Result<(), Error> + where + B: WriteBuffer, + { + let (ptr, len) = unsafe { buffer.write_buffer() }; + + // Note (unsafe): Data will be read from the start of the buffer before data is written + // to those locations just like for blocking non-DMA in-place transfers, and the location + // is already guaranteed to be 'static + let source = unsafe { core::slice::from_raw_parts(ptr, len) }; + + self.transfer_dma(source, buffer).await + } +} + +impl ErrorType for SpiDma +where + SPI: Instance, + W: Word, +{ + type Error = Error; +} + +struct SpiDmaFuture<'a, SPI: Instance, MODE, W: Word> { + spi: &'a mut SpiDma, +} + +impl<'a, SPI: Instance, MODE, W: Word> SpiDmaFuture<'a, SPI, MODE, W> { + fn new(spi: &'a mut SpiDma) -> Self { + spi.inner.enable_dma_transfer_interrupts(); + Self { spi } + } +} + +impl Unpin for SpiDmaFuture<'_, SPI, MODE, W> {} + +impl Drop for SpiDmaFuture<'_, SPI, MODE, W> { + fn drop(&mut self) { + self.spi.disable(); + } +} + +impl Future + for SpiDmaFuture<'_, SPI, MODE, W> +{ + type 
Output = Result<(), Error>; + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll { + SPI::waker().register(cx.waker()); + + if self.spi.is_transaction_complete() { + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + } +} + +trait Waker { + fn waker() -> &'static AtomicWaker; +} + +macro_rules! spi_dma_irq { + ($SPI:ident) => { + paste::item! { + static [<$SPI _WAKER>]: AtomicWaker = AtomicWaker::new(); + + impl Waker for $SPI { + #[inline(always)] + fn waker() -> &'static AtomicWaker { + &[<$SPI _WAKER>] + } + } + + #[interrupt] + fn $SPI() { + let spi = unsafe { &*$SPI::ptr() }; + unsafe { spi.ier().write_with_zero(|w| w); }; + $SPI::waker().wake(); + } + } + }; +} +use crate::pac::{SPI1, SPI2, SPI3}; + +spi_dma_irq!(SPI1); +spi_dma_irq!(SPI2); +spi_dma_irq!(SPI3); + +#[cfg(feature = "rm0481")] +mod rm0481 { + use super::*; + use crate::pac::SPI4; + spi_dma_irq!(SPI4); +} + +#[cfg(feature = "h56x_h573")] +mod h56x_h573 { + use super::*; + use crate::pac::{SPI5, SPI6}; + spi_dma_irq!(SPI5); + spi_dma_irq!(SPI6); +} diff --git a/src/spi/hal.rs b/src/spi/hal.rs index 30d6121..57f2781 100644 --- a/src/spi/hal.rs +++ b/src/spi/hal.rs @@ -15,6 +15,7 @@ impl HalError for Error { Error::TransactionAlreadyStarted => ErrorKind::Other, Error::BufferTooBig { max_size: _ } => ErrorKind::Other, Error::InvalidOperation => ErrorKind::Other, + Error::DmaError(_) => ErrorKind::Other, } } } diff --git a/src/spi/spi_def.rs b/src/spi/spi_def.rs index 9f72ae4..b54b113 100644 --- a/src/spi/spi_def.rs +++ b/src/spi/spi_def.rs @@ -87,6 +87,16 @@ macro_rules! instance { fn rec() -> Self::Rec { rec::$Spi { _marker: PhantomData } } + + fn tx_dma_request() -> u8 { + use $crate::pac::gpdma1::ch::tr2::REQSEL; + REQSEL::[<$Spi TxDma>] as u8 + } + + fn rx_dma_request() -> u8 { + use $crate::pac::gpdma1::ch::tr2::REQSEL; + REQSEL::[<$Spi RxDma>] as u8 + } } impl crate::Sealed for $SPIX {}