// SPDX-License-Identifier: GPL-2.0

//! Direct memory access (DMA).
//!
//! C header: [`include/linux/dma-mapping.h`](../../../../include/linux/dma-mapping.h)

use crate::{
    bindings,
    device::{Device, RawDevice},
    error::code::*,
    error::Result,
    str::CStr,
    sync::Arc,
};
use core::marker::PhantomData;

/// Trait implemented by backends that provide DMA coherent allocations.
pub trait Allocator {
    /// Data required to free an allocation.
    type AllocationData;
    /// Data from which [`Allocator::AllocationData`] can be derived.
    type DataSource;

    /// Frees an allocation previously made through this allocator.
    fn free(cpu_addr: *mut (), dma_handle: u64, size: usize, alloc_data: &mut Self::AllocationData);

    /// Derives the allocation data from a data source.
    ///
    /// # Safety
    ///
    /// The object underlying `data` must outlive the returned allocation data.
    unsafe fn allocation_data(data: &Self::DataSource) -> Self::AllocationData;
}

/// Allocator that uses the coherent DMA API (`dma_alloc_attrs`/`dma_free_attrs`).
pub struct CoherentAllocator;

impl Allocator for CoherentAllocator {
    type AllocationData = Device;
    type DataSource = Device;

    fn free(cpu_addr: *mut (), dma_handle: u64, size: usize, dev: &mut Device) {
        // SAFETY: `cpu_addr`, `dma_handle` and `size` describe an allocation previously returned
        // by `dma_alloc_attrs` for this device, as required by the `Allocator::free` contract.
        unsafe { bindings::dma_free_attrs(dev.ptr, size, cpu_addr as _, dma_handle, 0) };
    }

    unsafe fn allocation_data(data: &Device) -> Device {
        // SAFETY: Per the safety requirements of `allocation_data`, the device outlives the
        // returned (non-reference-counted) copy.
        unsafe { Device::from_dev_no_reference(data) }
    }
}

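/// Allocates a coherent DMA region large enough to hold `count` values of type `T` and returns
/// it as a [`CoherentAllocation`] that frees the memory again when dropped.
///
/// When `atomic` is true the allocation uses `GFP_ATOMIC`, otherwise `GFP_KERNEL`.
///
/// # Examples
///
/// A minimal usage sketch, assuming a hypothetical driver that already holds a
/// `dev: &dyn RawDevice` and a `Copy` descriptor type `Descriptor`; the names are illustrative
/// only.
///
/// ```ignore
/// // Allocate room for 8 descriptors in coherent memory.
/// let descs = kernel::dma::try_alloc_coherent::<Descriptor>(dev, 8, false)?;
/// // The bus address can be handed to the hardware...
/// let iova = descs.dma_handle;
/// // ...while the CPU reads and writes entries by index.
/// descs.write(0, &Descriptor::default());
/// ```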
pub fn try_alloc_coherent<T>(
    dev: &dyn RawDevice,
    count: usize,
    atomic: bool,
) -> Result<CoherentAllocation<T, CoherentAllocator>> {
    let t_size = core::mem::size_of::<T>();
    let size = count.checked_mul(t_size).ok_or(ENOMEM)?;
    let mut dma_handle = 0;
    // SAFETY: `dev.raw_device()` returns a valid pointer to a `struct device`, and the remaining
    // arguments are valid for `dma_alloc_attrs`.
    let ret = unsafe {
        bindings::dma_alloc_attrs(
            dev.raw_device(),
            size,
            &mut dma_handle,
            if atomic {
                bindings::GFP_ATOMIC
            } else {
                bindings::GFP_KERNEL
            },
            0,
        )
    };
    if ret.is_null() {
        Err(ENOMEM)
    } else {
        Ok(CoherentAllocation::new(
            ret as _,
            dma_handle,
            count,
            Device::from_dev(dev),
        ))
    }
}

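/// A pool of coherent DMA memory (a wrapper around `struct dma_pool`) from which fixed-size
/// blocks of `count` values of type `T` can be allocated.
///
/// # Examples
///
/// A minimal usage sketch, assuming a hypothetical driver with a `dev: &dyn RawDevice` and a
/// `#[repr(C)]` command type `Cmd`; the names and parameters are illustrative only.
///
/// ```ignore
/// use kernel::c_str;
///
/// // One `Cmd` per block, 64-byte aligned, no boundary-crossing restriction.
/// let pool = kernel::dma::Pool::<Cmd>::try_new(c_str!("cmd_pool"), dev, 1, 64, 0)?;
/// let cmd = pool.try_alloc(false)?;
/// let iova = cmd.dma_handle;
/// ```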
pub struct Pool<T> {
    ptr: *mut bindings::dma_pool,
    dev: Device,
    count: usize,
    _p: PhantomData<T>,
}

impl<T> Pool<T> {
    /// Creates a new DMA memory pool from which blocks of `count` items of type `T` can be
    /// allocated, with the given alignment and boundary requirements.
    pub fn try_new(
        name: &CStr,
        dev: &dyn RawDevice,
        count: usize,
        align: usize,
        boundary: usize,
    ) -> Result<Arc<Self>> {
        let t_size = core::mem::size_of::<T>();
        let size = count.checked_mul(t_size).ok_or(ENOMEM)?;
        // SAFETY: `name` is a valid C string and `dev.raw_device()` returns a valid pointer to a
        // `struct device`.
        let ptr = unsafe {
            bindings::dma_pool_create(name.as_char_ptr(), dev.raw_device(), size, align, boundary)
        };
        if ptr.is_null() {
            Err(ENOMEM)
        } else {
            Arc::try_new(Self {
                ptr,
                count,
                dev: Device::from_dev(dev),
                _p: PhantomData,
            })
            .map_err(|e| e.into())
        }
    }

    /// Allocates one block of memory (that is, `count` items of type `T`) from the pool.
    pub fn try_alloc(&self, atomic: bool) -> Result<CoherentAllocation<T, Self>> {
        let flags = if atomic {
            bindings::GFP_ATOMIC
        } else {
            bindings::GFP_KERNEL
        };

        let mut dma_handle = 0;
        // SAFETY: `self.ptr` is a valid pointer to the pool created in `try_new`.
        let ptr = unsafe { bindings::dma_pool_alloc(self.ptr, flags, &mut dma_handle) };
        if ptr.is_null() {
            Err(ENOMEM)
        } else {
            Ok(CoherentAllocation::new(
                ptr as _, dma_handle, self.count, self.ptr,
            ))
        }
    }
}

impl<T> Allocator for Pool<T> {
    type AllocationData = *mut bindings::dma_pool;
    type DataSource = Arc<Pool<T>>;

    fn free(cpu_addr: *mut (), dma_handle: u64, _size: usize, pool: &mut *mut bindings::dma_pool) {
        // SAFETY: `cpu_addr` and `dma_handle` describe a block previously returned by
        // `dma_pool_alloc` from this pool, as required by the `Allocator::free` contract.
        unsafe { bindings::dma_pool_free(*pool, cpu_addr as _, dma_handle) };
    }

    unsafe fn allocation_data(data: &Arc<Pool<T>>) -> *mut bindings::dma_pool {
        data.ptr
    }
}

impl<T> Drop for Pool<T> {
    fn drop(&mut self) {
        // SAFETY: `Pool` is always reference-counted and each allocation increments it, so all
        // allocations have been freed by the time this gets called.
        unsafe { bindings::dma_pool_destroy(self.ptr) };
    }
}

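/// A coherent DMA allocation of `count` values of type `T`, together with its bus address
/// (`dma_handle`). The backing memory is released through the [`Allocator`] `A` when the value
/// is dropped.
///
/// # Examples
///
/// A minimal sketch, assuming an existing allocation `alloc` of `u32` entries obtained from
/// [`try_alloc_coherent`]; the index values are illustrative only.
///
/// ```ignore
/// // CPU-side accesses are bounds-checked against the element count.
/// alloc.write(0, &0xdead_beef);
/// let first = alloc.read(0);           // Some(0xdead_beef)
/// let out_of_range = alloc.read(1000); // None if the allocation holds fewer than 1001 entries
/// ```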
pub struct CoherentAllocation<T, A: Allocator> {
    alloc_data: A::AllocationData,
    /// The bus address of the allocation, to be programmed into the device.
    pub dma_handle: u64,
    count: usize,
    cpu_addr: *mut T,
}

impl<T, A: Allocator> CoherentAllocation<T, A> {
    fn new(cpu_addr: *mut T, dma_handle: u64, count: usize, alloc_data: A::AllocationData) -> Self {
        Self {
            dma_handle,
            count,
            cpu_addr,
            alloc_data,
        }
    }

    /// Reads the value at `index`, or returns `None` if the index is out of bounds.
    pub fn read(&self, index: usize) -> Option<T> {
        if index >= self.count {
            return None;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        Some(unsafe { ptr.read() })
    }

    /// Reads the value at `index` with a volatile read, or returns `None` if the index is out of
    /// bounds.
    pub fn read_volatile(&self, index: usize) -> Option<T> {
        if index >= self.count {
            return None;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        Some(unsafe { ptr.read_volatile() })
    }

    /// Writes `value` at `index` and returns `true`, or returns `false` if the index is out of
    /// bounds.
    pub fn write(&self, index: usize, value: &T) -> bool
    where
        T: Copy,
    {
        if index >= self.count {
            return false;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        unsafe { ptr.write(*value) };
        true
    }

    /// Writes `value` at `index` and returns the previous value, or returns `None` if the index
    /// is out of bounds.
    pub fn read_write(&self, index: usize, value: T) -> Option<T> {
        if index >= self.count {
            return None;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        let ret = unsafe { ptr.read() };
        // SAFETY: We just checked that the index is within bounds.
        unsafe { ptr.write(value) };
        Some(ret)
    }

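    /// Reconstructs a `CoherentAllocation` from raw parts previously returned by
    /// [`CoherentAllocation::into_parts`].
    ///
    /// # Safety
    ///
    /// `ptr`, `dma_handle` and `count` must describe a valid allocation made through this
    /// allocator (for example, the values returned by `into_parts`), and `data` must satisfy the
    /// requirements of [`Allocator::allocation_data`].
    ///
    /// # Examples
    ///
    /// A sketch of handing an allocation through its raw parts and back, assuming an existing
    /// allocation `alloc` from [`try_alloc_coherent`], its originating `dev: Device`, and the
    /// original element count `count`; all names are illustrative only.
    ///
    /// ```ignore
    /// let (cpu_addr, iova) = alloc.into_parts();
    /// // ... later, rebuild the allocation so that it is freed normally when dropped.
    /// let alloc: CoherentAllocation<u32, CoherentAllocator> =
    ///     unsafe { CoherentAllocation::from_parts(&dev, cpu_addr, iova, count) };
    /// ```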
    pub unsafe fn from_parts(
        data: &A::DataSource,
        ptr: usize,
        dma_handle: u64,
        count: usize,
    ) -> Self {
        Self {
            dma_handle,
            count,
            cpu_addr: ptr as _,
            // SAFETY: The safety requirements of the current function satisfy those of
            // `allocation_data`.
            alloc_data: unsafe { A::allocation_data(data) },
        }
    }

    /// Consumes the allocation and returns its raw parts (CPU address and bus address) without
    /// freeing the backing memory.
    pub fn into_parts(self) -> (usize, u64) {
        let ret = (self.cpu_addr as _, self.dma_handle);
        core::mem::forget(self);
        ret
    }

    /// Returns a read-only pointer to the first element of the allocation.
    pub fn first_ptr(&self) -> *const T {
        self.cpu_addr
    }

    /// Returns a mutable pointer to the first element of the allocation.
    pub fn first_ptr_mut(&self) -> *mut T {
        self.cpu_addr
    }
}

impl<T, A: Allocator> Drop for CoherentAllocation<T, A> {
    fn drop(&mut self) {
        let size = self.count * core::mem::size_of::<T>();
        A::free(
            self.cpu_addr as _,
            self.dma_handle,
            size,
            &mut self.alloc_data,
        );
    }
}