// SPDX-License-Identifier: GPL-2.0

//! IOMMU page table management
//!
//! C header: [`include/linux/io-pgtable.h`](../../../../include/linux/io-pgtable.h)

use crate::{
    bindings, device,
    error::{code::*, to_result, Result},
    types::{ForeignOwnable, ScopeGuard},
};

use core::marker::PhantomData;
use core::mem;
use core::num::NonZeroU64;

/// Protection flags used with IOMMU mappings.
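///
/// These re-export the C `IOMMU_*` flag constants and can be combined with bitwise OR.
///
/// # Examples
///
/// A purely illustrative sketch of the flags for a cacheable read/write mapping:
///
/// ```ignore
/// let flags = prot::READ | prot::WRITE | prot::CACHE;
/// ```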
pub mod prot {
    /// Read access.
    pub const READ: u32 = bindings::IOMMU_READ;
    /// Write access.
    pub const WRITE: u32 = bindings::IOMMU_WRITE;
    /// Request cache coherency.
    pub const CACHE: u32 = bindings::IOMMU_CACHE;
    /// Request no-execute permission.
    pub const NOEXEC: u32 = bindings::IOMMU_NOEXEC;
    /// MMIO peripheral mapping.
    pub const MMIO: u32 = bindings::IOMMU_MMIO;
    /// Privileged mapping.
    pub const PRIV: u32 = bindings::IOMMU_PRIV;
}

/// Represents a requested io_pgtable configuration.
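///
/// # Examples
///
/// The values below are only an illustrative sketch (a hypothetical configuration supporting
/// 4 KiB, 2 MiB and 1 GiB pages with a 32-bit input and 36-bit output address space), not a
/// recommendation for any particular IOMMU:
///
/// ```ignore
/// let config = Config {
///     quirks: 0,
///     pgsize_bitmap: 0x1000 | 0x200000 | 0x40000000,
///     ias: 32,
///     oas: 36,
///     coherent_walk: true,
/// };
/// ```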
pub struct Config {
    /// Quirk bitmask (type-specific).
    pub quirks: usize,
    /// Valid page sizes, as a bitmask of powers of two.
    pub pgsize_bitmap: usize,
    /// Input address space size in bits.
    pub ias: usize,
    /// Output address space size in bits.
    pub oas: usize,
    /// IOMMU uses coherent accesses for page table walks.
    pub coherent_walk: bool,
}

/// IOMMU callbacks for TLB and page table management.
///
/// Users must implement this trait to perform the TLB flush actions for this IOMMU, if
/// required.
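///
/// # Examples
///
/// A minimal sketch of a driver-side implementation. `MyMmu`, `MyMmuData` and its `flush_hw_*`
/// methods are hypothetical stand-ins for whatever hardware access a real driver would perform:
///
/// ```ignore
/// struct MyMmu;
///
/// impl FlushOps for MyMmu {
///     type Data = Arc<MyMmuData>;
///
///     fn tlb_flush_all(data: <Self::Data as ForeignOwnable>::Borrowed<'_>) {
///         data.flush_hw_all();
///     }
///
///     fn tlb_flush_walk(
///         data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
///         iova: usize,
///         size: usize,
///         _granule: usize,
///     ) {
///         data.flush_hw_range(iova, size);
///     }
///
///     fn tlb_add_page(
///         data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
///         iova: usize,
///         granule: usize,
///     ) {
///         // No batching support yet, so invalidate the single page immediately.
///         data.flush_hw_range(iova, granule);
///     }
/// }
/// ```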
pub trait FlushOps {
    /// User-specified type owned by the page table that will be passed to TLB operations.
    type Data: ForeignOwnable + Send + Sync;

    /// Synchronously invalidate the entire TLB context.
    fn tlb_flush_all(data: <Self::Data as ForeignOwnable>::Borrowed<'_>);

    /// Synchronously invalidate all intermediate TLB state (sometimes referred to as the "walk
    /// cache") for a virtual address range.
    fn tlb_flush_walk(
        data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
        iova: usize,
        size: usize,
        granule: usize,
    );

    /// Optional callback to queue up leaf TLB invalidation for a single page.
    ///
    /// IOMMUs that cannot batch TLB invalidation operations efficiently will typically issue
    /// them here, but others may decide to update the `iommu_iotlb_gather` structure and defer
    /// the invalidation until `iommu_iotlb_sync()` instead.
    ///
    /// TODO: Implement the gather argument for batching.
    fn tlb_add_page(
        data: <Self::Data as ForeignOwnable>::Borrowed<'_>,
        iova: usize,
        granule: usize,
    );
}

/// Inner page table info shared across all table types.
///
/// # Invariants
///
/// - [`self.ops`] is valid and non-null.
/// - [`self.cfg`] is valid.
#[doc(hidden)]
pub struct IoPageTableInner {
    ops: *mut bindings::io_pgtable_ops,
    cfg: bindings::io_pgtable_cfg,
    data: *mut core::ffi::c_void,
}

/// Helper trait to get the config type for a single page table type from the union.
pub trait GetConfig {
    /// Returns the specific output configuration for this page table type.
    fn cfg(iopt: &impl IoPageTable) -> &Self
    where
        Self: Sized;
}

/// A generic IOMMU page table.
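///
/// # Examples
///
/// A rough usage sketch, assuming an Apple DART style IOMMU with 16 KiB pages. `dev` is a
/// hypothetical [`device::RawDevice`], and `MyMmu`/`mmu_data` are a hypothetical [`FlushOps`]
/// implementation and its associated data:
///
/// ```ignore
/// let config = Config {
///     quirks: 0,
///     pgsize_bitmap: 0x4000, // 16 KiB pages only.
///     ias: 32,
///     oas: 36,
///     coherent_walk: true,
/// };
/// let mut pgtable = AppleDART::<MyMmu>::new(&dev, config, mmu_data)?;
///
/// // Map one 16 KiB page read/write, look it up, then unmap it again.
/// let mapped = pgtable.map_pages(0x10000, 0x8000_0000, 0x4000, 1, prot::READ | prot::WRITE)?;
/// assert_eq!(mapped, 0x4000);
/// let phys = pgtable.iova_to_phys(0x10000);
/// let unmapped = pgtable.unmap_pages(0x10000, 0x4000, 1);
/// ```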
pub trait IoPageTable: crate::private::Sealed {
    #[doc(hidden)]
    const FLUSH_OPS: bindings::iommu_flush_ops;

    #[doc(hidden)]
    fn new_fmt<T: FlushOps>(
        dev: &dyn device::RawDevice,
        format: u32,
        config: Config,
        data: T::Data,
    ) -> Result<IoPageTableInner> {
        let ptr = data.into_foreign() as *mut _;

        // Make sure the user data is released again if page table creation fails below.
        let guard = ScopeGuard::new(|| {
            // SAFETY: `ptr` came from a previous call to `into_foreign`.
            unsafe { T::Data::from_foreign(ptr) };
        });

        let mut raw_cfg = bindings::io_pgtable_cfg {
            quirks: config.quirks.try_into()?,
            pgsize_bitmap: config.pgsize_bitmap.try_into()?,
            ias: config.ias.try_into()?,
            oas: config.oas.try_into()?,
            coherent_walk: config.coherent_walk,
            tlb: &Self::FLUSH_OPS,
            iommu_dev: dev.raw_device(),
            // SAFETY: This is an output field which is fine to zero-init.
            __bindgen_anon_1: unsafe { mem::zeroed() },
        };

        // SAFETY: FFI call, all input pointers are valid.
        let ops = unsafe {
            bindings::alloc_io_pgtable_ops(format as bindings::io_pgtable_fmt, &mut raw_cfg, ptr)
        };

        if ops.is_null() {
            return Err(EINVAL);
        }

        guard.dismiss();
        Ok(IoPageTableInner {
            ops,
            cfg: raw_cfg,
            data: ptr,
        })
    }

    /// Maps a range of pages, returning the number of bytes actually mapped.
    fn map_pages(
        &mut self,
        iova: usize,
        paddr: usize,
        pgsize: usize,
        pgcount: usize,
        prot: u32,
    ) -> Result<usize> {
        let mut mapped: usize = 0;

        // SAFETY: FFI call, ops is valid per the type invariant.
        to_result(unsafe {
            (*self.inner().ops).map_pages.unwrap()(
                self.inner().ops,
                iova as u64,
                paddr as u64,
                pgsize,
                pgcount,
                prot as i32,
                bindings::GFP_KERNEL,
                &mut mapped,
            )
        })?;

        Ok(mapped)
    }

    /// Unmaps a range of pages, returning the number of bytes actually unmapped.
    fn unmap_pages(
        &mut self,
        iova: usize,
        pgsize: usize,
        pgcount: usize,
        // TODO: gather: *mut iommu_iotlb_gather,
    ) -> usize {
        // SAFETY: FFI call, ops is valid per the type invariant.
        unsafe {
            (*self.inner().ops).unmap_pages.unwrap()(
                self.inner().ops,
                iova as u64,
                pgsize,
                pgcount,
                core::ptr::null_mut(),
            )
        }
    }

    /// Translates an IOVA to the corresponding physical address, if mapped.
    fn iova_to_phys(&self, iova: usize) -> Option<NonZeroU64> {
        // SAFETY: FFI call, ops is valid per the type invariant.
        NonZeroU64::new(unsafe {
            (*self.inner().ops).iova_to_phys.unwrap()(self.inner().ops, iova as u64)
        })
    }

    #[doc(hidden)]
    fn inner(&self) -> &IoPageTableInner;

    #[doc(hidden)]
    fn raw_cfg(&self) -> &bindings::io_pgtable_cfg {
        &self.inner().cfg
    }
}

// SAFETY: All abstraction operations either require mutable references or are thread-safe,
// and io_pgtable_ops objects can be passed between threads without issue.
unsafe impl Send for IoPageTableInner {}
unsafe impl Sync for IoPageTableInner {}

// Trampoline called by the C io-pgtable core; forwards the full-TLB flush to the `FlushOps`
// implementation.
unsafe extern "C" fn tlb_flush_all_callback<T: FlushOps>(cookie: *mut core::ffi::c_void) {
    // SAFETY: The cookie is always a ForeignOwnable of the right type, per new_fmt().
    T::tlb_flush_all(unsafe { T::Data::borrow(cookie) });
}

// Trampoline called by the C io-pgtable core; forwards the walk-cache flush for a range to the
// `FlushOps` implementation.
unsafe extern "C" fn tlb_flush_walk_callback<T: FlushOps>(
    iova: core::ffi::c_ulong,
    size: usize,
    granule: usize,
    cookie: *mut core::ffi::c_void,
) {
    // SAFETY: The cookie is always a ForeignOwnable of the right type, per new_fmt().
    T::tlb_flush_walk(
        unsafe { T::Data::borrow(cookie) },
        iova as usize,
        size,
        granule,
    );
}

// Trampoline called by the C io-pgtable core; forwards a single-page leaf invalidation to the
// `FlushOps` implementation. The gather argument is currently unused (see the TODO on
// `FlushOps::tlb_add_page`).
unsafe extern "C" fn tlb_add_page_callback<T: FlushOps>(
    _gather: *mut bindings::iommu_iotlb_gather,
    iova: core::ffi::c_ulong,
    granule: usize,
    cookie: *mut core::ffi::c_void,
) {
    // SAFETY: The cookie is always a ForeignOwnable of the right type, per new_fmt().
    T::tlb_add_page(unsafe { T::Data::borrow(cookie) }, iova as usize, granule);
}

macro_rules! iopt_cfg {
    ($name:ident, $field:ident, $type:ident) => {
        /// An IOMMU page table configuration for a specific kind of pagetable.
        pub type $name = bindings::$type;

        impl GetConfig for $name {
            fn cfg(iopt: &impl IoPageTable) -> &$name {
                // SAFETY: The type system ensures we are accessing the right union field.
                unsafe { &iopt.raw_cfg().__bindgen_anon_1.$field }
            }
        }
    };
}

impl GetConfig for () {
    fn cfg(_iopt: &impl IoPageTable) -> &() {
        &()
    }
}

macro_rules! iopt_type {
    ($type:ident, $cfg:ty, $fmt:ident) => {
        /// Represents an IOMMU page table of this type and format.
        pub struct $type<T: FlushOps>(IoPageTableInner, PhantomData<T>);

        impl<T: FlushOps> $type<T> {
            /// Creates a new page table of this type for the given device.
            pub fn new(dev: &dyn device::RawDevice, config: Config, data: T::Data) -> Result<Self> {
                Ok(Self(
                    <Self as IoPageTable>::new_fmt::<T>(dev, bindings::$fmt, config, data)?,
                    PhantomData,
                ))
            }

            /// Returns the configuration for this page table.
            pub fn cfg(&self) -> &$cfg {
                <$cfg as GetConfig>::cfg(self)
            }
        }

        impl<T: FlushOps> crate::private::Sealed for $type<T> {}

        impl<T: FlushOps> IoPageTable for $type<T> {
            const FLUSH_OPS: bindings::iommu_flush_ops = bindings::iommu_flush_ops {
                tlb_flush_all: Some(tlb_flush_all_callback::<T>),
                tlb_flush_walk: Some(tlb_flush_walk_callback::<T>),
                tlb_add_page: Some(tlb_add_page_callback::<T>),
            };

            fn inner(&self) -> &IoPageTableInner {
                &self.0
            }
        }

        impl<T: FlushOps> Drop for $type<T> {
            fn drop(&mut self) {
                // SAFETY: The pointer is valid by the type invariant.
                unsafe { bindings::free_io_pgtable_ops(self.0.ops) };

                // Free context data.
                //
                // SAFETY: This matches the call to `into_foreign` from `new_fmt`.
                unsafe { T::Data::from_foreign(self.0.data) };
            }
        }
    };
}

// Ew, bindgen unions really are quite messy...
iopt_cfg!(
    ARMLPAES1Cfg,
    arm_lpae_s1_cfg,
    io_pgtable_cfg__bindgen_ty_1__bindgen_ty_1
);
iopt_cfg!(
    ARMLPAES2Cfg,
    arm_lpae_s2_cfg,
    io_pgtable_cfg__bindgen_ty_1__bindgen_ty_2
);
iopt_cfg!(
    ARMv7SCfg,
    arm_v7s_cfg,
    io_pgtable_cfg__bindgen_ty_1__bindgen_ty_3
);
iopt_cfg!(
    ARMMaliLPAECfg,
    arm_mali_lpae_cfg,
    io_pgtable_cfg__bindgen_ty_1__bindgen_ty_4
);
iopt_cfg!(
    AppleDARTCfg,
    apple_dart_cfg,
    io_pgtable_cfg__bindgen_ty_1__bindgen_ty_5
);

iopt_type!(ARM32LPAES1, ARMLPAES1Cfg, io_pgtable_fmt_ARM_32_LPAE_S1);
iopt_type!(ARM32LPAES2, ARMLPAES2Cfg, io_pgtable_fmt_ARM_32_LPAE_S2);
iopt_type!(ARM64LPAES1, ARMLPAES1Cfg, io_pgtable_fmt_ARM_64_LPAE_S1);
iopt_type!(ARM64LPAES2, ARMLPAES2Cfg, io_pgtable_fmt_ARM_64_LPAE_S2);
iopt_type!(ARMv7S, ARMv7SCfg, io_pgtable_fmt_ARM_V7S);
iopt_type!(ARMMaliLPAE, ARMMaliLPAECfg, io_pgtable_fmt_ARM_MALI_LPAE);
iopt_type!(AMDIOMMUV1, (), io_pgtable_fmt_AMD_IOMMU_V1);
iopt_type!(AppleDART, AppleDARTCfg, io_pgtable_fmt_APPLE_DART);
iopt_type!(AppleDART2, AppleDARTCfg, io_pgtable_fmt_APPLE_DART2);