//! mmio provides functionality for interacting with memory-mapped
//! I/O devices.

// This is fairly basic support for MMIO. You allocate a region from
// a set of physical memory frames, which maps those frames into the
// virtual address space. The resulting Region records the virtual
// address range it covers, which can then be accessed through its
// read and write methods, as sketched below.
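//
// A rough usage sketch (assuming `frame_range` is a `PhysFrameRange`
// covering a device's registers, e.g. taken from a PCI BAR; device
// discovery is not handled by this module):
//
//     let mut region = unsafe { Region::map(frame_range) };
//     let status: u32 = region.read(0x04).expect("offset within region");
//     region.write(0x08, 0xffu8).expect("offset within region");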

use crate::memory;
use crate::memory::MMIO_SPACE;
use x86_64::structures::paging::frame::PhysFrameRange;
use x86_64::structures::paging::page::Page;
use x86_64::structures::paging::page_table::PageTableFlags;
use x86_64::structures::paging::Mapper;
use x86_64::VirtAddr;

/// MMIO_START_ADDRESS is the address where the next MMIO mapping
/// will be placed.
///
static MMIO_START_ADDRESS: spin::Mutex<VirtAddr> = spin::Mutex::new(MMIO_SPACE.start());

/// reserve_space reserves the given amount of MMIO address space,
/// returning the virtual address where the reservation begins.
///
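/// For example (using hypothetical numbers), if the next free address
/// is 0xffff_8000_4444_0000, then two successive calls to
/// reserve_space(0x1000) return 0xffff_8000_4444_0000 and then
/// 0xffff_8000_4444_1000.
///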
fn reserve_space(size: u64) -> VirtAddr {
    let mut start_address = MMIO_START_ADDRESS.lock();
    let out = *start_address;

    // Check we haven't gone outside the bounds
    // of the reserved MMIO address space.
    if !MMIO_SPACE.contains_addr(out + size) {
        panic!("exceeded MMIO address space");
    }

    *start_address = out + size;
    out
}

/// RegionOverflow indicates that a read or write in an MMIO
/// region exceeded the bounds of the region.
///
#[derive(Debug)]
pub struct RegionOverflow(VirtAddr);

/// Region describes a set of memory allocated for memory-mapped
/// I/O.
///
pub struct Region {
    // start is the first valid address in the region.
    start: VirtAddr,

    // end is the last valid address in the region.
    end: VirtAddr,
}

impl Region {
    /// map maps the given physical address region into the MMIO
    /// address space, returning a Region through which the memory
    /// can be accessed.
    ///
    /// # Safety
    ///
    /// This function is unsafe because the caller must guarantee that the
    /// given physical memory region is not already being used for other
    /// purposes.
    ///
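    /// # Example
    ///
    /// A rough sketch, assuming `first` and `end` are `PhysFrame`s
    /// bounding a device's register block, with `end` exclusive
    /// (hypothetical values, not provided by this module):
    ///
    /// ```ignore
    /// use x86_64::structures::paging::frame::PhysFrame;
    ///
    /// let region = unsafe { Region::map(PhysFrame::range(first, end)) };
    /// ```
    ///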
    pub unsafe fn map(range: PhysFrameRange) -> Self {
        let first_addr = range.start.start_address();
        let last_addr = range.end.start_address();
        let size = last_addr - first_addr;

        // Map the frames with caching disabled and writes pushed
        // straight through, so MMIO accesses reach the device rather
        // than the CPU caches.
        let flags = PageTableFlags::PRESENT
            | PageTableFlags::WRITABLE
            | PageTableFlags::WRITE_THROUGH
            | PageTableFlags::NO_CACHE
            | PageTableFlags::GLOBAL
            | PageTableFlags::NO_EXECUTE;

        let mut mapper = memory::kernel_pml4();
        let mut frame_allocator = memory::pmm::ALLOCATOR.lock();
        let start_address = reserve_space(size);
        let mut next_address = start_address;
        for frame in range {
            let page = Page::from_start_address(next_address).expect("bad start address");
            next_address += page.size();
            mapper
                .map_to(page, frame, flags, &mut *frame_allocator)
                .expect("failed to map MMIO page")
                .flush();
        }

        Region {
            start: start_address,
            end: next_address - 1u64,
        }
    }

    /// read reads a generic value at the given offset into
    /// the region.
    ///
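    /// ```ignore
    /// // For example (hypothetical register layout): read a 32-bit
    /// // status register at byte offset 8.
    /// let status: u32 = region.read(8).expect("offset within region");
    /// ```
    ///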
    pub fn read<T: Copy>(&self, offset: u64) -> Result<T, RegionOverflow> {
        let addr = self.start + offset;
        let size = core::mem::size_of::<T>() as u64;
        // self.end is the last valid address, so the read must not
        // extend beyond it.
        if (addr + size - 1u64) > self.end {
            return Err(RegionOverflow(addr + size));
        }

        // Use a volatile read so the access is not elided or
        // reordered by the compiler.
        let ptr = addr.as_ptr() as *const T;
        unsafe { Ok(core::ptr::read_volatile(ptr)) }
    }

    /// write writes a generic value to the given offset into
    /// the region.
    ///
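    /// ```ignore
    /// // For example (hypothetical register layout): write a 32-bit
    /// // control register at byte offset 12.
    /// region.write(12, 0x1u32).expect("offset within region");
    /// ```
    ///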
    pub fn write<T: Copy>(&mut self, offset: u64, val: T) -> Result<(), RegionOverflow> {
        let addr = self.start + offset;
        let size = core::mem::size_of::<T>() as u64;
        // self.end is the last valid address, so the write must not
        // extend beyond it.
        if (addr + size - 1u64) > self.end {
            return Err(RegionOverflow(addr + size));
        }

        // Use a volatile write so the access is not elided or
        // reordered by the compiler.
        let ptr = addr.as_mut_ptr() as *mut T;
        unsafe { core::ptr::write_volatile(ptr, val) };

        Ok(())
    }
}