Commit 9e1f78a
Added basic support for memory-mapped I/O

MMIO functionality is fairly basic, but it does the trick. You provide a set of physical memory frames and get back an MMIO region, which tracks the virtual address space it maps. The read and write methods can be used to read or write any type with the Copy trait, though it will normally be an integer type. Everything is bounds-checked, so only the initial map call is unsafe. This is also the first example of using the kernel_pml4 function to get the mapper and pmm::ALLOCATOR to get the physical memory allocator. To make space for the MMIO address space, I've moved the physical memory offset up slightly.

Signed-off-by: SlyMarbo <[email protected]>
1 parent 35db7ba commit 9e1f78a
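For a sense of the new API, here is a minimal, hypothetical usage sketch. The device address, frame count, and register offsets are invented for illustration; only Region::map, read, and write come from this commit.

    use x86_64::structures::paging::PhysFrame;
    use x86_64::PhysAddr;
    use crate::memory::mmio;

    // Hypothetical device: one 4 KiB frame of registers at 0xfee0_0000.
    let start = PhysFrame::containing_address(PhysAddr::new(0xfee0_0000));
    let frames = PhysFrame::range(start, start + 1);

    // Unsafe: the caller must guarantee these frames are not already in use.
    let mut region = unsafe { mmio::Region::map(frames) };

    // Bounds-checked, typed access at byte offsets into the region.
    let status = region.read::<u32>(0x08).unwrap();
    region.write::<u32>(0x0c, status | 1).unwrap();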

6 files changed: +143 −6 lines changed

kernel/Cargo.toml

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@ run-command = [
 boot-info-address = "0xffff800040000000"
 kernel-stack-address = "0xffff80005554f000"
 kernel-stack-size = 128
-physical-memory-offset = "0xffff800060000000"
+physical-memory-offset = "0xffff800080000000"

 [[test]]
 name = "should_panic"

kernel/src/memory/README.md

Lines changed: 2 additions & 1 deletion

@@ -11,4 +11,5 @@ Firefly uses the following layout of virtual memory:
 | Kernel heap         | `0xffff_8000_4444_0000` | `0xffff_8000_444b_ffff` | 128x 4 KiB page | 512 KiB   |
 | Kernel stack guard  | `0xffff_8000_5554_f000` | `0xffff_8000_5554_ffff` | not mapped      | 4 KiB     |
 | Kernel stack        | `0xffff_8000_5555_0000` | `0xffff_8000_555c_ffff` | 128x 4 KiB page | 512 KiB   |
-| Physical memory map | `0xffff_8000_6000_0000` | `0xffff_ffff_ffff_ffff` | rest of memory  | < 128 TiB |
+| MMIO address space  | `0xffff_8000_6666_0000` | `0xffff_8000_6675_ffff` | 256x 4 KiB page | 1 MiB     |
+| Physical memory map | `0xffff_8000_8000_0000` | `0xffff_ffff_ffff_ffff` | rest of memory  | < 128 TiB |

kernel/src/memory/constants.rs

Lines changed: 10 additions & 2 deletions

@@ -23,7 +23,8 @@ use x86_64::{PhysAddr, VirtAddr};
 // | Kernel heap         | 0xffff_8000_4444_0000 | 0xffff_8000_444b_ffff |
 // | Kernel stack guard  | 0xffff_8000_5554_f000 | 0xffff_8000_5554_ffff |
 // | Kernel stack        | 0xffff_8000_5555_0000 | 0xffff_8000_555c_ffff |
-// | Physical memory map | 0xffff_8000_6000_0000 | 0xffff_ffff_ffff_ffff |
+// | MMIO address space  | 0xffff_8000_6666_0000 | 0xffff_8000_6675_ffff |
+// | Physical memory map | 0xffff_8000_8000_0000 | 0xffff_ffff_ffff_ffff |

 /// NULL_PAGE is reserved and always unmapped to ensure that null pointer
 /// dereferences always result in a page fault.
@@ -77,14 +78,21 @@ pub const KERNEL_STACK: VirtAddrRange = VirtAddrRange::new(KERNEL_STACK_END, KERNEL_STACK_START);
 const KERNEL_STACK_START: VirtAddr = const_virt_addr(0xffff_8000_555c_ffff as u64);
 const KERNEL_STACK_END: VirtAddr = const_virt_addr(0xffff_8000_5555_0000 as u64);

+/// MMIO_SPACE is the virtual address space used for accessing
+/// hardware devices via memory-mapped I/O.
+///
+pub const MMIO_SPACE: VirtAddrRange = VirtAddrRange::new(MMIO_SPACE_START, MMIO_SPACE_END);
+const MMIO_SPACE_START: VirtAddr = const_virt_addr(0xffff_8000_6666_0000 as u64);
+const MMIO_SPACE_END: VirtAddr = const_virt_addr(0xffff_8000_6675_ffff as u64);
+
 /// PHYSICAL_MEMORY_OFFSET is the virtual address at which the mapping of
 /// all physical memory begins. That is, for any valid physical address,
 /// that address can be reached at the same virtual address, plus
 /// PHYSICAL_MEMORY_OFFSET.
 ///
 pub const PHYSICAL_MEMORY: VirtAddrRange =
     VirtAddrRange::new(PHYSICAL_MEMORY_OFFSET, VIRTUAL_MEMORY_END);
-pub const PHYSICAL_MEMORY_OFFSET: VirtAddr = const_virt_addr(0xffff_8000_6000_0000 as u64);
+pub const PHYSICAL_MEMORY_OFFSET: VirtAddr = const_virt_addr(0xffff_8000_8000_0000 as u64);
 const VIRTUAL_MEMORY_END: VirtAddr = const_virt_addr(0xffff_ffff_ffff_ffff as u64);

 /// phys_to_virt_addr returns a virtual address that is mapped to the
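The phys_to_virt_addr helper that follows in this file is untouched by the commit; per the PHYSICAL_MEMORY_OFFSET doc comment above, its behaviour amounts to the following sketch (the exact committed body is not shown in this diff):

    // Sketch only: any valid physical address is reachable at the same
    // address plus PHYSICAL_MEMORY_OFFSET, per the doc comment above.
    pub fn phys_to_virt_addr(phys: PhysAddr) -> VirtAddr {
        PHYSICAL_MEMORY_OFFSET + phys.as_u64()
    }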

kernel/src/memory/mmio.rs

Lines changed: 127 additions & 0 deletions

@@ -0,0 +1,127 @@
+//! mmio provides functionality for interacting with memory-mapped
+//! I/O devices.
+
+// This is fairly basic support for MMIO. You allocate a region from
+// a set of physical memory frames, which maps the address space. The
+// Region type tracks the virtual address space it occupies, which
+// can then be used via the read and write methods to read/write the
+// MMIO space.
+
+use crate::memory;
+use crate::memory::MMIO_SPACE;
+use x86_64::structures::paging::frame::PhysFrameRange;
+use x86_64::structures::paging::page::Page;
+use x86_64::structures::paging::page_table::PageTableFlags;
+use x86_64::structures::paging::Mapper;
+use x86_64::VirtAddr;
+
+/// MMIO_START_ADDRESS is the address where the next MMIO mapping
+/// will be placed.
+///
+static MMIO_START_ADDRESS: spin::Mutex<VirtAddr> = spin::Mutex::new(MMIO_SPACE.start());
+
+/// reserve_space reserves the given amount of MMIO address space,
+/// returning the virtual address where the reservation begins.
+///
+fn reserve_space(size: u64) -> VirtAddr {
+    let mut start_address = MMIO_START_ADDRESS.lock();
+    let out = *start_address;
+
+    // Check we haven't gone outside the bounds
+    // of the reserved MMIO address space.
+    if !MMIO_SPACE.contains_addr(out + size) {
+        panic!("exceeded MMIO address space");
+    }
+
+    *start_address = out + size;
+    out
+}
+
+/// RegionOverflow indicates that a read or write in an MMIO
+/// region exceeded the bounds of the region.
+///
+#[derive(Debug)]
+pub struct RegionOverflow(VirtAddr);
+
+/// Region describes a set of memory allocated for memory-mapped
+/// I/O.
+///
+pub struct Region {
+    // start is the first valid address in the region.
+    start: VirtAddr,
+
+    // end is the last valid address in the region.
+    end: VirtAddr,
+}
+
+impl Region {
+    /// map maps the given physical frame range into the MMIO
+    /// address space, returning a Region through which the memory
+    /// can be accessed.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe because the caller must guarantee that the
+    /// given physical memory region is not being used already for other
+    /// purposes.
+    ///
+    pub unsafe fn map(range: PhysFrameRange) -> Self {
+        let first_addr = range.start.start_address();
+        let last_addr = range.end.start_address();
+        let size = last_addr - first_addr;
+
+        let mut mapper = memory::kernel_pml4();
+        let mut frame_allocator = memory::pmm::ALLOCATOR.lock();
+        let start_address = reserve_space(size);
+        let mut next_address = start_address;
+        for frame in range {
+            let flags = PageTableFlags::PRESENT
+                | PageTableFlags::WRITABLE
+                | PageTableFlags::WRITE_THROUGH
+                | PageTableFlags::NO_CACHE
+                | PageTableFlags::GLOBAL
+                | PageTableFlags::NO_EXECUTE;
+            let page = Page::from_start_address(next_address).expect("bad start address");
+            next_address += page.size();
+            mapper
+                .map_to(page, frame, flags, &mut *frame_allocator)
+                .expect("failed to map MMIO page")
+                .flush();
+        }
+
+        Region {
+            start: start_address,
+            end: next_address - 1u64,
+        }
+    }
+
+    /// read reads a generic value at the given offset into
+    /// the region.
+    ///
+    pub fn read<T: Copy>(&self, offset: u64) -> Result<T, RegionOverflow> {
+        let addr = self.start + offset;
+        let size = core::mem::size_of::<T>() as u64;
+        if (addr + size - 1u64) > self.end {
+            return Err(RegionOverflow(addr + size));
+        }
+
+        let ptr = addr.as_ptr() as *const T;
+        unsafe { Ok(*ptr) }
+    }
+
+    /// write writes a generic value to the given offset into
+    /// the region.
+    ///
+    pub fn write<T: Copy>(&mut self, offset: u64, val: T) -> Result<(), RegionOverflow> {
+        let addr = self.start + offset;
+        let size = core::mem::size_of::<T>() as u64;
+        if (addr + size - 1u64) > self.end {
+            return Err(RegionOverflow(addr + size));
+        }
+
+        let ptr = addr.as_mut_ptr() as *mut T;
+        unsafe { *ptr = val };
+
+        Ok(())
+    }
+}
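Because Region stores its last valid address, an access that would run past the mapped frames fails with RegionOverflow instead of touching unmapped memory. Continuing the hypothetical single-frame (4 KiB) region from the sketch above:

    // A u32 read at offset 4092 touches bytes 4092..=4095, the last
    // valid bytes of the 4 KiB region, and succeeds; at offset 4093 it
    // would run one byte past the region, so it fails.
    assert!(region.read::<u32>(4092).is_ok());
    assert!(region.read::<u32>(4093).is_err());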

kernel/src/memory/mod.rs

Lines changed: 2 additions & 1 deletion

@@ -185,12 +185,13 @@ use x86_64::structures::paging::{OffsetPageTable, PageTable};
 use x86_64::VirtAddr;

 mod constants;
+pub mod mmio;
 pub mod pmm;
 pub mod vmm;

 pub use crate::memory::constants::{
     phys_to_virt_addr, VirtAddrRange, BOOT_INFO, KERNEL_BINARY, KERNEL_HEAP, KERNEL_STACK,
-    KERNEL_STACK_GUARD, NULL_PAGE, PHYSICAL_MEMORY, PHYSICAL_MEMORY_OFFSET, USERSPACE,
+    KERNEL_STACK_GUARD, MMIO_SPACE, NULL_PAGE, PHYSICAL_MEMORY, PHYSICAL_MEMORY_OFFSET, USERSPACE,
 };

 // PML4 functionality.

kernel/src/memory/pmm/mod.rs

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ lazy_static! {
     /// been set up. To bootstrap the heap, use a BootInfoFrameAllocator,
     /// then pass that to pmm::init so ALLOCATOR can take over.
     ///
-    static ref ALLOCATOR: Locked<BitmapFrameAllocator> = Locked::new(BitmapFrameAllocator::empty());
+    pub static ref ALLOCATOR: Locked<BitmapFrameAllocator> = Locked::new(BitmapFrameAllocator::empty());
 }

 /// init sets up the physical memory manager, taking over