
Commit 76eb1a2

More clippy fixes for hyperlight_guest_bin
Signed-off-by: Mark Rossett <[email protected]>
1 parent cf38285 commit 76eb1a2

2 files changed: +48 -27 lines

src/hyperlight_guest_bin/src/exceptions/handler.rs

Lines changed: 5 additions & 11 deletions
@@ -21,8 +21,6 @@ use hyperlight_common::flatbuffer_wrappers::guest_error::ErrorCode;
 use hyperlight_common::outb::Exception;
 use hyperlight_guest::exit::abort_with_code_and_message;
 
-use crate::paging;
-
 /// See AMD64 Architecture Programmer's Manual, Volume 2
 /// §8.9.3 Interrupt Stack Frame, pp. 283--284
 /// Figure 8-14: Long-Mode Stack After Interrupt---Same Privilege,
@@ -56,9 +54,8 @@ const _: () = assert!(size_of::<Context>() == 152 + 512);
 
 // TODO: This will eventually need to end up in a per-thread context,
 // when there are threads.
-pub static handlers: [core::sync::atomic::AtomicU64; 31] =
+pub static HANDLERS: [core::sync::atomic::AtomicU64; 31] =
     [const { core::sync::atomic::AtomicU64::new(0) }; 31];
-type handler_t = fn(n: u64, info: *mut ExceptionInfo, ctx: *mut Context, pf_addr: u64) -> bool;
 
 /// Exception handler
 #[unsafe(no_mangle)]
@@ -89,15 +86,12 @@ pub extern "C" fn hl_exception_handler(
     // vectors (0-31)
     if exception_number < 31 {
         let handler =
-            handlers[exception_number as usize].load(core::sync::atomic::Ordering::Acquire);
+            HANDLERS[exception_number as usize].load(core::sync::atomic::Ordering::Acquire);
         if handler != 0
             && unsafe {
-                core::mem::transmute::<_, handler_t>(handler)(
-                    exception_number,
-                    exn_info,
-                    ctx,
-                    page_fault_address,
-                )
+                core::mem::transmute::<u64, fn(u64, *mut ExceptionInfo, *mut Context, u64) -> bool>(
+                    handler,
+                )(exception_number, exn_info, ctx, page_fault_address)
             }
         {
             return;
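For context, a minimal sketch (not part of this commit) of how a vector-specific handler ends up in the renamed HANDLERS table that the dispatch code above loads and transmutes back into a typed function pointer. The vector number (6, #UD) and the handle_invalid_opcode name are hypothetical, and ExceptionInfo and Context are assumed to be in scope in this module.

// Hypothetical handler: returning true tells hl_exception_handler that the
// exception was handled, so it returns instead of aborting the guest.
fn handle_invalid_opcode(
    _vector: u64,
    _info: *mut ExceptionInfo,
    _ctx: *mut Context,
    _pf_addr: u64,
) -> bool {
    true
}

fn install_example_handler() {
    // Coerce to the exact fn type the dispatch path transmutes back to,
    // then store its address in the lock-free table.
    let f: fn(u64, *mut ExceptionInfo, *mut Context, u64) -> bool = handle_invalid_opcode;
    HANDLERS[6].store(f as usize as u64, core::sync::atomic::Ordering::Release);
}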

src/hyperlight_guest_bin/src/paging.rs

Lines changed: 43 additions & 16 deletions
@@ -38,22 +38,25 @@ pub fn ptov(x: u64) -> *mut u8 {
 
 /// A helper structure indicating a mapping operation that needs to be
 /// performed
-struct MapRequest {
+pub struct MapRequest {
     table_base: u64,
     vmin: *mut u8,
     len: u64,
 }
 
 /// A helper structure indicating that a particular PTE needs to be
 /// modified
-struct MapResponse {
+pub struct MapResponse {
     entry_ptr: *mut u64,
     vmin: *mut u8,
     len: u64,
 }
 
 /// Assumption: all are page-aligned
+/// # Safety
+/// This function executes inlined assembly to set a register.
 pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
+    #[allow(unused_assignments)]
     let mut pml4_base: u64 = 0;
     unsafe {
         asm!("mov {}, cr3", out(reg) pml4_base);
@@ -71,12 +74,17 @@ pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
         .map(|r| unsafe { alloc_pte_if_needed(r) })
         .flat_map(modify_ptes::<20, 12>)
         .map(|r| map_normal(phys_base, virt_base, r))
-        .collect::<()>();
+        .for_each(drop);
 }
 
 #[allow(unused)]
 /// This function is not presently used for anything, but is useful
 /// for debugging
+/// # Safety
+/// This function executes inlined assembly to set a register.
+/// # Panics
+/// This function will panic if:
+/// - A page map request resolved to multiple page table entries
 pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
     let mut pml4_base: u64 = 0;
     unsafe {
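The .collect::<()>() to .for_each(drop) change above keeps the behaviour, driving the side-effecting iterator chain to completion, while stating the intent directly. A standalone sketch of the pattern (illustrative values only, not guest code):

// Both forms run the closure once per item and discard the resulting units;
// for_each(drop) just says so explicitly.
let mut pages_touched = 0u64;
(0..3u64).map(|_| pages_touched += 1).for_each(drop);
assert_eq!(pages_touched, 3);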
@@ -105,11 +113,18 @@ pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
     if addrs.len() != 1 {
         panic!("impossible: 1 page map request resolved to multiple PTEs");
     }
-    return addrs[0];
+    addrs[0]
 }
 
 /// Allocate n contiguous physical pages and return the physical
 /// addresses of the pages in question.
+/// # Safety
+/// This function uses raw pointer conversion to obtain a physical memory address
+/// of allocated memory.
+/// # Panics
+/// This function will panic if:
+/// - The Layout creation fails
+/// - Memory allocation fails
 pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
     // Currently, since all of main memory is idmap'd, we can just
     // allocate any appropriately aligned section of memory.
@@ -125,7 +140,10 @@ pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
     }
 }
 
+/// # Safety
+/// This function executes inlined assembly.
 pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
+    #[allow(unused_assignments)]
     let mut pte: u64 = 0;
     unsafe {
         asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
@@ -141,8 +159,11 @@ pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
     }
 }
 
-/// Page-mapping callback to allocate a next-level page table if necessary
+/// Page-mapping callback to allocate a next-level page table if necessary.
+/// # Safety
+/// This function executes inlined assembly.
 pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
+    #[allow(unused_assignments)]
     let mut pte: u64 = 0;
     unsafe {
         asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
@@ -157,6 +178,9 @@ pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
     }
     let page_addr = unsafe { alloc_phys_pages(1) };
     unsafe { ptov(page_addr).write_bytes(0u8, OS_PAGE_SIZE as usize) };
+
+    #[allow(clippy::identity_op)]
+    #[allow(clippy::precedence)]
     let pte = page_addr |
         1 << 5 | // A - we don't track accesses at table level
         0 << 4 | // PCD - leave caching enabled
@@ -178,6 +202,8 @@ pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
 ///
 /// TODO: support permissions; currently mapping is always RWX
 fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
+    #[allow(clippy::identity_op)]
+    #[allow(clippy::precedence)]
     let pte = (phys_base + (r.vmin as u64 - virt_base as u64)) |
         1 << 6 | // D - we don't presently track dirty state for anything
         1 << 5 | // A - we don't presently track access for anything
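The allows added in this hunk and the previous one cover clippy's complaints about the PTE flag expressions: identity_op, for the constant-zero 0 << 4 term kept purely as bit-level documentation, and precedence, for mixing << and | without parentheses. The grouping is nevertheless what the comments describe, since << binds tighter than |; a quick check (nothing here depends on the actual PTE flag layout):

// << binds tighter than |, so the un-parenthesized flag chain groups as
// (1 << 5) | (0 << 4), i.e. 32.
assert_eq!(1u64 << 5 | 0 << 4, (1u64 << 5) | (0u64 << 4));
assert_eq!(1u64 << 5 | 0 << 4, 32);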
@@ -194,27 +220,27 @@ fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
 #[inline(always)]
 /// Utility function to extract an (inclusive on both ends) bit range
 /// from a quadword.
-fn bits<const high_bit: u8, const low_bit: u8>(x: u64) -> u64 {
-    (x & ((1 << (high_bit + 1)) - 1)) >> low_bit
+fn bits<const HIGH_BIT: u8, const LOW_BIT: u8>(x: u64) -> u64 {
+    (x & ((1 << (HIGH_BIT + 1)) - 1)) >> LOW_BIT
 }
 
-struct ModifyPteIterator<const high_bit: u8, const low_bit: u8> {
+struct ModifyPteIterator<const HIGH_BIT: u8, const LOW_BIT: u8> {
     request: MapRequest,
     n: u64,
 }
-impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_bit, low_bit> {
+impl<const HIGH_BIT: u8, const LOW_BIT: u8> Iterator for ModifyPteIterator<HIGH_BIT, LOW_BIT> {
     type Item = MapResponse;
     fn next(&mut self) -> Option<Self::Item> {
-        if (self.n << low_bit) >= self.request.len {
+        if (self.n << LOW_BIT) >= self.request.len {
             return None;
         }
         // next stage parameters
-        let next_vmin = self.request.vmin.wrapping_add((self.n << low_bit) as usize);
+        let next_vmin = self.request.vmin.wrapping_add((self.n << LOW_BIT) as usize);
         let entry_ptr = ptov(self.request.table_base)
-            .wrapping_add((bits::<high_bit, low_bit>(next_vmin as u64) << 3) as usize)
+            .wrapping_add((bits::<HIGH_BIT, LOW_BIT>(next_vmin as u64) << 3) as usize)
             as *mut u64;
-        let len_from_here = self.request.len - (self.n << low_bit);
-        let next_len = core::cmp::min(len_from_here, 1 << low_bit);
+        let len_from_here = self.request.len - (self.n << LOW_BIT);
+        let next_len = core::cmp::min(len_from_here, 1 << LOW_BIT);
 
         // update our state
         self.n += 1;
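The renames in the hunk above only bring the const generic parameters in line with Rust's upper-case convention; behaviour is unchanged. As a usage sketch (valid only inside this module, since bits is private), the helper keeps an inclusive bit range of a virtual address:

// bits::<20, 12>(x) masks off everything above bit 20 and shifts bit 12
// down to bit 0, i.e. it extracts bits 20..=12.
assert_eq!(bits::<20, 12>(0x0012_3456), 0x123);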
@@ -226,16 +252,17 @@ impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_
         })
     }
 }
-fn modify_ptes<const high_bit: u8, const low_bit: u8>(
+fn modify_ptes<const HIGH_BIT: u8, const LOW_BIT: u8>(
     r: MapRequest,
-) -> ModifyPteIterator<high_bit, low_bit> {
+) -> ModifyPteIterator<HIGH_BIT, LOW_BIT> {
     ModifyPteIterator { request: r, n: 0 }
 }
 
 pub fn flush_tlb() {
     // Currently this just always flips CR4.PGE back and forth to
     // trigger a tlb flush. We should use a faster approach where
     // available
+    #[allow(unused_assignments)]
     let mut orig_cr4: u64 = 0;
     unsafe {
         asm!("mov {}, cr4", out(reg) orig_cr4);
