
Commit 11c47f6

Promote call follows CoVE spec (#99)
* calculate htimedelta during promotion
* validate confidential VM addresses and release scratch memory in promote procedure

Signed-off-by: Wojciech Ozga <[email protected]>
1 parent 2abb196 commit 11c47f6

17 files changed: +241 -200 lines

security-monitor/src/confidential_flow/handlers/attestation/retrieve_secret.rs

Lines changed: 7 additions & 4 deletions

@@ -11,21 +11,24 @@ use crate::error::Error;
 /// Provides the confidential VM with a secret that was decoded from the attestation payload during the promotion of the VM to confidential
 /// VM. Secret is written to the buffer allocated by the confidential VM and passed as arguments to the call.
 pub struct RetrieveSecretRequest {
-    output_buffer_address: ConfidentialVmPhysicalAddress,
+    output_buffer_address: usize,
     output_buffer_size: usize,
 }
 
 impl RetrieveSecretRequest {
+    pub const ADDRESS_ALIGNMENT: usize = core::mem::size_of::<usize>();
+
     pub fn from_confidential_hart(confidential_hart: &ConfidentialHart) -> Self {
         Self {
-            output_buffer_address: ConfidentialVmPhysicalAddress::new(confidential_hart.gprs().read(GeneralPurposeRegister::a0)),
+            output_buffer_address: confidential_hart.gprs().read(GeneralPurposeRegister::a0),
             output_buffer_size: confidential_hart.gprs().read(GeneralPurposeRegister::a1),
         }
     }
 
     pub fn handle(self, confidential_flow: ConfidentialFlow) -> ! {
         let transformation = ControlDataStorage::try_confidential_vm(confidential_flow.confidential_vm_id(), |ref confidential_vm| {
-            // ensure!(self.output_buffer_address.is_aligned_to(PageSize::Size4KiB.in_bytes()), Error::AddressNotAligned())?;
+            let output_buffer_address = ConfidentialVmPhysicalAddress::new(self.output_buffer_address)?;
+            ensure!(output_buffer_address.is_aligned_to(Self::ADDRESS_ALIGNMENT), Error::AddressNotAligned())?;
             ensure!(self.output_buffer_size <= PageSize::Size4KiB.in_bytes(), Error::AddressNotAligned())?;
             let secret = confidential_vm.secret(0)?;
             ensure!(secret.len() <= self.output_buffer_size, Error::AddressNotAligned())?;
@@ -35,7 +38,7 @@ impl RetrieveSecretRequest {
                 (0..end_boundary).for_each(|i| buffer[i] = secret[offset + i]);
                 (end_boundary..8).for_each(|i| buffer[i] = 0u8);
                 let confidential_memory_address =
-                    confidential_vm.memory_protector().translate_address(&self.output_buffer_address.add(offset))?;
+                    confidential_vm.memory_protector().translate_address(&output_buffer_address.add(offset))?;
                 unsafe { confidential_memory_address.write_volatile(usize::from_le_bytes(buffer)) };
             }
             Ok(SbiResponse::success_with_code(secret.len()))

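The change above keeps the raw a0 value as a plain usize and only turns it into a ConfidentialVmPhysicalAddress inside the handler, where construction can fail and the error is reported back over SBI instead of accepting an unchecked address. Below is a minimal, self-contained sketch of that shape; the type name, error variants, and the non-zero validation rule are illustrative assumptions, not the security monitor's actual implementation.

use core::mem;

#[derive(Debug)]
enum Error {
    InvalidAddress,
    AddressNotAligned,
}

// Hypothetical stand-in for ConfidentialVmPhysicalAddress; the real type's
// validation rules live in security-monitor/src/core/memory_layout.
#[derive(Clone, Debug)]
struct GuestPhysAddr(usize);

impl GuestPhysAddr {
    // Fallible constructor: a bad guest-physical address becomes an Err that the
    // handler can translate into an SBI error response.
    fn new(address: usize) -> Result<Self, Error> {
        if address == 0 {
            return Err(Error::InvalidAddress);
        }
        Ok(Self(address))
    }

    fn is_aligned_to(&self, alignment: usize) -> bool {
        self.0 % alignment == 0
    }

    fn add(&self, offset: usize) -> Self {
        Self(self.0 + offset)
    }
}

fn handle(raw_a0: usize) -> Result<GuestPhysAddr, Error> {
    // Validation happens here, not in from_confidential_hart, so the request
    // struct itself never holds an unchecked typed address.
    let output_buffer_address = GuestPhysAddr::new(raw_a0)?;
    if !output_buffer_address.is_aligned_to(mem::size_of::<usize>()) {
        return Err(Error::AddressNotAligned);
    }
    Ok(output_buffer_address)
}

fn main() {
    let addr = handle(0x8000_1000).expect("aligned, non-zero address is accepted");
    let _next_word = addr.add(mem::size_of::<usize>());
    assert!(handle(0).is_err());
    assert!(handle(0x8000_1003).is_err());
}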
security-monitor/src/confidential_flow/handlers/mmio/add_mmio_region.rs

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ impl AddMmioRegion {
         match ControlDataStorage::try_confidential_vm_mut(confidential_flow.confidential_vm_id(), |mut confidential_vm| {
             ensure!(self.region_start_address % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
             ensure!(self.region_length % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
-            Ok(confidential_vm.add_mmio_region(ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length))?)
+            Ok(confidential_vm.add_mmio_region(ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length)?)?)
         }) {
             Ok(_) => confidential_flow
                 .set_resumable_operation(ResumableOperation::SbiRequest())

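Since ConfidentialVmMmioRegion::new is now fallible, the closure propagates its error with ? before the region is ever handed to add_mmio_region. The sketch below shows that Ok(...?)?) shape under invented names (Region, Vm, with_vm_mut standing in for the closure-based try_confidential_vm_mut accessor); it is not the repository's code.

#[derive(Debug)]
enum Error {
    AddressNotAligned,
    TooManyRegions,
}

struct Region {
    start: usize,
    len: usize,
}

impl Region {
    // Fallible constructor, mirroring ConfidentialVmMmioRegion::new returning Result.
    fn new(start: usize, len: usize) -> Result<Self, Error> {
        if start % 4096 != 0 || len % 4096 != 0 {
            return Err(Error::AddressNotAligned);
        }
        Ok(Self { start, len })
    }
}

struct Vm {
    regions: Vec<Region>,
}

impl Vm {
    fn add_region(&mut self, region: Region) -> Result<(), Error> {
        if self.regions.len() >= 8 {
            return Err(Error::TooManyRegions);
        }
        self.regions.push(region);
        Ok(())
    }
}

// Stand-in for the control-data accessor: runs a fallible closure against the VM.
fn with_vm_mut<F>(vm: &mut Vm, op: F) -> Result<(), Error>
where
    F: FnOnce(&mut Vm) -> Result<(), Error>,
{
    op(vm)
}

fn main() {
    let mut vm = Vm { regions: Vec::new() };
    // Both the constructor and add_region can fail; each `?` turns a failure into
    // an Err that the handler later reports as an SBI error response.
    let ok = with_vm_mut(&mut vm, |vm| Ok(vm.add_region(Region::new(0x1000, 0x2000)?)?));
    let bad = with_vm_mut(&mut vm, |vm| Ok(vm.add_region(Region::new(0x1001, 0x2000)?)?));
    assert!(ok.is_ok());
    assert!(matches!(bad, Err(Error::AddressNotAligned)));
}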
security-monitor/src/confidential_flow/handlers/mmio/mmio_access_fault.rs

Lines changed: 3 additions & 5 deletions

@@ -9,8 +9,6 @@ use crate::core::control_data::{
 };
 use crate::core::memory_layout::ConfidentialVmPhysicalAddress;
 
-use core::mem;
-
 pub struct MmioAccessFault {
     cause: usize,
     mtval: usize,
@@ -19,15 +17,15 @@ pub struct MmioAccessFault {
 }
 
 impl MmioAccessFault {
-    pub const ADDRESS_ALIGNMENT: usize = mem::size_of::<usize>();
+    pub const ADDRESS_ALIGNMENT: usize = core::mem::size_of::<usize>();
 
     pub fn new(cause: usize, mtval: usize, mtinst: usize, fault_address: usize) -> Self {
         Self { cause, mtval, mtinst, fault_address }
    }
 
     pub fn handle(self, mut confidential_flow: ConfidentialFlow) -> ! {
         match ControlDataStorage::try_confidential_vm(confidential_flow.confidential_vm_id(), |confidential_vm| {
-            let confidential_vm_physical_address = ConfidentialVmPhysicalAddress::new(self.fault_address);
+            let confidential_vm_physical_address = ConfidentialVmPhysicalAddress::new(self.fault_address)?;
             let page_size = confidential_vm.memory_protector_mut().map_empty_page(confidential_vm_physical_address, PageSize::Size4KiB)?;
             let request = RemoteHfenceGvmaVmid::all_harts(None, page_size, confidential_flow.confidential_vm_id());
             confidential_flow.broadcast_remote_command(&confidential_vm, ConfidentialHartRemoteCommand::RemoteHfenceGvmaVmid(request))?;
@@ -51,7 +49,7 @@ impl MmioAccessFault {
 
     pub fn tried_to_access_valid_mmio_region(confidential_vm_id: ConfidentialVmId, fault_address: usize) -> bool {
         ControlDataStorage::try_confidential_vm(confidential_vm_id, |confidential_vm| {
-            Ok(confidential_vm.is_mmio_region_defined(&ConfidentialVmMmioRegion::new(fault_address, Self::ADDRESS_ALIGNMENT)))
+            Ok(confidential_vm.is_mmio_region_defined(&ConfidentialVmMmioRegion::new(fault_address, Self::ADDRESS_ALIGNMENT)?))
         })
         .unwrap_or(false)
     }

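tried_to_access_valid_mmio_region now also has to absorb a possible failure from the fallible ConfidentialVmMmioRegion::new; mapping any error to false means an invalid fault address is simply treated as not being an MMIO access. A hedged sketch of that behavior, with invented region and lookup types rather than the real control-data structures:

#[derive(Debug)]
struct Error;

#[derive(Clone, Copy)]
struct Region {
    start: usize,
    // One past the last byte of the region, so the range is half-open.
    end: usize,
}

impl Region {
    fn new(start: usize, len: usize) -> Result<Self, Error> {
        // Invented rule: reject a zero start address, mirroring a fallible constructor.
        if start == 0 {
            return Err(Error);
        }
        Ok(Self { start, end: start + len })
    }

    fn contains(&self, other: &Region) -> bool {
        self.start <= other.start && other.end <= self.end
    }
}

fn is_mmio_region_defined(defined: &[Region], candidate: &Region) -> bool {
    defined.iter().any(|region| region.contains(candidate))
}

// Mirrors tried_to_access_valid_mmio_region: any validation or lookup error
// collapses to `false`, i.e. the fault is not treated as an MMIO access.
fn tried_to_access_valid_mmio_region(defined: &[Region], fault_address: usize) -> bool {
    Region::new(fault_address, core::mem::size_of::<usize>())
        .map(|candidate| is_mmio_region_defined(defined, &candidate))
        .unwrap_or(false)
}

fn main() {
    let defined = [Region::new(0x1000_0000, 0x1000).unwrap()];
    assert!(tried_to_access_valid_mmio_region(&defined, 0x1000_0008));
    assert!(!tried_to_access_valid_mmio_region(&defined, 0x2000_0000));
    assert!(!tried_to_access_valid_mmio_region(&defined, 0)); // invalid address -> false
}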
security-monitor/src/confidential_flow/handlers/mmio/remove_mmio_region.rs

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ impl RemoveMmioRegion {
         match ControlDataStorage::try_confidential_vm_mut(confidential_flow.confidential_vm_id(), |mut confidential_vm| {
             ensure!(self.region_start_address % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
             ensure!(self.region_length % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
-            Ok(confidential_vm.remove_mmio_region(&ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length)))
+            Ok(confidential_vm.remove_mmio_region(&ConfidentialVmMmioRegion::new(self.region_start_address, self.region_length)?))
         }) {
             Ok(_) => confidential_flow
                 .set_resumable_operation(ResumableOperation::SbiRequest())

security-monitor/src/confidential_flow/handlers/shared_page/share_page_complete.rs

Lines changed: 6 additions & 5 deletions

@@ -5,9 +5,9 @@ use crate::confidential_flow::handlers::sbi::SbiResponse;
 use crate::confidential_flow::handlers::shared_page::SharePageRequest;
 use crate::confidential_flow::handlers::symmetrical_multiprocessing::RemoteHfenceGvmaVmid;
 use crate::confidential_flow::{ApplyToConfidentialHart, ConfidentialFlow};
-use crate::core::architecture::GeneralPurposeRegister;
+use crate::core::architecture::{GeneralPurposeRegister, PageSize};
 use crate::core::control_data::{ConfidentialHartRemoteCommand, ControlDataStorage, HypervisorHart};
-use crate::core::memory_layout::NonConfidentialMemoryAddress;
+use crate::core::memory_layout::{ConfidentialVmPhysicalAddress, NonConfidentialMemoryAddress};
 use crate::error::Error;
 
 /// Finishes the pending request of sharing a page between the confidential VM and the hypervisor. The hypervisor should provide information
@@ -40,11 +40,12 @@ impl SharePageComplete {
 
     fn map_shared_page(&self, confidential_flow: &mut ConfidentialFlow) -> Result<(), Error> {
         ensure!(self.response_code == 0, Error::Failed())?;
-        // Security: check that the start address is located in the non-confidential memory
+        // Security: check that the start address is located in the non-confidential memory and is properly aligned
         let hypervisor_address = NonConfidentialMemoryAddress::new(self.hypervisor_page_address as *mut usize)?;
-
+        ensure!(hypervisor_address.usize() % PageSize::Size4KiB.in_bytes() == 0, Error::AddressNotAligned())?;
+        let address = ConfidentialVmPhysicalAddress::new(self.request.confidential_vm_physical_address)?;
         ControlDataStorage::try_confidential_vm(confidential_flow.confidential_vm_id(), |confidential_vm| {
-            let page_size = confidential_vm.memory_protector_mut().map_shared_page(hypervisor_address, self.request.address)?;
+            let page_size = confidential_vm.memory_protector_mut().map_shared_page(hypervisor_address, &address)?;
             let request = RemoteHfenceGvmaVmid::all_harts(None, page_size, confidential_flow.confidential_vm_id());
             confidential_flow.broadcast_remote_command(&confidential_vm, ConfidentialHartRemoteCommand::RemoteHfenceGvmaVmid(request))?;
             Ok(())

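The new check rejects a hypervisor-provided page address that is not 4 KiB aligned before anything is mapped into the guest's address space. The repository's ensure! macro is not shown in this diff; the sketch below assumes the common shape "evaluate a condition, otherwise produce the given error", which is enough to mirror the two checks added here.

#[derive(Debug, PartialEq)]
enum Error {
    Failed,
    AddressNotAligned,
}

// Assumed shape of an ensure!-style helper: Ok(()) when the condition holds,
// otherwise the supplied error. The real macro in the repository may differ.
macro_rules! ensure {
    ($cond:expr, $err:expr) => {
        if $cond {
            Ok(())
        } else {
            Err($err)
        }
    };
}

const PAGE_SIZE_4KIB: usize = 4096;

fn validate_shared_page(response_code: usize, hypervisor_page_address: usize) -> Result<(), Error> {
    ensure!(response_code == 0, Error::Failed)?;
    // Security: the hypervisor-chosen address must be page aligned before it is mapped.
    ensure!(hypervisor_page_address % PAGE_SIZE_4KIB == 0, Error::AddressNotAligned)?;
    Ok(())
}

fn main() {
    assert_eq!(validate_shared_page(0, 0x8020_0000), Ok(()));
    assert_eq!(validate_shared_page(0, 0x8020_0010), Err(Error::AddressNotAligned));
    assert_eq!(validate_shared_page(1, 0x8020_0000), Err(Error::Failed));
}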
security-monitor/src/confidential_flow/handlers/shared_page/share_page_request.rs

Lines changed: 5 additions & 4 deletions

@@ -16,14 +16,14 @@ use crate::non_confidential_flow::DeclassifyToHypervisor;
 /// allocate a page of non-confidential memory and return back the `host physical address` of this page. Control flows back to the
 /// confidential hart if the request was invalid, e.g., the `guest physical address` was not correct.
 pub struct SharePageRequest {
-    pub address: ConfidentialVmPhysicalAddress,
+    pub confidential_vm_physical_address: usize,
     pub size: usize,
 }
 
 impl SharePageRequest {
     pub fn from_confidential_hart(confidential_hart: &ConfidentialHart) -> Self {
         Self {
-            address: ConfidentialVmPhysicalAddress::new(confidential_hart.gprs().read(GeneralPurposeRegister::a0)),
+            confidential_vm_physical_address: confidential_hart.gprs().read(GeneralPurposeRegister::a0),
             size: confidential_hart.gprs().read(GeneralPurposeRegister::a1),
         }
     }
@@ -41,8 +41,9 @@ impl SharePageRequest {
     }
 
     fn share_page_sbi_request(&self) -> Result<SbiRequest, Error> {
-        ensure!(self.address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
+        let address = ConfidentialVmPhysicalAddress::new(self.confidential_vm_physical_address)?;
+        ensure!(address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
         ensure!(self.size == SharedPage::SIZE.in_bytes(), Error::InvalidParameter())?;
-        Ok(SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_SHARE_MEMORY, self.address.usize(), self.size))
+        Ok(SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_SHARE_MEMORY, address.usize(), self.size))
     }
 }

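share_page_sbi_request now constructs the typed address and performs both checks before the COVG share-memory request is assembled, so only validated values ever reach the hypervisor. A self-contained sketch of that order of operations is below; the extension and function identifiers are placeholders, not the actual CovgExtension constants, and the struct is an invented stand-in for SbiRequest.

#[derive(Debug, PartialEq)]
enum Error {
    AddressNotAligned,
    InvalidParameter,
}

const SHARED_PAGE_SIZE: usize = 4096;
// Placeholder identifiers; the real values come from CovgExtension in the repository.
const COVG_EXTID: usize = 0xDEAD_BEEF;
const COVG_SHARE_MEMORY: usize = 3;

#[derive(Debug, PartialEq)]
struct SbiRequest {
    extension_id: usize,
    function_id: usize,
    a0: usize,
    a1: usize,
}

fn share_page_sbi_request(confidential_vm_physical_address: usize, size: usize) -> Result<SbiRequest, Error> {
    // Checks first: only a validated address/size pair is turned into a request.
    if confidential_vm_physical_address % SHARED_PAGE_SIZE != 0 {
        return Err(Error::AddressNotAligned);
    }
    if size != SHARED_PAGE_SIZE {
        return Err(Error::InvalidParameter);
    }
    Ok(SbiRequest { extension_id: COVG_EXTID, function_id: COVG_SHARE_MEMORY, a0: confidential_vm_physical_address, a1: size })
}

fn main() {
    assert!(share_page_sbi_request(0x8000_0000, 4096).is_ok());
    assert_eq!(share_page_sbi_request(0x8000_0100, 4096), Err(Error::AddressNotAligned));
    assert_eq!(share_page_sbi_request(0x8000_0000, 8192), Err(Error::InvalidParameter));
}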
security-monitor/src/confidential_flow/handlers/shared_page/unshare_page_request.rs

Lines changed: 9 additions & 12 deletions

@@ -13,44 +13,41 @@ use crate::non_confidential_flow::DeclassifyToHypervisor;
 
 /// Unshared memory that has been previously shared with the hypervisor.
 pub struct UnsharePageRequest {
-    address: ConfidentialVmPhysicalAddress,
+    confidential_vm_physical_address: usize,
     size: usize,
 }
 
 impl UnsharePageRequest {
     pub fn from_confidential_hart(confidential_hart: &ConfidentialHart) -> Self {
         Self {
-            address: ConfidentialVmPhysicalAddress::new(confidential_hart.gprs().read(GeneralPurposeRegister::a0)),
+            confidential_vm_physical_address: confidential_hart.gprs().read(GeneralPurposeRegister::a0),
             size: confidential_hart.gprs().read(GeneralPurposeRegister::a1),
         }
     }
 
     pub fn handle(self, mut confidential_flow: ConfidentialFlow) -> ! {
         match self.unmap_shared_page(&mut confidential_flow) {
-            Ok(_) => confidential_flow
+            Ok(sbi_request) => confidential_flow
                 .set_resumable_operation(ResumableOperation::SbiRequest())
                 .into_non_confidential_flow()
-                .declassify_and_exit_to_hypervisor(DeclassifyToHypervisor::SbiRequest(self.unshare_page_sbi_request())),
+                .declassify_and_exit_to_hypervisor(DeclassifyToHypervisor::SbiRequest(sbi_request)),
             Err(error) => {
                 confidential_flow.apply_and_exit_to_confidential_hart(ApplyToConfidentialHart::SbiResponse(SbiResponse::error(error)))
             }
         }
     }
 
-    fn unshare_page_sbi_request(&self) -> SbiRequest {
-        SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_UNSHARE_MEMORY, self.address.usize(), self.size)
-    }
-
-    fn unmap_shared_page(&self, confidential_flow: &mut ConfidentialFlow) -> Result<(), Error> {
-        ensure!(self.address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
+    fn unmap_shared_page(&self, confidential_flow: &mut ConfidentialFlow) -> Result<SbiRequest, Error> {
+        let address = ConfidentialVmPhysicalAddress::new(self.confidential_vm_physical_address)?;
+        ensure!(address.usize() % SharedPage::SIZE.in_bytes() == 0, Error::AddressNotAligned())?;
         ensure!(self.size == SharedPage::SIZE.in_bytes(), Error::InvalidParameter())?;
 
         let confidential_vm_id = confidential_flow.confidential_vm_id();
         ControlDataStorage::try_confidential_vm(confidential_vm_id, |confidential_vm| {
-            let unmapped_page_size = confidential_vm.memory_protector_mut().unmap_shared_page(&self.address)?;
+            let unmapped_page_size = confidential_vm.memory_protector_mut().unmap_shared_page(&address)?;
             let request = RemoteHfenceGvmaVmid::all_harts(None, unmapped_page_size, confidential_vm_id);
             confidential_flow.broadcast_remote_command(&confidential_vm, ConfidentialHartRemoteCommand::RemoteHfenceGvmaVmid(request))?;
-            Ok(())
+            Ok(SbiRequest::new(CovgExtension::EXTID, CovgExtension::SBI_EXT_COVG_UNSHARE_MEMORY, address.usize(), self.size))
        })
     }
 }

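After this refactor, unmap_shared_page only yields an SbiRequest once validation and unmapping have succeeded, and handle simply forwards whatever it gets: Ok goes to the hypervisor, Err goes back to the confidential hart. A small sketch of that dispatch shape, with invented outcome types in place of the real flow transitions:

#[derive(Debug)]
struct SbiRequest {
    a0: usize,
    a1: usize,
}

#[derive(Debug)]
enum Error {
    AddressNotAligned,
}

#[derive(Debug)]
enum Exit {
    // Forward the request to the hypervisor so it can unshare the page on its side.
    ToHypervisor(SbiRequest),
    // Return an SBI error response to the confidential hart that asked.
    ToConfidentialHart(Error),
}

fn unmap_shared_page(address: usize, size: usize) -> Result<SbiRequest, Error> {
    if address % 4096 != 0 {
        return Err(Error::AddressNotAligned);
    }
    // ... the real handler unmaps the page and broadcasts a remote fence here ...
    Ok(SbiRequest { a0: address, a1: size })
}

fn handle(address: usize, size: usize) -> Exit {
    match unmap_shared_page(address, size) {
        Ok(sbi_request) => Exit::ToHypervisor(sbi_request),
        Err(error) => Exit::ToConfidentialHart(error),
    }
}

fn main() {
    assert!(matches!(handle(0x9000_0000, 4096), Exit::ToHypervisor(_)));
    assert!(matches!(handle(0x9000_0001, 4096), Exit::ToConfidentialHart(_)));
}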
security-monitor/src/core/architecture/riscv/mmu/page_table.rs

Lines changed: 2 additions & 2 deletions

@@ -177,9 +177,9 @@ impl PageTable {
     /// The caller of this function must ensure that he synchronizes changes to page table configuration, i.e., by clearing address
     /// translation caches.
     pub fn map_shared_page(
-        &mut self, hypervisor_address: NonConfidentialMemoryAddress, confidential_vm_physical_address: ConfidentialVmPhysicalAddress,
+        &mut self, hypervisor_address: NonConfidentialMemoryAddress, confidential_vm_physical_address: &ConfidentialVmPhysicalAddress,
     ) -> Result<PageSize, Error> {
-        let shared_page = SharedPage::new(hypervisor_address, confidential_vm_physical_address)?;
+        let shared_page = SharedPage::new(hypervisor_address, confidential_vm_physical_address.clone())?;
         let shared_page_size = shared_page.page_size();
         self.map_page(&confidential_vm_physical_address, &shared_page_size, LogicalPageTableEntry::PageSharedWithHypervisor(shared_page))?;
         Ok(shared_page_size)

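map_shared_page now borrows the guest physical address and clones it only where SharedPage needs an owned copy; the later map_page call can keep using the same borrowed value. A tiny sketch of that borrow-then-clone pattern with placeholder types:

#[derive(Clone, Debug)]
struct GuestPhysAddr(usize);

struct SharedPage {
    // SharedPage stores its own copy of the guest physical address.
    guest_address: GuestPhysAddr,
}

impl SharedPage {
    fn new(guest_address: GuestPhysAddr) -> Self {
        Self { guest_address }
    }
}

fn map_shared_page(address: &GuestPhysAddr) -> SharedPage {
    // Clone exactly where ownership is required; the caller keeps its borrow
    // and can keep using `address` for the subsequent page-table update.
    SharedPage::new(address.clone())
}

fn main() {
    let address = GuestPhysAddr(0xA000_0000);
    let page = map_shared_page(&address);
    // `address` is still usable here because map_shared_page only borrowed it.
    assert_eq!(address.0, page.guest_address.0);
}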
security-monitor/src/core/control_data/confidential_hart.rs

Lines changed: 1 addition & 1 deletion

@@ -127,7 +127,7 @@ impl ConfidentialHart {
 
     /// Constructs a confidential hart with the state of the non-confidential hart that made a call to promote the VM to confidential VM
     pub fn from_vm_hart(
-        id: usize, program_counter: usize, fdt_address: ConfidentialVmPhysicalAddress, htimedelta: usize, shared_memory: &NaclSharedMemory,
+        id: usize, program_counter: usize, fdt_address: &ConfidentialVmPhysicalAddress, htimedelta: usize, shared_memory: &NaclSharedMemory,
     ) -> Self {
         // We first create a confidential hart in the reset state and then fill this state with the runtime state of the hart that made a
         // call to promote to confidential VM. This state consists of GPRs and VS-level CSRs.

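The promote path hands the confidential hart an htimedelta value together with a borrowed FDT address. On RISC-V, htimedelta is the offset added to reads of the time CSR from VS/VU-mode, so guest-visible time equals host time plus htimedelta with wrapping arithmetic. The commit message says htimedelta is calculated during promotion, but the diff does not show that calculation; the sketch below only illustrates the wrapping arithmetic one would use to make the guest observe a chosen starting time, not the security monitor's actual policy.

/// Guest-visible time = host time + htimedelta (wrapping, per the RISC-V hypervisor extension).
fn guest_time(host_time: u64, htimedelta: u64) -> u64 {
    host_time.wrapping_add(htimedelta)
}

/// Pick htimedelta so that, at `host_time_at_promotion`, the guest observes `desired_guest_time`.
/// This is only an illustration of the wrapping arithmetic.
fn htimedelta_for(desired_guest_time: u64, host_time_at_promotion: u64) -> u64 {
    desired_guest_time.wrapping_sub(host_time_at_promotion)
}

fn main() {
    let host_time_at_promotion = 123_456_789_u64;
    // E.g., keep the guest's clock running from the value it had before promotion.
    let desired_guest_time = 42_u64;
    let htimedelta = htimedelta_for(desired_guest_time, host_time_at_promotion);
    assert_eq!(guest_time(host_time_at_promotion, htimedelta), desired_guest_time);
    // Later host-time reads advance the guest clock by the same amount.
    assert_eq!(guest_time(host_time_at_promotion + 1000, htimedelta), desired_guest_time + 1000);
}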
security-monitor/src/core/control_data/confidential_vm_mmio_region.rs

Lines changed: 4 additions & 3 deletions

@@ -2,6 +2,7 @@
 // SPDX-FileContributor: Wojciech Ozga <[email protected]>, IBM Research - Zurich
 // SPDX-License-Identifier: Apache-2.0
 use crate::core::memory_layout::ConfidentialVmPhysicalAddress;
+use crate::error::Error;
 
 /// Defines a range of guest physical addresses that the security monitor interprets as MMIO address and expose load/store operations to the
 /// hypervisor. The hypervisor can then emulate them to provide access to virtual devices.
@@ -13,10 +14,10 @@ pub struct ConfidentialVmMmioRegion {
 }
 
 impl ConfidentialVmMmioRegion {
-    pub fn new(start_address: usize, size_in_bytes: usize) -> Self {
-        let base_address = ConfidentialVmPhysicalAddress::new(start_address);
+    pub fn new(start_address: usize, size_in_bytes: usize) -> Result<Self, Error> {
+        let base_address = ConfidentialVmPhysicalAddress::new(start_address)?;
         let one_past_the_end_address = base_address.add(size_in_bytes);
-        Self { base_address, one_past_the_end_address }
+        Ok(Self { base_address, one_past_the_end_address })
     }
 
     pub fn overlaps(&self, other: &Self) -> bool {

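ConfidentialVmMmioRegion keeps a base address and a one-past-the-end address, i.e., a half-open range. The body of overlaps is not shown in this diff, but for half-open ranges the standard test is that each range starts before the other ends; a minimal sketch with plain integers:

// Half-open range [start, end), mirroring base_address / one_past_the_end_address.
#[derive(Clone, Copy)]
struct Range {
    start: usize,
    end: usize,
}

impl Range {
    fn new(start: usize, len: usize) -> Self {
        Self { start, end: start + len }
    }

    // Two half-open ranges overlap iff each one starts before the other ends.
    fn overlaps(&self, other: &Range) -> bool {
        self.start < other.end && other.start < self.end
    }
}

fn main() {
    let a = Range::new(0x1000, 0x1000); // [0x1000, 0x2000)
    let b = Range::new(0x1800, 0x1000); // [0x1800, 0x2800)
    let c = Range::new(0x2000, 0x1000); // [0x2000, 0x3000), adjacent to `a`, no overlap
    assert!(a.overlaps(&b));
    assert!(!a.overlaps(&c));
}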