@@ -666,6 +666,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 		vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
@@ -1047,6 +1048,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	}
 
 	region->unused_phy_pages = sparsebit_alloc();
+	if (vm_arch_has_protected_memory(vm))
+		region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
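
The new bitmap is only allocated when vm_arch_has_protected_memory() reports that the VM can have protected memory, so protected_phy_pages stays NULL for ordinary VMs. The hook's definition is outside this diff; as a hedged sketch (an assumption, not the patch's actual code), a default that architectures override could look like:

#include <stdbool.h>

struct kvm_vm;

/*
 * Hypothetical default for the arch hook assumed by the hunk above:
 * report no protected-memory support unless an architecture supplies
 * its own definition (the weak attribute lets an arch object file
 * override this symbol at link time).
 */
bool __attribute__((weak)) vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}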
@@ -1873,6 +1876,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (region->protected_phy_pages) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1974,6 +1981,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
+ *   protected - True if the pages will be used as protected/private memory
  *
  * Output Args: None
  *
@@ -1985,8 +1993,9 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
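
Renaming the function to __vm_phy_pages_alloc() and appending a bool suggests existing callers keep the old four-argument name through a wrapper that picks a default for the new flag; that wrapper is not part of this diff. A minimal sketch, assuming the old name survives as an inline in the header:

/*
 * Sketch only: assumes vm_phy_pages_alloc() remains as a wrapper whose
 * default mirrors the VM's capability, i.e. protected VMs allocate
 * protected pages unless a caller explicitly opts out by calling
 * __vm_phy_pages_alloc(..., false).
 */
static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min,
					    uint32_t memslot)
{
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

This choice makes protected the default for protected VMs, where the bulk of guest memory is private, so shared memory becomes the explicit opt-in.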
@@ -1999,8 +2008,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		    paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
+	TEST_ASSERT(!protected || region->protected_phy_pages,
+		    "Region doesn't support protected memory");
 
+	base = pg = paddr_min >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2019,8 +2030,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
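
With protected pages recorded in protected_phy_pages at allocation time, later code can ask whether a given guest physical address was handed out as protected. The helper below is a sketch, not part of this diff; vm_is_gpa_protected is a hypothetical name here, and it assumes the file's existing userspace_mem_region_find() lookup:

/*
 * Illustrative sketch (assumption): map @paddr back to its region and
 * test the bit set by __vm_phy_pages_alloc() above.
 */
bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
{
	struct userspace_mem_region *region;

	if (!vm_arch_has_protected_memory(vm))
		return false;

	region = userspace_mem_region_find(vm, paddr, paddr);
	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);

	return sparsebit_is_set(region->protected_phy_pages,
				paddr >> vm->page_shift);
}

Such a query would let page-table and ucall code decide, per page, whether to apply an architecture's shared/private GPA encoding when mapping guest memory.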