14
14
* and find the first range, but that's correct because the condition
15
15
* expression would cause us to quit the loop.
16
16
*/
17
- static void encrypt_region (struct kvm_vm * vm , struct userspace_mem_region * region )
17
+ static void encrypt_region (struct kvm_vm * vm , struct userspace_mem_region * region ,
18
+ uint8_t page_type , bool private )
18
19
{
19
20
const struct sparsebit * protected_phy_pages = region -> protected_phy_pages ;
20
21
const vm_paddr_t gpa_base = region -> region .guest_phys_addr ;
@@ -24,13 +25,23 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio
24
25
if (!sparsebit_any_set (protected_phy_pages ))
25
26
return ;
26
27
27
- sev_register_encrypted_memory (vm , region );
28
+ if (!is_sev_snp_vm (vm ))
29
+ sev_register_encrypted_memory (vm , region );
28
30
29
31
sparsebit_for_each_set_range (protected_phy_pages , i , j ) {
30
32
const uint64_t size = (j - i + 1 ) * vm -> page_size ;
31
33
const uint64_t offset = (i - lowest_page_in_region ) * vm -> page_size ;
32
34
33
- sev_launch_update_data (vm , gpa_base + offset , size );
35
+ if (private )
36
+ vm_mem_set_private (vm , gpa_base + offset , size );
37
+
38
+ if (is_sev_snp_vm (vm ))
39
+ snp_launch_update_data (vm , gpa_base + offset ,
40
+ (uint64_t )addr_gpa2hva (vm , gpa_base + offset ),
41
+ size , page_type );
42
+ else
43
+ sev_launch_update_data (vm , gpa_base + offset , size );
44
+
34
45
}
35
46
}
36
47
@@ -60,6 +71,14 @@ void sev_es_vm_init(struct kvm_vm *vm)
60
71
}
61
72
}
62
73
74
/*
 * Initialize an SEV-SNP VM via KVM_SEV_INIT2 with default (zeroed)
 * init parameters.
 *
 * Asserts that the VM was created with the SNP VM type before issuing
 * the ioctl, since INIT2 semantics here are SNP-specific.
 */
void snp_vm_init(struct kvm_vm *vm)
{
	struct kvm_sev_init init = { 0 };

	TEST_ASSERT_EQ(vm->type, KVM_X86_SNP_VM);
	vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
}
81
+
63
82
void sev_vm_launch (struct kvm_vm * vm , uint32_t policy )
64
83
{
65
84
struct kvm_sev_launch_start launch_start = {
@@ -76,7 +95,7 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
76
95
TEST_ASSERT_EQ (status .state , SEV_GUEST_STATE_LAUNCH_UPDATE );
77
96
78
97
hash_for_each (vm -> regions .slot_hash , ctr , region , slot_node )
79
- encrypt_region (vm , region );
98
+ encrypt_region (vm , region , KVM_SEV_PAGE_TYPE_INVALID , false );
80
99
81
100
if (policy & SEV_POLICY_ES )
82
101
vm_sev_ioctl (vm , KVM_SEV_LAUNCH_UPDATE_VMSA , NULL );
@@ -112,6 +131,33 @@ void sev_vm_launch_finish(struct kvm_vm *vm)
112
131
TEST_ASSERT_EQ (status .state , SEV_GUEST_STATE_RUNNING );
113
132
}
114
133
134
/*
 * Begin the SNP launch sequence: issue KVM_SEV_SNP_LAUNCH_START with
 * the given guest policy.  All other launch_start fields are left
 * zeroed (designated initializer), i.e. defaults are used.
 */
void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
{
	struct kvm_sev_snp_launch_start launch_start = {
		.policy = policy,
	};

	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start);
}
142
+
143
/*
 * Perform the SNP launch-update step for every memslot: walk all
 * registered userspace memory regions and hand each one to
 * encrypt_region(), requesting NORMAL page type and conversion of the
 * ranges to private memory first.
 *
 * Page tables are flagged as protected afterwards — presumably because
 * they now live in encrypted guest memory and can no longer be walked
 * from host userspace (NOTE(review): inferred from the flag name;
 * confirm against the is_pt_protected consumers).
 */
void snp_vm_launch_update(struct kvm_vm *vm)
{
	struct userspace_mem_region *region;
	int ctr;

	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region, KVM_SEV_SNP_PAGE_TYPE_NORMAL, true);

	vm->arch.is_pt_protected = true;
}
153
+
154
/*
 * Finalize the SNP launch sequence via KVM_SEV_SNP_LAUNCH_FINISH,
 * using default (zeroed) finish arguments.
 */
void snp_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_snp_launch_finish launch_finish = { 0 };

	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
}
160
+
115
161
struct kvm_vm * vm_sev_create_with_one_vcpu (uint32_t type , void * guest_code ,
116
162
struct kvm_vcpu * * cpu )
117
163
{
@@ -128,8 +174,20 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
128
174
return vm ;
129
175
}
130
176
131
- void vm_sev_launch (struct kvm_vm * vm , uint32_t policy , uint8_t * measurement )
177
+ void vm_sev_launch (struct kvm_vm * vm , uint64_t policy , uint8_t * measurement )
132
178
{
179
+ if (is_sev_snp_vm (vm )) {
180
+ vm_enable_cap (vm , KVM_CAP_EXIT_HYPERCALL , BIT (KVM_HC_MAP_GPA_RANGE ));
181
+
182
+ snp_vm_launch_start (vm , policy );
183
+
184
+ snp_vm_launch_update (vm );
185
+
186
+ snp_vm_launch_finish (vm );
187
+
188
+ return ;
189
+ }
190
+
133
191
sev_vm_launch (vm , policy );
134
192
135
193
if (!measurement )
0 commit comments