@@ -560,6 +560,8 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
560
560
if (copy_from_user (& params , u64_to_user_ptr (argp -> data ), sizeof (params )))
561
561
return - EFAULT ;
562
562
563
+ sev -> policy = params .policy ;
564
+
563
565
memset (& start , 0 , sizeof (start ));
564
566
565
567
dh_blob = NULL ;
@@ -2199,6 +2201,8 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
2199
2201
if (params .policy & SNP_POLICY_MASK_SINGLE_SOCKET )
2200
2202
return - EINVAL ;
2201
2203
2204
+ sev -> policy = params .policy ;
2205
+
2202
2206
sev -> snp_context = snp_context_create (kvm , argp );
2203
2207
if (!sev -> snp_context )
2204
2208
return - ENOTTY ;
@@ -4922,3 +4926,97 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
4922
4926
4923
4927
return level ;
4924
4928
}
4929
+
4930
/*
 * Return a host-readable view of @vcpu's VMSA for debug dumping, or NULL.
 *
 * Ownership contract: if guest state is not yet protected, the returned
 * pointer aliases the live, still-unencrypted VMSA and must NOT be freed.
 * Otherwise a freshly allocated page holding a decrypted copy is returned;
 * the caller releases it with sev_free_decrypted_vmsa().
 *
 * Returns NULL for non-SEV-ES guests, when the guest's launch policy
 * forbids debugging, or on allocation/firmware-command failure.
 */
struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area *vmsa;
	struct kvm_sev_info *sev;
	int error = 0;
	int ret;

	/* Only SEV-ES/SEV-SNP guests have an (eventually) encrypted VMSA. */
	if (!sev_es_guest(vcpu->kvm))
		return NULL;

	/*
	 * If the VMSA has not yet been encrypted, return a pointer to the
	 * current un-encrypted VMSA.
	 */
	if (!vcpu->arch.guest_state_protected)
		return (struct vmcb_save_area *)svm->sev_es.vmsa;

	sev = to_kvm_sev_info(vcpu->kvm);

	/*
	 * Check if the SEV policy allows debugging.  SNP and legacy SEV use
	 * opposite polarity: SNP_POLICY_DEBUG must be set, SEV_POLICY_NODBG
	 * must be clear.
	 */
	if (sev_snp_guest(vcpu->kvm)) {
		if (!(sev->policy & SNP_POLICY_DEBUG))
			return NULL;
	} else {
		if (sev->policy & SEV_POLICY_NODBG)
			return NULL;
	}

	if (sev_snp_guest(vcpu->kvm)) {
		struct sev_data_snp_dbg dbg = {0};

		/*
		 * Destination is allocated as a firmware page — presumably so
		 * the PSP may write the decrypted image into it; confirm
		 * against the SNP firmware ABI.
		 */
		vmsa = snp_alloc_firmware_page(__GFP_ZERO);
		if (!vmsa)
			return NULL;

		dbg.gctx_paddr = __psp_pa(sev->snp_context);
		dbg.src_addr = svm->vmcb->control.vmsa_pa;
		dbg.dst_addr = __psp_pa(vmsa);

		ret = sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);

		/*
		 * Return the target page to a hypervisor page no matter what.
		 * If this fails, the page can't be used, so leak it and don't
		 * try to use it.
		 */
		if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
			return NULL;

		/* Only now is it safe to inspect the command's result. */
		if (ret) {
			pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
			       ret, error, error);
			free_page((unsigned long)vmsa);

			return NULL;
		}
	} else {
		struct sev_data_dbg dbg = {0};
		struct page *vmsa_page;

		/* Legacy SEV-ES: a plain kernel page is a valid destination. */
		vmsa_page = alloc_page(GFP_KERNEL);
		if (!vmsa_page)
			return NULL;

		vmsa = page_address(vmsa_page);

		dbg.handle = sev->handle;
		dbg.src_addr = svm->vmcb->control.vmsa_pa;
		dbg.dst_addr = __psp_pa(vmsa);
		dbg.len = PAGE_SIZE;

		ret = sev_do_cmd(SEV_CMD_DBG_DECRYPT, &dbg, &error);
		if (ret) {
			pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
			       ret, error, error);
			__free_page(vmsa_page);

			return NULL;
		}
	}

	return vmsa;
}
5014
+
5015
+ void sev_free_decrypted_vmsa (struct kvm_vcpu * vcpu , struct vmcb_save_area * vmsa )
5016
+ {
5017
+ /* If the VMSA has not yet been encrypted, nothing was allocated */
5018
+ if (!vcpu -> arch .guest_state_protected || !vmsa )
5019
+ return ;
5020
+
5021
+ free_page ((unsigned long )vmsa );
5022
+ }
0 commit comments