@@ -1045,18 +1045,21 @@ int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
  * This is needed by the OVMF UEFI firmware which will use whatever it finds in
  * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
  * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
+ *
+ * When running under SVSM the CA page is needed too, so map it as well.
  */
-int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
+int __init sev_es_efi_map_ghcbs_cas(pgd_t *pgd)
 {
+	unsigned long address, pflags, pflags_enc;
 	struct sev_es_runtime_data *data;
-	unsigned long address, pflags;
 	int cpu;
 	u64 pfn;

 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		return 0;

 	pflags = _PAGE_NX | _PAGE_RW;
+	pflags_enc = cc_mkenc(pflags);

 	for_each_possible_cpu(cpu) {
 		data = per_cpu(runtime_data, cpu);
@@ -1066,6 +1069,16 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)

 		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
 			return 1;
+
+		if (snp_vmpl) {
+			address = per_cpu(svsm_caa_pa, cpu);
+			if (!address)
+				return 1;
+
+			pfn = address >> PAGE_SHIFT;
+			if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags_enc))
+				return 1;
+		}
 	}

 	return 0;
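
The GHCB pages stay shared with the hypervisor and keep the plain pflags, while the SVSM Calling Area is guest-private, so its EFI mapping gets the encryption bit folded in via cc_mkenc(). A minimal, self-contained sketch of that helper's effect on AMD SEV (the cc_mask bit position below is an arbitrary example; the real value comes from CPUID 0x8000001F, and the real helper in arch/x86/coco/core.c also handles other CC vendors):

#include <stdint.h>
#include <stdio.h>

/* Example stand-in for the kernel's cc_mask (C-bit position is CPU-specific). */
static const uint64_t cc_mask = 1ULL << 51;

/* Simplified model of cc_mkenc(): fold the C-bit into the protection flags. */
static uint64_t mkenc(uint64_t pflags)
{
	return pflags | cc_mask;
}

int main(void)
{
	const uint64_t page_nx = 1ULL << 63;	/* _PAGE_NX */
	const uint64_t page_rw = 1ULL << 1;	/* _PAGE_RW */
	uint64_t pflags = page_nx | page_rw;

	printf("GHCB (shared)  pflags:     %#llx\n", (unsigned long long)pflags);
	printf("CA (private)   pflags_enc: %#llx\n", (unsigned long long)mkenc(pflags));
	return 0;
}
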
@@ -1389,16 +1402,16 @@ int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
 }
 EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req);

-static int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *input,
-				   struct snp_guest_request_ioctl *rio)
+static int snp_issue_guest_request(struct snp_guest_req *req)
 {
+	struct snp_req_data *input = &req->input;
 	struct ghcb_state state;
 	struct es_em_ctxt ctxt;
 	unsigned long flags;
 	struct ghcb *ghcb;
 	int ret;

-	rio->exitinfo2 = SEV_RET_NO_FW_CALL;
+	req->exitinfo2 = SEV_RET_NO_FW_CALL;

 	/*
 	 * __sev_get_ghcb() needs to run with IRQs disabled because it is using
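
This refactor folds the former snp_req_data and snp_guest_request_ioctl parameters into the request descriptor itself. A sketch of the struct snp_guest_req members this diff relies on, inferred purely from the usage in these hunks (hypothetical *_sketch names; the real definitions live in the SEV headers and may carry additional fields):

#include <linux/types.h>

/* Sketch only: members inferred from this diff, not copied from the header. */
struct snp_req_data_sketch {
	unsigned long req_gpa;		/* assumption: not visible in these hunks */
	unsigned long resp_gpa;		/* set from __pa(mdesc->response) */
	unsigned long data_gpa;		/* set from __pa(req->certs_data) */
};

struct snp_guest_req_sketch {
	void				*req_buf;	/* plaintext request, must be linear-mapped */
	size_t				req_sz;
	void				*resp_buf;	/* decrypted response, must be linear-mapped */
	size_t				resp_sz;
	void				*certs_data;	/* optional certificate buffer */

	u64				exit_code;	/* e.g. SVM_VMGEXIT_GUEST_REQUEST */
	u64				exitinfo2;	/* moved here from struct snp_guest_request_ioctl */
	struct snp_req_data_sketch	input;		/* GPAs for the hypervisor, formerly a separate argument */

	u8				msg_version;
	u8				msg_type;
	int				vmpck_id;
};
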
@@ -1423,8 +1436,8 @@ static int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_dat
 	if (ret)
 		goto e_put;

-	rio->exitinfo2 = ghcb->save.sw_exit_info_2;
-	switch (rio->exitinfo2) {
+	req->exitinfo2 = ghcb->save.sw_exit_info_2;
+	switch (req->exitinfo2) {
 	case 0:
 		break;

@@ -1919,8 +1932,7 @@ static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_r
 	return 0;
 }

-static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
-				  struct snp_guest_request_ioctl *rio)
+static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
 {
 	unsigned long req_start = jiffies;
 	unsigned int override_npages = 0;
@@ -1934,7 +1946,7 @@ static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_r
 	 * sequence number must be incremented or the VMPCK must be deleted to
 	 * prevent reuse of the IV.
 	 */
-	rc = snp_issue_guest_request(req, &req->input, rio);
+	rc = snp_issue_guest_request(req);
 	switch (rc) {
 	case -ENOSPC:
 		/*
@@ -1987,7 +1999,7 @@ static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_r
 	snp_inc_msg_seqno(mdesc);

 	if (override_err) {
-		rio->exitinfo2 = override_err;
+		req->exitinfo2 = override_err;

 		/*
 		 * If an extended guest request was issued and the supplied certificate
@@ -2005,12 +2017,20 @@ static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_r
 	return rc;
 }

-int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
-			   struct snp_guest_request_ioctl *rio)
+int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
 {
 	u64 seqno;
 	int rc;

+	/*
+	 * enc_payload() calls aesgcm_encrypt(), which can potentially offload to HW.
+	 * The offload's DMA SG list of data to encrypt has to be in linear mapping.
+	 */
+	if (!virt_addr_valid(req->req_buf) || !virt_addr_valid(req->resp_buf)) {
+		pr_warn("AES-GCM buffers must be in linear mapping");
+		return -EINVAL;
+	}
+
 	guard(mutex)(&snp_cmd_mutex);

 	/* Check if the VMPCK is not empty */
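
The new check relies on the fact that only memory in the kernel's linear (direct) mapping satisfies virt_addr_valid(): kmalloc()'ed buffers do, vmalloc()'ed ones do not. A hypothetical in-kernel snippet (not part of the patch) illustrating which allocations the check would accept:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/printk.h>

/* Demo only: shows which buffers would pass the new linear-mapping check. */
static void __maybe_unused snp_buffer_check_demo(void)
{
	void *lin  = kmalloc(64, GFP_KERNEL);	/* linear mapping  -> accepted */
	void *vmap = vmalloc(64);		/* vmalloc mapping -> rejected */

	if (lin)
		pr_info("kmalloc buffer: virt_addr_valid=%d\n", virt_addr_valid(lin));
	if (vmap)
		pr_info("vmalloc buffer: virt_addr_valid=%d\n", virt_addr_valid(vmap));

	kfree(lin);
	vfree(vmap);
}
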
@@ -2043,14 +2063,14 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
 	req->input.resp_gpa = __pa(mdesc->response);
 	req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;

-	rc = __handle_guest_request(mdesc, req, rio);
+	rc = __handle_guest_request(mdesc, req);
 	if (rc) {
 		if (rc == -EIO &&
-		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
+		    req->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
 			return rc;

 		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
-			 rc, rio->exitinfo2);
+			 rc, req->exitinfo2);

 		snp_disable_vmpck(mdesc);
 		return rc;
@@ -2069,11 +2089,10 @@ EXPORT_SYMBOL_GPL(snp_send_guest_request);

 static int __init snp_get_tsc_info(void)
 {
-	struct snp_guest_request_ioctl *rio;
 	struct snp_tsc_info_resp *tsc_resp;
 	struct snp_tsc_info_req *tsc_req;
 	struct snp_msg_desc *mdesc;
-	struct snp_guest_req *req;
+	struct snp_guest_req req = {};
 	int rc = -ENOMEM;

 	tsc_req = kzalloc(sizeof(*tsc_req), GFP_KERNEL);
@@ -2089,32 +2108,24 @@ static int __init snp_get_tsc_info(void)
 	if (!tsc_resp)
 		goto e_free_tsc_req;

-	req = kzalloc(sizeof(*req), GFP_KERNEL);
-	if (!req)
-		goto e_free_tsc_resp;
-
-	rio = kzalloc(sizeof(*rio), GFP_KERNEL);
-	if (!rio)
-		goto e_free_req;
-
 	mdesc = snp_msg_alloc();
 	if (IS_ERR_OR_NULL(mdesc))
-		goto e_free_rio;
+		goto e_free_tsc_resp;

 	rc = snp_msg_init(mdesc, snp_vmpl);
 	if (rc)
 		goto e_free_mdesc;

-	req->msg_version = MSG_HDR_VER;
-	req->msg_type = SNP_MSG_TSC_INFO_REQ;
-	req->vmpck_id = snp_vmpl;
-	req->req_buf = tsc_req;
-	req->req_sz = sizeof(*tsc_req);
-	req->resp_buf = (void *)tsc_resp;
-	req->resp_sz = sizeof(*tsc_resp) + AUTHTAG_LEN;
-	req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+	req.msg_version = MSG_HDR_VER;
+	req.msg_type = SNP_MSG_TSC_INFO_REQ;
+	req.vmpck_id = snp_vmpl;
+	req.req_buf = tsc_req;
+	req.req_sz = sizeof(*tsc_req);
+	req.resp_buf = (void *)tsc_resp;
+	req.resp_sz = sizeof(*tsc_resp) + AUTHTAG_LEN;
+	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

-	rc = snp_send_guest_request(mdesc, req, rio);
+	rc = snp_send_guest_request(mdesc, &req);
 	if (rc)
 		goto e_request;

@@ -2135,11 +2146,7 @@ static int __init snp_get_tsc_info(void)
 	memzero_explicit(tsc_resp, sizeof(*tsc_resp) + AUTHTAG_LEN);
 e_free_mdesc:
 	snp_msg_free(mdesc);
-e_free_rio:
-	kfree(rio);
-e_free_req:
-	kfree(req);
-e_free_tsc_resp:
+e_free_tsc_resp:
 	kfree(tsc_resp);
 e_free_tsc_req:
 	kfree(tsc_req);
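
Taken together, callers end up with a simpler pattern: the request descriptor can live on the stack, no separate snp_guest_request_ioctl allocation is needed, and the firmware/VMM error code is read back from req.exitinfo2. A hedged sketch of that calling convention (the message type and buffer handling below are illustrative, not taken from this patch):

/* Sketch of the post-patch calling convention; error handling is minimal. */
static int example_guest_request(struct snp_msg_desc *mdesc,
				 void *req_buf, size_t req_sz,
				 void *resp_buf, size_t resp_sz)
{
	struct snp_guest_req req = {
		.msg_version	= MSG_HDR_VER,
		.msg_type	= SNP_MSG_REPORT_REQ,	/* illustrative message type */
		.vmpck_id	= snp_vmpl,
		.req_buf	= req_buf,	/* must be kmalloc()'ed, see the linear-mapping check */
		.req_sz		= req_sz,
		.resp_buf	= resp_buf,
		.resp_sz	= resp_sz,
		.exit_code	= SVM_VMGEXIT_GUEST_REQUEST,
	};
	int rc;

	rc = snp_send_guest_request(mdesc, &req);
	if (rc)
		pr_err("SNP guest request failed: rc %d, exitinfo2 %#llx\n",
		       rc, req.exitinfo2);

	return rc;
}
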