@@ -2509,8 +2509,8 @@ int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call,
2509
2509
}
2510
2510
EXPORT_SYMBOL_GPL (snp_issue_svsm_attest_req );
2511
2511
2512
- int snp_issue_guest_request (struct snp_guest_req * req , struct snp_req_data * input ,
2513
- struct snp_guest_request_ioctl * rio )
2512
+ static int snp_issue_guest_request (struct snp_guest_req * req , struct snp_req_data * input ,
2513
+ struct snp_guest_request_ioctl * rio )
2514
2514
{
2515
2515
struct ghcb_state state ;
2516
2516
struct es_em_ctxt ctxt ;
@@ -2572,7 +2572,6 @@ int snp_issue_guest_request(struct snp_guest_req *req, struct snp_req_data *inpu
2572
2572
2573
2573
return ret ;
2574
2574
}
2575
- EXPORT_SYMBOL_GPL (snp_issue_guest_request );
2576
2575
2577
2576
static struct platform_device sev_guest_device = {
2578
2577
.name = "sev-guest" ,
@@ -2838,3 +2837,292 @@ void snp_msg_free(struct snp_msg_desc *mdesc)
2838
2837
kfree (mdesc );
2839
2838
}
2840
2839
EXPORT_SYMBOL_GPL (snp_msg_free );
2840
+
2841
/* Serializes use of the shared request/response buffers and command flow. */
static DEFINE_MUTEX(snp_cmd_mutex);
2843
+
2844
+ /*
2845
+ * If an error is received from the host or AMD Secure Processor (ASP) there
2846
+ * are two options. Either retry the exact same encrypted request or discontinue
2847
+ * using the VMPCK.
2848
+ *
2849
+ * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
2850
+ * encrypt the requests. The IV for this scheme is the sequence number. GCM
2851
+ * cannot tolerate IV reuse.
2852
+ *
2853
+ * The ASP FW v1.51 only increments the sequence numbers on a successful
2854
+ * guest<->ASP back and forth and only accepts messages at its exact sequence
2855
+ * number.
2856
+ *
2857
+ * So if the sequence number were to be reused the encryption scheme is
2858
+ * vulnerable. If the sequence number were incremented for a fresh IV the ASP
2859
+ * will reject the request.
2860
+ */
2861
+ static void snp_disable_vmpck (struct snp_msg_desc * mdesc )
2862
+ {
2863
+ pr_alert ("Disabling VMPCK%d communication key to prevent IV reuse.\n" ,
2864
+ mdesc -> vmpck_id );
2865
+ memzero_explicit (mdesc -> vmpck , VMPCK_KEY_LEN );
2866
+ mdesc -> vmpck = NULL ;
2867
+ }
2868
+
2869
+ static inline u64 __snp_get_msg_seqno (struct snp_msg_desc * mdesc )
2870
+ {
2871
+ u64 count ;
2872
+
2873
+ lockdep_assert_held (& snp_cmd_mutex );
2874
+
2875
+ /* Read the current message sequence counter from secrets pages */
2876
+ count = * mdesc -> os_area_msg_seqno ;
2877
+
2878
+ return count + 1 ;
2879
+ }
2880
+
2881
+ /* Return a non-zero on success */
2882
+ static u64 snp_get_msg_seqno (struct snp_msg_desc * mdesc )
2883
+ {
2884
+ u64 count = __snp_get_msg_seqno (mdesc );
2885
+
2886
+ /*
2887
+ * The message sequence counter for the SNP guest request is a 64-bit
2888
+ * value but the version 2 of GHCB specification defines a 32-bit storage
2889
+ * for it. If the counter exceeds the 32-bit value then return zero.
2890
+ * The caller should check the return value, but if the caller happens to
2891
+ * not check the value and use it, then the firmware treats zero as an
2892
+ * invalid number and will fail the message request.
2893
+ */
2894
+ if (count >= UINT_MAX ) {
2895
+ pr_err ("request message sequence counter overflow\n" );
2896
+ return 0 ;
2897
+ }
2898
+
2899
+ return count ;
2900
+ }
2901
+
2902
+ static void snp_inc_msg_seqno (struct snp_msg_desc * mdesc )
2903
+ {
2904
+ /*
2905
+ * The counter is also incremented by the PSP, so increment it by 2
2906
+ * and save in secrets page.
2907
+ */
2908
+ * mdesc -> os_area_msg_seqno += 2 ;
2909
+ }
2910
+
2911
+ static int verify_and_dec_payload (struct snp_msg_desc * mdesc , struct snp_guest_req * req )
2912
+ {
2913
+ struct snp_guest_msg * resp_msg = & mdesc -> secret_response ;
2914
+ struct snp_guest_msg * req_msg = & mdesc -> secret_request ;
2915
+ struct snp_guest_msg_hdr * req_msg_hdr = & req_msg -> hdr ;
2916
+ struct snp_guest_msg_hdr * resp_msg_hdr = & resp_msg -> hdr ;
2917
+ struct aesgcm_ctx * ctx = mdesc -> ctx ;
2918
+ u8 iv [GCM_AES_IV_SIZE ] = {};
2919
+
2920
+ pr_debug ("response [seqno %lld type %d version %d sz %d]\n" ,
2921
+ resp_msg_hdr -> msg_seqno , resp_msg_hdr -> msg_type , resp_msg_hdr -> msg_version ,
2922
+ resp_msg_hdr -> msg_sz );
2923
+
2924
+ /* Copy response from shared memory to encrypted memory. */
2925
+ memcpy (resp_msg , mdesc -> response , sizeof (* resp_msg ));
2926
+
2927
+ /* Verify that the sequence counter is incremented by 1 */
2928
+ if (unlikely (resp_msg_hdr -> msg_seqno != (req_msg_hdr -> msg_seqno + 1 )))
2929
+ return - EBADMSG ;
2930
+
2931
+ /* Verify response message type and version number. */
2932
+ if (resp_msg_hdr -> msg_type != (req_msg_hdr -> msg_type + 1 ) ||
2933
+ resp_msg_hdr -> msg_version != req_msg_hdr -> msg_version )
2934
+ return - EBADMSG ;
2935
+
2936
+ /*
2937
+ * If the message size is greater than our buffer length then return
2938
+ * an error.
2939
+ */
2940
+ if (unlikely ((resp_msg_hdr -> msg_sz + ctx -> authsize ) > req -> resp_sz ))
2941
+ return - EBADMSG ;
2942
+
2943
+ /* Decrypt the payload */
2944
+ memcpy (iv , & resp_msg_hdr -> msg_seqno , min (sizeof (iv ), sizeof (resp_msg_hdr -> msg_seqno )));
2945
+ if (!aesgcm_decrypt (ctx , req -> resp_buf , resp_msg -> payload , resp_msg_hdr -> msg_sz ,
2946
+ & resp_msg_hdr -> algo , AAD_LEN , iv , resp_msg_hdr -> authtag ))
2947
+ return - EBADMSG ;
2948
+
2949
+ return 0 ;
2950
+ }
2951
+
2952
+ static int enc_payload (struct snp_msg_desc * mdesc , u64 seqno , struct snp_guest_req * req )
2953
+ {
2954
+ struct snp_guest_msg * msg = & mdesc -> secret_request ;
2955
+ struct snp_guest_msg_hdr * hdr = & msg -> hdr ;
2956
+ struct aesgcm_ctx * ctx = mdesc -> ctx ;
2957
+ u8 iv [GCM_AES_IV_SIZE ] = {};
2958
+
2959
+ memset (msg , 0 , sizeof (* msg ));
2960
+
2961
+ hdr -> algo = SNP_AEAD_AES_256_GCM ;
2962
+ hdr -> hdr_version = MSG_HDR_VER ;
2963
+ hdr -> hdr_sz = sizeof (* hdr );
2964
+ hdr -> msg_type = req -> msg_type ;
2965
+ hdr -> msg_version = req -> msg_version ;
2966
+ hdr -> msg_seqno = seqno ;
2967
+ hdr -> msg_vmpck = req -> vmpck_id ;
2968
+ hdr -> msg_sz = req -> req_sz ;
2969
+
2970
+ /* Verify the sequence number is non-zero */
2971
+ if (!hdr -> msg_seqno )
2972
+ return - ENOSR ;
2973
+
2974
+ pr_debug ("request [seqno %lld type %d version %d sz %d]\n" ,
2975
+ hdr -> msg_seqno , hdr -> msg_type , hdr -> msg_version , hdr -> msg_sz );
2976
+
2977
+ if (WARN_ON ((req -> req_sz + ctx -> authsize ) > sizeof (msg -> payload )))
2978
+ return - EBADMSG ;
2979
+
2980
+ memcpy (iv , & hdr -> msg_seqno , min (sizeof (iv ), sizeof (hdr -> msg_seqno )));
2981
+ aesgcm_encrypt (ctx , msg -> payload , req -> req_buf , req -> req_sz , & hdr -> algo ,
2982
+ AAD_LEN , iv , hdr -> authtag );
2983
+
2984
+ return 0 ;
2985
+ }
2986
+
2987
+ static int __handle_guest_request (struct snp_msg_desc * mdesc , struct snp_guest_req * req ,
2988
+ struct snp_guest_request_ioctl * rio )
2989
+ {
2990
+ unsigned long req_start = jiffies ;
2991
+ unsigned int override_npages = 0 ;
2992
+ u64 override_err = 0 ;
2993
+ int rc ;
2994
+
2995
+ retry_request :
2996
+ /*
2997
+ * Call firmware to process the request. In this function the encrypted
2998
+ * message enters shared memory with the host. So after this call the
2999
+ * sequence number must be incremented or the VMPCK must be deleted to
3000
+ * prevent reuse of the IV.
3001
+ */
3002
+ rc = snp_issue_guest_request (req , & mdesc -> input , rio );
3003
+ switch (rc ) {
3004
+ case - ENOSPC :
3005
+ /*
3006
+ * If the extended guest request fails due to having too
3007
+ * small of a certificate data buffer, retry the same
3008
+ * guest request without the extended data request in
3009
+ * order to increment the sequence number and thus avoid
3010
+ * IV reuse.
3011
+ */
3012
+ override_npages = mdesc -> input .data_npages ;
3013
+ req -> exit_code = SVM_VMGEXIT_GUEST_REQUEST ;
3014
+
3015
+ /*
3016
+ * Override the error to inform callers the given extended
3017
+ * request buffer size was too small and give the caller the
3018
+ * required buffer size.
3019
+ */
3020
+ override_err = SNP_GUEST_VMM_ERR (SNP_GUEST_VMM_ERR_INVALID_LEN );
3021
+
3022
+ /*
3023
+ * If this call to the firmware succeeds, the sequence number can
3024
+ * be incremented allowing for continued use of the VMPCK. If
3025
+ * there is an error reflected in the return value, this value
3026
+ * is checked further down and the result will be the deletion
3027
+ * of the VMPCK and the error code being propagated back to the
3028
+ * user as an ioctl() return code.
3029
+ */
3030
+ goto retry_request ;
3031
+
3032
+ /*
3033
+ * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
3034
+ * throttled. Retry in the driver to avoid returning and reusing the
3035
+ * message sequence number on a different message.
3036
+ */
3037
+ case - EAGAIN :
3038
+ if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION ) {
3039
+ rc = - ETIMEDOUT ;
3040
+ break ;
3041
+ }
3042
+ schedule_timeout_killable (SNP_REQ_RETRY_DELAY );
3043
+ goto retry_request ;
3044
+ }
3045
+
3046
+ /*
3047
+ * Increment the message sequence number. There is no harm in doing
3048
+ * this now because decryption uses the value stored in the response
3049
+ * structure and any failure will wipe the VMPCK, preventing further
3050
+ * use anyway.
3051
+ */
3052
+ snp_inc_msg_seqno (mdesc );
3053
+
3054
+ if (override_err ) {
3055
+ rio -> exitinfo2 = override_err ;
3056
+
3057
+ /*
3058
+ * If an extended guest request was issued and the supplied certificate
3059
+ * buffer was not large enough, a standard guest request was issued to
3060
+ * prevent IV reuse. If the standard request was successful, return -EIO
3061
+ * back to the caller as would have originally been returned.
3062
+ */
3063
+ if (!rc && override_err == SNP_GUEST_VMM_ERR (SNP_GUEST_VMM_ERR_INVALID_LEN ))
3064
+ rc = - EIO ;
3065
+ }
3066
+
3067
+ if (override_npages )
3068
+ mdesc -> input .data_npages = override_npages ;
3069
+
3070
+ return rc ;
3071
+ }
3072
+
3073
+ int snp_send_guest_request (struct snp_msg_desc * mdesc , struct snp_guest_req * req ,
3074
+ struct snp_guest_request_ioctl * rio )
3075
+ {
3076
+ u64 seqno ;
3077
+ int rc ;
3078
+
3079
+ guard (mutex )(& snp_cmd_mutex );
3080
+
3081
+ /* Check if the VMPCK is not empty */
3082
+ if (!mdesc -> vmpck || !memchr_inv (mdesc -> vmpck , 0 , VMPCK_KEY_LEN )) {
3083
+ pr_err_ratelimited ("VMPCK is disabled\n" );
3084
+ return - ENOTTY ;
3085
+ }
3086
+
3087
+ /* Get message sequence and verify that its a non-zero */
3088
+ seqno = snp_get_msg_seqno (mdesc );
3089
+ if (!seqno )
3090
+ return - EIO ;
3091
+
3092
+ /* Clear shared memory's response for the host to populate. */
3093
+ memset (mdesc -> response , 0 , sizeof (struct snp_guest_msg ));
3094
+
3095
+ /* Encrypt the userspace provided payload in mdesc->secret_request. */
3096
+ rc = enc_payload (mdesc , seqno , req );
3097
+ if (rc )
3098
+ return rc ;
3099
+
3100
+ /*
3101
+ * Write the fully encrypted request to the shared unencrypted
3102
+ * request page.
3103
+ */
3104
+ memcpy (mdesc -> request , & mdesc -> secret_request , sizeof (mdesc -> secret_request ));
3105
+
3106
+ rc = __handle_guest_request (mdesc , req , rio );
3107
+ if (rc ) {
3108
+ if (rc == - EIO &&
3109
+ rio -> exitinfo2 == SNP_GUEST_VMM_ERR (SNP_GUEST_VMM_ERR_INVALID_LEN ))
3110
+ return rc ;
3111
+
3112
+ pr_alert ("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n" ,
3113
+ rc , rio -> exitinfo2 );
3114
+
3115
+ snp_disable_vmpck (mdesc );
3116
+ return rc ;
3117
+ }
3118
+
3119
+ rc = verify_and_dec_payload (mdesc , req );
3120
+ if (rc ) {
3121
+ pr_alert ("Detected unexpected decode failure from ASP. rc: %d\n" , rc );
3122
+ snp_disable_vmpck (mdesc );
3123
+ return rc ;
3124
+ }
3125
+
3126
+ return 0 ;
3127
+ }
3128
+ EXPORT_SYMBOL_GPL (snp_send_guest_request );
0 commit comments