 				   ICP_QAT_HW_CIPHER_KEY_CONVERT, \
 				   ICP_QAT_HW_CIPHER_DECRYPT)
 
+#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+				       ICP_QAT_HW_CIPHER_DECRYPT)
+
 #define HW_CAP_AES_V2(accel_dev) \
 	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
 	 ICP_ACCEL_CAPABILITIES_AES_V2)
@@ -95,6 +100,7 @@ struct qat_alg_skcipher_ctx {
 	struct icp_qat_fw_la_bulk_req dec_fw_req;
 	struct qat_crypto_instance *inst;
 	struct crypto_skcipher *ftfm;
+	struct crypto_cipher *tweak;
 	bool fallback;
 	int mode;
 };
@@ -428,7 +434,16 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
 	cd_pars->u.s.content_desc_params_sz =
 				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
 
-	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+
+		/* Store both XTS keys in CD, only the first key is sent
+		 * to the HW, the second key is used for tweak calculation
+		 */
+		memcpy(cd->ucs_aes.key, key, keylen);
+		keylen = keylen / 2;
+	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
 		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
 					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
 		keylen = round_up(keylen, 16);
@@ -458,23 +473,55 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
 	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
 }
 
+static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
+				    u8 *key_reverse)
+{
+	struct crypto_aes_ctx aes_expanded;
+	int nrounds;
+	u8 *key;
+
+	aes_expandkey(&aes_expanded, key_forward, keylen);
+	if (keylen == AES_KEYSIZE_128) {
+		nrounds = 10;
+		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+		memcpy(key_reverse, key, AES_BLOCK_SIZE);
+	} else {
+		/* AES_KEYSIZE_256 */
+		nrounds = 14;
+		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+		memcpy(key_reverse, key, AES_BLOCK_SIZE);
+		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+	}
+}
+
 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
 				      int alg, const u8 *key,
 				      unsigned int keylen, int mode)
 {
 	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
 
 	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
 
-	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+		/* Key reversing not supported, set no convert */
+		dec_cd->aes.cipher_config.val =
+				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
+
+		/* In-place key reversal */
+		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
+					dec_cd->ucs_aes.key);
+	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
 		dec_cd->aes.cipher_config.val =
 					QAT_AES_HW_CONFIG_DEC(alg, mode);
-	else
+	} else {
 		dec_cd->aes.cipher_config.val =
 					QAT_AES_HW_CONFIG_ENC(alg, mode);
+	}
 }
 
 static int qat_alg_validate_key(int key_len, int *alg, int mode)
@@ -1081,8 +1128,33 @@ static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
 
 	ctx->fallback = false;
 
-	return qat_alg_skcipher_setkey(tfm, key, keylen,
-				       ICP_QAT_HW_CIPHER_XTS_MODE);
+	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
+				      ICP_QAT_HW_CIPHER_XTS_MODE);
+	if (ret)
+		return ret;
+
+	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
+		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
+					   keylen / 2);
+
+	return ret;
+}
+
+static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+	u8 *iv = qat_req->skcipher_req->iv;
+
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+
+	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
+		crypto_cipher_encrypt_one(ctx->tweak,
+					  (u8 *)cipher_param->u.cipher_IV_array,
+					  iv);
+	else
+		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
 }
 
 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
@@ -1114,7 +1186,8 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
+
+	qat_alg_set_req_iv(qat_req);
 
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
@@ -1182,8 +1255,8 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
 
+	qat_alg_set_req_iv(qat_req);
 	qat_alg_update_iv(qat_req);
 
 	do {
@@ -1293,6 +1366,12 @@ static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
 	if (IS_ERR(ctx->ftfm))
 		return PTR_ERR(ctx->ftfm);
 
+	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->tweak)) {
+		crypto_free_skcipher(ctx->ftfm);
+		return PTR_ERR(ctx->tweak);
+	}
+
 	reqsize = max(sizeof(struct qat_crypto_request),
 		      sizeof(struct skcipher_request) +
 		      crypto_skcipher_reqsize(ctx->ftfm));
@@ -1335,6 +1414,9 @@ static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
 	if (ctx->ftfm)
 		crypto_free_skcipher(ctx->ftfm);
 
+	if (ctx->tweak)
+		crypto_free_cipher(ctx->tweak);
+
 	qat_alg_skcipher_exit_tfm(tfm);
 }
 
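
Side note on the qat_alg_set_req_iv() hunk above: when the UCS slice is used for XTS, the driver no longer passes the raw IV to the device but the pre-computed XTS tweak T = AES-Enc(K2, IV), where K2 is the second half of the XTS key loaded into ctx->tweak in qat_alg_skcipher_xts_setkey(). The following user-space sketch reproduces that one computation purely for illustration; it assumes OpenSSL's legacy AES API, and the k2/iv values are made up, not taken from the patch.

/* Illustrative sketch: compute the XTS tweak T = AES-Enc(K2, IV), mirroring
 * what qat_alg_set_req_iv() does via crypto_cipher_encrypt_one(ctx->tweak, ...).
 * Build with: cc tweak.c -lcrypto
 */
#include <openssl/aes.h>
#include <stdio.h>

int main(void)
{
	unsigned char k2[16] = { 0 };	/* hypothetical: second half of a 256-bit XTS key */
	unsigned char iv[16] = { 0 };	/* hypothetical: per-request IV (sector number) */
	unsigned char tweak[16];
	AES_KEY enc_key;

	AES_set_encrypt_key(k2, 128, &enc_key);
	AES_encrypt(iv, tweak, &enc_key);	/* T = E_K2(IV) */

	for (int i = 0; i < 16; i++)
		printf("%02x", tweak[i]);
	printf("\n");
	return 0;
}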