Commit 0eae14a

Gilad Ben-Yossef authored and herbertx committed
crypto: ccree - refactor AEAD IV in AAD handling
Our handling of ciphers with the IV trailing the AAD was correct but overly complicated. Refactor to simplify it and possibly save one DMA burst.

This has the added bonus of behaving the same as the generic rfc4543 implementation for non-compliant inputs, where the IV in the iv field is not the same as the IV in the AAD. There should be no change in behaviour with correct inputs.

Signed-off-by: Gilad Ben-Yossef <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
1 parent b66c187 commit 0eae14a
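For orientation, below is a minimal standalone sketch (not driver code) of the convention this refactor settles on, assuming an rfc4106-style request in which an 8-byte IV trails the AAD inside req->src and req->assoclen covers both: the entry points derive the AAD length to authenticate once, up front, while scatterlist offsets and DMA mapping sizes keep using the caller's req->assoclen unchanged. The struct and macro names below are illustrative stand-ins, not the driver's.

/*
 * Toy sketch only: toy_aead_req and RFC4_TRAILING_IV_SIZE stand in for
 * struct aead_request and the driver's CCM_BLOCK_IV_SIZE /
 * GCM_BLOCK_RFC4_IV_SIZE constants.
 */
#include <assert.h>
#include <stdio.h>

#define RFC4_TRAILING_IV_SIZE 8u	/* IV appended right after the AAD in src */

struct toy_aead_req {
	unsigned int assoclen;	/* AAD bytes in src, including the trailing IV */
	unsigned int cryptlen;	/* payload bytes following AAD + IV */
};

int main(void)
{
	struct toy_aead_req req = { .assoclen = 16, .cryptlen = 64 };

	/* Set once at the request entry point: authenticate only the real AAD. */
	unsigned int aad_to_authenticate = req.assoclen - RFC4_TRAILING_IV_SIZE;

	/* Buffer geometry keeps using the caller's assoclen as-is. */
	unsigned int payload_offset = req.assoclen;
	unsigned int size_to_map = req.assoclen + req.cryptlen;

	assert(aad_to_authenticate == 8);
	printf("authenticate %u B of AAD, payload at offset %u, map %u B\n",
	       aad_to_authenticate, payload_offset, size_to_map);
	return 0;
}

This mirrors what the diff below does: the rfc4309/rfc4106 entry points set areq_ctx->assoclen to req->assoclen minus CCM_BLOCK_IV_SIZE or GCM_BLOCK_RFC4_IV_SIZE, and cc_buffer_mgr.c then skips and maps req->assoclen + req->cryptlen directly, dropping the is_gcm4543 special-casing.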

3 files changed, 16 additions(+), 103 deletions(-)

drivers/crypto/ccree/cc_aead.c

Lines changed: 5 additions & 22 deletions
@@ -1609,7 +1609,6 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
 	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
 	       CCM_BLOCK_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
-	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
 }
 
 static void cc_set_ghash_desc(struct aead_request *req,
@@ -1868,8 +1867,7 @@ static int config_gcm_context(struct aead_request *req)
 	 */
 	__be64 temp64;
 
-	temp64 = cpu_to_be64((req_ctx->assoclen +
-			      GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+	temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
 	memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
 	temp64 = 0;
 	memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1889,7 +1887,6 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
 	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
 	       GCM_BLOCK_RFC4_IV_SIZE);
 	req->iv = areq_ctx->ctr_iv;
-	areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
 }
 
 static int cc_proc_aead(struct aead_request *req,
@@ -2031,9 +2028,6 @@ static int cc_aead_encrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->is_gcm4543 = false;
-
-	areq_ctx->plaintext_authenticate_only = false;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2057,8 +2051,7 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->is_gcm4543 = true;
+	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
 
 	cc_proc_rfc4309_ccm(req);
 
@@ -2079,9 +2072,6 @@ static int cc_aead_decrypt(struct aead_request *req)
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
 	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->is_gcm4543 = false;
-
-	areq_ctx->plaintext_authenticate_only = false;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2103,9 +2093,8 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
+	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
 
-	areq_ctx->is_gcm4543 = true;
 	cc_proc_rfc4309_ccm(req);
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
@@ -2216,11 +2205,9 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->plaintext_authenticate_only = false;
+	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2248,7 +2235,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
 	areq_ctx->assoclen = req->assoclen;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2270,11 +2256,9 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
 
 	/* No generated IV required */
 	areq_ctx->backup_iv = req->iv;
-	areq_ctx->assoclen = req->assoclen;
-	areq_ctx->plaintext_authenticate_only = false;
+	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)
@@ -2302,7 +2286,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
 	areq_ctx->assoclen = req->assoclen;
 
 	cc_proc_rfc4_gcm(req);
-	areq_ctx->is_gcm4543 = true;
 
 	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 	if (rc != -EINPROGRESS && rc != -EBUSY)

drivers/crypto/ccree/cc_aead.h

Lines changed: 1 addition & 2 deletions
@@ -66,7 +66,7 @@ struct aead_req_ctx {
 	/* used to prevent cache coherence problem */
 	u8 backup_mac[MAX_MAC_SIZE];
 	u8 *backup_iv; /* store orig iv */
-	u32 assoclen; /* internal assoclen */
+	u32 assoclen; /* size of AAD buffer to authenticate */
 	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
 	/* buffer for internal ccm configurations */
 	dma_addr_t ccm_iv0_dma_addr;
@@ -79,7 +79,6 @@ struct aead_req_ctx {
 	dma_addr_t gcm_iv_inc2_dma_addr;
 	dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
 	dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
-	bool is_gcm4543;
 
 	u8 *icv_virt_addr; /* Virt. address of ICV */
 	struct async_gen_req_ctx gen_ctx;

drivers/crypto/ccree/cc_buffer_mgr.c

Lines changed: 10 additions & 79 deletions
@@ -13,12 +13,6 @@
 #include "cc_hash.h"
 #include "cc_aead.h"
 
-enum dma_buffer_type {
-	DMA_NULL_TYPE = -1,
-	DMA_SGL_TYPE = 1,
-	DMA_BUFF_TYPE = 2,
-};
-
 union buffer_array_entry {
 	struct scatterlist *sgl;
 	dma_addr_t buffer_dma;
@@ -30,7 +24,6 @@ struct buffer_array {
 	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
-	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
 	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
 };
@@ -60,11 +53,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
 			enum cc_sg_cpy_direct dir)
 {
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	u32 skip = areq_ctx->assoclen + req->cryptlen;
-
-	if (areq_ctx->is_gcm4543)
-		skip += crypto_aead_ivsize(tfm);
+	u32 skip = req->assoclen + req->cryptlen;
 
 	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
 			   (skip - areq_ctx->req_authsize), skip, dir);
@@ -216,14 +205,8 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 		u32 tot_len = sg_data->total_data_len[i];
 		u32 offset = sg_data->offset[i];
 
-		if (sg_data->type[i] == DMA_SGL_TYPE)
-			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
-						  offset, &total_nents,
-						  &mlli_p);
-		else /*DMA_BUFF_TYPE*/
-			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
-						    tot_len, &total_nents,
-						    &mlli_p);
+		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
+					  &total_nents, &mlli_p);
 		if (rc)
 			return rc;
 
@@ -249,27 +232,6 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
 	return rc;
 }
 
-static void cc_add_buffer_entry(struct device *dev,
-				struct buffer_array *sgl_data,
-				dma_addr_t buffer_dma, unsigned int buffer_len,
-				bool is_last_entry, u32 *mlli_nents)
-{
-	unsigned int index = sgl_data->num_of_buffers;
-
-	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
-		index, &buffer_dma, buffer_len, is_last_entry);
-	sgl_data->nents[index] = 1;
-	sgl_data->entry[index].buffer_dma = buffer_dma;
-	sgl_data->offset[index] = 0;
-	sgl_data->total_data_len[index] = buffer_len;
-	sgl_data->type[index] = DMA_BUFF_TYPE;
-	sgl_data->is_last[index] = is_last_entry;
-	sgl_data->mlli_nents[index] = mlli_nents;
-	if (sgl_data->mlli_nents[index])
-		*sgl_data->mlli_nents[index] = 0;
-	sgl_data->num_of_buffers++;
-}
-
 static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 			    unsigned int nents, struct scatterlist *sgl,
 			    unsigned int data_len, unsigned int data_offset,
@@ -283,7 +245,6 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 	sgl_data->entry[index].sgl = sgl;
 	sgl_data->offset[index] = data_offset;
 	sgl_data->total_data_len[index] = data_len;
-	sgl_data->type[index] = DMA_SGL_TYPE;
 	sgl_data->is_last[index] = is_last_table;
 	sgl_data->mlli_nents[index] = mlli_nents;
 	if (sgl_data->mlli_nents[index])
@@ -606,17 +567,6 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 
 	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
 		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
-	if (do_chain && areq_ctx->plaintext_authenticate_only) {
-		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
-		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
-		/* Chain to given list */
-		cc_add_buffer_entry(dev, sg_data,
-				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
-				    iv_size_to_authenc, is_last,
-				    &areq_ctx->assoc.mlli_nents);
-		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
-	}
 
 chain_iv_exit:
 	return rc;
@@ -630,13 +580,8 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	int rc = 0;
 	int mapped_nents = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned int size_of_assoc = areq_ctx->assoclen;
 	struct device *dev = drvdata_to_dev(drvdata);
 
-	if (areq_ctx->is_gcm4543)
-		size_of_assoc += crypto_aead_ivsize(tfm);
-
 	if (!sg_data) {
 		rc = -EINVAL;
 		goto chain_assoc_exit;
@@ -652,7 +597,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 		goto chain_assoc_exit;
 	}
 
-	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
 	if (mapped_nents < 0)
 		return mapped_nents;
 
@@ -845,16 +790,11 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
 	u32 offset = 0;
 	/* non-inplace mode */
-	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	u32 sg_index = 0;
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
-	u32 size_to_skip = areq_ctx->assoclen;
+	u32 size_to_skip = req->assoclen;
 	struct scatterlist *sgl;
 
-	if (is_gcm4543)
-		size_to_skip += crypto_aead_ivsize(tfm);
-
 	offset = size_to_skip;
 
 	if (!sg_data)
@@ -863,9 +803,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	areq_ctx->src_sgl = req->src;
 	areq_ctx->dst_sgl = req->dst;
 
-	if (is_gcm4543)
-		size_for_map += crypto_aead_ivsize(tfm);
-
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
@@ -892,16 +829,13 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	areq_ctx->src_offset = offset;
 
 	if (req->src != req->dst) {
-		size_for_map = areq_ctx->assoclen + req->cryptlen;
+		size_for_map = req->assoclen + req->cryptlen;
 
 		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
 		else
			size_for_map -= authsize;
 
-		if (is_gcm4543)
-			size_for_map += crypto_aead_ivsize(tfm);
-
 		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
 			       &areq_ctx->dst.mapped_nents,
 			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
@@ -1008,12 +942,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 	struct buffer_array sg_data;
 	unsigned int authsize = areq_ctx->req_authsize;
 	int rc = 0;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	dma_addr_t dma_addr;
 	u32 mapped_nents = 0;
 	u32 dummy = 0; /*used for the assoc data fragments */
-	u32 size_to_map = 0;
+	u32 size_to_map;
 	gfp_t flags = cc_gfp_flags(&req->base);
 
 	mlli_params->curr_pool = NULL;
@@ -1110,14 +1042,13 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
 	}
 
-	size_to_map = req->cryptlen + areq_ctx->assoclen;
+	size_to_map = req->cryptlen + req->assoclen;
 	/* If we do in-place encryption, we also need the auth tag */
 	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
 	   (req->src == req->dst)) {
 		size_to_map += authsize;
 	}
-	if (is_gcm4543)
-		size_to_map += crypto_aead_ivsize(tfm);
+
 	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
 		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +