Commit a714755

arndb authored and herbertx committed
crypto: ccp - reduce stack usage in ccp_run_aes_gcm_cmd
A number of functions in this file have large structures on the stack,
ccp_run_aes_gcm_cmd() being the worst, in particular when KASAN is
enabled on gcc:

drivers/crypto/ccp/ccp-ops.c: In function 'ccp_run_sha_cmd':
drivers/crypto/ccp/ccp-ops.c:1833:1: error: the frame size of 1136 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]
drivers/crypto/ccp/ccp-ops.c: In function 'ccp_run_aes_gcm_cmd':
drivers/crypto/ccp/ccp-ops.c:914:1: error: the frame size of 1632 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]

Avoid the issue by using dynamic memory allocation in the worst one of
these.

Signed-off-by: Arnd Bergmann <[email protected]>
Acked-by: Tom Lendacky <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
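For context, the fix is a plain stack-to-heap conversion: the function's large local work areas are gathered into a single struct, allocated once with kzalloc(), and released automatically when the pointer goes out of scope, so only one pointer remains on the stack. The following is a minimal, self-contained userspace sketch of that pattern, not the kernel code itself: the struct members, sizes, and names (workareas, run_cmd, free_workareas) are invented for illustration, and the raw GCC/Clang cleanup attribute stands in for the kernel's __cleanup() annotation used in the diff below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the large locals moved off the stack. */
struct workareas {
	unsigned char key[512];
	unsigned char ctx[512];
	unsigned char scratch[1024];
};

/* Cleanup handler: receives the address of the annotated variable. */
static void free_workareas(struct workareas **p)
{
	free(*p);
}

static int run_cmd(const unsigned char *input, size_t len)
{
	/* One pointer on the stack instead of ~2 KiB of buffers. */
	struct workareas *wa __attribute__((cleanup(free_workareas))) =
		calloc(1, sizeof(*wa));

	if (!wa)
		return -1;	/* the kernel code returns -ENOMEM here */

	if (len > sizeof(wa->scratch))
		len = sizeof(wa->scratch);
	memcpy(wa->scratch, input, len);
	/* ... real work would use wa->key / wa->ctx / wa->scratch ... */

	return 0;		/* wa is freed automatically on every return path */
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	printf("run_cmd: %d\n", run_cmd(buf, sizeof(buf)));
	return 0;
}

Because the cleanup handler runs on every return path, the early -ENOMEM return and the existing goto-based unwind labels need no extra kfree() calls, which is what keeps the actual patch this small.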
1 parent 0fab5ee commit a714755


drivers/crypto/ccp/ccp-ops.c

Lines changed: 86 additions & 77 deletions
@@ -633,10 +633,16 @@ static noinline_for_stack int
 ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
-	struct ccp_dm_workarea key, ctx, final_wa, tag;
-	struct ccp_data src, dst;
-	struct ccp_data aad;
-	struct ccp_op op;
+	struct {
+		struct ccp_dm_workarea key;
+		struct ccp_dm_workarea ctx;
+		struct ccp_dm_workarea final;
+		struct ccp_dm_workarea tag;
+		struct ccp_data src;
+		struct ccp_data dst;
+		struct ccp_data aad;
+		struct ccp_op op;
+	} *wa __cleanup(kfree) = kzalloc(sizeof *wa, GFP_KERNEL);
 	unsigned int dm_offset;
 	unsigned int authsize;
 	unsigned int jobid;
@@ -650,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct scatterlist *p_outp, sg_outp[2];
 	struct scatterlist *p_aad;
 
+	if (!wa)
+		return -ENOMEM;
+
 	if (!aes->iv)
 		return -EINVAL;
 
@@ -696,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
-	memset(&op, 0, sizeof(op));
-	op.cmd_q = cmd_q;
-	op.jobid = jobid;
-	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
-	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
-	op.init = 1;
-	op.u.aes.type = aes->type;
+	memset(&wa->op, 0, sizeof(wa->op));
+	wa->op.cmd_q = cmd_q;
+	wa->op.jobid = jobid;
+	wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	wa->op.init = 1;
+	wa->op.u.aes.type = aes->type;
 
 	/* Copy the key to the LSB */
-	ret = ccp_init_dm_workarea(&key, cmd_q,
+	ret = ccp_init_dm_workarea(&wa->key, cmd_q,
 				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
 	if (ret)
 		goto e_key;
-	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+	ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
@@ -726,111 +735,111 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	 * There is an assumption here that the IV is 96 bits in length, plus
 	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
 	 */
-	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+	ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
 				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
-	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	if (ret)
 		goto e_ctx;
 
-	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
 	}
 
-	op.init = 1;
+	wa->op.init = 1;
 	if (aes->aad_len > 0) {
 		/* Step 1: Run a GHASH over the Additional Authenticated Data */
-		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+		ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
 				    AES_BLOCK_SIZE,
 				    DMA_TO_DEVICE);
 		if (ret)
 			goto e_ctx;
 
-		op.u.aes.mode = CCP_AES_MODE_GHASH;
-		op.u.aes.action = CCP_AES_GHASHAAD;
+		wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+		wa->op.u.aes.action = CCP_AES_GHASHAAD;
 
-		while (aad.sg_wa.bytes_left) {
-			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+		while (wa->aad.sg_wa.bytes_left) {
+			ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
 
-			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_aad;
 			}
 
-			ccp_process_data(&aad, NULL, &op);
-			op.init = 0;
+			ccp_process_data(&wa->aad, NULL, &wa->op);
+			wa->op.init = 0;
 		}
 	}
 
-	op.u.aes.mode = CCP_AES_MODE_GCTR;
-	op.u.aes.action = aes->action;
+	wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+	wa->op.u.aes.action = aes->action;
 
 	if (ilen > 0) {
 		/* Step 2: Run a GCTR over the plaintext */
 		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
 
-		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+		ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
 				    AES_BLOCK_SIZE,
 				    in_place ? DMA_BIDIRECTIONAL
 					     : DMA_TO_DEVICE);
 		if (ret)
 			goto e_aad;
 
 		if (in_place) {
-			dst = src;
+			wa->dst = wa->src;
 		} else {
-			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+			ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
 					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
 			if (ret)
 				goto e_src;
 		}
 
-		op.soc = 0;
-		op.eom = 0;
-		op.init = 1;
-		while (src.sg_wa.bytes_left) {
-			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
-			if (!src.sg_wa.bytes_left) {
+		wa->op.soc = 0;
+		wa->op.eom = 0;
+		wa->op.init = 1;
+		while (wa->src.sg_wa.bytes_left) {
+			ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+			if (!wa->src.sg_wa.bytes_left) {
 				unsigned int nbytes = ilen % AES_BLOCK_SIZE;
 
 				if (nbytes) {
-					op.eom = 1;
-					op.u.aes.size = (nbytes * 8) - 1;
+					wa->op.eom = 1;
+					wa->op.u.aes.size = (nbytes * 8) - 1;
 				}
 			}
 
-			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_dst;
 			}
 
-			ccp_process_data(&src, &dst, &op);
-			op.init = 0;
+			ccp_process_data(&wa->src, &wa->dst, &wa->op);
+			wa->op.init = 0;
 		}
 	}
 
 	/* Step 3: Update the IV portion of the context with the original IV */
-	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			       CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
 
-	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	if (ret)
 		goto e_dst;
 
-	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
@@ -840,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	/* Step 4: Concatenate the lengths of the AAD and source, and
 	 * hash that 16 byte buffer.
 	 */
-	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+	ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_dst;
-	final = (__be64 *)final_wa.address;
+	final = (__be64 *)wa->final.address;
 	final[0] = cpu_to_be64(aes->aad_len * 8);
 	final[1] = cpu_to_be64(ilen * 8);
 
-	memset(&op, 0, sizeof(op));
-	op.cmd_q = cmd_q;
-	op.jobid = jobid;
-	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
-	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
-	op.init = 1;
-	op.u.aes.type = aes->type;
-	op.u.aes.mode = CCP_AES_MODE_GHASH;
-	op.u.aes.action = CCP_AES_GHASHFINAL;
-	op.src.type = CCP_MEMTYPE_SYSTEM;
-	op.src.u.dma.address = final_wa.dma.address;
-	op.src.u.dma.length = AES_BLOCK_SIZE;
-	op.dst.type = CCP_MEMTYPE_SYSTEM;
-	op.dst.u.dma.address = final_wa.dma.address;
-	op.dst.u.dma.length = AES_BLOCK_SIZE;
-	op.eom = 1;
-	op.u.aes.size = 0;
-	ret = cmd_q->ccp->vdata->perform->aes(&op);
+	memset(&wa->op, 0, sizeof(wa->op));
+	wa->op.cmd_q = cmd_q;
+	wa->op.jobid = jobid;
+	wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	wa->op.init = 1;
+	wa->op.u.aes.type = aes->type;
+	wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+	wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+	wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+	wa->op.src.u.dma.address = wa->final.dma.address;
+	wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+	wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+	wa->op.dst.u.dma.address = wa->final.dma.address;
+	wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+	wa->op.eom = 1;
+	wa->op.u.aes.size = 0;
+	ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 	if (ret)
 		goto e_final_wa;
 
 	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 		/* Put the ciphered tag after the ciphertext. */
-		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+		ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
 	} else {
 		/* Does this ciphered tag match the input? */
-		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+		ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_final_wa;
-		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+		ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
 		if (ret) {
-			ccp_dm_free(&tag);
+			ccp_dm_free(&wa->tag);
 			goto e_final_wa;
 		}
 
-		ret = crypto_memneq(tag.address, final_wa.address,
+		ret = crypto_memneq(wa->tag.address, wa->final.address,
 				    authsize) ? -EBADMSG : 0;
-		ccp_dm_free(&tag);
+		ccp_dm_free(&wa->tag);
 	}
 
 e_final_wa:
-	ccp_dm_free(&final_wa);
+	ccp_dm_free(&wa->final);
 
 e_dst:
 	if (ilen > 0 && !in_place)
-		ccp_free_data(&dst, cmd_q);
+		ccp_free_data(&wa->dst, cmd_q);
 
 e_src:
 	if (ilen > 0)
-		ccp_free_data(&src, cmd_q);
+		ccp_free_data(&wa->src, cmd_q);
 
 e_aad:
 	if (aes->aad_len)
-		ccp_free_data(&aad, cmd_q);
+		ccp_free_data(&wa->aad, cmd_q);
 
 e_ctx:
-	ccp_dm_free(&ctx);
+	ccp_dm_free(&wa->ctx);
 
 e_key:
-	ccp_dm_free(&key);
+	ccp_dm_free(&wa->key);
 
 	return ret;
 }
