@@ -633,10 +633,16 @@ static noinline_for_stack int
 ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
-	struct ccp_dm_workarea key, ctx, final_wa, tag;
-	struct ccp_data src, dst;
-	struct ccp_data aad;
-	struct ccp_op op;
+	struct {
+		struct ccp_dm_workarea key;
+		struct ccp_dm_workarea ctx;
+		struct ccp_dm_workarea final;
+		struct ccp_dm_workarea tag;
+		struct ccp_data src;
+		struct ccp_data dst;
+		struct ccp_data aad;
+		struct ccp_op op;
+	} *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
 	unsigned int dm_offset;
 	unsigned int authsize;
 	unsigned int jobid;
@@ -650,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct scatterlist *p_outp, sg_outp[2];
 	struct scatterlist *p_aad;
 
+	if (!wa)
+		return -ENOMEM;
+
 	if (!aes->iv)
 		return -EINVAL;
 
@@ -696,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
-	memset(&op, 0, sizeof(op));
-	op.cmd_q = cmd_q;
-	op.jobid = jobid;
-	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
-	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
-	op.init = 1;
-	op.u.aes.type = aes->type;
+	memset(&wa->op, 0, sizeof(wa->op));
+	wa->op.cmd_q = cmd_q;
+	wa->op.jobid = jobid;
+	wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	wa->op.init = 1;
+	wa->op.u.aes.type = aes->type;
 
 	/* Copy the key to the LSB */
-	ret = ccp_init_dm_workarea(&key, cmd_q,
+	ret = ccp_init_dm_workarea(&wa->key, cmd_q,
 				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
 	if (ret)
 		goto e_key;
-	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+	ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
@@ -726,111 +735,111 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	 * There is an assumption here that the IV is 96 bits in length, plus
 	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
 	 */
-	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+	ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
 				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
-	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	if (ret)
 		goto e_ctx;
 
-	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
 	}
 
-	op.init = 1;
+	wa->op.init = 1;
 	if (aes->aad_len > 0) {
 		/* Step 1: Run a GHASH over the Additional Authenticated Data */
-		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+		ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
 				    AES_BLOCK_SIZE,
 				    DMA_TO_DEVICE);
 		if (ret)
 			goto e_ctx;
 
-		op.u.aes.mode = CCP_AES_MODE_GHASH;
-		op.u.aes.action = CCP_AES_GHASHAAD;
+		wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+		wa->op.u.aes.action = CCP_AES_GHASHAAD;
 
-		while (aad.sg_wa.bytes_left) {
-			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+		while (wa->aad.sg_wa.bytes_left) {
+			ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
 
-			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_aad;
 			}
 
-			ccp_process_data(&aad, NULL, &op);
-			op.init = 0;
+			ccp_process_data(&wa->aad, NULL, &wa->op);
+			wa->op.init = 0;
 		}
 	}
 
-	op.u.aes.mode = CCP_AES_MODE_GCTR;
-	op.u.aes.action = aes->action;
+	wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+	wa->op.u.aes.action = aes->action;
 
 	if (ilen > 0) {
 		/* Step 2: Run a GCTR over the plaintext */
 		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
 
-		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+		ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
 				    AES_BLOCK_SIZE,
 				    in_place ? DMA_BIDIRECTIONAL
 					     : DMA_TO_DEVICE);
 		if (ret)
 			goto e_aad;
 
 		if (in_place) {
-			dst = src;
+			wa->dst = wa->src;
 		} else {
-			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+			ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
 					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
 			if (ret)
 				goto e_src;
 		}
 
-		op.soc = 0;
-		op.eom = 0;
-		op.init = 1;
-		while (src.sg_wa.bytes_left) {
-			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
-			if (!src.sg_wa.bytes_left) {
+		wa->op.soc = 0;
+		wa->op.eom = 0;
+		wa->op.init = 1;
+		while (wa->src.sg_wa.bytes_left) {
+			ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+			if (!wa->src.sg_wa.bytes_left) {
 				unsigned int nbytes = ilen % AES_BLOCK_SIZE;
 
 				if (nbytes) {
-					op.eom = 1;
-					op.u.aes.size = (nbytes * 8) - 1;
+					wa->op.eom = 1;
+					wa->op.u.aes.size = (nbytes * 8) - 1;
 				}
 			}
 
-			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_dst;
 			}
 
-			ccp_process_data(&src, &dst, &op);
-			op.init = 0;
+			ccp_process_data(&wa->src, &wa->dst, &wa->op);
+			wa->op.init = 0;
 		}
 	}
 
 	/* Step 3: Update the IV portion of the context with the original IV */
-	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			       CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
 
-	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	if (ret)
 		goto e_dst;
 
-	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
@@ -840,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	/* Step 4: Concatenate the lengths of the AAD and source, and
 	 * hash that 16 byte buffer.
 	 */
-	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+	ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_dst;
-	final = (__be64 *)final_wa.address;
+	final = (__be64 *)wa->final.address;
 	final[0] = cpu_to_be64(aes->aad_len * 8);
 	final[1] = cpu_to_be64(ilen * 8);
 
-	memset(&op, 0, sizeof(op));
-	op.cmd_q = cmd_q;
-	op.jobid = jobid;
-	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
-	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
-	op.init = 1;
-	op.u.aes.type = aes->type;
-	op.u.aes.mode = CCP_AES_MODE_GHASH;
-	op.u.aes.action = CCP_AES_GHASHFINAL;
-	op.src.type = CCP_MEMTYPE_SYSTEM;
-	op.src.u.dma.address = final_wa.dma.address;
-	op.src.u.dma.length = AES_BLOCK_SIZE;
-	op.dst.type = CCP_MEMTYPE_SYSTEM;
-	op.dst.u.dma.address = final_wa.dma.address;
-	op.dst.u.dma.length = AES_BLOCK_SIZE;
-	op.eom = 1;
-	op.u.aes.size = 0;
-	ret = cmd_q->ccp->vdata->perform->aes(&op);
+	memset(&wa->op, 0, sizeof(wa->op));
+	wa->op.cmd_q = cmd_q;
+	wa->op.jobid = jobid;
+	wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	wa->op.init = 1;
+	wa->op.u.aes.type = aes->type;
+	wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+	wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+	wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+	wa->op.src.u.dma.address = wa->final.dma.address;
+	wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+	wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+	wa->op.dst.u.dma.address = wa->final.dma.address;
+	wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+	wa->op.eom = 1;
+	wa->op.u.aes.size = 0;
+	ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 	if (ret)
 		goto e_final_wa;
 
 	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 		/* Put the ciphered tag after the ciphertext. */
-		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+		ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
 	} else {
 		/* Does this ciphered tag match the input? */
-		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+		ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_final_wa;
-		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+		ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
 		if (ret) {
-			ccp_dm_free(&tag);
+			ccp_dm_free(&wa->tag);
 			goto e_final_wa;
 		}
 
-		ret = crypto_memneq(tag.address, final_wa.address,
+		ret = crypto_memneq(wa->tag.address, wa->final.address,
 				    authsize) ? -EBADMSG : 0;
-		ccp_dm_free(&tag);
+		ccp_dm_free(&wa->tag);
 	}
 
 e_final_wa:
-	ccp_dm_free(&final_wa);
+	ccp_dm_free(&wa->final);
 
 e_dst:
 	if (ilen > 0 && !in_place)
-		ccp_free_data(&dst, cmd_q);
+		ccp_free_data(&wa->dst, cmd_q);
 
 e_src:
 	if (ilen > 0)
-		ccp_free_data(&src, cmd_q);
+		ccp_free_data(&wa->src, cmd_q);
 
 e_aad:
 	if (aes->aad_len)
-		ccp_free_data(&aad, cmd_q);
+		ccp_free_data(&wa->aad, cmd_q);
 
 e_ctx:
-	ccp_dm_free(&ctx);
+	ccp_dm_free(&wa->ctx);
 
 e_key:
-	ccp_dm_free(&key);
+	ccp_dm_free(&wa->key);
 
 	return ret;
 }
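
The `wa __free(kfree)` declaration in the first hunk relies on the scoped-free machinery from <linux/cleanup.h>. The underlying compiler `cleanup` attribute hands the cleanup function a pointer *to the annotated variable*, so a dereferencing wrapper is required before the heap allocation itself can be freed; that is why the kernel defines `__free(kfree)` via DEFINE_FREE() rather than attaching kfree directly. A minimal userspace sketch of the same pattern, assuming only GCC/Clang's cleanup attribute (the names autofree, free_ptr, and demo are illustrative, not part of the patch):

/*
 * Emulation of the kernel's __free(kfree) scoped-free idiom.
 * The handler receives a pointer TO the variable, so it must
 * dereference once before freeing -- passing free() (or kfree())
 * directly would try to free the stack slot instead.
 */
#include <stdlib.h>
#include <string.h>

static void free_ptr(void *p)
{
	free(*(void **)p);	/* free(NULL) is a harmless no-op */
}

#define autofree __attribute__((cleanup(free_ptr)))

int demo(size_t n)
{
	char *buf autofree = calloc(1, n);

	if (!buf)
		return -1;	/* handler runs, sees NULL, does nothing */

	memset(buf, 0xab, n);
	return 0;		/* buf freed automatically on every return */
}

This is why the patch keeps all of its goto-based unwinding for the DMA work areas yet never needs an explicit kfree(wa) on any path, including the early -ENOMEM return.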
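The tag check on the decrypt path keeps using crypto_memneq() rather than memcmp(): the comparison must not exit early, or its timing would leak how many leading tag bytes matched. The idea, reduced to a standalone sketch (tag_memneq is a hypothetical name, not the kernel's implementation):

#include <stddef.h>

/* Accumulate XOR differences over the whole buffer instead of
 * branching on the first mismatch, so run time is independent of
 * where (or whether) the tags diverge. */
static int tag_memneq(const unsigned char *a, const unsigned char *b,
		      size_t n)
{
	unsigned char diff = 0;

	while (n--)
		diff |= *a++ ^ *b++;

	return diff != 0;	/* nonzero: buffers differ */
}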