@@ -585,7 +585,7 @@ static status_t trigger_aes_gcm(dif_aes_key_share_t key, dif_aes_iv_t iv,
   return OK_STATUS();
 }
 
-status_t handle_aes_sca_gcm_fvsr_batch(ujson_t *uj) {
+status_t handle_aes_sca_gcm_fvsr_iv_key_batch(ujson_t *uj) {
   // Receive the AES-GCM input data over uJSON.
   aes_sca_num_ops_t uj_num_ops;
   aes_sca_gcm_triggers_t uj_triggers;
@@ -722,6 +722,180 @@ status_t handle_aes_sca_gcm_fvsr_batch(ujson_t *uj) {
   return OK_STATUS();
 }
 
+status_t handle_aes_sca_gcm_fvsr_ptx_aad_batch(ujson_t *uj) {
+  // Receive the AES-GCM input data over uJSON.
+  aes_sca_num_ops_t uj_num_ops;
+  aes_sca_gcm_triggers_t uj_triggers;
+  aes_sca_block_t uj_iv;
+  aes_sca_key_t uj_key;
+  aes_sca_num_blocks_t uj_aad_blocks;
+  aes_sca_num_blocks_t uj_ptx_blocks;
+  aes_sca_block_t uj_aad[kMaxGcmBlocks];
+  aes_sca_block_t uj_ptx[kMaxGcmBlocks];
+  // Get number of batch iterations.
+  TRY(ujson_deserialize_aes_sca_num_ops_t(uj, &uj_num_ops));
+  if (uj_num_ops.num_batch_ops > kNumBatchOpsMax) {
+    return OUT_OF_RANGE();
+  }
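+  // kNumBatchOpsMax also bounds the per-batch FvsR data arrays below.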
+  // Get the trigger configuration.
+  // uj_triggers.triggers[0] = True/False - process all-zero block.
+  // uj_triggers.triggers[1] = True/False - process initial counter block.
+  // uj_triggers.triggers[2] = True/False - process AAD blocks.
+  // uj_triggers.triggers[3] = True/False - process PTX blocks.
+  // uj_triggers.triggers[4] = True/False - process TAG block.
+  // uj_triggers.block = int - which AAD or PTX block is captured?
+  TRY(ujson_deserialize_aes_sca_gcm_triggers_t(uj, &uj_triggers));
+  // Get static IV and static KEY.
+  TRY(ujson_deserialize_aes_sca_block_t(uj, &uj_iv));
+  TRY(ujson_deserialize_aes_sca_key_t(uj, &uj_key));
+  // Get number of AAD and PTX blocks we are expecting.
+  TRY(ujson_deserialize_aes_sca_num_blocks_t(uj, &uj_aad_blocks));
+  TRY(ujson_deserialize_aes_sca_num_blocks_t(uj, &uj_ptx_blocks));
+  if (uj_aad_blocks.num_blocks > kMaxGcmBlocks ||
+      uj_ptx_blocks.num_blocks > kMaxGcmBlocks) {
+    return ABORTED();
+  }
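+  // kMaxGcmBlocks bounds the uj_aad and uj_ptx receive buffers above.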
+  // Fetch fixed AAD blocks.
+  for (size_t block = 0; block < uj_aad_blocks.num_blocks; block++) {
+    TRY(ujson_deserialize_aes_sca_block_t(uj, &uj_aad[block]));
+  }
+  // Fetch fixed PTX blocks.
+  for (size_t block = 0; block < uj_ptx_blocks.num_blocks; block++) {
+    TRY(ujson_deserialize_aes_sca_block_t(uj, &uj_ptx[block]));
+  }
+
+  // Prepare static AES IV.
+  dif_aes_iv_t aes_iv_static;
+  memset(aes_iv_static.iv, 0, 16);
+  memcpy(aes_iv_static.iv, uj_iv.block, uj_iv.num_valid_bytes);
+
+  // Prepare static AES key.
+  uint8_t aes_key_static[kAesKeyLength];
+  memset(aes_key_static, 0, kAesKeyLength);
+  memcpy(aes_key_static, uj_key.key, uj_key.key_length);
+
+  // Get the last PTX & AAD block size.
+  size_t aes_ptx_last_block_size =
+      uj_ptx[uj_ptx_blocks.num_blocks - 1].num_valid_bytes;
+  size_t aes_aad_last_block_size =
+      uj_aad[uj_aad_blocks.num_blocks - 1].num_valid_bytes;
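+  // Only the last AAD/PTX block may be partial; all earlier blocks are
+  // expected to carry a full 16 valid bytes.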
782+
783+ // Generate Fvsr AES PTX & AAD set.
784+ dif_aes_data_t aes_ptx_fvsr [kNumBatchOpsMax ][kMaxGcmBlocks ];
785+ dif_aes_data_t aes_aad_fvsr [kNumBatchOpsMax ][kMaxGcmBlocks ];
786+ bool sample_fixed = true;
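+  // Fixed-vs-random (FvsR) sampling: each batch entry either replays the
+  // fixed PTX/AAD supplied by the host or uses freshly generated random
+  // blocks. The first entry is always fixed; each subsequent choice is a
+  // PRNG coin flip (see the end of this loop), so a host driving the same
+  // PRNG sequence can partition its traces into the two sets.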
+  for (size_t batch = 0; batch < uj_num_ops.num_batch_ops; batch++) {
+    // Clear the arrays to 0.
+    for (size_t block = 0; block < kMaxGcmBlocks; block++) {
+      memset(aes_ptx_fvsr[batch][block].data, 0,
+             ARRAYSIZE(aes_ptx_fvsr[batch][block].data) *
+                 sizeof(aes_ptx_fvsr[batch][block].data[0]));
+      memset(aes_aad_fvsr[batch][block].data, 0,
+             ARRAYSIZE(aes_aad_fvsr[batch][block].data) *
+                 sizeof(aes_aad_fvsr[batch][block].data[0]));
+    }
+
+    if (sample_fixed) {
+      // Use static PTX.
+      for (size_t block = 0; block < uj_ptx_blocks.num_blocks; block++) {
+        memcpy(aes_ptx_fvsr[batch][block].data, uj_ptx[block].block,
+               uj_ptx[block].num_valid_bytes);
+      }
+      // Use static AAD.
+      for (size_t block = 0; block < uj_aad_blocks.num_blocks; block++) {
+        memcpy(aes_aad_fvsr[batch][block].data, uj_aad[block].block,
+               uj_aad[block].num_valid_bytes);
+      }
+    } else {
+      // Generate random PTX.
+      for (size_t block = 0; block < uj_ptx_blocks.num_blocks; block++) {
+        size_t valid_bytes = 16;
+        uint8_t rand_ptx[valid_bytes];
+        prng_rand_bytes(rand_ptx, valid_bytes);
+        if (block == uj_ptx_blocks.num_blocks - 1) {
+          valid_bytes = aes_ptx_last_block_size;
+        }
+        memcpy(aes_ptx_fvsr[batch][block].data, rand_ptx, valid_bytes);
+      }
+
+      // Generate random AAD.
+      for (size_t block = 0; block < uj_aad_blocks.num_blocks; block++) {
+        size_t valid_bytes = 16;
+        uint8_t rand_aad[valid_bytes];
+        prng_rand_bytes(rand_aad, valid_bytes);
+        if (block == uj_aad_blocks.num_blocks - 1) {
+          valid_bytes = aes_aad_last_block_size;
+        }
+        memcpy(aes_aad_fvsr[batch][block].data, rand_aad, valid_bytes);
+      }
+    }
+    sample_fixed = prng_rand_uint32() & 0x1;
+  }
+
+  // Prepare key structure.
+  dif_aes_key_share_t key_static;
+  memset(key_static.share0, 0, sizeof(key_static.share0));
+  memset(key_static.share1, 0, sizeof(key_static.share1));
+
+  // Mask the provided key.
+  for (int i = 0; i < uj_key.key_length / 4; ++i) {
+    key_static.share1[i] = pentest_non_linear_layer(
+        pentest_linear_layer(pentest_next_lfsr(1, kPentestLfsrMasking)));
+    key_static.share0[i] =
+        *((uint32_t *)aes_key_static + i) ^ key_static.share1[i];
+  }
+  // Provide random shares for unused key bits.
+  for (size_t i = uj_key.key_length / 4; i < kAesKeyLengthMax / 4; ++i) {
+    key_static.share1[i] =
+        pentest_non_linear_layer(pentest_next_lfsr(1, kPentestLfsrMasking));
+    key_static.share0[i] =
+        pentest_non_linear_layer(pentest_next_lfsr(1, kPentestLfsrMasking));
+  }
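+  // share0 ^ share1 now reconstructs the static key over the first
+  // uj_key.key_length bytes; the remaining key words carry independent
+  // random shares.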
+
+  // Trigger the AES-GCM operation.
+  dif_aes_data_t aes_tag_acc;
+  aes_tag_acc.data[0] = 0;
+  aes_tag_acc.data[1] = 0;
+  aes_tag_acc.data[2] = 0;
+  aes_tag_acc.data[3] = 0;
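+  // The per-operation tags are XOR-accumulated so that the response stays a
+  // single block regardless of the batch size, while still letting the host
+  // detect a faulty operation anywhere in the batch.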
+  for (size_t it = 0; it < uj_num_ops.num_batch_ops; it++) {
+    dif_aes_data_t aes_tag;
+    TRY(trigger_aes_gcm(key_static, aes_iv_static, aes_aad_fvsr[it],
+                        uj_aad_blocks.num_blocks, aes_aad_last_block_size,
+                        aes_ptx_fvsr[it], uj_ptx_blocks.num_blocks,
+                        aes_ptx_last_block_size, &aes_tag, uj_triggers));
+    // Accumulate (i.e., XOR) TAG for sending back to host.
+    for (size_t i = 0; i < ARRAYSIZE(aes_tag_acc.data); i++) {
+      aes_tag_acc.data[i] ^= aes_tag.data[i];
+    }
+  }
+
+  // Send accumulated TAG back to host.
+  aes_sca_block_t uj_tag;
+  uj_tag.num_valid_bytes = 16;
+  memset(uj_tag.block, 0, sizeof(uj_tag.block));
+  memcpy(uj_tag.block, (uint8_t *)aes_tag_acc.data, uj_tag.num_valid_bytes);
+
+  RESP_OK(ujson_serialize_aes_sca_block_t, uj, &uj_tag);
+
+  return OK_STATUS();
+}
+
 status_t handle_aes_sca_gcm_single_encrypt(ujson_t *uj) {
   // Receive the AES-GCM input data over uJSON.
   aes_sca_gcm_triggers_t uj_triggers;
@@ -895,8 +1069,10 @@ status_t handle_aes_sca(ujson_t *uj) {
       return handle_aes_sca_batch_fvsr_data(uj);
     case kAesScaSubcommandBatchFvsrKey:
       return handle_aes_sca_batch_fvsr_key(uj);
-    case kAesScaSubcommandGcmFvsrBatch:
-      return handle_aes_sca_gcm_fvsr_batch(uj);
+    case kAesScaSubcommandGcmFvsrBatchIvKey:
+      return handle_aes_sca_gcm_fvsr_iv_key_batch(uj);
+    case kAesScaSubcommandGcmFvsrBatchPtxAad:
+      return handle_aes_sca_gcm_fvsr_ptx_aad_batch(uj);
     case kAesScaSubcommandGcmSingleEncrypt:
       return handle_aes_sca_gcm_single_encrypt(uj);
     case kAesScaSubcommandInit: