@@ -650,36 +650,35 @@ int main(int argc, const char* argv[]) {
 
   for (int i = 0; i < inputs.size(); ++i) {
     if (inputs[i].isTensor()) {
-      Tensor t = inputs[i].toTensor();
+      Tensor tensor = inputs[i].toTensor();
       // The output might be collected and parsed so printf() is used instead
       // of ET_LOG() here
-      for (int j = 0; j < inputs[i].toTensor().numel(); ++j) {
-        if (t.scalar_type() == ScalarType::Int) {
+      for (int j = 0; j < tensor.numel(); ++j) {
+        if (tensor.scalar_type() == ScalarType::Int) {
           printf(
               "Input[%d][%d]: (int) %d\n",
               i,
               j,
-              inputs[i].toTensor().const_data_ptr<int>()[j]);
-        } else if (t.scalar_type() == ScalarType::Float) {
+              tensor.const_data_ptr<int>()[j]);
+        } else if (tensor.scalar_type() == ScalarType::Float) {
           printf(
               "Input[%d][%d]: (float) %f\n",
               i,
               j,
-              inputs[i].toTensor().const_data_ptr<float>()[j]);
-        } else if (t.scalar_type() == ScalarType::Char) {
+              tensor.const_data_ptr<float>()[j]);
+        } else if (tensor.scalar_type() == ScalarType::Char) {
           printf(
               "Input[%d][%d]: (char) %d\n",
               i,
               j,
-              inputs[i].toTensor().const_data_ptr<int8_t>()[j]);
-        } else if (t.scalar_type() == ScalarType::Bool) {
+              tensor.const_data_ptr<int8_t>()[j]);
+        } else if (tensor.scalar_type() == ScalarType::Bool) {
           printf(
               "Input[%d][%d]: (bool) %s (0x%x)\n",
               i,
               j,
-              inputs[i].toTensor().const_data_ptr<int8_t>()[j] ? "true"
-                                                               : "false",
-              inputs[i].toTensor().const_data_ptr<int8_t>()[j]);
+              tensor.const_data_ptr<int8_t>()[j] ? "true" : "false",
+              tensor.const_data_ptr<int8_t>()[j]);
         }
       }
     } else {
@@ -754,53 +753,53 @@ int main(int argc, const char* argv[]) {
 
   // Print the outputs.
   for (int i = 0; i < outputs.size(); ++i) {
-    Tensor t = outputs[i].toTensor();
+    if (outputs[i].isTensor()) {
+      Tensor tensor = outputs[i].toTensor();
 #if !defined(SEMIHOSTING)
 #if defined(ET_DUMP_OUTPUT)
-    // The output might be collected and parsed so printf() is used instead
-    // of ET_LOG() here
-    for (int j = 0; j < outputs[i].toTensor().numel(); ++j) {
-      if (t.scalar_type() == ScalarType::Int) {
-        printf(
-            "Output[%d][%d]: (int) %d\n",
-            i,
-            j,
-            outputs[i].toTensor().const_data_ptr<int>()[j]);
-      } else if (t.scalar_type() == ScalarType::Float) {
-        printf(
-            "Output[%d][%d]: (float) %f\n",
-            i,
-            j,
-            outputs[i].toTensor().const_data_ptr<float>()[j]);
-      } else if (t.scalar_type() == ScalarType::Char) {
-        printf(
-            "Output[%d][%d]: (char) %d\n",
-            i,
-            j,
-            outputs[i].toTensor().const_data_ptr<int8_t>()[j]);
-      } else if (t.scalar_type() == ScalarType::Bool) {
-        printf(
-            "Output[%d][%d]: (bool) %s (0x%x)\n",
-            i,
-            j,
-            outputs[i].toTensor().const_data_ptr<int8_t>()[j] ? "true"
-                                                              : "false",
-            outputs[i].toTensor().const_data_ptr<int8_t>()[j]);
+      // The output might be collected and parsed so printf() is used instead
+      // of ET_LOG() here
+      for (int j = 0; j < tensor.numel(); ++j) {
+        if (tensor.scalar_type() == ScalarType::Int) {
+          printf(
+              "Output[%d][%d]: (int) %d\n",
+              i,
+              j,
+              tensor.const_data_ptr<int>()[j]);
+        } else if (tensor.scalar_type() == ScalarType::Float) {
+          printf(
+              "Output[%d][%d]: (float) %f\n",
+              i,
+              j,
+              tensor.const_data_ptr<float>()[j]);
+        } else if (tensor.scalar_type() == ScalarType::Char) {
+          printf(
+              "Output[%d][%d]: (char) %d\n",
+              i,
+              j,
+              tensor.const_data_ptr<int8_t>()[j]);
+        } else if (tensor.scalar_type() == ScalarType::Bool) {
+          printf(
+              "Output[%d][%d]: (bool) %s (0x%x)\n",
+              i,
+              j,
+              tensor.const_data_ptr<int8_t>()[j] ? "true" : "false",
+              tensor.const_data_ptr<int8_t>()[j]);
+        }
       }
-    }
 #endif
 #else
-    char out_filename[255];
-    snprintf(out_filename, 255, "%s-%d.bin", output_basename, i);
-    ET_LOG(Info, "Writing output to file: %s", out_filename);
-    FILE* out_file = fopen(out_filename, "wb");
-    auto written_size = fwrite(
-        outputs[i].toTensor().const_data_ptr<char>(),
-        1,
-        outputs[i].toTensor().nbytes(),
-        out_file);
-    fclose(out_file);
+      char out_filename[255];
+      snprintf(out_filename, 255, "%s-%d.bin", output_basename, i);
+      ET_LOG(Info, "Writing output to file: %s", out_filename);
+      FILE* out_file = fopen(out_filename, "wb");
+      auto written_size =
+          fwrite(tensor.const_data_ptr<char>(), 1, tensor.nbytes(), out_file);
+      fclose(out_file);
 #endif
+    } else {
+      printf("Output[%d]: Not Tensor\n", i);
+    }
   }
 
 #if defined(ET_EVENT_TRACER_ENABLED)