 * the LICENSE file in the root directory of this source tree.
 *
 */
9- #include " tensorflow/lite/micro/kernels/ all_ops_resolver.h"
9+ #include " tensorflow/lite/micro/all_ops_resolver.h"
1010#include " tensorflow/lite/micro/kernels/micro_ops.h"
1111#include " tensorflow/lite/micro/micro_error_reporter.h"
1212#include " tensorflow/lite/micro/micro_interpreter.h"
1919// tensor_arena has to be 16 bytes aligned
2020typedef uint8_t aligned_uint8_t __attribute__ ((aligned(16 )));
2121constexpr int kTensorArenaSize = 50 * 1024 ;
22- aligned_uint8_t tensor_arena[ kTensorArenaSize ] = { 0 };
22+ aligned_uint8_t tensor_arena[kTensorArenaSize ] = {0 };
2323
// NOTE(review): everything below is a unified-diff rendering (commit-page
// scrape) of main(), not compilable source. Each line carries fused old/new
// line numbers; a prefix ending in '-' is the pre-commit version, '+' the
// post-commit version, and '@@ ... @@' headers mark elided regions — parts
// of main() are therefore NOT visible in this file.
//
// Purpose (from the visible '+'/context lines): run an EMNIST letter
// recognition demo — load an int8 EMNIST model, register its five ops,
// allocate tensors from tensor_arena, then loop over test samples,
// binarize each image, invoke the interpreter, and report results.
2424int main () {
// Error reporting goes through a stack-allocated MicroErrorReporter;
// 'reporter' is the base-class pointer used for all Report() calls below.
2525 tflite::ErrorReporter* reporter = nullptr ;
2626 tflite::MicroErrorReporter error_reporter;
2727 reporter = &error_reporter;
28- reporter->Report ( " Run EMNIST letter recognition" );
28+ reporter->Report (" Run EMNIST letter recognition" );
2929
3030 // Load Model
// Model bytes come from the generated array 'emnist_model_int8_tflite'
// (defined elsewhere); a schema-version mismatch aborts with exit code 1.
31- const tflite::Model* model = tflite::GetModel ( emnist_model_int8_tflite );
32- if ( model->version () != TFLITE_SCHEMA_VERSION ) {
31+ const tflite::Model* model = tflite::GetModel (emnist_model_int8_tflite);
32+ if ( model->version () != TFLITE_SCHEMA_VERSION) {
3333 reporter->Report ( " Model is schema version: %d\n "
3434 " Supported schema version is: %d" , model->version (), TFLITE_SCHEMA_VERSION );
3535 return 1 ;
// Hunk header: old-file lines 36-38 / new-file lines 36-38 are elided here
// (presumably the closing brace of the version check — TODO confirm).
@@ -39,25 +39,17 @@ int main() {
3939 // Add Builtins corresponding to Model layers
4040 // Note: If you change the model structure/layer types, you'll need to make
4141 // equivalent changes to the resolver
// Core change of this commit: the old MicroOpResolver with per-op
// AddBuiltin(..., Register_*(), version) registrations (the '-' lines) is
// replaced by MicroMutableOpResolver<5> with typed Add* helpers (the '+'
// lines). The template argument 5 matches the five registered operators.
42- tflite::MicroOpResolver<5 > resolver;
43- resolver.AddBuiltin (tflite::BuiltinOperator_CONV_2D,
44- tflite::ops::micro::Register_CONV_2D (), 3 );
45- resolver.AddBuiltin (tflite::BuiltinOperator_MAX_POOL_2D,
46- tflite::ops::micro::Register_MAX_POOL_2D (), 2 );
47- resolver.AddBuiltin (tflite::BuiltinOperator_RESHAPE,
48- tflite::ops::micro::Register_RESHAPE ());
49- resolver.AddBuiltin (tflite::BuiltinOperator_FULLY_CONNECTED,
50- tflite::ops::micro::Register_FULLY_CONNECTED (), 4 );
51- resolver.AddBuiltin (tflite::BuiltinOperator_SOFTMAX,
52- tflite::ops::micro::Register_SOFTMAX (), 2 );
42+ tflite::MicroMutableOpResolver<5 > resolver;
43+ resolver.AddConv2D ();
44+ resolver.AddMaxPool2D ();
45+ resolver.AddFullyConnected ();
46+ resolver.AddReshape ();
47+ resolver.AddSoftmax ();
5348
// Interpreter construction is collapsed to one line; the commit also drops
// a stale commented-out 'static_interpreter' leftover.
54- tflite::MicroInterpreter interpreter (
55- model, resolver, tensor_arena, kTensorArenaSize , reporter );
56- // interpreter = &static_interpreter;
49+ tflite::MicroInterpreter interpreter (model, resolver, tensor_arena, kTensorArenaSize , reporter);
5750
5851 // Allocate memory from the tensor_arena for the model's tensors.
59- // TfLiteStatus allocate_status = interpreter.AllocateTensors();
60- if ( interpreter.AllocateTensors () != kTfLiteOk ) {
52+ if (interpreter.AllocateTensors () != kTfLiteOk ) {
6153 reporter->Report ( " AllocateTensors() failed" );
6254 return 1 ;
6355 }
// Hunk header: new-file lines 56-62 are elided. They presumably obtain the
// input tensor (e.g. input = interpreter.input(0)) and its quantization
// scale — 'input' below is otherwise undefined in the visible text;
// TODO confirm against the full post-commit file.
@@ -71,17 +63,16 @@ int main() {
7163 int32_t zero_point = input->params .zero_point ;
7264
7365 // Invoke interpreter for each test sample and process results
74- for (int j = 0 ; j < kNumSamples ; j++){
66+ for (int j = 0 ; j < kNumSamples ; j++) {
7567 // Perform image thinning (round values to either -128 or 127)
7668 // Write image to input data
// Binarize to the int8 extremes: pixel values <= 210 become -128,
// everything brighter becomes 127.
7769 for (int i = 0 ; i < kImageSize ; i++) {
7870 input->data .int8 [i] = (test_samples[j].image [i] <= 210 ) ? -128 : 127 ;
79- }
71+ }
8072
8173 // Run model
82- // TfLiteStatus invoke_status = interpreter.Invoke();
83- if ( interpreter.Invoke () != kTfLiteOk ) {
84- reporter->Report ( " Invoke failed" );
74+ if (interpreter.Invoke () != kTfLiteOk ) {
75+ reporter->Report (" Invoke failed" );
8576 return 1 ;
8677 }
8778
// Final hunk: new-file lines 78-87 are elided (presumably the argmax /
// 'result', 'confidence' and 'status' computation — TODO confirm). The
// visible tail is the trailing arguments of a Report() call printing the
// predicted category label, confidence percentage, and a status string.
@@ -97,4 +88,4 @@ int main() {
9788 kCategoryLabels [result], (int )(confidence * 100 ), status);
9889 }
9990 return 0 ;
100- }
91+ }
0 commit comments