Skip to content

Commit 48bf697

Browse files
dzakharipozdnov
authored and committed
fix to make it work with the latest state of TFLM
1 parent 5496f49 commit 48bf697

File tree

3 files changed

+22
-31
lines changed

3 files changed

+22
-31
lines changed

examples/tutorial_emnist_tflm/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ Installation process of the following dependencies is described in [Getting Star
2121
* NumPy 1.16.4
2222
* Matplotlib
2323
* Jupyter Lab / Notebook
24-
* tf-nightly 2.3
24+
* TensorFlow 2.3
2525
* Keras
2626
* emnist
2727

@@ -41,8 +41,8 @@ Installation process of the following dependencies is described in [Getting Star
4141
## Install pip requirements
4242
```bash
4343
pip install --upgrade pip setuptools
44-
pip install -r requirements.txt
45-
python -c "import emnist; emnist.ensure_cached_data();
44+
pip install -r ./conversion_tutorial/requirements.txt
45+
python -c "import emnist; emnist.ensure_cached_data();"
4646
```
4747
## Generate Tensorflow Lite Micro library
4848
Open root directory of tensorflow in terminal (use Cygwin or MinGW terminal if you're on Windows). Run:
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
numpy==1.16.4
22
matplotlib
33
jupyterlab==1.1.0
4-
tf-nightly>=2.3
4+
tensorflow>=2.3.0rc0
55
keras>=2.2.4
66
emnist
77

examples/tutorial_emnist_tflm/src/main.cc

Lines changed: 18 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
* the LICENSE file in the root directory of this source tree.
77
*
88
*/
9-
#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
9+
#include "tensorflow/lite/micro/all_ops_resolver.h"
1010
#include "tensorflow/lite/micro/kernels/micro_ops.h"
1111
#include "tensorflow/lite/micro/micro_error_reporter.h"
1212
#include "tensorflow/lite/micro/micro_interpreter.h"
@@ -19,17 +19,17 @@
1919
//tensor_arena has to be 16 bytes aligned
2020
typedef uint8_t aligned_uint8_t __attribute__((aligned(16)));
2121
constexpr int kTensorArenaSize = 50 * 1024;
22-
aligned_uint8_t tensor_arena[ kTensorArenaSize ] = { 0 };
22+
aligned_uint8_t tensor_arena[kTensorArenaSize] = {0};
2323

2424
int main() {
2525
tflite::ErrorReporter* reporter = nullptr;
2626
tflite::MicroErrorReporter error_reporter;
2727
reporter = &error_reporter;
28-
reporter->Report( "Run EMNIST letter recognition" );
28+
reporter->Report("Run EMNIST letter recognition");
2929

3030
//Load Model
31-
const tflite::Model* model = tflite::GetModel( emnist_model_int8_tflite );
32-
if( model->version() != TFLITE_SCHEMA_VERSION ) {
31+
const tflite::Model* model = tflite::GetModel(emnist_model_int8_tflite);
32+
if (model->version() != TFLITE_SCHEMA_VERSION) {
3333
reporter->Report( "Model is schema version: %d\n"
3434
"Supported schema version is: %d", model->version(), TFLITE_SCHEMA_VERSION );
3535
return 1;
@@ -39,25 +39,17 @@ int main() {
3939
// Add Builtins corresponding to Model layers
4040
// Note: If you change the model structure/layer types, you'll need to make
4141
// equivalent changes to the resolver
42-
tflite::MicroOpResolver<5> resolver;
43-
resolver.AddBuiltin(tflite::BuiltinOperator_CONV_2D,
44-
tflite::ops::micro::Register_CONV_2D(), 3);
45-
resolver.AddBuiltin(tflite::BuiltinOperator_MAX_POOL_2D,
46-
tflite::ops::micro::Register_MAX_POOL_2D(), 2);
47-
resolver.AddBuiltin(tflite::BuiltinOperator_RESHAPE,
48-
tflite::ops::micro::Register_RESHAPE());
49-
resolver.AddBuiltin(tflite::BuiltinOperator_FULLY_CONNECTED,
50-
tflite::ops::micro::Register_FULLY_CONNECTED(), 4);
51-
resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
52-
tflite::ops::micro::Register_SOFTMAX(), 2);
42+
tflite::MicroMutableOpResolver<5> resolver;
43+
resolver.AddConv2D();
44+
resolver.AddMaxPool2D();
45+
resolver.AddFullyConnected();
46+
resolver.AddReshape();
47+
resolver.AddSoftmax();
5348

54-
tflite::MicroInterpreter interpreter(
55-
model, resolver, tensor_arena, kTensorArenaSize, reporter );
56-
//interpreter = &static_interpreter;
49+
tflite::MicroInterpreter interpreter(model, resolver, tensor_arena, kTensorArenaSize, reporter);
5750

5851
// Allocate memory from the tensor_arena for the model's tensors.
59-
//TfLiteStatus allocate_status = interpreter.AllocateTensors();
60-
if( interpreter.AllocateTensors() != kTfLiteOk ) {
52+
if (interpreter.AllocateTensors() != kTfLiteOk) {
6153
reporter->Report( "AllocateTensors() failed" );
6254
return 1;
6355
}
@@ -71,17 +63,16 @@ int main() {
7163
int32_t zero_point = input->params.zero_point;
7264

7365
// Invoke interpreter for each test sample and process results
74-
for (int j = 0; j < kNumSamples; j++){
66+
for (int j = 0; j < kNumSamples; j++) {
7567
// Perform image thinning (round values to either -128 or 127)
7668
// Write image to input data
7769
for (int i = 0; i < kImageSize; i++) {
7870
input->data.int8[i] = (test_samples[j].image[i] <= 210) ? -128 : 127;
79-
}
71+
}
8072

8173
// Run model
82-
//TfLiteStatus invoke_status = interpreter.Invoke();
83-
if ( interpreter.Invoke() != kTfLiteOk ) {
84-
reporter->Report( "Invoke failed" );
74+
if (interpreter.Invoke() != kTfLiteOk) {
75+
reporter->Report("Invoke failed");
8576
return 1;
8677
}
8778

@@ -97,4 +88,4 @@ int main() {
9788
kCategoryLabels[result], (int)(confidence * 100), status);
9889
}
9990
return 0;
100-
}
91+
}

0 commit comments

Comments (0)