
Commit 82f64e1

laurenmurphyx64 authored and cfriedt committed
samples: tflite-micro: update hello_world
Updates tflite-micro hello_world sample code. Signed-off-by: Lauren Murphy <[email protected]>
1 parent 6cf7395 commit 82f64e1

File tree: 2 files changed (+219, -227 lines)


samples/modules/tflite-micro/hello_world/src/main_functions.cc
18 additions & 24 deletions
@@ -22,8 +22,8 @@
 #include "output_handler.h"
 #include <tensorflow/lite/micro/micro_error_reporter.h>
 #include <tensorflow/lite/micro/micro_interpreter.h>
+#include <tensorflow/lite/micro/system_setup.h>
 #include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
 
 /* Globals, used for compatibility with Arduino-style sketches. */
 namespace {
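
Note on the include swap above: recent tflite-micro releases dropped tensorflow/lite/version.h from the micro tree and added tensorflow/lite/micro/system_setup.h, which declares tflite::InitializeTarget() for target-specific initialization. A minimal sketch of how that entry point is typically called; the call site is an assumption, since it is not shown in this hunk:

/* Sketch only, assuming a tflite-micro checkout that provides
 * tflite::InitializeTarget() in system_setup.h.
 */
#include <tensorflow/lite/micro/system_setup.h>

void setup(void)
{
	/* Target-specific initialization (e.g. debug logging) */
	tflite::InitializeTarget();
	/* ... model loading and interpreter construction follow ... */
}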
@@ -34,15 +34,7 @@ namespace {
 TfLiteTensor *output = nullptr;
 int inference_count = 0;
 
-/* Create an area of memory to use for input, output, and intermediate arrays.
- * Minimum arena size, at the time of writing. After allocating tensors
- * you can retrieve this value by invoking interpreter.arena_used_bytes().
- */
-const int kModelArenaSize = 2468;
-
-/* Extra headroom for model + alignment + future interpreter changes. */
-const int kExtraArenaSize = 560 + 16 + 100;
-const int kTensorArenaSize = kModelArenaSize + kExtraArenaSize;
+constexpr int kTensorArenaSize = 2000;
 uint8_t tensor_arena[kTensorArenaSize];
 } /* namespace */
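
The fixed 2000-byte arena replaces the computed kModelArenaSize + kExtraArenaSize above. One way to check that the smaller arena still fits the model, sketched below under the assumption that interpreter and error_reporter are the globals set up elsewhere in this file, is to query arena_used_bytes() after tensor allocation (the removed comment itself points at this API):

/* Sketch: verifying the arena size after AllocateTensors(). */
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
	TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
	return;
}
/* arena_used_bytes() reports how much of tensor_arena was consumed;
 * kTensorArenaSize must be at least this large.
 */
TF_LITE_REPORT_ERROR(error_reporter, "Arena used: %d of %d bytes",
		     (int)interpreter->arena_used_bytes(), kTensorArenaSize);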

@@ -63,9 +55,9 @@ void setup(void)
 	model = tflite::GetModel(g_model);
 	if (model->version() != TFLITE_SCHEMA_VERSION) {
 		TF_LITE_REPORT_ERROR(error_reporter,
-			"Model provided is schema version %d not equal "
-			"to supported version %d.",
-			model->version(), TFLITE_SCHEMA_VERSION);
+				     "Model provided is schema version %d not equal "
+				     "to supported version %d.",
+				     model->version(), TFLITE_SCHEMA_VERSION);
 		return;
 	}

@@ -104,32 +96,34 @@ void loop(void)
 	 */
 	float position = static_cast < float > (inference_count) /
 			 static_cast < float > (kInferencesPerCycle);
-	float x_val = position * kXrange;
+	float x = position * kXrange;
 
-	/* Place our calculated x value in the model's input tensor */
-	input->data.f[0] = x_val;
+	/* Quantize the input from floating-point to integer */
+	int8_t x_quantized = x / input->params.scale + input->params.zero_point;
+	/* Place the quantized input in the model's input tensor */
+	input->data.int8[0] = x_quantized;
 
 	/* Run inference, and report any error */
 	TfLiteStatus invoke_status = interpreter->Invoke();
 	if (invoke_status != kTfLiteOk) {
-		TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on x_val: %f\n",
-				     static_cast < double > (x_val));
+		TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed on x: %f\n",
+				     static_cast < double > (x));
 		return;
 	}
 
-	/* Read the predicted y value from the model's output tensor */
-	float y_val = output->data.f[0];
+	/* Obtain the quantized output from model's output tensor */
+	int8_t y_quantized = output->data.int8[0];
+	/* Dequantize the output from integer to floating-point */
+	float y = (y_quantized - output->params.zero_point) * output->params.scale;
 
 	/* Output the results. A custom HandleOutput function can be implemented
 	 * for each supported hardware target.
 	 */
-	HandleOutput(error_reporter, x_val, y_val);
+	HandleOutput(error_reporter, x, y);
 
 	/* Increment the inference_counter, and reset it if we have reached
 	 * the total number per cycle
 	 */
 	inference_count += 1;
-	if (inference_count >= kInferencesPerCycle) {
-		inference_count = 0;
-	}
+	if (inference_count >= kInferencesPerCycle) inference_count = 0;
 }
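
The new input/output handling above is standard affine (zero-point) quantization: quantize with q = x / scale + zero_point, dequantize with x = (q - zero_point) * scale, where scale and zero_point come from the tensor's quantization params. A self-contained sketch of the round trip, with hypothetical parameter values (the sample reads the real ones from input->params and output->params at runtime):

#include <cstdint>
#include <cstdio>

int main(void)
{
	/* Hypothetical quantization parameters; in the sample these come
	 * from input->params.scale and input->params.zero_point.
	 */
	const float scale = 0.024574f;
	const int32_t zero_point = -128;

	float x = 1.57f; /* example input, roughly pi/2 */

	/* Quantize: float -> int8, same formula as in the diff above */
	int8_t q = static_cast<int8_t>(x / scale + zero_point);

	/* Dequantize: int8 -> float */
	float x_back = (q - zero_point) * scale;

	printf("x=%f quantized=%d dequantized=%f\n",
	       static_cast<double>(x), q, static_cast<double>(x_back));
	return 0;
}

Note that, like the sample, the conversion truncates rather than rounds; the dequantized value lands close to, but not exactly on, the original input.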

0 commit comments