
Commit 1f0bd2e

Sync from tflite-micro. (tensorflow#176)
1 parent 33795b6 commit 1f0bd2e

15 files changed (+89 / -88 lines)

src/tensorflow/lite/c/builtin_op_data.h

Lines changed: 1 addition & 3 deletions

@@ -1,4 +1,4 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -15,8 +15,6 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
 #define TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_
 
-/// For documentation, see
-/// third_party/tensorflow/lite/core/c/builtin_op_data.h.
 #include "tensorflow/lite/core/c/builtin_op_data.h"
 
 #endif  // TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_

src/tensorflow/lite/c/c_api_types.h

Lines changed: 1 addition & 7 deletions

@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,15 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-
-// This file declares types used by the pure C inference API defined in c_api.h,
-// some of which are also used in the C++ and C kernel and interpreter APIs.
-
 #ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_
 #define TENSORFLOW_LITE_C_C_API_TYPES_H_
 
-/// For documentation, see
-/// third_party/tensorflow/lite/core/c/c_api_types.h.
 #include "tensorflow/lite/core/c/c_api_types.h"
 
 #endif  // TENSORFLOW_LITE_C_C_API_TYPES_H_

src/tensorflow/lite/c/common.h

Lines changed: 6 additions & 2 deletions

@@ -36,8 +36,12 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_C_COMMON_H_
 #define TENSORFLOW_LITE_C_COMMON_H_
 
-/// For documentation, see
-/// third_party/tensorflow/lite/core/c/common.h.
 #include "tensorflow/lite/core/c/common.h"
 
+// TfLiteOpaqueDelegate: allows delegation of nodes to alternative backends.
+// TfLiteOpaqueDelegate is an abstract type that is intended to have the same
+// role as TfLiteDelegate, but without necessarily exposing the implementation
+// details of how delegates are implemented.
+typedef TfLiteDelegate TfLiteOpaqueDelegate;
+
 #endif  // TENSORFLOW_LITE_C_COMMON_H_
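
The new typedef makes the opaque delegate type a plain alias for the concrete one on TFLite Micro. A minimal sketch of what this implies for client code (the function below is hypothetical, for illustration only):

// Hypothetical illustration: on TFLM, TfLiteOpaqueDelegate is a typedef for
// TfLiteDelegate, so the two pointer types are interchangeable without casts.
#include "tensorflow/lite/c/common.h"

void InspectDelegate(TfLiteOpaqueDelegate* opaque_delegate) {
  TfLiteDelegate* delegate = opaque_delegate;  // no conversion needed on TFLM
  (void)delegate;
}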

src/tensorflow/lite/micro/kernels/activations_common.cpp

Lines changed: 2 additions & 2 deletions

@@ -55,8 +55,8 @@ void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
                          ReluOpData* data) {
   float act_min = 0.0;
   float act_max = std::numeric_limits<float>::infinity();
-  double real_multiplier = static_cast<double>(input->params.scale) /
-                           static_cast<double>(output->params.scale);
+  double real_multiplier =
+      static_cast<double>(input->params.scale / output->params.scale);
 
   const RuntimeShape input_shape = GetTensorShape(input);
   const RuntimeShape output_shape = GetTensorShape(output);
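
The rewritten expression divides the two float scales in single precision and only then widens the quotient to double, whereas the old code widened each operand first and divided in double; the two results can differ in the low-order bits. A self-contained sketch of the distinction (the scale values below are made up for illustration):

#include <cstdio>

int main() {
  // Made-up quantization scales, for illustration only.
  float input_scale = 0.0057842616f;
  float output_scale = 0.0031459723f;

  // New form: divide in float, then widen the quotient.
  double a = static_cast<double>(input_scale / output_scale);
  // Old form: widen each operand, then divide in double.
  double b = static_cast<double>(input_scale) /
             static_cast<double>(output_scale);

  // The two typically agree to ~7 significant digits (float precision)
  // but may differ beyond that.
  std::printf("%.17g\n%.17g\n", a, b);
  return 0;
}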

src/tensorflow/lite/micro/kernels/cmsis_nn/conv.cpp

Lines changed: 6 additions & 19 deletions

@@ -20,7 +20,6 @@ limitations under the License.
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/reference/conv.h"
 #include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
@@ -366,9 +365,11 @@ TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
       *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpData& data = *(static_cast<const OpData*>(node->user_data));
+  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
+      context, data.reference_op_data.filter_buffer_index, filter);
 
-  return EvalQuantizedPerChannel(context, node, params, data, input, filter,
-                                 bias, output);
+  return EvalQuantizedPerChannel(context, node, params, data, input,
+                                 &filter_int8, bias, output);
 }
 
 TfLiteStatus EvalInt16x8(TfLiteContext* context, TfLiteNode* node) {
@@ -419,22 +420,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       (input->type == kTfLiteInt8 && filter->type == kTfLiteInt4),
       "Hybrid models are not supported on TFLite Micro.");
 
-  TfLiteEvalTensor filter_int8;
-
-  if (filter->type == kTfLiteInt4) {
-    filter_int8.data.data = static_cast<int8_t*>(context->GetScratchBuffer(
-        context, data.reference_op_data.filter_buffer_index));
-
-    filter_int8.dims = filter->dims;
-    filter_int8.type = kTfLiteInt8;
-    tflite::tensor_utils::UnpackDenseInt4IntoInt8(
-        tflite::micro::GetTensorData<int8_t>(filter),
-        tflite::micro::GetTensorShape(filter).FlatSize(),
-        tflite::micro::GetTensorData<int8_t>(&filter_int8));
-
-  } else {
-    filter_int8 = *filter;
-  }
+  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
+      context, data.reference_op_data.filter_buffer_index, filter);
 
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32: {
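
Beyond removing the duplicated unpacking logic, this changes the behavior of EvalInt8: it previously forwarded the filter tensor as-is, without the int4-to-int8 unpacking that Eval performed, so routing both paths through the shared helper means the int8-specialized entry point now handles kTfLiteInt4 filters as well. The same refactor is applied to depthwise_conv and fully_connected below.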

src/tensorflow/lite/micro/kernels/cmsis_nn/depthwise_conv.cpp

Lines changed: 7 additions & 16 deletions

@@ -19,7 +19,6 @@ limitations under the License.
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
 #include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h"
@@ -337,20 +336,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       (NumInputs(node) == 3)
           ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
           : nullptr;
-  TfLiteEvalTensor filter_int8;
 
-  if (filter->type == kTfLiteInt4) {
-    filter_int8.data.data = static_cast<int8_t*>(context->GetScratchBuffer(
-        context, data.reference_op_data.filter_buffer_index));
-    filter_int8.dims = filter->dims;
-    filter_int8.type = kTfLiteInt8;
-    tflite::tensor_utils::UnpackDenseInt4IntoInt8(
-        tflite::micro::GetTensorData<int8_t>(filter),
-        tflite::micro::GetTensorShape(filter).FlatSize(),
-        tflite::micro::GetTensorData<int8_t>(&filter_int8));
-  } else {
-    filter_int8 = *filter;
-  }
+  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
+      context, data.reference_op_data.filter_buffer_index, filter);
 
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32: {
@@ -411,8 +399,11 @@ TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
           ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
           : nullptr;
 
-  EvalQuantizedPerChannel(context, node, params, data, input, filter, bias,
-                          output);
+  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
+      context, data.reference_op_data.filter_buffer_index, filter);
+
+  EvalQuantizedPerChannel(context, node, params, data, input, &filter_int8,
+                          bias, output);
   return kTfLiteOk;
 }
 
src/tensorflow/lite/micro/kernels/cmsis_nn/fully_connected.cpp

Lines changed: 8 additions & 18 deletions

@@ -319,22 +319,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const OpData& data = *(static_cast<const OpData*>(node->user_data));
 
-  TfLiteEvalTensor filter_int8;
+  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
+      context, data.reference_op_data.filter_buffer_index, filter);
 
-  if (filter->type == kTfLiteInt4) {
-    filter_int8.data.data = static_cast<int8_t*>(context->GetScratchBuffer(
-        context, data.reference_op_data.filter_buffer_index));
-
-    filter_int8.dims = filter->dims;
-    filter_int8.type = kTfLiteInt8;
-    tflite::tensor_utils::UnpackDenseInt4IntoInt8(
-        tflite::micro::GetTensorData<int8_t>(filter),
-        tflite::micro::GetTensorShape(filter).FlatSize(),
-        tflite::micro::GetTensorData<int8_t>(&filter_int8));
-
-  } else {
-    filter_int8 = *filter;
-  }
   // Checks in Prepare ensure input, output and filter types are all the same.
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -352,8 +339,7 @@
       break;
     }
     case kTfLiteInt8: {
-      switch (filter->type) {
-        case kTfLiteInt4:
+      switch (filter_int8.type) {
         case kTfLiteInt8:
           return EvalQuantizedInt8(context, node, data, input, &filter_int8,
                                    bias, output);
@@ -403,7 +389,11 @@ TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) {
     return kTfLiteError;
  }
 
-  return EvalQuantizedInt8(context, node, data, input, filter, bias, output);
+  TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor(
+      context, data.reference_op_data.filter_buffer_index, filter);
+
+  return EvalQuantizedInt8(context, node, data, input, &filter_int8, bias,
+                           output);
 }
 
 TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) {

src/tensorflow/lite/micro/kernels/fully_connected.h

Lines changed: 1 addition & 1 deletion

@@ -73,7 +73,7 @@ TfLiteStatus CalculateOpDataFullyConnected(
 // (reference or optimized) must define this function.
 TfLiteRegistration Register_FULLY_CONNECTED();
 
-#if defined(ARDUINO) || defined(HEXAGON)
+#if defined(ARDUINO) || defined(HEXAGON) || defined(XTENSA)
 // Returns a TfLiteRegistration struct for kernel variant that only supports
 // int8.
 TfLiteRegistration Register_FULLY_CONNECTED_INT8();
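
With XTENSA added to the guard, Xtensa builds now also expose the int8-only registration. A hedged usage sketch, assuming the MicroMutableOpResolver overload that accepts an explicit registration (as in recent tflite-micro):

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

TfLiteStatus RegisterOps(tflite::MicroMutableOpResolver<1>& resolver) {
  // Use the int8-only fully-connected kernel variant, available when
  // ARDUINO, HEXAGON, or (after this change) XTENSA is defined.
  return resolver.AddFullyConnected(tflite::Register_FULLY_CONNECTED_INT8());
}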

src/tensorflow/lite/micro/kernels/fully_connected_common.cpp

Lines changed: 2 additions & 1 deletion

@@ -64,12 +64,13 @@ TfLiteStatus CalculateOpDataFullyConnected(
     QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                        &data->output_shift);
 
-    data->input_zero_point = input->params.zero_point;
     // Filter weights will always be symmetric quantized since we only support
     // int8 quantization. See
     // https://github.com/tensorflow/tensorflow/issues/44912 for additional
     // context.
     TFLITE_DCHECK(filter->params.zero_point == 0);
+
+    data->input_zero_point = input->params.zero_point;
     data->filter_zero_point = filter->params.zero_point;
     data->output_zero_point = output->params.zero_point;
 
src/tensorflow/lite/micro/kernels/kernel_util.cpp

Lines changed: 20 additions & 0 deletions

@@ -16,6 +16,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
 
 #include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
 #include "tensorflow/lite/micro/memory_helpers.h"
 #include "tensorflow/lite/micro/micro_log.h"
 
@@ -256,5 +257,24 @@ TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context,
   return kTfLiteOk;
 }
 
+TfLiteEvalTensor MakeUnpackedInt4Tensor(TfLiteContext* context,
+                                        int scratch_buffer_index,
+                                        const TfLiteEvalTensor* tensor) {
+  if (tensor->type != kTfLiteInt4) {
+    return *tensor;
+  }
+
+  TfLiteEvalTensor new_tensor;
+  new_tensor.data.data = static_cast<int8_t*>(
+      context->GetScratchBuffer(context, scratch_buffer_index));
+  new_tensor.dims = tensor->dims;
+  new_tensor.type = kTfLiteInt8;
+  tflite::tensor_utils::UnpackDenseInt4IntoInt8(
+      tflite::micro::GetTensorData<int8_t>(tensor),
+      tflite::micro::GetTensorShape(tensor).FlatSize(),
+      tflite::micro::GetTensorData<int8_t>(&new_tensor));
+  return new_tensor;
+}
+
 }  // namespace micro
 }  // namespace tflite
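
The kernels above call this helper as tflite::micro::MakeUnpackedInt4Tensor, so a matching declaration presumably lands in kernel_util.h; that file is not part of this excerpt, but a plausible sketch of the declaration is:

// Sketch of the declaration, inferred from the definition above; the actual
// kernel_util.h change is not shown in this diff.
namespace tflite {
namespace micro {

// Returns *tensor unchanged unless tensor->type is kTfLiteInt4; in that case
// the packed int4 data is unpacked into the int8 scratch buffer identified by
// scratch_buffer_index and an int8 view of that buffer is returned.
TfLiteEvalTensor MakeUnpackedInt4Tensor(TfLiteContext* context,
                                        int scratch_buffer_index,
                                        const TfLiteEvalTensor* tensor);

}  // namespace micro
}  // namespace tflite

Note that the helper returns the TfLiteEvalTensor by value: the struct is small, and its data pointer aliases either the original tensor's buffer or the scratch buffer, so no copy of the weights is made beyond the unpacking itself.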
