Skip to content

Commit a0cb185

Browse files
authored
Automated sync from github.com/tensorflow/tensorflow (#3146)
BUG=automated sync from upstream NO_CHECK_TFLITE_FILES=automated sync from upstream
1 parent a8d13ff commit a0cb185

File tree

6 files changed

+44
-4
lines changed

6 files changed

+44
-4
lines changed

tensorflow/lite/core/c/common.cc

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -649,4 +649,8 @@ TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
649649
return kTfLiteRunStepUnknown;
650650
}
651651

652+
// Returns a sentinel value to be used as the user_data field of a TfLiteNode
653+
// when the kernel initialization fails.
654+
void* TfLiteKernelInitFailed() { return reinterpret_cast<void*>(-1); }
655+
652656
} // extern "C"

tensorflow/lite/core/c/common.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1161,6 +1161,11 @@ typedef struct TfLiteRegistration {
11611161
/// NOTE: if the data is already in the desired format, simply implement this
11621162
/// function to return `nullptr` and implement the free function to be a
11631163
/// no-op.
1164+
///
1165+
/// NOTE: For a Delegate kernel, returns `TfLiteKernelInitFailed()` if it
1166+
/// fails on the initialization. This eventually causes user's API call to
1167+
/// InterpreterBuilder::operator() or Interpreter::ModifyGraphWithDelegate()
1168+
/// to return an error.
11641169
void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
11651170

11661171
/// The pointer `buffer` is the data previously returned by an init
@@ -1499,6 +1504,10 @@ TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t);
14991504
/// operations.
15001505
TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t);
15011506

1507+
/// Returns a sentinel value to be used as the user_data field of a TfLiteNode
1508+
/// when the kernel initialization fails.
1509+
void* TfLiteKernelInitFailed();
1510+
15021511
/** @} */
15031512
// Ends `\addtogroup`, it's important for the doc generator that this doesn't
15041513
// include the CC code below.

tensorflow/lite/kernels/internal/portable_tensor_utils.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -170,6 +170,7 @@ inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch,
170170
tensor_utils::SymmetricQuantizeFloats(
171171
float_data_ptr + offset, n_data, quantized_data_ptr + offset,
172172
&unused_min, &unused_max, &scaling_factors[b]);
173+
if (zero_points) zero_points[b] = 0;
173174
}
174175
}
175176
}

tensorflow/lite/kernels/internal/reference/batch_matmul.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ limitations under the License.
2121
#include "tensorflow/lite/kernels/internal/common.h"
2222
#include "tensorflow/lite/kernels/internal/compatibility.h"
2323
#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
24+
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
2425
#include "tensorflow/lite/kernels/internal/types.h"
2526

2627
namespace tflite {

tensorflow/lite/kernels/internal/reference/div.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,18 @@ inline void Div(const ArithmeticParams& params,
9999
DivElementwise(flat_size, params, input1_data, input2_data, output_data);
100100
}
101101

102+
inline void Div(const ArithmeticParams& params,
103+
const RuntimeShape& input1_shape, const int16_t* input1_data,
104+
const RuntimeShape& input2_shape, const int16_t* input2_data,
105+
const RuntimeShape& output_shape, int16_t* output_data) {
106+
TFLITE_DCHECK_LE(params.quantized_activation_min,
107+
params.quantized_activation_max);
108+
const int flat_size =
109+
MatchingElementsSize(input1_shape, input2_shape, output_shape);
110+
111+
DivElementwise(flat_size, params, input1_data, input2_data, output_data);
112+
}
113+
102114
template <typename T, int N = 5>
103115
inline void BroadcastDivSlowQuantized(
104116
const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape,
@@ -177,6 +189,19 @@ inline void BroadcastDivSlow(const ArithmeticParams& params,
177189
input2_data, unextended_output_shape, output_data);
178190
}
179191

192+
template <int N = 5>
193+
inline void BroadcastDivSlow(const ArithmeticParams& params,
194+
const RuntimeShape& unextended_input1_shape,
195+
const int16_t* input1_data,
196+
const RuntimeShape& unextended_input2_shape,
197+
const int16_t* input2_data,
198+
const RuntimeShape& unextended_output_shape,
199+
int16_t* output_data) {
200+
BroadcastDivSlowQuantized<int16_t, N>(
201+
params, unextended_input1_shape, input1_data, unextended_input2_shape,
202+
input2_data, unextended_output_shape, output_data);
203+
}
204+
180205
// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
181206
// dimensionality if the runtime code does a single loop over one dimension
182207
// that handles broadcasting as the base case. The code generator would then

tensorflow/lite/kernels/internal/reference/prelu.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,10 +26,10 @@ namespace tflite {
2626
namespace reference_ops {
2727

2828
// Broadcast prelu to output_shape for quantized uint8_t/int8_t data.
29-
template <typename T>
29+
template <typename T, typename U>
3030
inline void BroadcastPrelu4DSlow(
3131
const PreluParams& params, const RuntimeShape& input_shape,
32-
const T* input_data, const RuntimeShape& alpha_shape, const T* alpha_data,
32+
const T* input_data, const RuntimeShape& alpha_shape, const U* alpha_data,
3333
const RuntimeShape& output_shape, T* output_data) {
3434
TFLITE_DCHECK_LE(input_shape.DimensionsCount(), 4);
3535
TFLITE_DCHECK_LE(alpha_shape.DimensionsCount(), 4);
@@ -74,10 +74,10 @@ inline void BroadcastPrelu4DSlow(
7474
}
7575
}
7676

77-
template <typename T>
77+
template <typename T, typename U>
7878
inline void Prelu(const PreluParams& params, const RuntimeShape& input_shape,
7979
const T* input_data, const RuntimeShape& alpha_shape,
80-
const T* alpha_data, const RuntimeShape& output_shape,
80+
const U* alpha_data, const RuntimeShape& output_shape,
8181
T* output_data) {
8282
const int32_t quantized_min = std::numeric_limits<T>::min();
8383
const int32_t quantized_max = std::numeric_limits<T>::max();

0 commit comments

Comments (0)