diff --git a/FeatureRequest.md b/FeatureRequest.md
index 1a09d17d..58b2acce 100644
--- a/FeatureRequest.md
+++ b/FeatureRequest.md
@@ -11,6 +11,20 @@
____
+
+
+#### #15 Calibration for FishEye image
+
+* https://github.com/jomjol/AI-on-the-edge-device/issues/507
+
+1. The development of such a correction algorithm with the libraries that are available for the ESP32 environment.
+2. New module for integration of the flow into the image processing flow.
+3. Extension of the configuration (config.ini) and html-pages
+4. Parameter adjustment and testing for every different fish-eye module
+5. Maintenance for further updates / modules, ...
+
+
+
#### #14 Backup and restore option for configuration
* https://github.com/jomjol/AI-on-the-edge-device/issues/459
diff --git a/README.md b/README.md
index 351573f2..4467534a 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,15 @@ In other cases you can contact the developer via email:
#include
#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include
+#ifdef __cplusplus
+}
+#endif
#include "esp_err.h"
#include "esp_log.h"
diff --git a/code/components/jomjol_fileserver_ota/server_help.cpp b/code/components/jomjol_fileserver_ota/server_help.cpp
index 25ffbcc4..4bb06ffa 100644
--- a/code/components/jomjol_fileserver_ota/server_help.cpp
+++ b/code/components/jomjol_fileserver_ota/server_help.cpp
@@ -5,7 +5,14 @@
#include
#include
#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include
+#ifdef __cplusplus
+}
+#endif
#include "esp_err.h"
#include "esp_log.h"
diff --git a/code/components/jomjol_flowcontroll/ClassFlowControll.cpp b/code/components/jomjol_flowcontroll/ClassFlowControll.cpp
index ce1b133c..c76a7ec2 100644
--- a/code/components/jomjol_flowcontroll/ClassFlowControll.cpp
+++ b/code/components/jomjol_flowcontroll/ClassFlowControll.cpp
@@ -6,7 +6,15 @@
#include "freertos/task.h"
#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include
+#ifdef __cplusplus
+}
+#endif
+
#include "ClassLogFile.h"
#include "time_sntp.h"
#include "Helper.h"
diff --git a/code/components/jomjol_flowcontroll/ClassFlowImage.cpp b/code/components/jomjol_flowcontroll/ClassFlowImage.cpp
index 1473fe6c..c50b8cda 100644
--- a/code/components/jomjol_flowcontroll/ClassFlowImage.cpp
+++ b/code/components/jomjol_flowcontroll/ClassFlowImage.cpp
@@ -2,7 +2,15 @@
#include
#include
#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include
+#ifdef __cplusplus
+}
+#endif
+
#include "time_sntp.h"
#include "ClassLogFile.h"
#include "CImageBasis.h"
diff --git a/code/components/jomjol_helper/CMakeLists.txt b/code/components/jomjol_helper/CMakeLists.txt
index 948e3829..c57b54d3 100644
--- a/code/components/jomjol_helper/CMakeLists.txt
+++ b/code/components/jomjol_helper/CMakeLists.txt
@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
- REQUIRES tfmicro jomjol_logfile)
+ REQUIRES tflite-lib jomjol_logfile)
diff --git a/code/components/jomjol_helper/Helper.cpp b/code/components/jomjol_helper/Helper.cpp
index d8537b86..fe299f54 100644
--- a/code/components/jomjol_helper/Helper.cpp
+++ b/code/components/jomjol_helper/Helper.cpp
@@ -6,7 +6,15 @@
#include "Helper.h"
#include
#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include
+#ifdef __cplusplus
+}
+#endif
+
#include
#include
diff --git a/code/components/jomjol_logfile/ClassLogFile.cpp b/code/components/jomjol_logfile/ClassLogFile.cpp
index a026d985..04a4df4d 100644
--- a/code/components/jomjol_logfile/ClassLogFile.cpp
+++ b/code/components/jomjol_logfile/ClassLogFile.cpp
@@ -3,7 +3,15 @@
#include
#include
#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include
+#ifdef __cplusplus
+}
+#endif
+
#include "Helper.h"
static const char *TAG = "log";
diff --git a/code/components/jomjol_mqtt/CMakeLists.txt b/code/components/jomjol_mqtt/CMakeLists.txt
index 432727eb..f89faf81 100644
--- a/code/components/jomjol_mqtt/CMakeLists.txt
+++ b/code/components/jomjol_mqtt/CMakeLists.txt
@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
- REQUIRES tfmicro mqtt jomjol_logfile)
+ REQUIRES tflite-lib mqtt jomjol_logfile)
diff --git a/code/components/jomjol_time_sntp/CMakeLists.txt b/code/components/jomjol_time_sntp/CMakeLists.txt
index 948e3829..c57b54d3 100644
--- a/code/components/jomjol_time_sntp/CMakeLists.txt
+++ b/code/components/jomjol_time_sntp/CMakeLists.txt
@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
- REQUIRES tfmicro jomjol_logfile)
+ REQUIRES tflite-lib jomjol_logfile)
diff --git a/code/components/tflite-lib/CMakeLists.txt b/code/components/tflite-lib/CMakeLists.txt
new file mode 100644
index 00000000..fab7027a
--- /dev/null
+++ b/code/components/tflite-lib/CMakeLists.txt
@@ -0,0 +1,50 @@
+cmake_minimum_required(VERSION 3.5)
+
+set(tflite_dir "${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/lite")
+set(tfmicro_dir "${tflite_dir}/micro")
+set(tfmicro_frontend_dir "${tflite_dir}/experimental/microfrontend/lib")
+set(tfmicro_kernels_dir "${tfmicro_dir}/kernels")
+
+file(GLOB srcs_micro
+ "${tfmicro_dir}/*.cc"
+ "${tfmicro_dir}/*.c")
+
+file(GLOB src_micro_frontend
+ "${tfmicro_frontend_dir}/*.c"
+ "${tfmicro_frontend_dir}/*.cc")
+file(GLOB srcs_kernels
+ "${tfmicro_kernels_dir}/*.c"
+ "${tfmicro_kernels_dir}/*.cc")
+
+set(lib_srcs
+ "${srcs_micro}"
+ "${srcs_kernels}"
+ "${src_micro_frontend}"
+ "${tflite_dir}/kernels/kernel_util.cc"
+ "${tflite_dir}/micro/memory_planner/greedy_memory_planner.cc"
+ "${tflite_dir}/micro/memory_planner/linear_memory_planner.cc"
+ "${tflite_dir}/c/common.c"
+ "${tflite_dir}/core/api/error_reporter.cc"
+ "${tflite_dir}/core/api/flatbuffer_conversions.cc"
+ "${tflite_dir}/core/api/op_resolver.cc"
+ "${tflite_dir}/core/api/tensor_utils.cc"
+ "${tflite_dir}/kernels/internal/quantization_util.cc"
+ "${tflite_dir}/schema/schema_utils.cc")
+
+idf_component_register(
+ SRCS "${lib_srcs}"
+ INCLUDE_DIRS "." "third_party/gemmlowp"
+ "third_party/flatbuffers/include"
+ "third_party/ruy"
+ "third_party/kissfft")
+
+# Reduce the level of paranoia to be able to compile TF sources
+target_compile_options(${COMPONENT_LIB} PRIVATE
+ -Wno-maybe-uninitialized
+ -Wno-missing-field-initializers
+ -Wno-type-limits)
+
+target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP -DESP_NN -Wno-nonnull -Wno-nonnull -Wno-nonnull)
+target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP -DESP_NN -Wno-return-type -Wno-strict-aliasing -std=gnu++14 -Wno-return-type -Wno-strict-aliasing -std=gnu++14 -Wno-return-type -Wno-strict-aliasing -std=gnu++14 >)
+target_compile_options(${COMPONENT_LIB} INTERFACE $<$<IN_LIST:-DTF_LITE_STATIC_MEMORY,$<TARGET_PROPERTY:${COMPONENT_LIB},COMPILE_OPTIONS>>:-DTF_LITE_STATIC_MEMORY>)
+target_link_libraries(${COMPONENT_LIB} PRIVATE -lm)
diff --git a/code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h b/code/components/tflite-lib/tensorflow/lite/c/builtin_op_data.h
similarity index 97%
rename from code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h
rename to code/components/tflite-lib/tensorflow/lite/c/builtin_op_data.h
index ed5ac004..7f160972 100644
--- a/code/components/tfmicro/tensorflow/lite/c/builtin_op_data.h
+++ b/code/components/tflite-lib/tensorflow/lite/c/builtin_op_data.h
@@ -502,6 +502,22 @@ typedef struct {
const char* shared_name;
} TfLiteVarHandleParams;
+typedef struct {
+ int seed;
+ int seed2;
+} TfLiteRandomParams;
+
+typedef struct {
+ int num_boundaries;
+ // This points to the memory stored in the model (flatbuffer),
+ // and is not owned.
+ const float* boundaries;
+} TfLiteBucketizeParams;
+
+typedef struct {
+ bool approximate;
+} TfLiteGeluParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
diff --git a/code/components/tfmicro/tensorflow/lite/c/c_api_types.h b/code/components/tflite-lib/tensorflow/lite/c/c_api_types.h
similarity index 85%
rename from code/components/tfmicro/tensorflow/lite/c/c_api_types.h
rename to code/components/tflite-lib/tensorflow/lite/c/c_api_types.h
index e9cd4c77..678dfae6 100644
--- a/code/components/tfmicro/tensorflow/lite/c/c_api_types.h
+++ b/code/components/tflite-lib/tensorflow/lite/c/c_api_types.h
@@ -43,6 +43,9 @@ extern "C" {
#endif // _WIN32
#endif // SWIG
+// Note that new error status values may be added in future in order to
+// indicate more fine-grained internal states, therefore, applications should
+// not rely on status values being members of the enum.
typedef enum TfLiteStatus {
kTfLiteOk = 0,
@@ -54,7 +57,7 @@ typedef enum TfLiteStatus {
// Generally referring to an error in applying a delegate due to
// incompatibility between runtime and delegate, e.g., this error is returned
- // when trying to apply a TfLite delegate onto a model graph that's already
+ // when trying to apply a TF Lite delegate onto a model graph that's already
// immutable.
kTfLiteApplicationError = 3,
@@ -68,7 +71,12 @@ typedef enum TfLiteStatus {
// Generally referring to data-reading issues in delegate serialization.
// See tflite::delegates::Serialization.
- kTfLiteDelegateDataReadError = 5,
+ kTfLiteDelegateDataReadError = 6,
+
+ // Generally referring to issues when the TF Lite model has ops that cannot be
+ // resolved at runtime. This could happen when the specific op is not
+ // registered or built with the TF Lite framework.
+ kTfLiteUnresolvedOps = 7,
} TfLiteStatus;
// Types supported by tensor
diff --git a/code/components/tfmicro/tensorflow/lite/c/common.c b/code/components/tflite-lib/tensorflow/lite/c/common.c
similarity index 85%
rename from code/components/tfmicro/tensorflow/lite/c/common.c
rename to code/components/tflite-lib/tensorflow/lite/c/common.c
index 5456a889..d149d22c 100644
--- a/code/components/tfmicro/tensorflow/lite/c/common.c
+++ b/code/components/tflite-lib/tensorflow/lite/c/common.c
@@ -21,9 +21,15 @@ limitations under the License.
#include
#endif // TF_LITE_STATIC_MEMORY
-int TfLiteIntArrayGetSizeInBytes(int size) {
+size_t TfLiteIntArrayGetSizeInBytes(int size) {
static TfLiteIntArray dummy;
- return sizeof(dummy) + sizeof(dummy.data[0]) * size;
+
+ size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
+#if defined(_MSC_VER)
+ // Context for why this is needed is in http://b/189926408#comment21
+ computed_size -= sizeof(dummy.data[0]);
+#endif
+ return computed_size;
}
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
@@ -45,7 +51,7 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
#ifndef TF_LITE_STATIC_MEMORY
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
- int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
+ size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size);
if (alloc_size <= 0) return NULL;
TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
if (!ret) return ret;
@@ -68,7 +74,13 @@ void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }
int TfLiteFloatArrayGetSizeInBytes(int size) {
static TfLiteFloatArray dummy;
- return sizeof(dummy) + sizeof(dummy.data[0]) * size;
+
+ int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
+#if defined(_MSC_VER)
+ // Context for why this is needed is in http://b/189926408#comment21
+ computed_size -= sizeof(dummy.data[0]);
+#endif
+ return computed_size;
}
#ifndef TF_LITE_STATIC_MEMORY
@@ -176,6 +188,26 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
tensor->quantization.params = NULL;
}
+TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) {
+ if (!src || !dst)
+ return kTfLiteOk;
+ if (src->bytes != dst->bytes)
+ return kTfLiteError;
+ if (src == dst)
+ return kTfLiteOk;
+
+ dst->type = src->type;
+ if (dst->dims)
+ TfLiteIntArrayFree(dst->dims);
+ dst->dims = TfLiteIntArrayCopy(src->dims);
+ memcpy(dst->data.raw, src->data.raw, src->bytes);
+ dst->buffer_handle = src->buffer_handle;
+ dst->data_is_stale = src->data_is_stale;
+ dst->delegate = src->delegate;
+
+ return kTfLiteOk;
+}
+
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
if (tensor->allocation_type != kTfLiteDynamic &&
tensor->allocation_type != kTfLitePersistentRo) {
diff --git a/code/components/tfmicro/tensorflow/lite/c/common.h b/code/components/tflite-lib/tensorflow/lite/c/common.h
similarity index 95%
rename from code/components/tfmicro/tensorflow/lite/c/common.h
rename to code/components/tflite-lib/tensorflow/lite/c/common.h
index e8c89429..7056d1e2 100644
--- a/code/components/tfmicro/tensorflow/lite/c/common.h
+++ b/code/components/tflite-lib/tensorflow/lite/c/common.h
@@ -80,12 +80,16 @@ typedef struct TfLiteExternalContext {
// indices
typedef struct TfLiteIntArray {
int size;
-// gcc 6.1+ have a bug where flexible members aren't properly handled
-// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
-#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
- __GNUC_MINOR__ >= 1) || \
- defined(HEXAGON) || \
+
+#if defined(_MSC_VER)
+ // Context for why this is needed is in http://b/189926408#comment21
+ int data[1];
+#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
+ __GNUC_MINOR__ >= 1) || \
+ defined(HEXAGON) || \
(defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
+ // gcc 6.1+ have a bug where flexible members aren't properly handled
+ // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
int data[0];
#else
int data[];
@@ -94,7 +98,7 @@ typedef struct TfLiteIntArray {
// Given the size (number of elements) in a TfLiteIntArray, calculate its size
// in bytes.
-int TfLiteIntArrayGetSizeInBytes(int size);
+size_t TfLiteIntArrayGetSizeInBytes(int size);
#ifndef TF_LITE_STATIC_MEMORY
// Create a array of a given `size` (uninitialized entries).
@@ -121,11 +125,15 @@ void TfLiteIntArrayFree(TfLiteIntArray* a);
// Fixed size list of floats. Used for per-channel quantization.
typedef struct TfLiteFloatArray {
int size;
-// gcc 6.1+ have a bug where flexible members aren't properly handled
-// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
-// This also applies to the toolchain used for Qualcomm Hexagon DSPs.
-#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
- __GNUC_MINOR__ >= 1
+#if defined(_MSC_VER)
+ // Context for why this is needed is in http://b/189926408#comment21
+ float data[1];
+#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
+ __GNUC_MINOR__ >= 1) || \
+ defined(HEXAGON) || \
+ (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
+ // gcc 6.1+ have a bug where flexible members aren't properly handled
+ // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
float data[0];
#else
float data[];
@@ -562,6 +570,10 @@ typedef struct TfLiteNode {
// Outputs to this node expressed as indices into the simulator's tensors.
TfLiteIntArray* outputs;
+ // intermediate tensors to this node expressed as indices into the simulator's
+ // tensors.
+ TfLiteIntArray* intermediates;
+
// Opaque data provided by the node implementer through `Registration.init`.
void* user_data;
@@ -614,6 +626,16 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
const void* allocation, bool is_variable,
TfLiteTensor* tensor);
+// Copies the contents of 'src' in 'dst'.
+// Function does nothing if either 'src' or 'dst' is passed as nullptr and
+// return kTfLiteOk.
+// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size.
+// Note function copies contents, so it won't create new data pointer
+// or change allocation type.
+// All Tensor related properties will be copied from 'src' to 'dst' like
+// quantization, sparsity, ...
+TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst);
+
// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
// types other than kTfLiteDynamic will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
@@ -819,6 +841,9 @@ typedef struct TfLiteContext {
typedef struct TfLiteRegistration {
// Initializes the op from serialized data.
+ // Called only *once* for the lifetime of the op, so any one-time allocations
+ // should be made here (unless they depend on tensor sizes).
+ //
// If a built-in op:
// `buffer` is the op's params data (TfLiteLSTMParams*).
// `length` is zero.
@@ -841,6 +866,7 @@ typedef struct TfLiteRegistration {
// prepare is called when the inputs this node depends on have been resized.
// context->ResizeTensor() can be called to request output tensors to be
// resized.
+ // Can be called multiple times for the lifetime of the op.
//
// Returns kTfLiteOk on success.
TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/error_reporter.cc b/code/components/tflite-lib/tensorflow/lite/core/api/error_reporter.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/core/api/error_reporter.cc
rename to code/components/tflite-lib/tensorflow/lite/core/api/error_reporter.cc
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/error_reporter.h b/code/components/tflite-lib/tensorflow/lite/core/api/error_reporter.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/core/api/error_reporter.h
rename to code/components/tflite-lib/tensorflow/lite/core/api/error_reporter.h
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc b/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc
similarity index 90%
rename from code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
rename to code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc
index 5eee4406..dfa0ccfd 100644
--- a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
+++ b/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -131,6 +131,17 @@ TfLitePadding ConvertPadding(Padding padding) {
return kTfLitePaddingUnknown;
}
+// Converts the flatbuffer mirror padding enum to what is used at runtime.
+TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
+ switch (padding) {
+ case MirrorPadMode_REFLECT:
+ return kTfLiteMirrorPaddingReflect;
+ case MirrorPadMode_SYMMETRIC:
+ return kTfLiteMirrorPaddingSymmetric;
+ }
+ return kTfLiteMirrorPaddingUnknown;
+}
+
#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
@@ -181,6 +192,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseArgMin(op, error_reporter, allocator, builtin_data);
}
+ case BuiltinOperator_ASSIGN_VARIABLE: {
+ return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_AVERAGE_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
@@ -193,6 +208,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}
+ case BuiltinOperator_CALL_ONCE: {
+ return ParseCallOnce(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
@@ -325,6 +344,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParsePool(op, error_reporter, allocator, builtin_data);
}
+ case BuiltinOperator_MIRROR_PAD: {
+ return ParseMirrorPad(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_MEAN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
@@ -369,6 +392,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseQuantize(op, error_reporter, allocator, builtin_data);
}
+ case BuiltinOperator_READ_VARIABLE: {
+ return ParseReadVariable(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_REDUCE_ANY: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
@@ -486,6 +513,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseUnpack(op, error_reporter, allocator, builtin_data);
}
+ case BuiltinOperator_VAR_HANDLE: {
+ return ParseVarHandle(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_ZEROS_LIKE: {
return ParseZerosLike(op, error_reporter, allocator, builtin_data);
}
@@ -606,21 +637,8 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
- auto params =
- safe_allocator.Allocate();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* seq_lstm_params =
- op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
- params->activation =
- ConvertActivation(seq_lstm_params->fused_activation_function());
- params->cell_clip = seq_lstm_params->cell_clip();
- params->proj_clip = seq_lstm_params->proj_clip();
- params->time_major = seq_lstm_params->time_major();
- params->asymmetric_quantize_inputs =
- seq_lstm_params->asymmetric_quantize_inputs();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
+ return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator,
+ builtin_data);
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
@@ -693,19 +711,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_MIRROR_PAD: {
- auto params = safe_allocator.Allocate();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
- if (mirror_pad_params != nullptr) {
- params->mode =
- mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
- ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
- : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_UNIQUE: {
auto params = safe_allocator.Allocate();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -750,16 +755,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_CALL_ONCE: {
- auto params = safe_allocator.Allocate();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* call_once_params =
- op->builtin_options_as_CallOnceOptions()) {
- params->init_subgraph_index = call_once_params->init_subgraph_index();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_CONV_3D:
case BuiltinOperator_CONV_3D_TRANSPOSE: {
auto params = safe_allocator.Allocate();
@@ -793,17 +788,69 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_VAR_HANDLE: {
- auto params = safe_allocator.Allocate();
+ case BuiltinOperator_MULTINOMIAL: {
+      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
- params->container = nullptr;
- params->shared_name = nullptr;
- if (const auto* var_handle_params =
- op->builtin_options_as_VarHandleOptions()) {
- if (var_handle_params->container())
- params->container = var_handle_params->container()->c_str();
- if (var_handle_params->shared_name())
- params->shared_name = var_handle_params->shared_name()->c_str();
+ if (const auto* multinomial_params =
+ op->builtin_options_as_RandomOptions()) {
+ params->seed = multinomial_params->seed();
+ params->seed2 = multinomial_params->seed2();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_RANDOM_STANDARD_NORMAL: {
+      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* random_std_normal_params =
+ op->builtin_options_as_RandomOptions()) {
+ params->seed = random_std_normal_params->seed();
+ params->seed2 = random_std_normal_params->seed2();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_BUCKETIZE: {
+      auto params = safe_allocator.Allocate<TfLiteBucketizeParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* bucketize_params =
+ op->builtin_options_as_BucketizeOptions()) {
+        const flatbuffers::Vector<float>* boundaries =
+ bucketize_params->boundaries();
+ if (boundaries == nullptr) {
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "boundaries array not provided for operation 'bucketize'.\n");
+ return kTfLiteError;
+ }
+ params->num_boundaries = boundaries->size();
+ if (boundaries->data() == nullptr) {
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "boundaries.data() returned nullptr for "
+ "operation 'bucketize'.\n");
+ return kTfLiteError;
+ }
+ params->boundaries = boundaries->data();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_RANDOM_UNIFORM: {
+      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* random_uniform_params =
+ op->builtin_options_as_RandomOptions()) {
+ params->seed = random_uniform_params->seed();
+ params->seed2 = random_uniform_params->seed2();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_GELU: {
+      auto params = safe_allocator.Allocate<TfLiteGeluParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) {
+ params->approximate = gelu_params->approximate();
}
*builtin_data = params.release();
return kTfLiteOk;
@@ -844,8 +891,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
- case BuiltinOperator_READ_VARIABLE:
- case BuiltinOperator_ASSIGN_VARIABLE:
case BuiltinOperator_BROADCAST_ARGS:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
@@ -1003,6 +1048,14 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1032,6 +1085,33 @@ TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
return kTfLiteOk;
}
+TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteCallOnceParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteCallOnceParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const CallOnceOptions* schema_params =
+ op->builtin_options_as_CallOnceOptions();
+
+ if (schema_params != nullptr) {
+ params->init_subgraph_index = schema_params->init_subgraph_index();
+
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
@@ -1541,6 +1621,32 @@ TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
return kTfLiteOk;
}
+TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteMirrorPaddingParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const MirrorPadOptions* schema_params =
+ op->builtin_options_as_MirrorPadOptions();
+
+ if (schema_params != nullptr) {
+ params->mode = ConvertMirrorPadding(schema_params->mode());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
@@ -1676,6 +1782,14 @@ TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
return kTfLiteOk;
}
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
@@ -1856,6 +1970,14 @@ TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
return kTfLiteOk;
}
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
@@ -1962,6 +2084,29 @@ TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
+TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  auto params =
+      safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* seq_lstm_params =
+ op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
+ params->activation =
+ ConvertActivation(seq_lstm_params->fused_activation_function());
+ params->cell_clip = seq_lstm_params->cell_clip();
+ params->proj_clip = seq_lstm_params->proj_clip();
+ params->time_major = seq_lstm_params->time_major();
+ params->asymmetric_quantize_inputs =
+ seq_lstm_params->asymmetric_quantize_inputs();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
@@ -2161,6 +2306,37 @@ TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
return kTfLiteOk;
}
+TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteVarHandleParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteVarHandleParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const VarHandleOptions* schema_params =
+ op->builtin_options_as_VarHandleOptions();
+
+ if (schema_params != nullptr) {
+ if (schema_params->container()) {
+ params->container = schema_params->container()->c_str();
+ }
+ if (schema_params->shared_name()) {
+ params->shared_name = schema_params->shared_name()->c_str();
+ }
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h b/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h
similarity index 91%
rename from code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h
rename to code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h
index 263a839b..8cf889d8 100644
--- a/code/components/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h
+++ b/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h
@@ -1,4 +1,4 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -84,6 +84,11 @@ TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
+TfLiteStatus ParseAssignVariable(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
@@ -93,6 +98,10 @@ TfLiteStatus ParseBatchToSpaceNd(const Operator* op,
BuiltinDataAllocator* allocator,
void** builtin_data);
+TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -229,6 +238,10 @@ TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
+TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -261,6 +274,11 @@ TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
+TfLiteStatus ParseReadVariable(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -295,6 +313,9 @@ TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
+TfLiteStatus ParseSlice(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
@@ -349,6 +370,15 @@ TfLiteStatus ParseTransposeConv(const Operator* op,
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
+TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.cc b/code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/core/api/op_resolver.cc
rename to code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.cc
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h b/code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.h
similarity index 82%
rename from code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h
rename to code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.h
index 471db813..49ac778e 100644
--- a/code/components/tfmicro/tensorflow/lite/core/api/op_resolver.h
+++ b/code/components/tflite-lib/tensorflow/lite/core/api/op_resolver.h
@@ -15,6 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
+#include <functional>
#include
#include
@@ -36,15 +37,26 @@ class OpResolver {
virtual const TfLiteRegistration* FindOp(const char* op,
int version) const = 0;
+ using TfLiteDelegatePtrVector =
+ std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
// Returns optional delegates for resolving and handling ops in the flatbuffer
// model. This may be used in addition to the standard TfLiteRegistration
// lookup for graph resolution.
- using TfLiteDelegatePtrVector =
- std::vector>;
+ // WARNING: This API is deprecated, GetDelegateCreators is preferred.
virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
- return TfLiteDelegatePtrVector();
+ return {};
}
+ // Represent a function that creates a TfLite delegate instance.
+ using TfLiteDelegateCreator =
+ std::function<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>(
+ int /*num_threads*/)>;
+ using TfLiteDelegateCreators = std::vector;
+ // Returns a vector of delegate creators to create optional delegates for
+ // resolving and handling ops in the flatbuffer model. This may be used in
+ // addition to the standard TfLiteRegistration lookup for graph resolution.
+ virtual TfLiteDelegateCreators GetDelegateCreators() const { return {}; }
+
virtual ~OpResolver() {}
private:
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/tensor_utils.cc b/code/components/tflite-lib/tensorflow/lite/core/api/tensor_utils.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/core/api/tensor_utils.cc
rename to code/components/tflite-lib/tensorflow/lite/core/api/tensor_utils.cc
diff --git a/code/components/tfmicro/tensorflow/lite/core/api/tensor_utils.h b/code/components/tflite-lib/tensorflow/lite/core/api/tensor_utils.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/core/api/tensor_utils.h
rename to code/components/tflite-lib/tensorflow/lite/core/api/tensor_utils.h
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/bits.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/bits.h
new file mode 100644
index 00000000..04b3ba6f
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/bits.h
@@ -0,0 +1,102 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
+
+#ifdef __cplusplus
+#include <cstdint>
+
+extern "C" {
+#endif
+
+static inline int CountLeadingZeros32Slow(uint64_t n) {
+ int zeroes = 28;
+ if (n >> 16) zeroes -= 16, n >>= 16;
+ if (n >> 8) zeroes -= 8, n >>= 8;
+ if (n >> 4) zeroes -= 4, n >>= 4;
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+static inline int CountLeadingZeros32(uint32_t n) {
+#if defined(_MSC_VER)
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse(&result, n)) {
+ return 31 - result;
+ }
+ return 32;
+#elif defined(__GNUC__)
+
+ // Handle 0 as a special case because __builtin_clz(0) is undefined.
+ if (n == 0) {
+ return 32;
+ }
+ return __builtin_clz(n);
+#else
+ return CountLeadingZeros32Slow(n);
+#endif
+}
+
+static inline int MostSignificantBit32(uint32_t n) {
+ return 32 - CountLeadingZeros32(n);
+}
+
+static inline int CountLeadingZeros64Slow(uint64_t n) {
+ int zeroes = 60;
+ if (n >> 32) zeroes -= 32, n >>= 32;
+ if (n >> 16) zeroes -= 16, n >>= 16;
+ if (n >> 8) zeroes -= 8, n >>= 8;
+ if (n >> 4) zeroes -= 4, n >>= 4;
+ return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
+}
+
+static inline int CountLeadingZeros64(uint64_t n) {
+#if defined(_MSC_VER) && defined(_M_X64)
+ // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if (_BitScanReverse64(&result, n)) {
+ return 63 - result;
+ }
+ return 64;
+#elif defined(_MSC_VER)
+ // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
+ unsigned long result = 0; // NOLINT(runtime/int)
+ if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
+ return 31 - result;
+ }
+ if (_BitScanReverse(&result, n)) {
+ return 63 - result;
+ }
+ return 64;
+#elif defined(__GNUC__)
+
+ // Handle 0 as a special case because __builtin_clzll(0) is undefined.
+ if (n == 0) {
+ return 64;
+ }
+ return __builtin_clzll(n);
+#else
+ return CountLeadingZeros64Slow(n);
+#endif
+}
+
+static inline int MostSignificantBit64(uint64_t n) {
+ return 64 - CountLeadingZeros64(n);
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.cc b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.cc
new file mode 100644
index 00000000..62442fba
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.cc
@@ -0,0 +1,52 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"
+
+#include <string.h>
+
+
+void FftCompute(struct FftState* state, const int16_t* input,
+ int input_scale_shift) {
+ const size_t input_size = state->input_size;
+ const size_t fft_size = state->fft_size;
+
+ int16_t* fft_input = state->input;
+ // First, scale the input by the given shift.
+ size_t i;
+ for (i = 0; i < input_size; ++i) {
+ fft_input[i] = static_cast<int16_t>(static_cast<uint16_t>(input[i])
+ << input_scale_shift);
+ }
+ // Zero out whatever else remains in the top part of the input.
+ for (; i < fft_size; ++i) {
+ fft_input[i] = 0;
+ }
+
+ // Apply the FFT.
+ kissfft_fixed16::kiss_fftr(
+ reinterpret_cast<kissfft_fixed16::kiss_fftr_cfg>(state->scratch),
+ state->input,
+ reinterpret_cast<kissfft_fixed16::kiss_fft_cpx*>(state->output));
+}
+
+void FftInit(struct FftState* state) {
+ // All the initialization is done in FftPopulateState()
+}
+
+void FftReset(struct FftState* state) {
+ memset(state->input, 0, state->fft_size * sizeof(*state->input));
+ memset(state->output, 0, (state->fft_size / 2 + 1) * sizeof(*state->output));
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.h
new file mode 100644
index 00000000..aaffa69d
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct complex_int16_t {
+ int16_t real;
+ int16_t imag;
+};
+
+struct FftState {
+ int16_t* input;
+ struct complex_int16_t* output;
+ size_t fft_size;
+ size_t input_size;
+ void* scratch;
+ size_t scratch_size;
+};
+
+void FftCompute(struct FftState* state, const int16_t* input,
+ int input_scale_shift);
+
+void FftInit(struct FftState* state);
+
+void FftReset(struct FftState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc
new file mode 100644
index 00000000..81efe14d
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc
@@ -0,0 +1,69 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h"
+
+#include <stdio.h>
+
+int FftPopulateState(struct FftState* state, size_t input_size) {
+ state->input_size = input_size;
+ state->fft_size = 1;
+ while (state->fft_size < state->input_size) {
+ state->fft_size <<= 1;
+ }
+
+ state->input = reinterpret_cast<int16_t*>(
+ malloc(state->fft_size * sizeof(*state->input)));
+ if (state->input == nullptr) {
+ fprintf(stderr, "Failed to alloc fft input buffer\n");
+ return 0;
+ }
+
+ state->output = reinterpret_cast<struct complex_int16_t*>(
+ malloc((state->fft_size / 2 + 1) * sizeof(*state->output) * 2));
+ if (state->output == nullptr) {
+ fprintf(stderr, "Failed to alloc fft output buffer\n");
+ return 0;
+ }
+
+ // Ask kissfft how much memory it wants.
+ size_t scratch_size = 0;
+ kissfft_fixed16::kiss_fftr_cfg kfft_cfg = kissfft_fixed16::kiss_fftr_alloc(
+ state->fft_size, 0, nullptr, &scratch_size);
+ if (kfft_cfg != nullptr) {
+ fprintf(stderr, "Kiss memory sizing failed.\n");
+ return 0;
+ }
+ state->scratch = malloc(scratch_size);
+ if (state->scratch == nullptr) {
+ fprintf(stderr, "Failed to alloc fft scratch buffer\n");
+ return 0;
+ }
+ state->scratch_size = scratch_size;
+ // Let kissfft configure the scratch space we just allocated
+ kfft_cfg = kissfft_fixed16::kiss_fftr_alloc(state->fft_size, 0,
+ state->scratch, &scratch_size);
+ if (kfft_cfg != state->scratch) {
+ fprintf(stderr, "Kiss memory preallocation strategy failed.\n");
+ return 0;
+ }
+ return 1;
+}
+
+void FftFreeStateContents(struct FftState* state) {
+ free(state->input);
+ free(state->output);
+ free(state->scratch);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.h
new file mode 100644
index 00000000..6a471301
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/fft_util.h
@@ -0,0 +1,34 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Prepares an FFT for the given input size.
+int FftPopulateState(struct FftState* state, size_t input_size);
+
+// Frees any allocated buffers.
+void FftFreeStateContents(struct FftState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank.c
new file mode 100644
index 00000000..80f8738f
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank.c
@@ -0,0 +1,134 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+
+#include <string.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
+ struct complex_int16_t* fft_output,
+ int32_t* energy) {
+ const int end_index = state->end_index;
+ int i;
+ energy += state->start_index;
+ fft_output += state->start_index;
+ for (i = state->start_index; i < end_index; ++i) {
+ const int32_t real = fft_output->real;
+ const int32_t imag = fft_output->imag;
+ fft_output++;
+ const uint32_t mag_squared = (real * real) + (imag * imag);
+ *energy++ = mag_squared;
+ }
+}
+
+void FilterbankAccumulateChannels(struct FilterbankState* state,
+ const int32_t* energy) {
+ uint64_t* work = state->work;
+ uint64_t weight_accumulator = 0;
+ uint64_t unweight_accumulator = 0;
+
+ const int16_t* channel_frequency_starts = state->channel_frequency_starts;
+ const int16_t* channel_weight_starts = state->channel_weight_starts;
+ const int16_t* channel_widths = state->channel_widths;
+
+ int num_channels_plus_1 = state->num_channels + 1;
+ int i;
+ for (i = 0; i < num_channels_plus_1; ++i) {
+ const int32_t* magnitudes = energy + *channel_frequency_starts++;
+ const int16_t* weights = state->weights + *channel_weight_starts;
+ const int16_t* unweights = state->unweights + *channel_weight_starts++;
+ const int width = *channel_widths++;
+ int j;
+ for (j = 0; j < width; ++j) {
+ weight_accumulator += *weights++ * ((uint64_t)*magnitudes);
+ unweight_accumulator += *unweights++ * ((uint64_t)*magnitudes);
+ ++magnitudes;
+ }
+ *work++ = weight_accumulator;
+ weight_accumulator = unweight_accumulator;
+ unweight_accumulator = 0;
+ }
+}
+
+static uint16_t Sqrt32(uint32_t num) {
+ if (num == 0) {
+ return 0;
+ }
+ uint32_t res = 0;
+ int max_bit_number = 32 - MostSignificantBit32(num);
+ max_bit_number |= 1;
+ uint32_t bit = 1U << (31 - max_bit_number);
+ int iterations = (31 - max_bit_number) / 2 + 1;
+ while (iterations--) {
+ if (num >= res + bit) {
+ num -= res + bit;
+ res = (res >> 1U) + bit;
+ } else {
+ res >>= 1U;
+ }
+ bit >>= 2U;
+ }
+ // Do rounding - if we have the bits.
+ if (num > res && res != 0xFFFF) {
+ ++res;
+ }
+ return res;
+}
+
+static uint32_t Sqrt64(uint64_t num) {
+ // Take a shortcut and just use 32 bit operations if the upper word is all
+ // clear. This will cause a slight off by one issue for numbers close to 2^32,
+ // but it probably isn't going to matter (and gives us a big performance win).
+ if ((num >> 32) == 0) {
+ return Sqrt32((uint32_t)num);
+ }
+ uint64_t res = 0;
+ int max_bit_number = 64 - MostSignificantBit64(num);
+ max_bit_number |= 1;
+ uint64_t bit = 1ULL << (63 - max_bit_number);
+ int iterations = (63 - max_bit_number) / 2 + 1;
+ while (iterations--) {
+ if (num >= res + bit) {
+ num -= res + bit;
+ res = (res >> 1U) + bit;
+ } else {
+ res >>= 1U;
+ }
+ bit >>= 2U;
+ }
+ // Do rounding - if we have the bits.
+ if (num > res && res != 0xFFFFFFFFLL) {
+ ++res;
+ }
+ return res;
+}
+
+uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift) {
+ const int num_channels = state->num_channels;
+ const uint64_t* work = state->work + 1;
+ // Reuse the work buffer since we're fine clobbering it at this point to hold
+ // the output.
+ uint32_t* output = (uint32_t*)state->work;
+ int i;
+ for (i = 0; i < num_channels; ++i) {
+ *output++ = Sqrt64(*work++) >> scale_down_shift;
+ }
+ return (uint32_t*)state->work;
+}
+
+void FilterbankReset(struct FilterbankState* state) {
+ memset(state->work, 0, (state->num_channels + 1) * sizeof(*state->work));
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank.h
new file mode 100644
index 00000000..1e6d3885
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank.h
@@ -0,0 +1,63 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+
+#define kFilterbankBits 12
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FilterbankState {
+ int num_channels;
+ int start_index;
+ int end_index;
+ int16_t* channel_frequency_starts;
+ int16_t* channel_weight_starts;
+ int16_t* channel_widths;
+ int16_t* weights;
+ int16_t* unweights;
+ uint64_t* work;
+};
+
+// Converts the relevant complex values of an FFT output into energy (the
+// square magnitude).
+void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
+ struct complex_int16_t* fft_output,
+ int32_t* energy);
+
+// Computes the mel-scale filterbank on the given energy array. Output is cached
+// internally - to fetch it, you need to call FilterbankSqrt.
+void FilterbankAccumulateChannels(struct FilterbankState* state,
+ const int32_t* energy);
+
+// Applies an integer square root to the 64 bit intermediate values of the
+// filterbank, and returns a pointer to them. Memory will be invalidated the
+// next time FilterbankAccumulateChannels is called.
+uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift);
+
+void FilterbankReset(struct FilterbankState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
new file mode 100644
index 00000000..f18ebf54
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c
@@ -0,0 +1,220 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
+
+#include
+#include
+#include
+
+#define kFilterbankIndexAlignment 4
+#define kFilterbankChannelBlockSize 4
+
+void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config) {
+ config->num_channels = 32;
+ config->lower_band_limit = 125.0f;
+ config->upper_band_limit = 7500.0f;
+ config->output_scale_shift = 7;
+}
+
+static float FreqToMel(float freq) { return 1127.0 * log1p(freq / 700.0); }
+
+static void CalculateCenterFrequencies(const int num_channels,
+ const float lower_frequency_limit,
+ const float upper_frequency_limit,
+ float* center_frequencies) {
+ assert(lower_frequency_limit >= 0.0f);
+ assert(upper_frequency_limit > lower_frequency_limit);
+
+ const float mel_low = FreqToMel(lower_frequency_limit);
+ const float mel_hi = FreqToMel(upper_frequency_limit);
+ const float mel_span = mel_hi - mel_low;
+ const float mel_spacing = mel_span / ((float)num_channels);
+ int i;
+ for (i = 0; i < num_channels; ++i) {
+ center_frequencies[i] = mel_low + (mel_spacing * (i + 1));
+ }
+}
+
+static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight,
+ int16_t* unweight) {
+ *weight = floor(float_weight * (1 << kFilterbankBits) + 0.5);
+ *unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5);
+}
+
+int FilterbankPopulateState(const struct FilterbankConfig* config,
+ struct FilterbankState* state, int sample_rate,
+ int spectrum_size) {
+ state->num_channels = config->num_channels;
+ const int num_channels_plus_1 = config->num_channels + 1;
+
+ // How should we align things to index counts given the byte alignment?
+ const int index_alignment =
+ (kFilterbankIndexAlignment < sizeof(int16_t)
+ ? 1
+ : kFilterbankIndexAlignment / sizeof(int16_t));
+
+ state->channel_frequency_starts =
+ malloc(num_channels_plus_1 * sizeof(*state->channel_frequency_starts));
+ state->channel_weight_starts =
+ malloc(num_channels_plus_1 * sizeof(*state->channel_weight_starts));
+ state->channel_widths =
+ malloc(num_channels_plus_1 * sizeof(*state->channel_widths));
+ state->work = malloc(num_channels_plus_1 * sizeof(*state->work));
+
+ float* center_mel_freqs =
+ malloc(num_channels_plus_1 * sizeof(*center_mel_freqs));
+ int16_t* actual_channel_starts =
+ malloc(num_channels_plus_1 * sizeof(*actual_channel_starts));
+ int16_t* actual_channel_widths =
+ malloc(num_channels_plus_1 * sizeof(*actual_channel_widths));
+
+ if (state->channel_frequency_starts == NULL ||
+ state->channel_weight_starts == NULL || state->channel_widths == NULL ||
+ center_mel_freqs == NULL || actual_channel_starts == NULL ||
+ actual_channel_widths == NULL) {
+ free(center_mel_freqs);
+ free(actual_channel_starts);
+ free(actual_channel_widths);
+ fprintf(stderr, "Failed to allocate channel buffers\n");
+ return 0;
+ }
+
+ CalculateCenterFrequencies(num_channels_plus_1, config->lower_band_limit,
+ config->upper_band_limit, center_mel_freqs);
+
+ // Always exclude DC.
+ const float hz_per_sbin = 0.5 * sample_rate / ((float)spectrum_size - 1);
+ state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin;
+ state->end_index = 0; // Initialized to zero here, but actually set below.
+
+ // For each channel, we need to figure out what frequencies belong to it, and
+ // how much padding we need to add so that we can efficiently multiply the
+ // weights and unweights for accumulation. To simplify the multiplication
+ // logic, all channels will have some multiplication to do (even if there are
+ // no frequencies that accumulate to that channel) - they will be directed to
+ // a set of zero weights.
+ int chan_freq_index_start = state->start_index;
+ int weight_index_start = 0;
+ int needs_zeros = 0;
+
+ int chan;
+ for (chan = 0; chan < num_channels_plus_1; ++chan) {
+ // Keep jumping frequencies until we overshoot the bound on this channel.
+ int freq_index = chan_freq_index_start;
+ while (FreqToMel((freq_index)*hz_per_sbin) <= center_mel_freqs[chan]) {
+ ++freq_index;
+ }
+
+ const int width = freq_index - chan_freq_index_start;
+ actual_channel_starts[chan] = chan_freq_index_start;
+ actual_channel_widths[chan] = width;
+
+ if (width == 0) {
+ // This channel doesn't actually get anything from the frequencies, it's
+ // always zero. We need then to insert some 'zero' weights into the
+ // output, and just redirect this channel to do a single multiplication at
+ // this point. For simplicity, the zeros are placed at the beginning of
+ // the weights arrays, so we have to go and update all the other
+ // weight_starts to reflect this shift (but only once).
+ state->channel_frequency_starts[chan] = 0;
+ state->channel_weight_starts[chan] = 0;
+ state->channel_widths[chan] = kFilterbankChannelBlockSize;
+ if (!needs_zeros) {
+ needs_zeros = 1;
+ int j;
+ for (j = 0; j < chan; ++j) {
+ state->channel_weight_starts[j] += kFilterbankChannelBlockSize;
+ }
+ weight_index_start += kFilterbankChannelBlockSize;
+ }
+ } else {
+ // How far back do we need to go to ensure that we have the proper
+ // alignment?
+ const int aligned_start =
+ (chan_freq_index_start / index_alignment) * index_alignment;
+ const int aligned_width = (chan_freq_index_start - aligned_start + width);
+ const int padded_width =
+ (((aligned_width - 1) / kFilterbankChannelBlockSize) + 1) *
+ kFilterbankChannelBlockSize;
+
+ state->channel_frequency_starts[chan] = aligned_start;
+ state->channel_weight_starts[chan] = weight_index_start;
+ state->channel_widths[chan] = padded_width;
+ weight_index_start += padded_width;
+ }
+ chan_freq_index_start = freq_index;
+ }
+
+ // Allocate the two arrays to store the weights - weight_index_start contains
+ // the index of what would be the next set of weights that we would need to
+ // add, so that's how many weights we need to allocate.
+ state->weights = calloc(weight_index_start, sizeof(*state->weights));
+ state->unweights = calloc(weight_index_start, sizeof(*state->unweights));
+
+ // If the alloc failed, we also need to nuke the arrays.
+ if (state->weights == NULL || state->unweights == NULL) {
+ free(center_mel_freqs);
+ free(actual_channel_starts);
+ free(actual_channel_widths);
+ fprintf(stderr, "Failed to allocate weights or unweights\n");
+ return 0;
+ }
+
+ // Next pass, compute all the weights. Since everything has been memset to
+ // zero, we only need to fill in the weights that correspond to some frequency
+ // for a channel.
+ const float mel_low = FreqToMel(config->lower_band_limit);
+ for (chan = 0; chan < num_channels_plus_1; ++chan) {
+ int frequency = actual_channel_starts[chan];
+ const int num_frequencies = actual_channel_widths[chan];
+ const int frequency_offset =
+ frequency - state->channel_frequency_starts[chan];
+ const int weight_start = state->channel_weight_starts[chan];
+ const float denom_val = (chan == 0) ? mel_low : center_mel_freqs[chan - 1];
+
+ int j;
+ for (j = 0; j < num_frequencies; ++j, ++frequency) {
+ const float weight =
+ (center_mel_freqs[chan] - FreqToMel(frequency * hz_per_sbin)) /
+ (center_mel_freqs[chan] - denom_val);
+
+ // Make the float into an integer for the weights (and unweights).
+ const int weight_index = weight_start + frequency_offset + j;
+ QuantizeFilterbankWeights(weight, state->weights + weight_index,
+ state->unweights + weight_index);
+ }
+ if (frequency > state->end_index) {
+ state->end_index = frequency;
+ }
+ }
+
+ free(center_mel_freqs);
+ free(actual_channel_starts);
+ free(actual_channel_widths);
+ if (state->end_index >= spectrum_size) {
+ fprintf(stderr, "Filterbank end_index is above spectrum size.\n");
+ return 0;
+ }
+ return 1;
+}
+
+void FilterbankFreeStateContents(struct FilterbankState* state) {
+ free(state->channel_frequency_starts);
+ free(state->channel_weight_starts);
+ free(state->channel_widths);
+ free(state->weights);
+ free(state->unweights);
+ free(state->work);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h
new file mode 100644
index 00000000..781d1024
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FilterbankConfig {
+ // number of frequency channel buckets for filterbank
+ int num_channels;
+ // maximum frequency to include
+ float upper_band_limit;
+ // minimum frequency to include
+ float lower_band_limit;
+ // unused
+ int output_scale_shift;
+};
+
+// Fills the FilterbankConfig with "sane" defaults.
+void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);
+
+// Allocates any buffers.
+int FilterbankPopulateState(const struct FilterbankConfig* config,
+ struct FilterbankState* state, int sample_rate,
+ int spectrum_size);
+
+// Frees any allocated buffers.
+void FilterbankFreeStateContents(struct FilterbankState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend.c
new file mode 100644
index 00000000..9de2a879
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend.c
@@ -0,0 +1,72 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
+ const int16_t* samples,
+ size_t num_samples,
+ size_t* num_samples_read) {
+ struct FrontendOutput output;
+ output.values = NULL;
+ output.size = 0;
+
+ // Try to apply the window - if it fails, return and wait for more data.
+ if (!WindowProcessSamples(&state->window, samples, num_samples,
+ num_samples_read)) {
+ return output;
+ }
+
+ // Apply the FFT to the window's output (and scale it so that the fixed point
+ // FFT can have as much resolution as possible).
+ int input_shift =
+ 15 - MostSignificantBit32(state->window.max_abs_output_value);
+ FftCompute(&state->fft, state->window.output, input_shift);
+
+  // We can re-use the fft's output buffer to hold the energy.
+ int32_t* energy = (int32_t*)state->fft.output;
+
+ FilterbankConvertFftComplexToEnergy(&state->filterbank, state->fft.output,
+ energy);
+
+ FilterbankAccumulateChannels(&state->filterbank, energy);
+ uint32_t* scaled_filterbank = FilterbankSqrt(&state->filterbank, input_shift);
+
+ // Apply noise reduction.
+ NoiseReductionApply(&state->noise_reduction, scaled_filterbank);
+
+ if (state->pcan_gain_control.enable_pcan) {
+ PcanGainControlApply(&state->pcan_gain_control, scaled_filterbank);
+ }
+
+ // Apply the log and scale.
+ int correction_bits =
+ MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
+ uint16_t* logged_filterbank =
+ LogScaleApply(&state->log_scale, scaled_filterbank,
+ state->filterbank.num_channels, correction_bits);
+
+ output.size = state->filterbank.num_channels;
+ output.values = logged_filterbank;
+ return output;
+}
+
+void FrontendReset(struct FrontendState* state) {
+ WindowReset(&state->window);
+ FftReset(&state->fft);
+ FilterbankReset(&state->filterbank);
+ NoiseReductionReset(&state->noise_reduction);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend.h
new file mode 100644
index 00000000..883df5fd
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend.h
@@ -0,0 +1,64 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FrontendState {
+ struct WindowState window;
+ struct FftState fft;
+ struct FilterbankState filterbank;
+ struct NoiseReductionState noise_reduction;
+ struct PcanGainControlState pcan_gain_control;
+ struct LogScaleState log_scale;
+};
+
+struct FrontendOutput {
+ const uint16_t* values;
+ size_t size;
+};
+
+// Main entry point to processing frontend samples. Updates num_samples_read to
+// contain the number of samples that have been consumed from the input array.
+// Returns a struct containing the generated output. If not enough samples were
+// added to generate a feature vector, the returned size will be 0 and the
+// values pointer will be NULL. Note that the output pointer will be invalidated
+// as soon as FrontendProcessSamples is called again, so copy the contents
+// elsewhere if you need to use them later.
+struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
+ const int16_t* samples,
+ size_t num_samples,
+ size_t* num_samples_read);
+
+void FrontendReset(struct FrontendState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c
new file mode 100644
index 00000000..27224f6d
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c
@@ -0,0 +1,85 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+void FrontendFillConfigWithDefaults(struct FrontendConfig* config) {
+ WindowFillConfigWithDefaults(&config->window);
+ FilterbankFillConfigWithDefaults(&config->filterbank);
+ NoiseReductionFillConfigWithDefaults(&config->noise_reduction);
+ PcanGainControlFillConfigWithDefaults(&config->pcan_gain_control);
+ LogScaleFillConfigWithDefaults(&config->log_scale);
+}
+
+int FrontendPopulateState(const struct FrontendConfig* config,
+ struct FrontendState* state, int sample_rate) {
+ memset(state, 0, sizeof(*state));
+
+ if (!WindowPopulateState(&config->window, &state->window, sample_rate)) {
+ fprintf(stderr, "Failed to populate window state\n");
+ return 0;
+ }
+
+ if (!FftPopulateState(&state->fft, state->window.size)) {
+ fprintf(stderr, "Failed to populate fft state\n");
+ return 0;
+ }
+ FftInit(&state->fft);
+
+ if (!FilterbankPopulateState(&config->filterbank, &state->filterbank,
+ sample_rate, state->fft.fft_size / 2 + 1)) {
+ fprintf(stderr, "Failed to populate filterbank state\n");
+ return 0;
+ }
+
+ if (!NoiseReductionPopulateState(&config->noise_reduction,
+ &state->noise_reduction,
+ state->filterbank.num_channels)) {
+ fprintf(stderr, "Failed to populate noise reduction state\n");
+ return 0;
+ }
+
+ int input_correction_bits =
+ MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
+ if (!PcanGainControlPopulateState(
+ &config->pcan_gain_control, &state->pcan_gain_control,
+ state->noise_reduction.estimate, state->filterbank.num_channels,
+ state->noise_reduction.smoothing_bits, input_correction_bits)) {
+ fprintf(stderr, "Failed to populate pcan gain control state\n");
+ return 0;
+ }
+
+ if (!LogScalePopulateState(&config->log_scale, &state->log_scale)) {
+ fprintf(stderr, "Failed to populate log scale state\n");
+ return 0;
+ }
+
+ FrontendReset(state);
+
+ // All good, return a true value.
+ return 1;
+}
+
+void FrontendFreeStateContents(struct FrontendState* state) {
+ WindowFreeStateContents(&state->window);
+ FftFreeStateContents(&state->fft);
+ FilterbankFreeStateContents(&state->filterbank);
+ NoiseReductionFreeStateContents(&state->noise_reduction);
+ PcanGainControlFreeStateContents(&state->pcan_gain_control);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h
new file mode 100644
index 00000000..895ce6cd
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h
@@ -0,0 +1,52 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct FrontendConfig {
+ struct WindowConfig window;
+ struct FilterbankConfig filterbank;
+ struct NoiseReductionConfig noise_reduction;
+ struct PcanGainControlConfig pcan_gain_control;
+ struct LogScaleConfig log_scale;
+};
+
+// Fills the frontendConfig with "sane" defaults.
+void FrontendFillConfigWithDefaults(struct FrontendConfig* config);
+
+// Allocates any buffers.
+int FrontendPopulateState(const struct FrontendConfig* config,
+ struct FrontendState* state, int sample_rate);
+
+// Frees any allocated buffers.
+void FrontendFreeStateContents(struct FrontendState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h
new file mode 100644
index 00000000..33556dab
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h
@@ -0,0 +1,48 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_COMMON_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_COMMON_H_
+
+// This header file should be included in all variants of kiss_fft_$type.{h,cc}
+// so that their sub-included source files do not mistakenly wrap libc header
+// files within their kissfft_$type namespaces.
+// E.g, This header avoids kissfft_int16.h containing:
+// namespace kiss_fft_int16 {
+// #include "kiss_fft.h"
+// }
+// where kiss_fft.h contains:
+//   #include <math.h>
+//
+// TRICK: By including the following header files here, their preprocessor
+// header guards prevent them being re-defined inside of the kiss_fft_$type
+// namespaces declared within the kiss_fft_$type.{h,cc} sources.
+// Note that the original kiss_fft*.h files are untouched since they
+// may be used in libraries that include them directly.
+
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef FIXED_POINT
+#include <sys/types.h>
+#endif
+
+#ifdef USE_SIMD
+#include <xmmintrin.h>
+#endif
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_COMMON_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc
new file mode 100644
index 00000000..55457f4d
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc
@@ -0,0 +1,8 @@
+#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h"
+
+#define FIXED_POINT 16
+namespace kissfft_fixed16 {
+#include "kiss_fft.c"
+#include "tools/kiss_fftr.c"
+} // namespace kissfft_fixed16
+#undef FIXED_POINT
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h
new file mode 100644
index 00000000..9abe686b
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.h
@@ -0,0 +1,34 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h"
+
+// Wrap 16-bit kiss fft in its own namespace. Enables us to link an application
+// with different kiss fft resolutions (16/32 bit integer, float, double)
+// without getting a linker error.
+#define FIXED_POINT 16
+namespace kissfft_fixed16 {
+#include "kiss_fft.h"
+#include "tools/kiss_fftr.h"
+} // namespace kissfft_fixed16
+#undef FIXED_POINT
+#undef kiss_fft_scalar
+#undef KISS_FFT_H
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_KISS_FFT_INT16_H_
+
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_lut.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_lut.c
new file mode 100644
index 00000000..f59618e0
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_lut.c
@@ -0,0 +1,30 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
+const uint16_t kLogLut[]
+#ifndef _MSC_VER
+ __attribute__((aligned(4)))
+#endif  // _MSC_VER
+ = {0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163,
+ 2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848,
+ 3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934,
+ 5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
+ 5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633,
+ 5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370,
+ 5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762,
+ 4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
+ 3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659,
+ 2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224,
+ 1094, 963, 830, 695, 559, 421, 282, 142, 0, 0};
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_lut.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_lut.h
new file mode 100644
index 00000000..b2448a32
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_lut.h
@@ -0,0 +1,40 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Number of segments in the log lookup table. The table will be kLogSegments+1
+// in length (with some padding).
+#define kLogSegments 128
+#define kLogSegmentsLog2 7
+
+// Scale used by lookup table.
+#define kLogScale 65536
+#define kLogScaleLog2 16
+#define kLogCoeff 45426
+
+extern const uint16_t kLogLut[];
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale.c
new file mode 100644
index 00000000..c27a50a6
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale.c
@@ -0,0 +1,83 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
+
+#define kuint16max 0x0000FFFF
+
+// The following functions implement integer logarithms of various sizes. The
+// approximation is calculated according to method described in
+// www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/
+// publicaciones/SPL2007/Log10-spl07.pdf
+// It first calculates log2 of the input and then converts it to natural
+// logarithm.
+
+static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) {
+ // Part 1
+ int32_t frac = x - (1LL << log2x);
+ if (log2x < kLogScaleLog2) {
+ frac <<= kLogScaleLog2 - log2x;
+ } else {
+ frac >>= log2x - kLogScaleLog2;
+ }
+ // Part 2
+ const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2);
+ const uint32_t seg_unit =
+ (((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2;
+
+ const int32_t c0 = kLogLut[base_seg];
+ const int32_t c1 = kLogLut[base_seg + 1];
+ const int32_t seg_base = seg_unit * base_seg;
+ const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2;
+ return frac + c0 + rel_pos;
+}
+
+static uint32_t Log(const uint32_t x, const uint32_t scale_shift) {
+ const uint32_t integer = MostSignificantBit32(x) - 1;
+ const uint32_t fraction = Log2FractionPart(x, integer);
+ const uint32_t log2 = (integer << kLogScaleLog2) + fraction;
+ const uint32_t round = kLogScale / 2;
+ const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2;
+ // Finally scale to our output scale
+ const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2;
+ return loge_scaled;
+}
+
+uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
+ int signal_size, int correction_bits) {
+ const int scale_shift = state->scale_shift;
+ uint16_t* output = (uint16_t*)signal;
+ uint16_t* ret = output;
+ int i;
+ for (i = 0; i < signal_size; ++i) {
+ uint32_t value = *signal++;
+ if (state->enable_log) {
+ if (correction_bits < 0) {
+ value >>= -correction_bits;
+ } else {
+ value <<= correction_bits;
+ }
+ if (value > 1) {
+ value = Log(value, scale_shift);
+ } else {
+ value = 0;
+ }
+ }
+ *output++ = (value < kuint16max) ? value : kuint16max;
+ }
+ return ret;
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale.h
new file mode 100644
index 00000000..a383f32f
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale.h
@@ -0,0 +1,39 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct LogScaleState {
+ int enable_log;
+ int scale_shift;
+};
+
+// Applies a fixed point logarithm to the signal and converts it to 16 bit. Note
+// that the signal array will be modified.
+uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
+ int signal_size, int correction_bits);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
diff --git a/code/components/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c
similarity index 55%
rename from code/components/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h
rename to code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c
index ce34426c..0e3dd1d1 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,11 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
-#ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
-#define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
+void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) {
+ config->enable_log = 1;
+ config->scale_shift = 6;
+}
-extern const unsigned char g_keyword_scrambled_model_data[];
-extern const unsigned int g_keyword_scrambled_model_data_length;
-
-#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_
+int LogScalePopulateState(const struct LogScaleConfig* config,
+ struct LogScaleState* state) {
+ state->enable_log = config->enable_log;
+ state->scale_shift = config->scale_shift;
+ return 1;
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h
new file mode 100644
index 00000000..11f7d9ee
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct LogScaleConfig {
+ // set to false (0) to disable this module
+ int enable_log;
+ // scale results by 2^(scale_shift)
+ int scale_shift;
+};
+
+// Populates the LogScaleConfig with "sane" default values.
+void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config);
+
+// Allocates any buffers.
+int LogScalePopulateState(const struct LogScaleConfig* config,
+ struct LogScaleState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c
new file mode 100644
index 00000000..16b30e66
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c
@@ -0,0 +1,51 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+
+#include <string.h>
+
+void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) {
+ int i;
+ for (i = 0; i < state->num_channels; ++i) {
+ const uint32_t smoothing =
+ ((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing;
+ const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing;
+
+ // Update the estimate of the noise.
+ const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits;
+ uint32_t estimate =
+ (((uint64_t)signal_scaled_up * smoothing) +
+ ((uint64_t)state->estimate[i] * one_minus_smoothing)) >>
+ kNoiseReductionBits;
+ state->estimate[i] = estimate;
+
+ // Make sure that we can't get a negative value for the signal - estimate.
+ if (estimate > signal_scaled_up) {
+ estimate = signal_scaled_up;
+ }
+
+ const uint32_t floor =
+ ((uint64_t)signal[i] * state->min_signal_remaining) >>
+ kNoiseReductionBits;
+ const uint32_t subtracted =
+ (signal_scaled_up - estimate) >> state->smoothing_bits;
+ const uint32_t output = subtracted > floor ? subtracted : floor;
+ signal[i] = output;
+ }
+}
+
+void NoiseReductionReset(struct NoiseReductionState* state) {
+ memset(state->estimate, 0, sizeof(*state->estimate) * state->num_channels);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h
new file mode 100644
index 00000000..46d3f52e
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h
@@ -0,0 +1,46 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
+
+#define kNoiseReductionBits 14
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct NoiseReductionState {
+ int smoothing_bits;
+ uint16_t even_smoothing;
+ uint16_t odd_smoothing;
+ uint16_t min_signal_remaining;
+ int num_channels;
+ uint32_t* estimate;
+};
+
+// Removes stationary noise from each channel of the signal using a low pass
+// filter.
+void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal);
+
+void NoiseReductionReset(struct NoiseReductionState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c
new file mode 100644
index 00000000..a6c9234e
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
+
+#include <stdio.h>
+
+void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) {
+ config->smoothing_bits = 10;
+ config->even_smoothing = 0.025;
+ config->odd_smoothing = 0.06;
+ config->min_signal_remaining = 0.05;
+}
+
+int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
+ struct NoiseReductionState* state,
+ int num_channels) {
+ state->smoothing_bits = config->smoothing_bits;
+ state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits);
+ state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits);
+ state->min_signal_remaining =
+ config->min_signal_remaining * (1 << kNoiseReductionBits);
+ state->num_channels = num_channels;
+ state->estimate = calloc(state->num_channels, sizeof(*state->estimate));
+ if (state->estimate == NULL) {
+ fprintf(stderr, "Failed to alloc estimate buffer\n");
+ return 0;
+ }
+ return 1;
+}
+
+void NoiseReductionFreeStateContents(struct NoiseReductionState* state) {
+ free(state->estimate);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h
new file mode 100644
index 00000000..fa555391
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct NoiseReductionConfig {
+ // scale the signal up by 2^(smoothing_bits) before reduction
+ int smoothing_bits;
+ // smoothing coefficient for even-numbered channels
+ float even_smoothing;
+ // smoothing coefficient for odd-numbered channels
+ float odd_smoothing;
+ // fraction of signal to preserve (1.0 disables this module)
+ float min_signal_remaining;
+};
+
+// Populates the NoiseReductionConfig with "sane" default values.
+void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config);
+
+// Allocates any buffers.
+int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
+ struct NoiseReductionState* state,
+ int num_channels);
+
+// Frees any allocated buffers.
+void NoiseReductionFreeStateContents(struct NoiseReductionState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c
new file mode 100644
index 00000000..22d58767
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c
@@ -0,0 +1,56 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+
+#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
+
+int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
+ if (x <= 2) {
+ return lut[x];
+ }
+
+ const int16_t interval = MostSignificantBit32(x);
+ lut += 4 * interval - 6;
+
+ const int16_t frac =
+ ((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) &
+ 0x3FF;
+
+ int32_t result = ((int32_t)lut[2] * frac) >> 5;
+ result += (int32_t)((uint32_t)lut[1] << 5);
+ result *= frac;
+ result = (result + (1 << 14)) >> 15;
+ result += lut[0];
+ return (int16_t)result;
+}
+
+uint32_t PcanShrink(const uint32_t x) {
+ if (x < (2 << kPcanSnrBits)) {
+ return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits);
+ } else {
+ return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits);
+ }
+}
+
+void PcanGainControlApply(struct PcanGainControlState* state,
+ uint32_t* signal) {
+ int i;
+ for (i = 0; i < state->num_channels; ++i) {
+ const uint32_t gain =
+ WideDynamicFunction(state->noise_estimate[i], state->gain_lut);
+ const uint32_t snr = ((uint64_t)signal[i] * gain) >> state->snr_shift;
+ signal[i] = PcanShrink(snr);
+ }
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h
new file mode 100644
index 00000000..3f6222be
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define kPcanSnrBits 12
+#define kPcanOutputBits 6
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Details at https://research.google/pubs/pub45911.pdf
+struct PcanGainControlState {
+ int enable_pcan;
+ uint32_t* noise_estimate;
+ int num_channels;
+ int16_t* gain_lut;
+ int32_t snr_shift;
+};
+
+int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
+
+uint32_t PcanShrink(const uint32_t x);
+
+void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c
new file mode 100644
index 00000000..e850d439
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c
@@ -0,0 +1,92 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
+
+#include <math.h>
+#include <stdio.h>
+
+#define kint16max 0x00007FFF
+
+void PcanGainControlFillConfigWithDefaults(
+ struct PcanGainControlConfig* config) {
+ config->enable_pcan = 0;
+ config->strength = 0.95;
+ config->offset = 80.0;
+ config->gain_bits = 21;
+}
+
+int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
+ int32_t input_bits, uint32_t x) {
+ const float x_as_float = ((float)x) / ((uint32_t)1 << input_bits);
+ const float gain_as_float =
+ ((uint32_t)1 << config->gain_bits) *
+ powf(x_as_float + config->offset, -config->strength);
+
+ if (gain_as_float > kint16max) {
+ return kint16max;
+ }
+ return (int16_t)(gain_as_float + 0.5f);
+}
+
+int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
+ struct PcanGainControlState* state,
+ uint32_t* noise_estimate,
+ const int num_channels,
+ const uint16_t smoothing_bits,
+ const int32_t input_correction_bits) {
+ state->enable_pcan = config->enable_pcan;
+ if (!state->enable_pcan) {
+ return 1;
+ }
+ state->noise_estimate = noise_estimate;
+ state->num_channels = num_channels;
+ state->gain_lut = malloc(kWideDynamicFunctionLUTSize * sizeof(int16_t));
+ if (state->gain_lut == NULL) {
+ fprintf(stderr, "Failed to allocate gain LUT\n");
+ return 0;
+ }
+ state->snr_shift = config->gain_bits - input_correction_bits - kPcanSnrBits;
+
+ const int32_t input_bits = smoothing_bits - input_correction_bits;
+ state->gain_lut[0] = PcanGainLookupFunction(config, input_bits, 0);
+ state->gain_lut[1] = PcanGainLookupFunction(config, input_bits, 1);
+ state->gain_lut -= 6;
+ int interval;
+ for (interval = 2; interval <= kWideDynamicFunctionBits; ++interval) {
+ const uint32_t x0 = (uint32_t)1 << (interval - 1);
+ const uint32_t x1 = x0 + (x0 >> 1);
+ const uint32_t x2 =
+ (interval == kWideDynamicFunctionBits) ? x0 + (x0 - 1) : 2 * x0;
+
+ const int16_t y0 = PcanGainLookupFunction(config, input_bits, x0);
+ const int16_t y1 = PcanGainLookupFunction(config, input_bits, x1);
+ const int16_t y2 = PcanGainLookupFunction(config, input_bits, x2);
+
+ const int32_t diff1 = (int32_t)y1 - y0;
+ const int32_t diff2 = (int32_t)y2 - y0;
+ const int32_t a1 = 4 * diff1 - diff2;
+ const int32_t a2 = diff2 - a1;
+
+ state->gain_lut[4 * interval] = y0;
+ state->gain_lut[4 * interval + 1] = (int16_t)a1;
+ state->gain_lut[4 * interval + 2] = (int16_t)a2;
+ }
+ state->gain_lut += 6;
+ return 1;
+}
+
+void PcanGainControlFreeStateContents(struct PcanGainControlState* state) {
+ free(state->gain_lut);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h
new file mode 100644
index 00000000..d4bfaa2e
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h
@@ -0,0 +1,57 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
+
+#define kWideDynamicFunctionBits 32
+#define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct PcanGainControlConfig {
+ // set to false (0) to disable this module
+ int enable_pcan;
+ // gain normalization exponent (0.0 disables, 1.0 full strength)
+ float strength;
+ // positive value added in the normalization denominator
+ float offset;
+ // number of fractional bits in the gain
+ int gain_bits;
+};
+
+void PcanGainControlFillConfigWithDefaults(
+ struct PcanGainControlConfig* config);
+
+int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
+ int32_t input_bits, uint32_t x);
+
+int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
+ struct PcanGainControlState* state,
+ uint32_t* noise_estimate,
+ const int num_channels,
+ const uint16_t smoothing_bits,
+ const int32_t input_correction_bits);
+
+void PcanGainControlFreeStateContents(struct PcanGainControlState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window.c
new file mode 100644
index 00000000..10da6762
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window.c
@@ -0,0 +1,70 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
+
+#include <string.h>
+
+int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
+ size_t num_samples, size_t* num_samples_read) {
+ const int size = state->size;
+
+ // Copy samples from the samples buffer over to our local input.
+ size_t max_samples_to_copy = state->size - state->input_used;
+ if (max_samples_to_copy > num_samples) {
+ max_samples_to_copy = num_samples;
+ }
+ memcpy(state->input + state->input_used, samples,
+ max_samples_to_copy * sizeof(*samples));
+ *num_samples_read = max_samples_to_copy;
+ state->input_used += max_samples_to_copy;
+
+ if (state->input_used < state->size) {
+ // We don't have enough samples to compute a window.
+ return 0;
+ }
+
+ // Apply the window to the input.
+ const int16_t* coefficients = state->coefficients;
+ const int16_t* input = state->input;
+ int16_t* output = state->output;
+ int i;
+ int16_t max_abs_output_value = 0;
+ for (i = 0; i < size; ++i) {
+ int16_t new_value =
+ (((int32_t)*input++) * *coefficients++) >> kFrontendWindowBits;
+ *output++ = new_value;
+ if (new_value < 0) {
+ new_value = -new_value;
+ }
+ if (new_value > max_abs_output_value) {
+ max_abs_output_value = new_value;
+ }
+ }
+ // Shuffle the input down by the step size, and update how much we have used.
+ memmove(state->input, state->input + state->step,
+ sizeof(*state->input) * (state->size - state->step));
+ state->input_used -= state->step;
+ state->max_abs_output_value = max_abs_output_value;
+
+ // Indicate that the output buffer is valid for the next stage.
+ return 1;
+}
+
+void WindowReset(struct WindowState* state) {
+ memset(state->input, 0, state->size * sizeof(*state->input));
+ memset(state->output, 0, state->size * sizeof(*state->output));
+ state->input_used = 0;
+ state->max_abs_output_value = 0;
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window.h
new file mode 100644
index 00000000..bad81514
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window.h
@@ -0,0 +1,49 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define kFrontendWindowBits 12
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct WindowState {
+ size_t size;
+ int16_t* coefficients;
+ size_t step;
+
+ int16_t* input;
+ size_t input_used;
+ int16_t* output;
+ int16_t max_abs_output_value;
+};
+
+// Applies a window to the samples coming in, stepping forward at the given
+// rate.
+int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
+ size_t num_samples, size_t* num_samples_read);
+
+void WindowReset(struct WindowState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window_util.c b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window_util.c
new file mode 100644
index 00000000..eee6e7b5
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window_util.c
@@ -0,0 +1,73 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Some platforms don't have M_PI
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+void WindowFillConfigWithDefaults(struct WindowConfig* config) {
+ config->size_ms = 25;
+ config->step_size_ms = 10;
+}
+
+int WindowPopulateState(const struct WindowConfig* config,
+ struct WindowState* state, int sample_rate) {
+ state->size = config->size_ms * sample_rate / 1000;
+ state->step = config->step_size_ms * sample_rate / 1000;
+
+ state->coefficients = malloc(state->size * sizeof(*state->coefficients));
+ if (state->coefficients == NULL) {
+ fprintf(stderr, "Failed to allocate window coefficients\n");
+ return 0;
+ }
+
+ // Populate the window values.
+ const float arg = M_PI * 2.0 / ((float)state->size);
+ int i;
+ for (i = 0; i < state->size; ++i) {
+ float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5)));
+ // Scale it to fixed point and round it.
+ state->coefficients[i] =
+ floor(float_value * (1 << kFrontendWindowBits) + 0.5);
+ }
+
+ state->input_used = 0;
+ state->input = malloc(state->size * sizeof(*state->input));
+ if (state->input == NULL) {
+ fprintf(stderr, "Failed to allocate window input\n");
+ return 0;
+ }
+
+ state->output = malloc(state->size * sizeof(*state->output));
+ if (state->output == NULL) {
+ fprintf(stderr, "Failed to allocate window output\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+void WindowFreeStateContents(struct WindowState* state) {
+ free(state->coefficients);
+ free(state->input);
+ free(state->output);
+}
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window_util.h b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window_util.h
new file mode 100644
index 00000000..68e4de9e
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/window_util.h
@@ -0,0 +1,45 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
+
+#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct WindowConfig {
+ // length of window frame in milliseconds
+ size_t size_ms;
+ // length of step for next frame in milliseconds
+ size_t step_size_ms;
+};
+
+// Populates the WindowConfig with "sane" default values.
+void WindowFillConfigWithDefaults(struct WindowConfig* config);
+
+// Allocates any buffers.
+int WindowPopulateState(const struct WindowConfig* config,
+ struct WindowState* state, int sample_rate);
+
+// Frees any allocated buffers.
+void WindowFreeStateContents(struct WindowState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/common.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/common.h
similarity index 92%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/common.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/common.h
index 098b6beb..5e8778f1 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/common.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/common.h
@@ -75,6 +75,7 @@ float ActivationFunction(float x) {
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
const float* bias_data, int array_size,
float* array_data) {
+ if (bias_size == 0) return;
// Note: see b/132215220: in May 2019 we thought it would be OK to replace
// this with the Eigen one-liner:
// return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
@@ -138,6 +139,100 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
#endif
}
+// Single-rounding MultiplyByQuantizedMultiplier
+#if TFLITE_SINGLE_ROUNDING
+inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
+ int32_t quantized_multiplier,
+ int shift) {
+ TFLITE_DCHECK(quantized_multiplier >= 0);
+ TFLITE_DCHECK(shift >= -31 && shift <= 30);
+
+ const int64_t total_shift = 31 - shift;
+  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
+  int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
+ result = result >> total_shift;
+
+  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
+                result <= std::numeric_limits<int32_t>::max());
+  return static_cast<int32_t>(result);
+}
+
+inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
+ int32_t x, int32_t quantized_multiplier, int shift) {
+ TFLITE_DCHECK_LE(shift, 0);
+ return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
+}
+
+inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
+ int32_t x, int32_t quantized_multiplier, int shift) {
+ TFLITE_DCHECK_GE(shift, 0);
+ return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
+}
+
+inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
+ int32_t quantized_multiplier,
+ int shift) {
+ // Inputs:
+ // - quantized_multiplier has fixed point at bit 31
+ // - shift is -31 to +7 (negative for right shift)
+ //
+ // Assumptions: The following input ranges are assumed
+ // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1)
+ // - scaling is chosen so final scaled result fits in int32_t
+ // - input x is in the range -(1<<47) <= x < (1<<47)
+ TFLITE_DCHECK(quantized_multiplier >= 0);
+ TFLITE_DCHECK(shift >= -31 && shift < 8);
+  TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
+                x < (static_cast<int64_t>(1) << 47));
+
+ const int32_t reduced_multiplier =
+ (quantized_multiplier < 0x7FFF0000)
+ ? ((quantized_multiplier + (1 << 15)) >> 16)
+ : 0x7FFF;
+ const int64_t total_shift = 15 - shift;
+  const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
+  int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
+ result = result >> total_shift;
+
+  TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
+                result <= std::numeric_limits<int32_t>::max());
+  return static_cast<int32_t>(result);
+}
+
+#ifdef USE_NEON
+inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
+ int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
+ TFLITE_DCHECK(quantized_multiplier >= 0);
+
+ const int right_shift = std::min(-1, shift);
+ const int left_shift = shift - right_shift;
+
+ const int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
+ const int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
+ const int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
+
+ int32x4x4_t result;
+ result.val[0] = vrshlq_s32(
+ vqdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), multiplier_dup),
+ right_shift_dup);
+
+ result.val[1] = vrshlq_s32(
+ vqdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), multiplier_dup),
+ right_shift_dup);
+
+ result.val[2] = vrshlq_s32(
+ vqdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), multiplier_dup),
+ right_shift_dup);
+
+ result.val[3] = vrshlq_s32(
+ vqdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), multiplier_dup),
+ right_shift_dup);
+
+ return result;
+}
+#endif // USE_NEON
+// Double-rounding MultiplyByQuantizedMultiplier
+#else
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
int32_t x, int32_t quantized_multiplier, int left_shift) {
using gemmlowp::RoundingDivideByPOT;
@@ -224,7 +319,8 @@ inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
return result;
}
-#endif
+#endif // USE_NEON
+#endif // TFLITE_SINGLE_ROUNDING
template <typename T>
int CountLeadingZeros(T integer_input) {
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/compatibility.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/compatibility.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/compatibility.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/compatibility.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/cppmath.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/cppmath.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/cppmath.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/cppmath.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/max.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/max.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/max.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/max.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/min.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/min.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/min.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/min.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/optimized/neon_check.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/optimized/neon_check.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/optimized/neon_check.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/portable_tensor.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/portable_tensor.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor.h
diff --git a/code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor_utils.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor_utils.h
new file mode 100644
index 00000000..0671ce73
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/portable_tensor_utils.h
@@ -0,0 +1,124 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+
+#if defined(_MSC_VER)
+#define __restrict__ __restrict
+#endif
+
+namespace tflite {
+
+namespace tensor_utils {
+
+// Multiplies a matrix with a scalar and reduce the result on each row to a
+// scalar.
+// Parameters:
+// - matrix: matrix of size n_row * n_col
+// - scalar: the scalar that is multiplied to each element in the matrix
+// - n_row: the row count of the matrix
+// - n_col: the column count of the matrix
+// - output: the 32bit output
+// Note: We do not need saturation because the int8 * int8 is safe from overflow
+// in (2^31-1) / (2^14) = 131072, which is bigger than the n_row. Non-zero
+// initial output value is not exceptionally large.
+void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar,
+ int32_t n_row, int32_t n_col,
+ int32_t* output);
+
+// Add another vector for each batch in the batch vector.
+template <typename T>
+void VectorBatchVectorAdd(const T* vector, int v_size, int n_batch,
+ T* batch_vector) {
+ for (int b = 0; b < n_batch; b++) {
+ for (int i = 0; i < v_size; ++i) {
+ batch_vector[i] += vector[i];
+ }
+ batch_vector += v_size;
+ }
+}
+
+// Cwise product of two vectors.
+template <typename T>
+inline void VectorVectorCwiseProduct(const T* __restrict__ vector1,
+ const T* __restrict__ vector2, int v_size,
+ T* __restrict__ result) {
+ for (int v = 0; v < v_size; v++) {
+ *result++ = *vector1++ * *vector2++;
+ }
+}
+
+// Cwise product of a vector and a batch-vector.
+template <typename T>
+inline void VectorBatchVectorCwiseProduct(const T* vector, int v_size,
+ const T* batch_vector, int n_batch,
+ T* result) {
+ for (int b = 0; b < n_batch; b++) {
+ VectorVectorCwiseProduct(vector, batch_vector, v_size, result);
+ // Update the pointers.
+ result += v_size;
+ batch_vector += v_size;
+ }
+}
+
+// Cwise product and accumulate of two vectors. Since it's a MAC operation, the
+// assumption here is that result array is initialized to valid values.
+template <typename T>
+inline void VectorVectorCwiseProductAccumulate(const T* __restrict__ vector1,
+ const T* __restrict__ vector2,
+ int v_size,
+ T* __restrict__ result) {
+ for (int v = 0; v < v_size; v++) {
+ *result++ += *vector1++ * *vector2++;
+ }
+}
+
+// Cwise product and accumulate of a vector and a batch-vector. Since it's a MAC
+// operation, the assumption here is that result array is initialized to valid
+// values.
+template <typename T>
+inline void VectorBatchVectorCwiseProductAccumulate(const T* vector, int v_size,
+ const T* batch_vector,
+ int n_batch, T* result) {
+ for (int b = 0; b < n_batch; b++) {
+ VectorVectorCwiseProductAccumulate(vector, batch_vector, v_size, result);
+ // Update the pointers.
+ result += v_size;
+ batch_vector += v_size;
+ }
+}
+
+// Batch vector initialization with another vector.
+template <typename T>
+void VectorBatchVectorAssign(const T* vector, int v_size, int n_batch,
+ T* batch_vector) {
+ for (int b = 0; b < n_batch; b++) {
+ std::copy_n(vector, v_size, batch_vector + b * v_size);
+ }
+}
+
+} // namespace tensor_utils
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc b/code/components/tflite-lib/tensorflow/lite/kernels/internal/quantization_util.cc
similarity index 93%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/quantization_util.cc
index ed0fe439..62045d67 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.cc
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/quantization_util.cc
@@ -52,6 +52,11 @@ constexpr uint32_t kFractionRoundingThreshold = 0x00200000;
void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
int* shift) {
+#if TFLITE_SINGLE_ROUNDING
+ // Single-rounding MultiplyByQuantizedMultiplier only supports positive
+ // multipliers.
+ // TFLITE_DCHECK(double_multiplier >= 0);
+#endif
if (double_multiplier == 0.) {
*quantized_multiplier = 0;
*shift = 0;
@@ -65,10 +70,10 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
int64_t q_fixed = IntegerFrExp(double_multiplier, shift);
#else // TFLITE_EMULATE_FLOAT
const double q = std::frexp(double_multiplier, shift);
- auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1ll << 31)));
+ auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1LL << 31)));
#endif // TFLITE_EMULATE_FLOAT
- TFLITE_CHECK(q_fixed <= (1ll << 31));
- if (q_fixed == (1ll << 31)) {
+ TFLITE_CHECK(q_fixed <= (1LL << 31));
+ if (q_fixed == (1LL << 31)) {
q_fixed /= 2;
++*shift;
}
@@ -87,6 +92,14 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
*shift = 0;
q_fixed = 0;
}
+#if TFLITE_SINGLE_ROUNDING
+ // Single-rounding MultiplyByQuantizedMultiplier doesn't support a shift > 30,
+ // saturate it.
+ if (*shift > 30) {
+ *shift = 30;
+ q_fixed = (1LL << 31) - 1;
+ }
+#endif
*quantized_multiplier = static_cast<int32_t>(q_fixed);
}
@@ -278,6 +291,12 @@ void PreprocessSoftmaxScaling(double beta, double input_scale,
// result is double equivalent of Q0.31 (actually with more precision). Thus
// this generates a Q(input_integer_bits).(31-input_integer_bits)
// representation.
+#if TFLITE_SINGLE_ROUNDING
+ const double max_real_multiplier = (1LL << 30) - 1.0;
+#else
+ const double max_real_multiplier = (1LL << 31) - 1.0;
+#endif
+
#ifdef TFLITE_EMULATE_FLOAT
const double input_beta = IntegerDoubleMultiply(beta, input_scale);
int shift;
@@ -285,12 +304,14 @@ void PreprocessSoftmaxScaling(double beta, double input_scale,
shift += (31 - input_integer_bits);
double input_beta_real_multiplier =
DoubleFromFractionAndShift(fraction, shift);
- if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) {
- input_beta_real_multiplier = (1ll << 31) - 1.0;
+ if (IntegerDoubleCompare(input_beta_real_multiplier, max_real_multiplier) >
+ 0) {
+ input_beta_real_multiplier = max_real_multiplier;
}
#else // TFLITE_EMULATE_FLOAT
- const double input_beta_real_multiplier = std::min(
- beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0);
+ const double input_beta_real_multiplier =
+ std::min(beta * input_scale * (1 << (31 - input_integer_bits)),
+ max_real_multiplier);
#endif // TFLITE_EMULATE_FLOAT
QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
@@ -324,8 +345,8 @@ int CalculateInputRadius(int input_integer_bits, int input_left_shift,
#else // TFLITE_EMULATE_FLOAT
const double max_input_rescaled =
1.0 * ((1 << input_integer_bits) - 1) *
- (1ll << (total_signed_bits - input_integer_bits)) /
- (1ll << input_left_shift);
+ (1LL << (total_signed_bits - input_integer_bits)) /
+ (1LL << input_left_shift);
// Tighten bound using floor. Suppose that we could use the exact value.
// After scaling the difference, the result would be at the maximum. Thus we
// must ensure that our value has lower magnitude.
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/quantization_util.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/quantization_util.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/quantization_util.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/add.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/add.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add_n.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/add_n.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/add_n.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/add_n.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/arg_min_max.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/arg_min_max.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/arg_min_max.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_matmul.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/batch_matmul.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_matmul.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/batch_matmul.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/binary_function.h
similarity index 82%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/binary_function.h
index 1711940c..0b124af8 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/binary_function.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/binary_function.h
@@ -43,16 +43,27 @@ inline void BroadcastBinaryFunction4DSlow(
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
+ const int* dims_data =
+ reinterpret_cast<const int*>(output_shape.DimsDataUpTo5D());
for (int b = 0; b < output_shape.Dims(0); ++b) {
+ int out_idx_b = b * dims_data[1];
+ int in_idx1_b = desc1.strides[0] * b;
+ int in_idx2_b = desc2.strides[0] * b;
for (int y = 0; y < output_shape.Dims(1); ++y) {
+ int out_idx_y = (out_idx_b + y) * dims_data[2];
+ int in_idx1_y = in_idx1_b + desc1.strides[1] * y;
+ int in_idx2_y = in_idx2_b + desc2.strides[1] * y;
for (int x = 0; x < output_shape.Dims(2); ++x) {
+ int out_idx_x = (out_idx_y + x) * dims_data[3];
+ int in1_idx = in_idx1_y + desc1.strides[2] * x;
+ int in2_idx = in_idx2_y + desc2.strides[2] * x;
for (int c = 0; c < output_shape.Dims(3); ++c) {
- auto out_idx = Offset(output_shape, b, y, x, c);
- auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
- auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
+ auto out_idx = out_idx_x + c;
auto in1_val = input1_data[in1_idx];
auto in2_val = input2_data[in2_idx];
output_data[out_idx] = func(in1_val, in2_val);
+ in1_idx += desc1.strides[3];
+ in2_idx += desc2.strides[3];
}
}
}
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/ceil.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/ceil.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/ceil.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/comparisons.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/comparisons.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/comparisons.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/comparisons.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/concatenation.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/concatenation.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/concatenation.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/concatenation.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/conv.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/conv.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/conv.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/conv.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/cumsum.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/cumsum.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/cumsum.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/cumsum.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depth_to_space.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depth_to_space.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depth_to_space.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depth_to_space.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
similarity index 94%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
index 20bf83df..d4fba139 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h
@@ -68,6 +68,27 @@ inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier,
return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}
+// Single-rounding MultiplyByQuantizedMultiplier
+#if TFLITE_SINGLE_ROUNDING
+template <>
+inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
+ int32_t x, int32_t quantized_multiplier, int shift) {
+ using gemmlowp::RoundingDivideByPOT;
+ using gemmlowp::SaturatingRoundingDoublingHighMul;
+ int left_shift = shift > 0 ? shift : 0;
+ int right_shift = shift > 0 ? 0 : -shift;
+ return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
+ x * (1 << left_shift), quantized_multiplier),
+ right_shift);
+}
+
+template <>
+inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
+ int32_t x, int32_t quantized_multiplier, int shift) {
+ return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
+}
+// Double-rounding MultiplyByQuantizedMultiplier
+#else
template <>
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
int32_t x, int32_t quantized_multiplier, int shift) {
@@ -86,6 +107,7 @@ inline int32_t DepthwiseConvRound(
rounding_offset) >>
right_shift;
}
+#endif // TFLITE_SINGLE_ROUNDING
template <DepthwiseConvOutputRounding output_rounding>
struct DepthwiseConvBasicKernel {
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/dequantize.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/dequantize.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/dequantize.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/dequantize.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/elu.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/elu.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/elu.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/elu.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/exp.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/exp.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/exp.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/exp.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fill.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fill.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fill.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fill.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/floor.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/floor.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/floor.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/floor_div.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/floor_div.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/floor_div.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/floor_div.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/floor_mod.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/floor_mod.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/floor_mod.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/floor_mod.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fully_connected.h
similarity index 99%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fully_connected.h
index d5ad9d67..9bf2e5df 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/fully_connected.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/fully_connected.h
@@ -15,6 +15,7 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
+#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/hard_swish.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/hard_swish.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/add.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
similarity index 94%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
index 2bc3e794..1a469fa9 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h
@@ -74,12 +74,13 @@ inline void FullyConnected(
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
- TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
+ TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
const int filter_dim_count = filter_shape.DimensionsCount();
- const int batches = output_shape.Dims(0);
- const int output_depth = output_shape.Dims(1);
+ const int output_dim_count = output_shape.DimensionsCount();
+ const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
+ const int output_depth = output_shape.Dims(output_dim_count - 1);
TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
for (int b = 0; b < batches; ++b) {
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/l2normalization.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/l2normalization.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/l2normalization.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/l2normalization.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/leaky_relu.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/leaky_relu.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/leaky_relu.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/leaky_relu.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/log_softmax.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/log_softmax.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/log_softmax.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/log_softmax.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/logistic.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/logistic.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/logistic.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/logistic.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/maximum_minimum.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/maximum_minimum.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/maximum_minimum.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/maximum_minimum.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/mul.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/mul.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/neg.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/neg.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/neg.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/pad.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/pad.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/pad.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/pad.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/pooling.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/pooling.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/pooling.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/pooling.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
similarity index 99%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
index 0af86e36..4cc51cb4 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -77,8 +77,8 @@ void PortableAsymmetricQuantizeFloats(const float* values, const int size,
const double qmin_double = kMinScale;
const double qmax_double = kMaxScale;
const auto minmax = std::minmax_element(values, values + size);
- const double rmin = std::fmin(0, *minmax.first);
- const double rmax = std::fmax(0, *minmax.second);
+ const double rmin = static_cast(std::min(0.0f, *minmax.first));
+ const double rmax = static_cast(std::max(0.0f, *minmax.second));
if (rmin == rmax) {
memset(quantized_values, 0, size * sizeof(int8_t));
*scaling_factor = 1;
@@ -495,7 +495,7 @@ void PortableApplyLayerNormFloat(const int16_t* input,
const float weighted_normalized_value =
normalized_value * layer_norm_weights[i] * layer_norm_scale +
bias[i] * bias_scale;
- const int32_t quant_output = static_cast(std::round(
+ const int32_t quant_output = static_cast(round(
weighted_normalized_value * static_cast(std::pow(2, 12))));
output[index] = std::min(int16_max, std::max(int16_min, quant_output));
}
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/prelu.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/prelu.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/prelu.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/prelu.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/quantize.h
similarity index 58%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/quantize.h
index 6f3f9aeb..f304b641 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/quantize.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/quantize.h
@@ -17,6 +17,7 @@ limitations under the License.
#include
#include
+#include
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
@@ -49,6 +50,39 @@ inline void AffineQuantize(const tflite::QuantizationParams& op_params,
}
}
+// Quantizes per-channel.
+template
+inline void PerChannelQuantize(
+ const tflite::PerChannelQuantizationParams& op_params,
+ const RuntimeShape& input_shape, const InputT* input_data,
+ const RuntimeShape& output_shape, OutputT* output_data) {
+ // Ensure flat size is same.
+ MatchingFlatSize(input_shape, output_shape);
+
+ const int32_t* zero_point = op_params.zero_point;
+ const float* scale = op_params.scale;
+ const int32_t quantized_dimension = op_params.quantized_dimension;
+ const int32_t num_dims = input_shape.DimensionsCount();
+ const int32_t* dims_data = input_shape.DimsData();
+ std::vector current_dim(num_dims, 0);
+ static constexpr int32_t min_val = std::numeric_limits::min();
+ static constexpr int32_t max_val = std::numeric_limits::max();
+
+ do {
+ size_t offset =
+ ReducedOutputOffset(num_dims, reinterpret_cast(dims_data),
+ current_dim.data(), 0, nullptr);
+ const InputT val = input_data[offset];
+ const int channel = current_dim[quantized_dimension];
+ int32_t unclamped = static_cast(TfLiteRound(
+ val / static_cast(scale[channel]))) +
+ zero_point[channel];
+ int32_t clamped = std::min(std::max(unclamped, min_val), max_val);
+ output_data[offset] = static_cast(clamped);
+ } while (NextIndex(num_dims, reinterpret_cast(dims_data),
+ current_dim.data()));
+}
+
} // namespace reference_ops
} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/reduce.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/reduce.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/reduce.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/reduce.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/requantize.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/requantize.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/requantize.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/resize_bilinear.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/resize_bilinear.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/resize_bilinear.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/resize_bilinear.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/round.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/round.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/round.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/round.h
diff --git a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/slice.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/slice.h
new file mode 100644
index 00000000..cb73ea0d
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/slice.h
@@ -0,0 +1,80 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_
+
+#include "tensorflow/lite/kernels/internal/portable_tensor.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite {
+
+namespace reference_ops {
+
+template
+inline void Slice(const tflite::SliceParams& op_params,
+ const RuntimeShape& input_shape,
+ const RuntimeShape& output_shape,
+ SequentialTensorWriter* writer) {
+ const RuntimeShape ext_shape = RuntimeShape::ExtendedShape(5, input_shape);
+ TFLITE_DCHECK_LE(op_params.begin_count, 5);
+ TFLITE_DCHECK_LE(op_params.size_count, 5);
+ const int begin_count = op_params.begin_count;
+ const int size_count = op_params.size_count;
+ // We front-pad the begin and size vectors.
+ int start[5];
+ int stop[5];
+ for (int i = 0; i < 5; ++i) {
+ int padded_i = 5 - i;
+ start[i] =
+ begin_count < padded_i ? 0 : op_params.begin[begin_count - padded_i];
+ stop[i] =
+ (size_count < padded_i || op_params.size[size_count - padded_i] == -1)
+ ? ext_shape.Dims(i)
+ : start[i] + op_params.size[size_count - padded_i];
+ }
+
+ for (int i0 = start[0]; i0 < stop[0]; ++i0) {
+ for (int i1 = start[1]; i1 < stop[1]; ++i1) {
+ for (int i2 = start[2]; i2 < stop[2]; ++i2) {
+ for (int i3 = start[3]; i3 < stop[3]; ++i3) {
+ for (int i4 = start[4]; i4 < stop[4]; ++i4) {
+ writer->Write(Offset(ext_shape, i0, i1, i2, i3, i4));
+ }
+ }
+ }
+ }
+ }
+}
+
+template
+inline void Slice(const tflite::SliceParams& op_params,
+ const RuntimeShape& input_shape, const T* input_data,
+ const RuntimeShape& output_shape, T* output_data) {
+ SequentialTensorWriter writer(input_data, output_data);
+ return Slice(op_params, input_shape, output_shape, &writer);
+}
+
+template
+inline void Slice(const tflite::SliceParams& op_params,
+ const RuntimeShape& input_shape, const TfLiteTensor* input,
+ const RuntimeShape& output_shape, TfLiteTensor* output) {
+ SequentialTensorWriter writer(input, output);
+ return Slice(op_params, input_shape, output_shape, &writer);
+}
+
+} // namespace reference_ops
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/softmax.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/softmax.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/softmax.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/softmax.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_depth.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/space_to_depth.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/space_to_depth.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/space_to_depth.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/strided_slice.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/strided_slice.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/strided_slice.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/sub.h
similarity index 77%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/sub.h
index b8b8b732..3fa43ce9 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/sub.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/sub.h
@@ -105,63 +105,6 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
NDOpsHelper(output_desc, sub_func);
}
-template
-inline void BroadcastSubSlow(const ArithmeticParams& params,
- const RuntimeShape& input1_shape,
- const uint8_t* input1_data,
- const RuntimeShape& input2_shape,
- const uint8_t* input2_data,
- const RuntimeShape& output_shape,
- uint8_t* output_data) {
- ruy::profiler::ScopeLabel label("BroadcastSubSlow/uint8_t");
- TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
- TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
- TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
- NdArrayDesc desc1;
- NdArrayDesc desc2;
- NdArrayDesc output_desc;
- NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
- &desc2);
- CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- auto sub_func = [&](int indexes[N]) {
- const int32_t input1_val =
- params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
- const int32_t input2_val =
- params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
- const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
- const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
- const int32_t scaled_input1_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, params.input1_multiplier, params.input1_shift);
- const int32_t scaled_input2_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, params.input2_multiplier, params.input2_shift);
- const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
- const int32_t raw_output =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sub, params.output_multiplier, params.output_shift) +
- params.output_offset;
- const int32_t clamped_output =
- std::min(params.quantized_activation_max,
- std::max(params.quantized_activation_min, raw_output));
- output_data[SubscriptToIndex(output_desc, indexes)] =
- static_cast(clamped_output);
- };
- NDOpsHelper(output_desc, sub_func);
-}
-
template
inline void BroadcastSubSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
@@ -202,60 +145,6 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
NDOpsHelper(output_desc, sub_func);
}
-template
-inline void BroadcastSubSlow(const ArithmeticParams& params,
- const RuntimeShape& input1_shape,
- const int8_t* input1_data,
- const RuntimeShape& input2_shape,
- const int8_t* input2_data,
- const RuntimeShape& output_shape,
- int8_t* output_data) {
- ruy::profiler::ScopeLabel label("BroadcastSubSlow/int8_t");
- NdArrayDesc desc1;
- NdArrayDesc desc2;
- NdArrayDesc output_desc;
- NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
- &desc2);
- CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- auto sub_func = [&](int indexes[N]) {
- const int32_t input1_val =
- params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
- const int32_t input2_val =
- params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
- const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
- const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
- const int32_t scaled_input1_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, params.input1_multiplier, params.input1_shift);
- const int32_t scaled_input2_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, params.input2_multiplier, params.input2_shift);
- const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
- const int32_t raw_output =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sub, params.output_multiplier, params.output_shift) +
- params.output_offset;
- const int32_t clamped_output =
- std::min(params.quantized_activation_max,
- std::max(params.quantized_activation_min, raw_output));
- output_data[SubscriptToIndex(output_desc, indexes)] =
- static_cast(clamped_output);
- };
- NDOpsHelper(output_desc, sub_func);
-}
-
template
void BroadcastSubSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
@@ -376,19 +265,37 @@ inline void BroadcastSub16POTSlow(const ArithmeticParams& params,
NDOpsHelper(output_desc, sub_func);
}
-// Element-wise Sub that can often be used for inner loop of broadcast sub as
-// well as the non-broadcast sub.
-inline void SubElementwise(int size, const ArithmeticParams& params,
- const uint8_t* input1_data,
- const uint8_t* input2_data, uint8_t* output_data) {
- TFLITE_DCHECK_GT(params.input1_offset, -256);
- TFLITE_DCHECK_GT(params.input2_offset, -256);
- TFLITE_DCHECK_LT(params.input1_offset, 256);
- TFLITE_DCHECK_LT(params.input2_offset, 256);
+template
+void BroadcastQuantSubSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const T* input1_data,
+ const RuntimeShape& input2_shape,
+ const T* input2_data,
+ const RuntimeShape& output_shape, T* output_data) {
+ ruy::profiler::ScopeLabel label("BroadcastQuantSubSlow/T");
+ NdArrayDesc desc1;
+ NdArrayDesc desc2;
+ NdArrayDesc output_desc;
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
- for (int i = 0; i < size; ++i) {
- const int32_t input1_val = params.input1_offset + input1_data[i];
- const int32_t input2_val = params.input2_offset + input2_data[i];
+ // In Tensorflow, the dimensions are canonically named (batch_number, row,
+ // col, channel), with extents (batches, height, width, depth), with the
+ // trailing dimension changing most rapidly (channels has the smallest stride,
+ // typically 1 element).
+ //
+ // In generated C code, we store arrays with the dimensions reversed. The
+ // first dimension has smallest stride.
+ //
+ // We name our variables by their Tensorflow convention, but generate C code
+ // nesting loops such that the innermost loop has the smallest stride for the
+ // best cache behavior.
+ auto sub_func = [&](int indexes[N]) {
+ const int32_t input1_val =
+ params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
+ const int32_t input2_val =
+ params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
const int32_t scaled_input1_val =
@@ -405,21 +312,18 @@ inline void SubElementwise(int size, const ArithmeticParams& params,
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
- output_data[i] = static_cast(clamped_output);
- }
+ output_data[SubscriptToIndex(output_desc, indexes)] =
+ static_cast(clamped_output);
+ };
+ NDOpsHelper(output_desc, sub_func);
}
// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
+template
inline void SubElementwise(int size, const ArithmeticParams& params,
- const int8_t* input1_data, const int8_t* input2_data,
- int8_t* output_data) {
- const int32_t int8_max_value = std::numeric_limits::max();
- TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value);
- TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value);
- TFLITE_DCHECK_LE(params.input1_offset, int8_max_value);
- TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
-
+ const T* input1_data, const T* input2_data,
+ T* output_data) {
for (int i = 0; i < size; ++i) {
const int32_t input1_val = params.input1_offset + input1_data[i];
const int32_t input2_val = params.input2_offset + input2_data[i];
@@ -439,7 +343,7 @@ inline void SubElementwise(int size, const ArithmeticParams& params,
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
- output_data[i] = static_cast(clamped_output);
+ output_data[i] = static_cast(clamped_output);
}
}
@@ -469,11 +373,27 @@ inline void Sub(const ArithmeticParams& params,
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
- const int32_t int8_max_value = std::numeric_limits::max();
- TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value);
- TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value);
- TFLITE_DCHECK_LE(params.input1_offset, int8_max_value);
- TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
+ TFLITE_DCHECK_GE(params.input1_offset, -128);
+ TFLITE_DCHECK_GE(params.input2_offset, -128);
+ // offset = -quantization_params.zero_point in PrepareGeneralSubOp().
+ // So it's maximum can be 128 not 127.
+ TFLITE_DCHECK_LE(params.input1_offset, 128);
+ TFLITE_DCHECK_LE(params.input2_offset, 128);
+ SubElementwise(flat_size, params, input1_data, input2_data, output_data);
+}
+
+inline void Sub(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const int16_t* input1_data,
+ const RuntimeShape& input2_shape, const int16_t* input2_data,
+ const RuntimeShape& output_shape, int16_t* output_data) {
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
+
+ const int flat_size =
+ MatchingElementsSize(input1_shape, input2_shape, output_shape);
+
+ TFLITE_DCHECK_EQ(params.input1_offset, 0);
+ TFLITE_DCHECK_EQ(params.input2_offset, 0);
SubElementwise(flat_size, params, input1_data, input2_data, output_data);
}
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/tanh.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/tanh.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/tanh.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/tanh.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/transpose.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/transpose.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/transpose_conv.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/reference/transpose_conv.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/transpose_conv.h
diff --git a/code/components/tflite-lib/tensorflow/lite/kernels/internal/runtime_shape.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/runtime_shape.h
new file mode 100644
index 00000000..13693643
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/runtime_shape.h
@@ -0,0 +1,155 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
+#define TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
+
+namespace tflite {
+
+template
+struct Dims {
+ int sizes[N];
+ int strides[N];
+};
+
+class RuntimeShape {
+ public:
+ RuntimeShape& operator=(RuntimeShape const&) = delete;
+
+ RuntimeShape() : size_(0) {}
+
+ explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) {}
+
+ RuntimeShape(int shape_size, int32_t value) : size_(shape_size) {
+ for (int i = 0; i < shape_size; ++i) {
+ SetDim(i, value);
+ }
+ }
+
+ RuntimeShape(int dimensions_count, const int32_t* dims_data)
+ : size_(dimensions_count) {
+ ReplaceWith(dimensions_count, dims_data);
+ }
+
+ bool operator==(const RuntimeShape& comp) const {
+ return this->size_ == comp.size_ &&
+ std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) ==
+ 0;
+ }
+
+ ~RuntimeShape() {}
+
+ int32_t DimensionsCount() const { return size_; }
+ int32_t Dims(int i) const {
+ TFLITE_DCHECK_GE(i, 0);
+ TFLITE_DCHECK_LT(i, size_);
+ return dims_[i];
+ }
+ void SetDim(int i, int32_t val) {
+ TFLITE_DCHECK_GE(i, 0);
+ TFLITE_DCHECK_LT(i, size_);
+ dims_[i] = val;
+ }
+
+ static RuntimeShape ExtendedShape(int new_shape_size,
+ const RuntimeShape& shape) {
+ return RuntimeShape(new_shape_size, shape, 1);
+ }
+ int32_t* DimsData() { return dims_; }
+ const int32_t* DimsData() const { return dims_; }
+ const int32_t* DimsDataUpTo5D() const { return dims_; }
+
+ void ReplaceWith(int dimensions_count, const int32_t* dims_data) {
+ size_ = dimensions_count;
+ int32_t* dst_dims = DimsData();
+ std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t));
+ }
+
+ // Returns the total count of elements, that is the size when flattened into a
+ // vector.
+ int FlatSize() const {
+ int buffer_size = 1;
+ const int* dims_data = reinterpret_cast(DimsData());
+ for (int i = 0; i < size_; i++) {
+ buffer_size *= dims_data[i];
+ }
+ return buffer_size;
+ }
+
+ private:
+ // For use only by ExtendedShape(), written to guarantee (return-value) copy
+ // elision in C++17.
+ // This creates a shape padded to the desired size with the specified value.
+ RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value)
+ : size_(new_shape_size) {
+ // If the following check fails, it is likely because a 4D-only kernel is
+ // being used with an array of larger dimension count.
+ TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount());
+ const int size_increase = new_shape_size - shape.DimensionsCount();
+ for (int i = 0; i < size_increase; ++i) {
+ SetDim(i, pad_value);
+ }
+ std::memcpy(DimsData() + size_increase, shape.DimsData(),
+ sizeof(int32_t) * shape.DimensionsCount());
+ }
+
+  // A maximum of 5 dimensions is supported on TFLM.
+ static constexpr int kMaxSize = 5;
+ int32_t size_;
+ union {
+ int32_t dims_[kMaxSize];
+ };
+};
+
+// Since tensors with '0' in their shape are valid in TF, these offset functions
+// allow that as long as the corresponding index is also 0. It is up to the
+// calling ops to ensure that they perform verification checks on tensor shapes
+// if they don't support a particular behavior.
+
+inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) {
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4);
+ const int* dims_data = reinterpret_cast(shape.DimsData());
+ TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
+ (i0 >= 0 && i0 < dims_data[0]));
+ TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
+ (i1 >= 0 && i1 < dims_data[1]));
+ TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
+ (i2 >= 0 && i2 < dims_data[2]));
+ TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
+ (i3 >= 0 && i3 < dims_data[3]));
+ return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3;
+}
+
+inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3,
+ int i4) {
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5);
+ const int* dims_data = reinterpret_cast(shape.DimsData());
+ TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
+ (i0 >= 0 && i0 < dims_data[0]));
+ TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
+ (i1 >= 0 && i1 < dims_data[1]));
+ TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
+ (i2 >= 0 && i2 < dims_data[2]));
+ TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
+ (i3 >= 0 && i3 < dims_data[3]));
+ TFLITE_DCHECK((dims_data[4] == 0 && i4 == 0) ||
+ (i4 >= 0 && i4 < dims_data[4]));
+ return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) *
+ dims_data[4] +
+ i4;
+}
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/strided_slice_logic.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/strided_slice_logic.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/strided_slice_logic.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/strided_slice_logic.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/tensor_ctypes.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/tensor_ctypes.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/tensor_ctypes.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/internal/types.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/types.h
similarity index 80%
rename from code/components/tfmicro/tensorflow/lite/kernels/internal/types.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/internal/types.h
index 5cd3f687..77644bc0 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/internal/types.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/internal/types.h
@@ -21,6 +21,7 @@ limitations under the License.
#include
#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/runtime_shape.h"
namespace tflite {
@@ -139,211 +140,22 @@ inline bool operator==(const QuantizationParams& qp1,
return qp1.zero_point == qp2.zero_point && qp1.scale == qp2.scale;
}
-template <int N>
-struct Dims {
-  int sizes[N];
-  int strides[N];
+// Quantization parameters for each channel, determining the mapping of
+// quantized values to real values. See QuantizationParams for a single set of
+// parameters per tensor. This has one set of parameters per channel.
+//
+// The correspondence is as follows:
+//
+// real_value = scale[channel] * (quantized_value - zero_point[channel]);
+//
+struct PerChannelQuantizationParams {
+ // The following members typically point to the corresponding members of a
+ // TfLiteAffineQuantization struct.
+ const float* scale;
+ const int32_t* zero_point;
+ int32_t quantized_dimension;
};
-class RuntimeShape {
- public:
- // Shapes with dimensions up to 5 are stored directly in the structure, while
- // larger shapes are separately allocated.
- static constexpr int kMaxSmallSize = 5;
-
- RuntimeShape& operator=(RuntimeShape const&) = delete;
-
- RuntimeShape() : size_(0) {}
-
- explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) {
- if (dimensions_count > kMaxSmallSize) {
-#ifdef TF_LITE_STATIC_MEMORY
- TFLITE_CHECK(false && "No shape resizing supported on this platform");
-#else // TF_LITE_STATIC_MEMORY
- dims_pointer_ = new int32_t[dimensions_count];
-#endif // TF_LITE_STATIC_MEMORY
- }
- }
-
- RuntimeShape(int shape_size, int32_t value) : size_(0) {
- Resize(shape_size);
- for (int i = 0; i < shape_size; ++i) {
- SetDim(i, value);
- }
- }
-
- RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) {
- ReplaceWith(dimensions_count, dims_data);
- }
-
-  RuntimeShape(const std::initializer_list<int> init_list) : size_(0) {
- BuildFrom(init_list);
- }
-
- // Avoid using this constructor. We should be able to delete it when C++17
- // rolls out.
- RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) {
- if (size_ > kMaxSmallSize) {
-#ifdef TF_LITE_STATIC_MEMORY
- TFLITE_CHECK(false && "No shape resizing supported on this platform");
-#else
- dims_pointer_ = new int32_t[size_];
-#endif
- }
- std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_);
- }
-
- bool operator==(const RuntimeShape& comp) const {
- return this->size_ == comp.size_ &&
- std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) ==
- 0;
- }
-
- ~RuntimeShape() {
- if (size_ > kMaxSmallSize) {
-#ifdef TF_LITE_STATIC_MEMORY
- TFLITE_CHECK(false && "No shape resizing supported on this platform");
-#else // TF_LITE_STATIC_MEMORY
- delete[] dims_pointer_;
-#endif // TF_LITE_STATIC_MEMORY
- }
- }
-
- inline int32_t DimensionsCount() const { return size_; }
- inline int32_t Dims(int i) const {
- TFLITE_DCHECK_GE(i, 0);
- TFLITE_DCHECK_LT(i, size_);
- return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i];
- }
- inline void SetDim(int i, int32_t val) {
- TFLITE_DCHECK_GE(i, 0);
- TFLITE_DCHECK_LT(i, size_);
- if (size_ > kMaxSmallSize) {
- dims_pointer_[i] = val;
- } else {
- dims_[i] = val;
- }
- }
-
- inline int32_t* DimsData() {
- return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
- }
- inline const int32_t* DimsData() const {
- return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
- }
- // The caller must ensure that the shape is no bigger than 5-D.
- inline const int32_t* DimsDataUpTo5D() const { return dims_; }
-
- inline void Resize(int dimensions_count) {
- if (size_ > kMaxSmallSize) {
-#ifdef TF_LITE_STATIC_MEMORY
- TFLITE_CHECK(false && "No shape resizing supported on this platform");
-#else // TF_LITE_STATIC_MEMORY
- delete[] dims_pointer_;
-#endif // TF_LITE_STATIC_MEMORY
- }
- size_ = dimensions_count;
- if (dimensions_count > kMaxSmallSize) {
-#ifdef TF_LITE_STATIC_MEMORY
- TFLITE_CHECK(false && "No shape resizing supported on this platform");
-#else // TF_LITE_STATIC_MEMORY
- dims_pointer_ = new int32_t[dimensions_count];
-#endif // TF_LITE_STATIC_MEMORY
- }
- }
-
- inline void ReplaceWith(int dimensions_count, const int32_t* dims_data) {
- Resize(dimensions_count);
- int32_t* dst_dims = DimsData();
- std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t));
- }
-
-  template <typename T>
-  inline void BuildFrom(const T& src_iterable) {
- const int dimensions_count =
- std::distance(src_iterable.begin(), src_iterable.end());
- Resize(dimensions_count);
- int32_t* data = DimsData();
- for (auto it : src_iterable) {
- *data = it;
- ++data;
- }
- }
-
- // This will probably be factored out. Old code made substantial use of 4-D
- // shapes, and so this function is used to extend smaller shapes. Note that
- // (a) as Dims<4>-dependent code is eliminated, the reliance on this should be
- // reduced, and (b) some kernels are stricly 4-D, but then the shapes of their
- // inputs should already be 4-D, so this function should not be needed.
- inline static RuntimeShape ExtendedShape(int new_shape_size,
- const RuntimeShape& shape) {
- return RuntimeShape(new_shape_size, shape, 1);
- }
-
-  inline void BuildFrom(const std::initializer_list<int> init_list) {
-    BuildFrom<const std::initializer_list<int>>(init_list);
-  }
-
- // Returns the total count of elements, that is the size when flattened into a
- // vector.
- inline int FlatSize() const {
- int buffer_size = 1;
-    const int* dims_data = reinterpret_cast<const int*>(DimsData());
- for (int i = 0; i < size_; i++) {
- buffer_size *= dims_data[i];
- }
- return buffer_size;
- }
-
- bool operator!=(const RuntimeShape& comp) const { return !((*this) == comp); }
-
- private:
- // For use only by ExtendedShape(), written to guarantee (return-value) copy
- // elision in C++17.
- // This creates a shape padded to the desired size with the specified value.
- RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value)
- : size_(0) {
- // If the following check fails, it is likely because a 4D-only kernel is
- // being used with an array of larger dimension count.
- TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount());
- Resize(new_shape_size);
- const int size_increase = new_shape_size - shape.DimensionsCount();
- for (int i = 0; i < size_increase; ++i) {
- SetDim(i, pad_value);
- }
- std::memcpy(DimsData() + size_increase, shape.DimsData(),
- sizeof(int32_t) * shape.DimensionsCount());
- }
-
- int32_t size_;
- union {
- int32_t dims_[kMaxSmallSize];
- int32_t* dims_pointer_;
- };
-};
-
-// Converts inference-style shape to legacy tflite::Dims<4>.
-inline tflite::Dims<4> ToRuntimeDims(const tflite::RuntimeShape& array_shape) {
- tflite::Dims<4> result;
- const int dimensions_count = array_shape.DimensionsCount();
- TFLITE_CHECK_LE(dimensions_count, 4);
- int cum_prod = 1;
- for (int i = 0; i < 4; i++) {
- const int new_dim =
- (i < dimensions_count) ? array_shape.Dims(dimensions_count - 1 - i) : 1;
- result.sizes[i] = new_dim;
- result.strides[i] = cum_prod;
- cum_prod *= new_dim;
- }
- return result;
-}
-
-// TODO(b/80418076): Move to legacy ops file, update invocations.
-inline RuntimeShape DimsToShape(const tflite::Dims<4>& dims) {
- return RuntimeShape(
- {dims.sizes[3], dims.sizes[2], dims.sizes[1], dims.sizes[0]});
-}
-
// Gets next index to iterate through a multidimensional array.
inline bool NextIndex(const int num_dims, const int* dims, int* current) {
if (num_dims == 0) {
@@ -405,43 +217,6 @@ inline size_t ReducedOutputOffset(const int num_dims, const int* dims,
// calling ops to ensure that they perform verification checks on tensor shapes
// if they don't support a particular behavior.
-inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) {
- TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4);
-  const int* dims_data = reinterpret_cast<const int*>(shape.DimsDataUpTo5D());
- TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
- (i0 >= 0 && i0 < dims_data[0]));
- TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
- (i1 >= 0 && i1 < dims_data[1]));
- TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
- (i2 >= 0 && i2 < dims_data[2]));
- TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
- (i3 >= 0 && i3 < dims_data[3]));
- return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3;
-}
-
-inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3,
- int i4) {
- TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5);
-  const int* dims_data = reinterpret_cast<const int*>(shape.DimsDataUpTo5D());
- TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
- (i0 >= 0 && i0 < dims_data[0]));
- TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
- (i1 >= 0 && i1 < dims_data[1]));
- TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
- (i2 >= 0 && i2 < dims_data[2]));
- TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
- (i3 >= 0 && i3 < dims_data[3]));
- TFLITE_DCHECK((dims_data[4] == 0 && i4 == 0) ||
- (i4 >= 0 && i4 < dims_data[4]));
- return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) *
- dims_data[4] +
- i4;
-}
-
-inline int Offset(const RuntimeShape& shape, int* index) {
- return Offset(shape, index[0], index[1], index[2], index[3]);
-}
-
inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) {
TFLITE_DCHECK((i0 == 0 && dims.sizes[0] == 0) ||
(i0 >= 0 && i0 < dims.sizes[0]));
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc b/code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.cc
similarity index 95%
rename from code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc
rename to code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.cc
index 6a53757b..75529296 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.cc
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.cc
@@ -410,21 +410,45 @@ bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
}
#ifndef TF_LITE_STATIC_MEMORY
+TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
+ const TfLiteTensor* input,
+ TfLiteIntArray** output_shape) {
+ if (NumDimensions(input) != 1) {
+    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
+                       "Invalid %dD input tensor (must be a 1D tensor).",
+                       NumDimensions(input));
+ return kTfLiteError;
+ }
+ const int output_dims = SizeOfDimension(input, 0);
+  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
+      TfLiteIntArrayCreate(output_dims), TfLiteIntArrayFree);
+ for (int i = 0; i < output_dims; i++) {
+ shape->data[i] = input->data.i32[i];
+ }
+ *output_shape = shape.release();
+ return kTfLiteOk;
+}
// TODO(b/172067338): Having this function be part of TF_LITE_STATIC_MEMORY
// build results in a 6KB size increase, even though the function is unsused for
// that build. What appears to be happening is that while the linker drops the
// unsused function, the string library that gets pulled in is not dropped,
// resulting in the increased binary size.
-std::string GetShapeDebugString(const TfLiteIntArray* shape) {
+const std::string GetShapeDebugString(const TfLiteIntArray* shape) {
std::string str;
for (int d = 0; d < shape->size; ++d) {
if (str.empty())
str = "[" + std::to_string(shape->data[d]);
else
- str += ", " + std::to_string(shape->data[d]);
+ // Don't add space after "," to make the output consistent with
+ // tensorflow::shape_inference::InferenceContext::DebugString()
+ str += "," + std::to_string(shape->data[d]);
+ }
+ if (str.empty()) {
+ str = "[]";
+ } else {
+ str += "]";
}
- str += "]";
return str;
}
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h b/code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.h
similarity index 93%
rename from code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.h
index ffae0701..d082e7b0 100644
--- a/code/components/tfmicro/tensorflow/lite/kernels/kernel_util.h
+++ b/code/components/tflite-lib/tensorflow/lite/kernels/kernel_util.h
@@ -18,6 +18,9 @@ limitations under the License.
#include
#include
+#ifndef TF_LITE_STATIC_MEMORY
+#include <string>
+#endif  // TF_LITE_STATIC_MEMORY
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
@@ -149,8 +152,12 @@ inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
return t->dims->data[dim];
}
-inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; }
-inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; }
+inline int NumInputs(const TfLiteNode* node) {
+ return node->inputs == nullptr ? 0 : node->inputs->size;
+}
+inline int NumOutputs(const TfLiteNode* node) {
+ return node->outputs == nullptr ? 0 : node->outputs->size;
+}
#ifndef TF_LITE_STATIC_MEMORY
inline int NumIntermediates(const TfLiteNode* node) {
@@ -179,6 +186,11 @@ inline bool IsConstantTensor(const TfLiteTensor* tensor) {
return tensor->allocation_type == kTfLiteMmapRo;
}
+inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) {
+ return IsConstantTensor(tensor) ||
+ (tensor->allocation_type == kTfLitePersistentRo);
+}
+
// Determines whether tensor is dynamic. Note that a tensor can be non-const and
// not dynamic. This function specifically checks for a dynamic tensor.
inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
@@ -271,6 +283,16 @@ void CalculateActivationRange(TfLiteFusedActivation activation,
// Return true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);
+#if !defined(TF_LITE_STATIC_MEMORY)
+// Gets the output shape from the input tensor.
+TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
+ const TfLiteTensor* input,
+ TfLiteIntArray** output_shape);
+
+const std::string GetShapeDebugString(const TfLiteIntArray* shape);
+
+#endif // !defined(TF_LITE_STATIC_MEMORY)
+
// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the two input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/op_macros.h b/code/components/tflite-lib/tensorflow/lite/kernels/op_macros.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/op_macros.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/op_macros.h
diff --git a/code/components/tfmicro/tensorflow/lite/kernels/padding.h b/code/components/tflite-lib/tensorflow/lite/kernels/padding.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/kernels/padding.h
rename to code/components/tflite-lib/tensorflow/lite/kernels/padding.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc b/code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc
similarity index 95%
rename from code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc
index 5abdc3f9..8777cd28 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc
@@ -26,8 +26,10 @@ AllOpsResolver::AllOpsResolver() {
AddAddN();
AddArgMax();
AddArgMin();
+ AddAssignVariable();
AddAveragePool2D();
AddBatchToSpaceNd();
+ AddCallOnce();
AddCeil();
AddConcatenation();
AddConv2D();
@@ -40,7 +42,9 @@ AllOpsResolver::AllOpsResolver() {
AddElu();
AddEqual();
AddEthosU();
+ AddExp();
AddExpandDims();
+ AddFill();
AddFloor();
AddFloorDiv();
AddFloorMod();
@@ -70,6 +74,7 @@ AllOpsResolver::AllOpsResolver() {
AddPadV2();
AddPrelu();
AddQuantize();
+ AddReadVariable();
AddReduceMax();
AddRelu();
AddRelu6();
@@ -92,9 +97,10 @@ AllOpsResolver::AllOpsResolver() {
AddSub();
AddSvdf();
AddTanh();
- AddTransposeConv();
AddTranspose();
+ AddTransposeConv();
AddUnpack();
+ AddVarHandle();
}
} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.h b/code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/all_ops_resolver.h
rename to code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/compatibility.h b/code/components/tflite-lib/tensorflow/lite/micro/compatibility.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/compatibility.h
rename to code/components/tflite-lib/tensorflow/lite/micro/compatibility.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/debug_log.cc b/code/components/tflite-lib/tensorflow/lite/micro/debug_log.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/debug_log.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/debug_log.cc
diff --git a/code/components/tfmicro/tensorflow/lite/micro/debug_log.h b/code/components/tflite-lib/tensorflow/lite/micro/debug_log.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/debug_log.h
rename to code/components/tflite-lib/tensorflow/lite/micro/debug_log.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/flatbuffer_utils.cc b/code/components/tflite-lib/tensorflow/lite/micro/flatbuffer_utils.cc
similarity index 70%
rename from code/components/tfmicro/tensorflow/lite/micro/flatbuffer_utils.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/flatbuffer_utils.cc
index ab3d98a7..9996172b 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/flatbuffer_utils.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/flatbuffer_utils.cc
@@ -61,4 +61,24 @@ uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx) {
return NumSubgraphOperators(subgraph);
}
+TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray(
+    const flatbuffers::Vector<int32_t>* flatbuffer_array) {
+  // On little-endian machines, TfLiteIntArray happens to have the same memory
+  // layout as flatbuffers::Vector<int32_t>, so we can reinterpret_cast the
+  // flatbuffer vector and avoid a copy and malloc.
+  // TODO(b/188459715): audit this usage of const_cast.
+  return const_cast<TfLiteIntArray*>(
+      reinterpret_cast<const TfLiteIntArray*>(flatbuffer_array));
+}
+
+TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray(
+    const flatbuffers::Vector<float>* flatbuffer_array) {
+  // On little-endian machines, TfLiteFloatArray happens to have the same memory
+  // layout as flatbuffers::Vector<float>, so we can reinterpret_cast the
+  // flatbuffer vector and avoid a copy and malloc.
+  // TODO(b/188459715): audit this usage of const_cast.
+  return const_cast<TfLiteFloatArray*>(
+      reinterpret_cast<const TfLiteFloatArray*>(flatbuffer_array));
+}
+
} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/flatbuffer_utils.h b/code/components/tflite-lib/tensorflow/lite/micro/flatbuffer_utils.h
similarity index 85%
rename from code/components/tfmicro/tensorflow/lite/micro/flatbuffer_utils.h
rename to code/components/tflite-lib/tensorflow/lite/micro/flatbuffer_utils.h
index ab675799..b4e0cdc2 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/flatbuffer_utils.h
+++ b/code/components/tflite-lib/tensorflow/lite/micro/flatbuffer_utils.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/flexbuffers.h"
+#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
@@ -51,6 +52,14 @@ class FlexbufferWrapper : public flexbuffers::Vector {
uint32_t NumSubgraphOperators(const SubGraph* subgraph);
uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx);
+// Converts a flatbuffer array to a TfLiteArray.
+// TODO(b/188459715): These function convert a const input to a non-const via a
+// const_cast. It is unclear exactly why this is required.
+TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray(
+    const flatbuffers::Vector<int32_t>* flatbuffer_array);
+TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray(
+    const flatbuffers::Vector<float>* flatbuffer_array);
+
} // namespace tflite
#endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/activation_utils.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/activation_utils.h
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/activation_utils.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/activations.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/activations.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/activations.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/activations.cc
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/activations.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/activations.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/activations.h
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/activations.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/activations_common.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/activations_common.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/activations_common.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/activations_common.cc
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/add.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.cc
similarity index 58%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/add.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/add.cc
index c02a7497..75523d14 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/kernels/add.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.cc
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,89 +23,15 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/kernels/add.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
namespace tflite {
-namespace ops {
-namespace micro {
-namespace add {
-
-constexpr int kInputTensor1 = 0;
-constexpr int kInputTensor2 = 1;
-constexpr int kOutputTensor = 0;
-
-struct OpData {
- bool requires_broadcast;
-
- // These fields are used in both the general 8-bit -> 8bit quantized path,
- // and the special 16-bit -> 16bit quantized path
- int input1_shift;
- int input2_shift;
- int32_t output_activation_min;
- int32_t output_activation_max;
-
- // These fields are used only in the general 8-bit -> 8bit quantized path
- int32_t input1_multiplier;
- int32_t input2_multiplier;
- int32_t output_multiplier;
- int output_shift;
- int left_shift;
- int32_t input1_offset;
- int32_t input2_offset;
- int32_t output_offset;
-
- // Used only for float evals:
- float output_activation_min_f32;
- float output_activation_max_f32;
-};
-
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,
- const TfLiteTensor* input1,
- const TfLiteTensor* input2, TfLiteTensor* output,
- OpData* data) {
- data->requires_broadcast = !HaveSameShapes(input1, input2);
-
- if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
- // 8bit -> 8bit general quantized path, with general rescalings
- data->input1_offset = -input1->params.zero_point;
- data->input2_offset = -input2->params.zero_point;
- data->output_offset = output->params.zero_point;
- data->left_shift = (output->type == kTfLiteInt16) ? 15 : 20;
-    const double twice_max_input_scale =
-        2 * static_cast<double>(
-                std::max(input1->params.scale, input2->params.scale));
-    const double real_input1_multiplier =
-        static_cast<double>(input1->params.scale) / twice_max_input_scale;
-    const double real_input2_multiplier =
-        static_cast<double>(input2->params.scale) / twice_max_input_scale;
-    const double real_output_multiplier =
-        twice_max_input_scale /
-        ((1 << data->left_shift) * static_cast<double>(output->params.scale));
-
- QuantizeMultiplierSmallerThanOneExp(
- real_input1_multiplier, &data->input1_multiplier, &data->input1_shift);
-
- QuantizeMultiplierSmallerThanOneExp(
- real_input2_multiplier, &data->input2_multiplier, &data->input2_shift);
-
- QuantizeMultiplierSmallerThanOneExp(
- real_output_multiplier, &data->output_multiplier, &data->output_shift);
-
- TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
- context, params->activation, output, &data->output_activation_min,
- &data->output_activation_max));
- } else if (output->type == kTfLiteFloat32) {
- CalculateActivationRange(params->activation,
- &data->output_activation_min_f32,
- &data->output_activation_max_f32);
- }
-
- return kTfLiteOk;
-}
void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params,
- const OpData* data, const TfLiteEvalTensor* input1,
+ const OpDataAdd* data, const TfLiteEvalTensor* input1,
const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) {
tflite::ArithmeticParams op_params;
SetActivationParams(data->output_activation_min_f32,
@@ -129,7 +55,7 @@ void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params,
}
TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
- TfLiteAddParams* params, const OpData* data,
+ TfLiteAddParams* params, const OpDataAdd* data,
const TfLiteEvalTensor* input1,
const TfLiteEvalTensor* input2,
TfLiteEvalTensor* output) {
@@ -192,51 +118,31 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
break;
}
default:
- TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
- TfLiteTypeGetName(output->type), output->type);
+ MicroPrintf("Type %s (%d) not supported.",
+ TfLiteTypeGetName(output->type), output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* AddInit(TfLiteContext* context, const char* buffer, size_t length) {
TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
- return context->AllocatePersistentBuffer(context, sizeof(OpData));
+ return context->AllocatePersistentBuffer(context, sizeof(OpDataAdd));
}
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
- TFLITE_DCHECK(node->user_data != nullptr);
- TFLITE_DCHECK(node->builtin_data != nullptr);
-
- const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
- TF_LITE_ENSURE(context, input1 != nullptr);
- const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
- TF_LITE_ENSURE(context, input2 != nullptr);
- TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- TF_LITE_ENSURE(context, output != nullptr);
-
-  OpData* data = static_cast<OpData*>(node->user_data);
-  auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data);
-
- TF_LITE_ENSURE_STATUS(
- CalculateOpData(context, params, input1, input2, output, data));
-
- return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) {
+  auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data);
TFLITE_DCHECK(node->user_data != nullptr);
-  const OpData* data = static_cast<const OpData*>(node->user_data);
+  const OpDataAdd* data = static_cast<const OpDataAdd*>(node->user_data);
const TfLiteEvalTensor* input1 =
- tflite::micro::GetEvalInput(context, node, kInputTensor1);
+ tflite::micro::GetEvalInput(context, node, kAddInputTensor1);
const TfLiteEvalTensor* input2 =
- tflite::micro::GetEvalInput(context, node, kInputTensor2);
+ tflite::micro::GetEvalInput(context, node, kAddInputTensor2);
TfLiteEvalTensor* output =
- tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+ tflite::micro::GetEvalOutput(context, node, kAddOutputTensor);
if (output->type == kTfLiteFloat32) {
EvalAdd(context, node, params, data, input1, input2, output);
@@ -244,27 +150,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
input1, input2, output));
} else {
- TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
- TfLiteTypeGetName(output->type), output->type);
+ MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type),
+ output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
-} // namespace add
-
TfLiteRegistration Register_ADD() {
- return {/*init=*/add::Init,
+ return {/*init=*/AddInit,
/*free=*/nullptr,
- /*prepare=*/add::Prepare,
- /*invoke=*/add::Eval,
+ /*prepare=*/AddPrepare,
+ /*invoke=*/AddEval,
/*profiling_string=*/nullptr,
/*builtin_code=*/0,
/*custom_name=*/nullptr,
/*version=*/0};
}
-} // namespace micro
-} // namespace ops
} // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.h
new file mode 100644
index 00000000..88526153
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.h
@@ -0,0 +1,64 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite {
+
+extern const int kAddInputTensor1;
+extern const int kAddInputTensor2;
+extern const int kAddOutputTensor;
+
+struct OpDataAdd {
+ bool requires_broadcast;
+
+ // These fields are used in both the general 8-bit -> 8bit quantized path,
+ // and the special 16-bit -> 16bit quantized path
+ int input1_shift;
+ int input2_shift;
+ int32_t output_activation_min;
+ int32_t output_activation_max;
+
+ // These fields are used only in the general 8-bit -> 8bit quantized path
+ int32_t input1_multiplier;
+ int32_t input2_multiplier;
+ int32_t output_multiplier;
+ int output_shift;
+ int left_shift;
+ int32_t input1_offset;
+ int32_t input2_offset;
+ int32_t output_offset;
+
+ // Used only for float evals:
+ float output_activation_min_f32;
+ float output_activation_max_f32;
+};
+
+TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params,
+ const TfLiteTensor* input1,
+ const TfLiteTensor* input2,
+ TfLiteTensor* output, OpDataAdd* data);
+
+TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node);
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_common.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_common.cc
new file mode 100644
index 00000000..3d0c841e
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_common.cc
@@ -0,0 +1,99 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/reference/add.h"
+#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h"
+#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/kernels/add.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/memory_helpers.h"
+
+namespace tflite {
+
+const int kAddInputTensor1 = 0;
+const int kAddInputTensor2 = 1;
+const int kAddOutputTensor = 0;
+
+TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params,
+ const TfLiteTensor* input1,
+ const TfLiteTensor* input2,
+ TfLiteTensor* output, OpDataAdd* data) {
+ data->requires_broadcast = !HaveSameShapes(input1, input2);
+
+ if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
+ // 8bit -> 8bit general quantized path, with general rescalings
+ data->input1_offset = -input1->params.zero_point;
+ data->input2_offset = -input2->params.zero_point;
+ data->output_offset = output->params.zero_point;
+ data->left_shift = (output->type == kTfLiteInt16) ? 15 : 20;
+ const double twice_max_input_scale =
+ 2 * static_cast<double>(
+ std::max(input1->params.scale, input2->params.scale));
+ const double real_input1_multiplier =
+ static_cast<double>(input1->params.scale) / twice_max_input_scale;
+ const double real_input2_multiplier =
+ static_cast<double>(input2->params.scale) / twice_max_input_scale;
+ const double real_output_multiplier =
+ twice_max_input_scale /
+ ((1 << data->left_shift) * static_cast<double>(output->params.scale));
+
+ QuantizeMultiplierSmallerThanOneExp(
+ real_input1_multiplier, &data->input1_multiplier, &data->input1_shift);
+
+ QuantizeMultiplierSmallerThanOneExp(
+ real_input2_multiplier, &data->input2_multiplier, &data->input2_shift);
+
+ QuantizeMultiplierSmallerThanOneExp(
+ real_output_multiplier, &data->output_multiplier, &data->output_shift);
+
+ TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+ context, params->activation, output, &data->output_activation_min,
+ &data->output_activation_max));
+ } else if (output->type == kTfLiteFloat32) {
+ CalculateActivationRange(params->activation,
+ &data->output_activation_min_f32,
+ &data->output_activation_max_f32);
+ }
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) {
+ TFLITE_DCHECK(node->user_data != nullptr);
+ TFLITE_DCHECK(node->builtin_data != nullptr);
+
+ const TfLiteTensor* input1 = GetInput(context, node, kAddInputTensor1);
+ TF_LITE_ENSURE(context, input1 != nullptr);
+ const TfLiteTensor* input2 = GetInput(context, node, kAddInputTensor2);
+ TF_LITE_ENSURE(context, input2 != nullptr);
+ TfLiteTensor* output = GetOutput(context, node, kAddOutputTensor);
+ TF_LITE_ENSURE(context, output != nullptr);
+
+ OpDataAdd* data = static_cast<OpDataAdd*>(node->user_data);
+ auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data);
+
+ TF_LITE_ENSURE_STATUS(
+ CalculateOpDataAdd(context, params, input1, input2, output, data));
+
+ return kTfLiteOk;
+}
+
+} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/add_n.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_n.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/add_n.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/add_n.cc
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/arg_min_max.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/arg_min_max.cc
similarity index 97%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/arg_min_max.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/arg_min_max.cc
index 12ac0019..8217a4a0 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/kernels/arg_min_max.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/arg_min_max.cc
@@ -66,9 +66,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
break;
- case kTfLiteUInt8:
- TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
- break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
break;
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/assign_variable.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/assign_variable.cc
new file mode 100644
index 00000000..a583a067
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/assign_variable.cc
@@ -0,0 +1,114 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <stddef.h>
+
+#include <cstring>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
+#include "tensorflow/lite/micro/micro_graph.h"
+#include "tensorflow/lite/micro/micro_resource_variable.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+namespace {
+
+constexpr int kInputVariableId = 0;
+constexpr int kInputValue = 1;
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
+
+ // This must be a TfLiteEvalTensor despite this being in Prepare, because
+ // CreateTensor allocates a temp tensor from the flatbuffer, which does not
+ // contain the correct ID generated within the VAR_HANDLE op. EvalTensors are
+ // all allocated during StartModelAllocation which happens before
+ // init/prepare, and VAR_HANDLE Prepare() references its own op_data in the
+ // TfLiteEvalTensor, so reading the ID here is valid.
+ const TfLiteEvalTensor* input_resource_id_tensor =
+ tflite::micro::GetEvalInput(context, node, kInputVariableId);
+ TFLITE_DCHECK(input_resource_id_tensor != nullptr);
+ TF_LITE_ENSURE(context, (input_resource_id_tensor->type == kTfLiteResource ||
+ input_resource_id_tensor->type == kTfLiteInt32));
+ TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor->dims), 1);
+
+ const TfLiteTensor* input_value = GetInput(context, node, kInputValue);
+ TFLITE_DCHECK(input_value != nullptr);
+
+ // Casting to TfliteIntArray is required since we are re-using
+ // GetExecutionPlan from TfLiteContext. On TFLM this method returns a
+ // MicroGraph.
+ // TODO(b/188226309): Design a cleaner way to get a graph from kernel context.
+ MicroGraph* graph_info;
+ context->GetExecutionPlan(context,
+ reinterpret_cast<TfLiteIntArray**>(&graph_info));
+ MicroResourceVariables* resources = graph_info->GetResourceVariables();
+ TF_LITE_ENSURE_OK(context,
+ resources->Allocate(input_resource_id_tensor->data.i32[0],
+ context, input_value));
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ const TfLiteEvalTensor* input_id =
+ tflite::micro::GetEvalInput(context, node, kInputVariableId);
+ TFLITE_DCHECK(input_id != nullptr);
+
+ const TfLiteEvalTensor* input_value =
+ tflite::micro::GetEvalInput(context, node, kInputValue);
+ TFLITE_DCHECK(input_value != nullptr);
+
+ // Casting to TfliteIntArray is required since we are re-using
+ // GetExecutionPlan from TfLiteContext. On TFLM this method returns a
+ // MicroGraph.
+ // TODO(b/188226309): Design a cleaner way to get a graph from kernel context.
+ MicroGraph* graph_info;
+ context->GetExecutionPlan(context,
+ reinterpret_cast<TfLiteIntArray**>(&graph_info));
+ MicroResourceVariables* resources = graph_info->GetResourceVariables();
+ if (resources == nullptr) {
+ MicroPrintf(
+ "ASSIGN_VARIABLE requires resource variables. Please create "
+ "ResourceVariables and pass it to the interpreter.");
+ return kTfLiteError;
+ }
+ TF_LITE_ENSURE_OK(context,
+ resources->Assign(input_id->data.i32[0], input_value));
+ return kTfLiteOk;
+}
+
+} // namespace.
+
+TfLiteRegistration Register_ASSIGN_VARIABLE() {
+ return {/*init=*/nullptr,
+ /*free=*/nullptr,
+ /*prepare=*/Prepare,
+ /*invoke=*/Eval,
+ /*profiling_string=*/nullptr,
+ /*builtin_code=*/0,
+ /*custom_name=*/nullptr,
+ /*version=*/0};
+}
+
+} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/batch_to_space_nd.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/batch_to_space_nd.cc
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/call_once.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/call_once.cc
new file mode 100644
index 00000000..97fded0c
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/call_once.cc
@@ -0,0 +1,104 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <stddef.h>
+
+#include <cstring>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/memory_helpers.h"
+#include "tensorflow/lite/micro/micro_graph.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+
+namespace {
+
+struct OpData {
+ int init_subgraph_index;
+ bool has_run;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+ TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+ return context->AllocatePersistentBuffer(context, sizeof(OpData));
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
+ const auto* params =
+ reinterpret_cast<const TfLiteCallOnceParams*>(node->builtin_data);
+ op_data->init_subgraph_index = params->init_subgraph_index;
+ op_data->has_run = false;
+
+ TF_LITE_ENSURE(context, NumInputs(node) == 0);
+ TF_LITE_ENSURE(context, NumOutputs(node) == 0);
+
+ // Casting to TfliteIntArray is required since we are re-using
+ // GetExecutionPlan from TfLiteContext. On TFLM this method returns a
+ // MicroGraph.
+ // TODO(b/188226309): Design a cleaner way to get a graph from kernel context.
+ MicroGraph* graph_info;
+ context->GetExecutionPlan(context,
+ reinterpret_cast<TfLiteIntArray**>(&graph_info));
+
+ TF_LITE_ENSURE(context,
+ op_data->init_subgraph_index < graph_info->NumSubgraphs());
+
+ return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
+
+ // Call once only runs one time then is a no-op for every subsequent call.
+ if (op_data->has_run) {
+ return kTfLiteOk;
+ }
+
+ // Casting to TfliteIntArray is required since we are re-using
+ // GetExecutionPlan from TfLiteContext. On TFLM this method returns a
+ // MicroGraph.
+ // TODO(b/188226309): Design a cleaner way to get a graph from kernel context.
+ MicroGraph* graph_info;
+ context->GetExecutionPlan(context,
+ reinterpret_cast<TfLiteIntArray**>(&graph_info));
+
+ TF_LITE_ENSURE_OK(context,
+ graph_info->InvokeSubgraph(op_data->init_subgraph_index));
+
+ op_data->has_run = true;
+
+ return kTfLiteOk;
+}
+
+} // namespace.
+
+TfLiteRegistration Register_CALL_ONCE() {
+ return {/*init=*/Init,
+ /*free=*/nullptr,
+ /*prepare=*/Prepare,
+ /*invoke=*/Eval,
+ /*profiling_string=*/nullptr,
+ /*builtin_code=*/0,
+ /*custom_name=*/nullptr,
+ /*version=*/0};
+}
+
+} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/cast.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/cast.cc
similarity index 80%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/cast.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/cast.cc
index b0462ed6..0314e523 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/kernels/cast.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/cast.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_error_reporter.h"
namespace tflite {
namespace {
@@ -48,13 +49,19 @@ TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in,
case kTfLiteInt8:
copyCast(in, out->data.int8, num_elements);
break;
+ case kTfLiteInt16:
+ copyCast(in, out->data.i16, num_elements);
+ break;
+ case kTfLiteInt32:
+ copyCast(in, out->data.i32, num_elements);
+ break;
case kTfLiteFloat32:
copyCast(in, tflite::micro::GetTensorData<float>(out), num_elements);
break;
default:
// Unsupported type.
- TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.",
- TfLiteTypeGetName(out->type), out->type);
+ MicroPrintf("Output type %s (%d) not supported.",
+ TfLiteTypeGetName(out->type), out->type);
}
return kTfLiteOk;
}
@@ -70,13 +77,19 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
switch (input->type) {
case kTfLiteInt8:
return copyToTensor(context, input->data.int8, output, num_elements);
+ case kTfLiteInt16:
+ return copyToTensor(context, tflite::micro::GetTensorData<int16_t>(input),
+ output, num_elements);
+ case kTfLiteInt32:
+ return copyToTensor(context, tflite::micro::GetTensorData<int32_t>(input),
+ output, num_elements);
case kTfLiteFloat32:
return copyToTensor(context, tflite::micro::GetTensorData<float>(input),
output, num_elements);
default:
// Unsupported type.
- TF_LITE_KERNEL_LOG(context, "Input type %s (%d) not supported.",
- TfLiteTypeGetName(input->type), input->type);
+ MicroPrintf("Input type %s (%d) not supported.",
+ TfLiteTypeGetName(input->type), input->type);
}
return kTfLiteOk;
}
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/ceil.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/ceil.cc
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/ceil.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/ceil.cc
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.cc
similarity index 51%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.cc
index 37854c16..bda3e66a 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include "tensorflow/lite/micro/kernels/circular_buffer.h"
+
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
@@ -45,43 +47,17 @@ limitations under the License.
* - Input and output quantization params must be identical.
*/
namespace tflite {
-namespace ops {
-namespace micro {
-namespace circular_buffer {
-namespace {
-
-// The CircularBuffer op has one input and one output tensor.
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-// Indices into the init flexbuffer's vector.
-// The parameter's name is in the comment that follows.
-// Elements in the vectors are ordered alphabetically by parameter name.
-constexpr int kCyclesMaxIndex = 0; // 'cycles_max'
-
-// TODO(b/149795762): Add this to TfLiteStatus enum.
-constexpr TfLiteStatus kTfLiteAbort = static_cast<TfLiteStatus>(-9);
-
-// These fields control the stride period of a strided streaming model. This op
-// returns kTfLiteAbort until cycles_until_run-- is zero. At this time,
-// cycles_until_run is reset to cycles_max.
-struct OpData {
- int cycles_until_run;
- int cycles_max;
-};
-
-} // namespace
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+void* CircularBufferInit(TfLiteContext* context, const char* buffer,
+ size_t length) {
TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
- OpData* op_data = static_cast<OpData*>(
- context->AllocatePersistentBuffer(context, sizeof(OpData)));
+ OpDataCircularBuffer* op_data = static_cast<OpDataCircularBuffer*>(
+ context->AllocatePersistentBuffer(context, sizeof(OpDataCircularBuffer)));
if (buffer != nullptr && length > 0) {
const uint8_t* buffer_t = reinterpret_cast(buffer);
tflite::FlexbufferWrapper wrapper(buffer_t, length);
- op_data->cycles_max = wrapper.ElementAsInt32(kCyclesMaxIndex);
+ op_data->cycles_max = wrapper.ElementAsInt32(kCircularBufferCyclesMaxIndex);
} else {
op_data->cycles_max = 0;
}
@@ -89,54 +65,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return op_data;
}
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
- const TfLiteTensor* input = GetInput(context, node, kInputTensor);
- TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-
- TFLITE_DCHECK(node->user_data != nullptr);
- OpData* op_data = static_cast<OpData*>(node->user_data);
-
- TF_LITE_ENSURE(context, input != nullptr);
- TF_LITE_ENSURE(context, output != nullptr);
- TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]);
- TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]);
- TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]);
- TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]);
-
- TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
- // The circular buffer custom operator currently only supports int8.
- TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
-
- if (op_data->cycles_max <= 0) {
- // The last circular buffer layer simply accumulates outputs, and does not
- // run periodically.
- // TODO(b/150001379): Move this special case logic to the tflite flatbuffer.
- static int cb_prepare_count = 0;
- cb_prepare_count++;
- // These checks specifically work for the only two streaming models
- // supported on TFLM. They use the shape of the output tensor along with the
- // layer number to determine if the circular buffer period should be 1 or 2.
-
- // These models are outlined int the following documents:
- // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing
- // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing
- if (output->dims->data[1] == 5 || output->dims->data[1] == 13 ||
- output->dims->data[1] == 25 ||
- (cb_prepare_count == 5 && output->dims->data[2] == 2 &&
- output->dims->data[3] == 96)) {
- op_data->cycles_max = 1;
- cb_prepare_count = 0;
- } else {
- op_data->cycles_max = 2;
- }
- }
- op_data->cycles_until_run = op_data->cycles_max;
- node->user_data = op_data;
-
- return kTfLiteOk;
-}
-
// Shifts buffer over by the output depth, and write new input to end of buffer.
// num_slots is the number of samples stored in the output buffer.
// depth is the size of each sample.
@@ -145,14 +73,15 @@ void EvalInt8(const int8_t* input, int num_slots, int depth, int8_t* output) {
memcpy(&output[(num_slots - 1) * depth], input, depth);
}
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+TfLiteStatus CircularBufferEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteEvalTensor* input =
- tflite::micro::GetEvalInput(context, node, kInputTensor);
+ tflite::micro::GetEvalInput(context, node, kCircularBufferInputTensor);
TfLiteEvalTensor* output =
- tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+ tflite::micro::GetEvalOutput(context, node, kCircularBufferOutputTensor);
TFLITE_DCHECK(node->user_data != nullptr);
- OpData* data = reinterpret_cast<OpData*>(node->user_data);
+ OpDataCircularBuffer* data =
+ reinterpret_cast<OpDataCircularBuffer*>(node->user_data);
int num_slots = output->dims->data[1];
int depth = output->dims->data[2] * output->dims->data[3];
@@ -178,13 +107,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
-} // namespace circular_buffer
-
TfLiteRegistration* Register_CIRCULAR_BUFFER() {
- static TfLiteRegistration r = {/*init=*/circular_buffer::Init,
+ static TfLiteRegistration r = {/*init=*/CircularBufferInit,
/*free=*/nullptr,
- /*prepare=*/circular_buffer::Prepare,
- /*invoke=*/circular_buffer::Eval,
+ /*prepare=*/CircularBufferPrepare,
+ /*invoke=*/CircularBufferEval,
/*profiling_string=*/nullptr,
/*builtin_code=*/0,
/*custom_name=*/nullptr,
@@ -192,6 +119,4 @@ TfLiteRegistration* Register_CIRCULAR_BUFFER() {
return &r;
}
-} // namespace micro
-} // namespace ops
} // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.h
new file mode 100644
index 00000000..51adf746
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.h
@@ -0,0 +1,48 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+
+namespace tflite {
+
+// The CircularBuffer op has one input and one output tensor.
+extern const int kCircularBufferInputTensor;
+extern const int kCircularBufferOutputTensor;
+
+// Indices into the init flexbuffer's vector.
+// The parameter's name is in the comment that follows.
+// Elements in the vectors are ordered alphabetically by parameter name.
+extern const int kCircularBufferCyclesMaxIndex; // 'cycles_max'
+
+// TODO(b/149795762): Add this to TfLiteStatus enum.
+extern const TfLiteStatus kTfLiteAbort;
+
+// These fields control the stride period of a strided streaming model. This op
+// returns kTfLiteAbort until cycles_until_run-- is zero. At this time,
+// cycles_until_run is reset to cycles_max.
+struct OpDataCircularBuffer {
+ int cycles_until_run;
+ int cycles_max;
+};
+
+TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node);
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer_common.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer_common.cc
new file mode 100644
index 00000000..0bb4d476
--- /dev/null
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer_common.cc
@@ -0,0 +1,91 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/flatbuffer_utils.h"
+#include "tensorflow/lite/micro/kernels/circular_buffer.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+
+namespace tflite {
+
+// The CircularBuffer op has one input and one output tensor.
+const int kCircularBufferInputTensor = 0;
+const int kCircularBufferOutputTensor = 0;
+
+// Indices into the init flexbuffer's vector.
+// The parameter's name is in the comment that follows.
+// Elements in the vectors are ordered alphabetically by parameter name.
+const int kCircularBufferCyclesMaxIndex = 0; // 'cycles_max'
+
+// TODO(b/149795762): Add this to TfLiteStatus enum.
+const TfLiteStatus kTfLiteAbort = static_cast<TfLiteStatus>(-9);
+
+TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node) {
+ const TfLiteTensor* input =
+ GetInput(context, node, kCircularBufferInputTensor);
+ TfLiteTensor* output = GetOutput(context, node, kCircularBufferOutputTensor);
+
+ TFLITE_DCHECK(node->user_data != nullptr);
+ OpDataCircularBuffer* op_data =
+ static_cast<OpDataCircularBuffer*>(node->user_data);
+
+ TF_LITE_ENSURE(context, input != nullptr);
+ TF_LITE_ENSURE(context, output != nullptr);
+ TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]);
+ TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]);
+ TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]);
+ TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]);
+
+ TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
+
+ // The circular buffer custom operator currently only supports int8.
+ TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
+
+ if (op_data->cycles_max <= 0) {
+ // The last circular buffer layer simply accumulates outputs, and does not
+ // run periodically.
+ // TODO(b/150001379): Move this special case logic to the tflite flatbuffer.
+ static int cb_prepare_count = 0;
+ cb_prepare_count++;
+ // These checks specifically work for the only two streaming models
+ // supported on TFLM. They use the shape of the output tensor along with the
+ // layer number to determine if the circular buffer period should be 1 or 2.
+
+ // These models are outlined int the following documents:
+ // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing
+ // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing
+ if (output->dims->data[1] == 5 || output->dims->data[1] == 13 ||
+ output->dims->data[1] == 25 ||
+ (cb_prepare_count == 5 && output->dims->data[2] == 2 &&
+ output->dims->data[3] == 96)) {
+ op_data->cycles_max = 1;
+ cb_prepare_count = 0;
+ } else {
+ op_data->cycles_max = 2;
+ }
+ }
+ op_data->cycles_until_run = op_data->cycles_max;
+ node->user_data = op_data;
+
+ return kTfLiteOk;
+}
+
+} // namespace tflite
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h
similarity index 100%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer_flexbuffers_generated_data.h
diff --git a/code/components/tfmicro/tensorflow/lite/micro/kernels/comparisons.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/comparisons.cc
similarity index 87%
rename from code/components/tfmicro/tensorflow/lite/micro/kernels/comparisons.cc
rename to code/components/tflite-lib/tensorflow/lite/micro/kernels/comparisons.cc
index 35007640..eb39d9ea 100644
--- a/code/components/tfmicro/tensorflow/lite/micro/kernels/comparisons.cc
+++ b/code/components/tflite-lib/tensorflow/lite/micro/kernels/comparisons.cc
@@ -104,19 +104,6 @@ TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) {
tflite::micro::GetTensorData(input2), output_shape,
output_data);
break;
- case kTfLiteUInt8:
- requires_broadcast
- ? reference_ops::Broadcast4DSlowEqualWithScaling(
- data->params, input1_shape,
- tflite::micro::GetTensorData