bool VerifyBufferFromStart(const char *identifier, size_t start) {
- if (identifier && (size_ < 2 * sizeof(flatbuffers::uoffset_t) ||
- !BufferHasIdentifier(buf_ + start, identifier))) {
+ if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) &&
+ BufferHasIdentifier(buf_ + start, identifier)))) {
return false;
}
@@ -2452,12 +2470,26 @@ class Table {
    return field_offset ? reinterpret_cast<P>(p) : nullptr;
}
+  template<typename T>
+  flatbuffers::Optional<T> GetOptional(voffset_t field) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+    return field_offset ? Optional<T>(static_cast<T>(ReadScalar<T>(p)))
+                        : Optional<T>();
+ }
+
  template<typename T> bool SetField(voffset_t field, T val, T def) {
auto field_offset = GetOptionalFieldOffset(field);
if (!field_offset) return IsTheSameAs(val, def);
WriteScalar(data_ + field_offset, val);
return true;
}
+  template<typename T> bool SetField(voffset_t field, T val) {
+ auto field_offset = GetOptionalFieldOffset(field);
+ if (!field_offset) return false;
+ WriteScalar(data_ + field_offset, val);
+ return true;
+ }
bool SetPointer(voffset_t field, const uint8_t *val) {
auto field_offset = GetOptionalFieldOffset(field);
@@ -2525,6 +2557,17 @@ class Table {
uint8_t data_[1];
};
+// This specialization allows avoiding warnings like:
+// MSVC C4800: type: forcing value to bool 'true' or 'false'.
+template<>
+inline flatbuffers::Optional<bool> Table::GetOptional<bool>(
+ voffset_t field) const {
+ auto field_offset = GetOptionalFieldOffset(field);
+ auto p = data_ + field_offset;
+  return field_offset ? Optional<bool>(ReadScalar<uint8_t>(p) != 0)
+                      : Optional<bool>();
+}
+
template<typename T>
void FlatBufferBuilder::Required(Offset<T> table, voffset_t field) {
  auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
@@ -2704,7 +2747,7 @@ inline const char * const *ElementaryTypeNames() {
// Basic type info cost just 16bits per field!
struct TypeCode {
uint16_t base_type : 4; // ElementaryType
- uint16_t is_vector : 1;
+ uint16_t is_repeating : 1; // Either vector (in table) or array (in struct)
int16_t sequence_ref : 11; // Index into type_refs below, or -1 for none.
};
@@ -2720,6 +2763,7 @@ struct TypeTable {
size_t num_elems; // of type_codes, values, names (but not type_refs).
const TypeCode *type_codes; // num_elems count
const TypeFunction *type_refs; // less than num_elems entries (see TypeCode).
+ const int16_t *array_sizes; // less than num_elems entries (see TypeCode).
const int64_t *values; // Only set for non-consecutive enum/union or structs.
const char *const *names; // Only set if compiled with --reflect-names.
};
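
// Illustrative sketch, not part of the vendored patch: with the GetOptional and
// single-argument SetField additions to flatbuffers::Table above, a generated
// accessor pair for an optional scalar field looks roughly like this. The field
// name and vtable offset below are made up for illustration.
#include <cstdint>
#include "flatbuffers/flatbuffers.h"

struct MonsterSketch : private flatbuffers::Table {
  static const flatbuffers::voffset_t VT_MANA = 6;  // hypothetical vtable offset
  // An absent field yields nullopt instead of a schema default value.
  flatbuffers::Optional<int16_t> mana() const {
    return GetOptional<int16_t>(VT_MANA);
  }
  // In-place mutation succeeds only if the field was actually written to the buffer.
  bool mutate_mana(int16_t v) { return SetField<int16_t>(VT_MANA, v); }
};
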
diff --git a/code/lib/tfmicro/flatbuffers/stl_emulation.h b/code/lib/tfmicro/flatbuffers/stl_emulation.h
index 8bae61bf..c9a1a8bf 100644
--- a/code/lib/tfmicro/flatbuffers/stl_emulation.h
+++ b/code/lib/tfmicro/flatbuffers/stl_emulation.h
@@ -18,6 +18,7 @@
#define FLATBUFFERS_STL_EMULATION_H_
// clang-format off
+#include "flatbuffers/base.h"
#include <string>
#include <type_traits>
@@ -25,6 +26,17 @@
#include <memory>
#include <limits>
+// Detect C++17 compatible compiler.
+// __cplusplus >= 201703L - a compiler has support of 'static inline' variables.
+#if defined(FLATBUFFERS_USE_STD_OPTIONAL) \
+ || (defined(__cplusplus) && __cplusplus >= 201703L) \
+ || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))
+  #include <optional>
+ #ifndef FLATBUFFERS_USE_STD_OPTIONAL
+ #define FLATBUFFERS_USE_STD_OPTIONAL
+ #endif
+#endif
+
#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
#define FLATBUFFERS_CPP98_STL
#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
@@ -33,16 +45,6 @@
  #include <cctype>
#endif // defined(FLATBUFFERS_CPP98_STL)
-// Check if we can use template aliases
-// Not possible if Microsoft Compiler before 2012
-// Possible is the language feature __cpp_alias_templates is defined well
-// Or possible if the C++ std is C+11 or newer
-#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
- || (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
- || (defined(__cplusplus) && __cplusplus >= 201103L)
- #define FLATBUFFERS_TEMPLATES_ALIASES
-#endif
-
// This header provides backwards compatibility for C++98 STLs like stlport.
namespace flatbuffers {
@@ -190,7 +192,7 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
// MSVC 2010 doesn't support C++11 aliases.
// We're manually "aliasing" the class here as we want to bring unique_ptr
// into the flatbuffers namespace. We have unique_ptr in the flatbuffers
- // namespace we have a completely independent implemenation (see below)
+ // namespace we have a completely independent implementation (see below)
// for C++98 STL implementations.
  template <class T> class unique_ptr : public std::unique_ptr<T> {
public:
@@ -302,6 +304,146 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
#endif // !FLATBUFFERS_CPP98_STL
+#ifdef FLATBUFFERS_USE_STD_OPTIONAL
+template<class T>
+using Optional = std::optional<T>;
+using nullopt_t = std::nullopt_t;
+inline constexpr nullopt_t nullopt = std::nullopt;
+
+#else
+// Limited implementation of Optional type for a scalar T.
+// This implementation limited by trivial types compatible with
+// std::is_arithmetic or std::is_enum type traits.
+
+// A tag to indicate an empty flatbuffers::optional.
+struct nullopt_t {
+ explicit FLATBUFFERS_CONSTEXPR_CPP11 nullopt_t(int) {}
+};
+
+#if defined(FLATBUFFERS_CONSTEXPR_DEFINED)
+ namespace internal {
+    template<class> struct nullopt_holder {
+ static constexpr nullopt_t instance_ = nullopt_t(0);
+ };
+    template<class Dummy>
+    constexpr nullopt_t nullopt_holder<Dummy>::instance_;
+ }
+  static constexpr const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
+
+#else
+ namespace internal {
+    template<class> struct nullopt_holder {
+ static const nullopt_t instance_;
+ };
+    template<class Dummy>
+    const nullopt_t nullopt_holder<Dummy>::instance_ = nullopt_t(0);
+ }
+  static const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
+
+#endif
+
+template<class T>
+class Optional FLATBUFFERS_FINAL_CLASS {
+  // Non-scalar 'T' would extremely complicate Optional<T>.
+  // Use is_scalar<T> checking because flatbuffers::is_arithmetic<T>
+  // isn't implemented.
+  static_assert(flatbuffers::is_scalar<T>::value, "unexpected type T");
+
+ public:
+ ~Optional() {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional() FLATBUFFERS_NOEXCEPT
+ : value_(), has_value_(false) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional(nullopt_t) FLATBUFFERS_NOEXCEPT
+ : value_(), has_value_(false) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional(T val) FLATBUFFERS_NOEXCEPT
+ : value_(val), has_value_(true) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP11 Optional(const Optional &other) FLATBUFFERS_NOEXCEPT
+ : value_(other.value_), has_value_(other.has_value_) {}
+
+ FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(const Optional &other) FLATBUFFERS_NOEXCEPT {
+ value_ = other.value_;
+ has_value_ = other.has_value_;
+ return *this;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(nullopt_t) FLATBUFFERS_NOEXCEPT {
+ value_ = T();
+ has_value_ = false;
+ return *this;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(T val) FLATBUFFERS_NOEXCEPT {
+ value_ = val;
+ has_value_ = true;
+ return *this;
+ }
+
+ void reset() FLATBUFFERS_NOEXCEPT {
+ *this = nullopt;
+ }
+
+ void swap(Optional &other) FLATBUFFERS_NOEXCEPT {
+ std::swap(value_, other.value_);
+ std::swap(has_value_, other.has_value_);
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 FLATBUFFERS_EXPLICIT_CPP11 operator bool() const FLATBUFFERS_NOEXCEPT {
+ return has_value_;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 bool has_value() const FLATBUFFERS_NOEXCEPT {
+ return has_value_;
+ }
+
+ FLATBUFFERS_CONSTEXPR_CPP11 const T& operator*() const FLATBUFFERS_NOEXCEPT {
+ return value_;
+ }
+
+ const T& value() const {
+ FLATBUFFERS_ASSERT(has_value());
+ return value_;
+ }
+
+ T value_or(T default_value) const FLATBUFFERS_NOEXCEPT {
+ return has_value() ? value_ : default_value;
+ }
+
+ private:
+ T value_;
+ bool has_value_;
+};
+
+template<class T>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& opt, nullopt_t) FLATBUFFERS_NOEXCEPT {
+ return !opt;
+}
+template<class T>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(nullopt_t, const Optional<T>& opt) FLATBUFFERS_NOEXCEPT {
+ return !opt;
+}
+
+template<class T, class U>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const U& rhs) FLATBUFFERS_NOEXCEPT {
+  return static_cast<bool>(lhs) && (*lhs == rhs);
+}
+
+template<class T, class U>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const T& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
+  return static_cast<bool>(rhs) && (lhs == *rhs);
+}
+
+template<class T, class U>
+FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
+  return static_cast<bool>(lhs) != static_cast<bool>(rhs)
+             ? false
+             : !static_cast<bool>(lhs) ? true : (*lhs == *rhs);
+}
+#endif // FLATBUFFERS_USE_STD_OPTIONAL
+
} // namespace flatbuffers
#endif // FLATBUFFERS_STL_EMULATION_H_
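
// Illustrative sketch, not part of the vendored patch: the emulated
// flatbuffers::Optional above mirrors the small std::optional subset that
// generated code relies on, so either backend behaves the same way here.
#include <cstdint>
#include "flatbuffers/stl_emulation.h"

inline int32_t DemoRead(flatbuffers::Optional<int32_t> maybe) {
  if (maybe == flatbuffers::nullopt) return -1;  // an empty Optional compares equal to nullopt
  return *maybe;  // dereference only once has_value() is known to be true
}
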
diff --git a/code/lib/tfmicro/kissfft/COPYING b/code/lib/tfmicro/kissfft/COPYING
deleted file mode 100644
index 2fc6685a..00000000
--- a/code/lib/tfmicro/kissfft/COPYING
+++ /dev/null
@@ -1,11 +0,0 @@
-Copyright (c) 2003-2010 Mark Borgerding
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/code/lib/tfmicro/kissfft/_kiss_fft_guts.h b/code/lib/tfmicro/kissfft/_kiss_fft_guts.h
deleted file mode 100644
index ba661444..00000000
--- a/code/lib/tfmicro/kissfft/_kiss_fft_guts.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
-Copyright (c) 2003-2010, Mark Borgerding
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* kiss_fft.h
- defines kiss_fft_scalar as either short or a float type
- and defines
- typedef struct { kiss_fft_scalar r; kiss_fft_scalar i; }kiss_fft_cpx; */
-#include "kiss_fft.h"
-#include <limits.h>
-
-#define MAXFACTORS 32
-/* e.g. an fft of length 128 has 4 factors
- as far as kissfft is concerned
- 4*4*4*2
- */
-
-struct kiss_fft_state{
- int nfft;
- int inverse;
- int factors[2*MAXFACTORS];
- kiss_fft_cpx twiddles[1];
-};
-
-/*
- Explanation of macros dealing with complex math:
-
- C_MUL(m,a,b) : m = a*b
- C_FIXDIV( c , div ) : if a fixed point impl., c /= div. noop otherwise
- C_SUB( res, a,b) : res = a - b
- C_SUBFROM( res , a) : res -= a
- C_ADDTO( res , a) : res += a
- * */
-#ifdef FIXED_POINT
-#if (FIXED_POINT==32)
-# define FRACBITS 31
-# define SAMPPROD int64_t
-#define SAMP_MAX 2147483647
-#else
-# define FRACBITS 15
-# define SAMPPROD int32_t
-#define SAMP_MAX 32767
-#endif
-
-#define SAMP_MIN -SAMP_MAX
-
-#if defined(CHECK_OVERFLOW)
-# define CHECK_OVERFLOW_OP(a,op,b) \
- if ( (SAMPPROD)(a) op (SAMPPROD)(b) > SAMP_MAX || (SAMPPROD)(a) op (SAMPPROD)(b) < SAMP_MIN ) { \
- fprintf(stderr,"WARNING:overflow @ " __FILE__ "(%d): (%d " #op" %d) = %ld\n",__LINE__,(a),(b),(SAMPPROD)(a) op (SAMPPROD)(b) ); }
-#endif
-
-
-# define smul(a,b) ( (SAMPPROD)(a)*(b) )
-# define sround( x ) (kiss_fft_scalar)( ( (x) + (1<<(FRACBITS-1)) ) >> FRACBITS )
-
-# define S_MUL(a,b) sround( smul(a,b) )
-
-# define C_MUL(m,a,b) \
- do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \
- (m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0)
-
-# define DIVSCALAR(x,k) \
- (x) = sround( smul( x, SAMP_MAX/k ) )
-
-# define C_FIXDIV(c,div) \
- do { DIVSCALAR( (c).r , div); \
- DIVSCALAR( (c).i , div); }while (0)
-
-# define C_MULBYSCALAR( c, s ) \
- do{ (c).r = sround( smul( (c).r , s ) ) ;\
- (c).i = sround( smul( (c).i , s ) ) ; }while(0)
-
-#else /* not FIXED_POINT*/
-
-# define S_MUL(a,b) ( (a)*(b) )
-#define C_MUL(m,a,b) \
- do{ (m).r = (a).r*(b).r - (a).i*(b).i;\
- (m).i = (a).r*(b).i + (a).i*(b).r; }while(0)
-# define C_FIXDIV(c,div) /* NOOP */
-# define C_MULBYSCALAR( c, s ) \
- do{ (c).r *= (s);\
- (c).i *= (s); }while(0)
-#endif
-
-#ifndef CHECK_OVERFLOW_OP
-# define CHECK_OVERFLOW_OP(a,op,b) /* noop */
-#endif
-
-#define C_ADD( res, a,b)\
- do { \
- CHECK_OVERFLOW_OP((a).r,+,(b).r)\
- CHECK_OVERFLOW_OP((a).i,+,(b).i)\
- (res).r=(a).r+(b).r; (res).i=(a).i+(b).i; \
- }while(0)
-#define C_SUB( res, a,b)\
- do { \
- CHECK_OVERFLOW_OP((a).r,-,(b).r)\
- CHECK_OVERFLOW_OP((a).i,-,(b).i)\
- (res).r=(a).r-(b).r; (res).i=(a).i-(b).i; \
- }while(0)
-#define C_ADDTO( res , a)\
- do { \
- CHECK_OVERFLOW_OP((res).r,+,(a).r)\
- CHECK_OVERFLOW_OP((res).i,+,(a).i)\
- (res).r += (a).r; (res).i += (a).i;\
- }while(0)
-
-#define C_SUBFROM( res , a)\
- do {\
- CHECK_OVERFLOW_OP((res).r,-,(a).r)\
- CHECK_OVERFLOW_OP((res).i,-,(a).i)\
- (res).r -= (a).r; (res).i -= (a).i; \
- }while(0)
-
-
-#ifdef FIXED_POINT
-# define KISS_FFT_COS(phase) floor(.5+SAMP_MAX * cos (phase))
-# define KISS_FFT_SIN(phase) floor(.5+SAMP_MAX * sin (phase))
-# define HALF_OF(x) ((x)>>1)
-#elif defined(USE_SIMD)
-# define KISS_FFT_COS(phase) _mm_set1_ps( cos(phase) )
-# define KISS_FFT_SIN(phase) _mm_set1_ps( sin(phase) )
-# define HALF_OF(x) ((x)*_mm_set1_ps(.5))
-#else
-# define KISS_FFT_COS(phase) (kiss_fft_scalar) cos(phase)
-# define KISS_FFT_SIN(phase) (kiss_fft_scalar) sin(phase)
-# define HALF_OF(x) ((x)*.5)
-#endif
-
-#define kf_cexp(x,phase) \
- do{ \
- (x)->r = KISS_FFT_COS(phase);\
- (x)->i = KISS_FFT_SIN(phase);\
- }while(0)
-
-
-/* a debugging function */
-#define pcpx(c)\
- fprintf(stderr,"%g + %gi\n",(double)((c)->r),(double)((c)->i) )
-
-
-#ifdef KISS_FFT_USE_ALLOCA
-// define this to allow use of alloca instead of malloc for temporary buffers
-// Temporary buffers are used in two case:
-// 1. FFT sizes that have "bad" factors. i.e. not 2,3 and 5
-// 2. "in-place" FFTs. Notice the quotes, since kissfft does not really do an in-place transform.
-#include <alloca.h>
-#define KISS_FFT_TMP_ALLOC(nbytes) alloca(nbytes)
-#define KISS_FFT_TMP_FREE(ptr)
-#else
-#define KISS_FFT_TMP_ALLOC(nbytes) KISS_FFT_MALLOC(nbytes)
-#define KISS_FFT_TMP_FREE(ptr) KISS_FFT_FREE(ptr)
-#endif
diff --git a/code/lib/tfmicro/kissfft/kiss_fft.h b/code/lib/tfmicro/kissfft/kiss_fft.h
deleted file mode 100644
index c34ea5ee..00000000
--- a/code/lib/tfmicro/kissfft/kiss_fft.h
+++ /dev/null
@@ -1,131 +0,0 @@
-#ifndef KISS_FFT_H
-#define KISS_FFT_H
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <math.h>
-#include <string.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- ATTENTION!
- If you would like a :
- -- a utility that will handle the caching of fft objects
- -- real-only (no imaginary time component ) FFT
- -- a multi-dimensional FFT
- -- a command-line utility to perform ffts
- -- a command-line utility to perform fast-convolution filtering
-
- Then see kfc.h kiss_fftr.h kiss_fftnd.h fftutil.c kiss_fastfir.c
- in the tools/ directory.
-*/
-
-#ifdef USE_SIMD
-# include <xmmintrin.h>
-# define kiss_fft_scalar __m128
-#define KISS_FFT_MALLOC(nbytes) _mm_malloc(nbytes,16)
-#define KISS_FFT_FREE _mm_free
-#else
-#define KISS_FFT_MALLOC(X) (void*)(0) /* Patched. */
-#define KISS_FFT_FREE(X) /* Patched. */
-#endif
-
-
-// Patched automatically by download_dependencies.sh so default is 16 bit.
-#ifndef FIXED_POINT
-#define FIXED_POINT (16)
-#endif
-// End patch.
-
-#ifdef FIXED_POINT
-#include <stdint.h> /* Patched. */
-#include <sys/types.h>
-# if (FIXED_POINT == 32)
-# define kiss_fft_scalar int32_t
-# else
-# define kiss_fft_scalar int16_t
-# endif
-#else
-# ifndef kiss_fft_scalar
-/* default is float */
-# define kiss_fft_scalar float
-# endif
-#endif
-
-typedef struct {
- kiss_fft_scalar r;
- kiss_fft_scalar i;
-}kiss_fft_cpx;
-
-typedef struct kiss_fft_state* kiss_fft_cfg;
-
-/*
- * kiss_fft_alloc
- *
- * Initialize a FFT (or IFFT) algorithm's cfg/state buffer.
- *
- * typical usage: kiss_fft_cfg mycfg=kiss_fft_alloc(1024,0,NULL,NULL);
- *
- * The return value from fft_alloc is a cfg buffer used internally
- * by the fft routine or NULL.
- *
- * If lenmem is NULL, then kiss_fft_alloc will allocate a cfg buffer using malloc.
- * The returned value should be free()d when done to avoid memory leaks.
- *
- * The state can be placed in a user supplied buffer 'mem':
- * If lenmem is not NULL and mem is not NULL and *lenmem is large enough,
- * then the function places the cfg in mem and the size used in *lenmem
- * and returns mem.
- *
- * If lenmem is not NULL and ( mem is NULL or *lenmem is not large enough),
- * then the function returns NULL and places the minimum cfg
- * buffer size in *lenmem.
- * */
-
-kiss_fft_cfg kiss_fft_alloc(int nfft,int inverse_fft,void * mem,size_t * lenmem);
-
-/*
- * kiss_fft(cfg,in_out_buf)
- *
- * Perform an FFT on a complex input buffer.
- * for a forward FFT,
- * fin should be f[0] , f[1] , ... ,f[nfft-1]
- * fout will be F[0] , F[1] , ... ,F[nfft-1]
- * Note that each element is complex and can be accessed like
- f[k].r and f[k].i
- * */
-void kiss_fft(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout);
-
-/*
- A more generic version of the above function. It reads its input from every Nth sample.
- * */
-void kiss_fft_stride(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout,int fin_stride);
-
-/* If kiss_fft_alloc allocated a buffer, it is one contiguous
- buffer and can be simply free()d when no longer needed*/
-#define kiss_fft_free free
-
-/*
- Cleans up some memory that gets managed internally. Not necessary to call, but it might clean up
- your compiler output to call this before you exit.
-*/
-void kiss_fft_cleanup(void);
-
-
-/*
- * Returns the smallest integer k, such that k>=n and k has only "fast" factors (2,3,5)
- */
-int kiss_fft_next_fast_size(int n);
-
-/* for real ffts, we need an even size */
-#define kiss_fftr_next_fast_size_real(n) \
- (kiss_fft_next_fast_size( ((n)+1)>>1)<<1)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
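
// Illustrative sketch, not part of the vendored patch: the complex-FFT API
// documented in the deleted kiss_fft.h is driven roughly as below. Note that
// this TFMicro copy patches KISS_FFT_MALLOC away, so real callers supply their
// own state buffer through the mem/lenmem parameters instead of passing NULL.
#include <stddef.h>
#include "kiss_fft.h"

static void DemoFft(const kiss_fft_cpx* in, kiss_fft_cpx* out) {
  kiss_fft_cfg cfg = kiss_fft_alloc(1024, /*inverse_fft=*/0, NULL, NULL);
  if (cfg != NULL) {
    kiss_fft(cfg, in, out);  // out[k] is the k-th complex frequency bin
    kiss_fft_free(cfg);      // one contiguous allocation, released with free()
  }
}
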
diff --git a/code/lib/tfmicro/kissfft/tools/kiss_fftr.h b/code/lib/tfmicro/kissfft/tools/kiss_fftr.h
deleted file mode 100644
index 72e5a577..00000000
--- a/code/lib/tfmicro/kissfft/tools/kiss_fftr.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef KISS_FTR_H
-#define KISS_FTR_H
-
-#include "kiss_fft.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/*
-
- Real optimized version can save about 45% cpu time vs. complex fft of a real seq.
-
-
-
- */
-
-typedef struct kiss_fftr_state *kiss_fftr_cfg;
-
-
-kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem);
-/*
- nfft must be even
-
- If you don't care to allocate space, use mem = lenmem = NULL
-*/
-
-
-void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata);
-/*
- input timedata has nfft scalar points
- output freqdata has nfft/2+1 complex points
-*/
-
-void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata);
-/*
- input freqdata has nfft/2+1 complex points
- output timedata has nfft scalar points
-*/
-
-#define kiss_fftr_free free
-
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/code/lib/tfmicro/ruy/ruy/profiler/instrumentation.h b/code/lib/tfmicro/ruy/ruy/profiler/instrumentation.h
deleted file mode 100644
index c4df1e68..00000000
--- a/code/lib/tfmicro/ruy/ruy/profiler/instrumentation.h
+++ /dev/null
@@ -1,203 +0,0 @@
-/* Copyright 2020 Google LLC. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef RUY_RUY_PROFILER_INSTRUMENTATION_H_
-#define RUY_RUY_PROFILER_INSTRUMENTATION_H_
-
-#ifdef RUY_PROFILER
-#include <cstdio>
-#include <mutex>
-#include <vector>
-#endif
-
-namespace ruy {
-namespace profiler {
-
-#ifdef RUY_PROFILER
-
-// A label is how a code scope is annotated to appear in profiles.
-// The stacks that are sampled by the profiler are stacks of such labels.
-// A label consists of a literal string, plus optional integer arguments.
-class Label {
- public:
- Label() {}
-  template <typename... Args>
- explicit Label(Args... args) {
- Set(args...);
- }
- void Set(const char* format) {
- format_ = format;
- args_count_ = 0;
- }
-  template <typename... Args>
- void Set(const char* format, Args... args) {
- format_ = format;
- args_count_ = sizeof...(args);
- SetArgs(0, args...);
- }
-
- void operator=(const Label& other);
-
- bool operator==(const Label& other) const;
-
- std::string Formatted() const;
- const char* format() const { return format_; }
-
- private:
- void SetArgs(int position, int arg0) { args_[position] = arg0; }
-
- template
- void SetArgs(int position, int arg0, Args... args) {
- SetArgs(position, arg0);
- SetArgs(position + 1, args...);
- }
-
- static constexpr int kMaxArgs = 4;
- const char* format_ = nullptr;
- int args_count_ = 0;
- int args_[kMaxArgs];
-};
-
-namespace detail {
-
-// Forward-declaration, see class ThreadStack below.
-class ThreadStack;
-
-bool& GlobalIsProfilerRunning();
-
-// Returns the global vector of pointers to all stacks, there being one stack
-// per thread executing instrumented code.
-std::vector<ThreadStack*>* GlobalAllThreadStacks();
-
-// Returns the mutex to be locked around any access to GlobalAllThreadStacks().
-std::mutex* GlobalsMutex();
-
-// Returns the thread-local stack, specific to the current thread.
-ThreadStack* ThreadLocalThreadStack();
-
-// This 'stack' is what may be more appropriately called a 'pseudostack':
-// It contains Label entries that are 'manually' entered by instrumentation
-// code. It's unrelated to real call stacks.
-struct Stack {
- std::uint32_t id = 0;
- static constexpr int kMaxSize = 64;
- int size = 0;
- Label labels[kMaxSize];
-};
-
-// Returns the buffer byte size required by CopyToSample.
-int GetBufferSize(const Stack& stack);
-
-// Copies this Stack into a byte buffer, called a 'sample'.
-void CopyToBuffer(const Stack& stack, char* dst);
-
-// Populates this Stack from an existing sample buffer, typically
-// produced by CopyToSample.
-void ReadFromBuffer(const char* src, Stack* stack);
-
-// ThreadStack is meant to be used as a thread-local singleton, assigning to
-// each thread a Stack object holding its pseudo-stack of profile labels,
-// plus a mutex allowing to synchronize accesses to this pseudo-stack between
-// this thread and a possible profiler thread sampling it.
-class ThreadStack {
- public:
- ThreadStack();
- ~ThreadStack();
-
- const Stack& stack() const { return stack_; }
-
- // Returns the mutex to lock around any access to this stack. Each stack is
- // accessed by potentially two threads: the thread that it belongs to
- // (which calls Push and Pop) and the profiler thread during profiling
- // (which calls CopyToSample).
- std::mutex& Mutex() const { return mutex_; }
-
- // Pushes a new label on the top of this Stack.
-  template <typename... Args>
- void Push(Args... args) {
- // This mutex locking is needed to guard against race conditions as both
- // the current thread and the profiler thread may be concurrently accessing
- // this stack. In addition to that, this mutex locking also serves the other
- // purpose of acting as a barrier (of compiler code reordering, of runtime
- // CPU instruction reordering, and of memory access reordering), which
- // gives a measure of correctness to this profiler. The downside is some
- // latency. As this lock will be uncontended most of the times, the cost
- // should be roughly that of an sequentially-consistent atomic access,
- // comparable to an access to the level of CPU data cache that is shared
- // among all cores, typically 60 cycles on current ARM CPUs, plus side
- // effects from barrier instructions.
-    std::lock_guard<std::mutex> lock(mutex_);
- // Avoid overrunning the stack, even in 'release' builds. This profiling
- // instrumentation code should not ship in release builds anyway, the
- // overhead of this check is negligible, and overrunning a stack array would
- // be bad.
- if (stack_.size >= Stack::kMaxSize) {
- abort();
- }
- stack_.labels[stack_.size++].Set(args...);
- }
-
- // Pops the top-most label from this Stack.
- void Pop() {
- // See the comment in Push about this lock. While it would be tempting to
- // try to remove this lock and just atomically decrement size_ with a
- // store-release, that would not necessarily be a substitute for all of the
- // purposes that this lock serves, or if it was done carefully to serve all
- // of the same purposes, then that wouldn't be faster than this (mostly
- // uncontended) lock.
-    std::lock_guard<std::mutex> lock(mutex_);
- stack_.size--;
- }
-
- private:
- mutable std::mutex mutex_;
- Stack stack_;
-};
-
-} // namespace detail
-
-// RAII user-facing way to construct Labels associated with their life scope
-// and get them pushed to / popped from the current thread stack.
-class ScopeLabel {
- public:
-  template <typename... Args>
- ScopeLabel(Args... args) : thread_stack_(detail::ThreadLocalThreadStack()) {
- thread_stack_->Push(args...);
- }
-
- ~ScopeLabel() { thread_stack_->Pop(); }
-
- private:
- detail::ThreadStack* thread_stack_;
-};
-
-#else // no RUY_PROFILER
-
-class ScopeLabel {
- public:
-  template <typename... Args>
- explicit ScopeLabel(Args...) {}
-
- // This destructor is needed to consistently silence clang's -Wunused-variable
- // which seems to trigger semi-randomly.
- ~ScopeLabel() {}
-};
-
-#endif
-
-} // namespace profiler
-} // namespace ruy
-
-#endif // RUY_RUY_PROFILER_INSTRUMENTATION_H_
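
// Illustrative sketch, not part of the vendored patch: the RAII ScopeLabel from
// the deleted instrumentation.h annotates a scope so ruy's sampling profiler can
// attribute time to it; when RUY_PROFILER is not defined it compiles to a no-op.
// The function below is hypothetical.
#include "ruy/profiler/instrumentation.h"

void PackLhsSketch(int rows, int cols) {
  ruy::profiler::ScopeLabel label("PackLhsSketch %dx%d", rows, cols);  // pushed here, popped at '}'
  // ... work attributed to this label while the object is alive ...
}
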
diff --git a/code/lib/tfmicro/tensorflow/core/public/version.h b/code/lib/tfmicro/tensorflow/core/public/version.h
index 8f983022..08318293 100644
--- a/code/lib/tfmicro/tensorflow/core/public/version.h
+++ b/code/lib/tfmicro/tensorflow/core/public/version.h
@@ -21,7 +21,7 @@ limitations under the License.
// Also update tensorflow/tensorflow.bzl and
// tensorflow/tools/pip_package/setup.py
#define TF_MAJOR_VERSION 2
-#define TF_MINOR_VERSION 1
+#define TF_MINOR_VERSION 5
#define TF_PATCH_VERSION 0
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
@@ -108,7 +108,7 @@ limitations under the License.
#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
-#define TF_GRAPH_DEF_VERSION 389 // Updated: 2020/5/2
+#define TF_GRAPH_DEF_VERSION 578 // Updated: 2020/11/7
// Checkpoint compatibility versions (the versions field in SavedSliceMeta).
//
diff --git a/code/lib/tfmicro/tensorflow/lite/c/builtin_op_data.h b/code/lib/tfmicro/tensorflow/lite/c/builtin_op_data.h
index 2fe6c053..5452ef63 100644
--- a/code/lib/tfmicro/tensorflow/lite/c/builtin_op_data.h
+++ b/code/lib/tfmicro/tensorflow/lite/c/builtin_op_data.h
@@ -67,8 +67,9 @@ typedef struct {
typedef enum {
kTfLiteActNone = 0,
kTfLiteActRelu,
- kTfLiteActRelu1, // min(max(-1, x), 1)
- kTfLiteActRelu6, // min(max(0, x), 6)
+ kTfLiteActReluN1To1, // min(max(-1, x), 1)
+ kTfLiteActRelu1 = kTfLiteActReluN1To1, // kTfLiteActRelu1 will be deprecated.
+ kTfLiteActRelu6, // min(max(0, x), 6)
kTfLiteActTanh,
kTfLiteActSignBit,
kTfLiteActSigmoid,
@@ -198,6 +199,8 @@ typedef struct {
typedef struct {
TfLiteFusedActivation activation;
+ // Parameter added for the version 4.
+ bool pot_scale_int16;
} TfLiteAddParams;
typedef struct {
@@ -219,6 +222,8 @@ typedef struct {
typedef struct {
TfLiteFusedActivation activation;
+ // Parameter added for the version 5.
+ bool pot_scale_int16;
} TfLiteSubParams;
typedef struct {
@@ -297,6 +302,7 @@ typedef struct {
typedef struct {
bool align_corners;
+ bool half_pixel_centers;
} TfLiteResizeNearestNeighborParams;
typedef struct {
@@ -459,6 +465,15 @@ typedef struct {
int body_subgraph_index;
} TfLiteWhileParams;
+typedef struct {
+ bool exclusive;
+ bool reverse;
+} TfLiteCumsumParams;
+
+typedef struct {
+ int init_subgraph_index;
+} TfLiteCallOnceParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
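
// Illustrative sketch, not part of the vendored patch: kernels read the new
// builtin parameter structs through node->builtin_data; for example, a CUMSUM
// prepare function (hypothetical name) would consume TfLiteCumsumParams like so.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"

TfLiteStatus CumsumPrepareSketch(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteCumsumParams* params =
      reinterpret_cast<const TfLiteCumsumParams*>(node->builtin_data);
  // exclusive/reverse select which cumulative-sum variant the model requested.
  (void)params->exclusive;
  (void)params->reverse;
  (void)context;
  return kTfLiteOk;
}
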
diff --git a/code/lib/tfmicro/tensorflow/lite/c/common.c b/code/lib/tfmicro/tensorflow/lite/c/common.c
index f70a6000..0264f420 100644
--- a/code/lib/tfmicro/tensorflow/lite/c/common.c
+++ b/code/lib/tfmicro/tensorflow/lite/c/common.c
@@ -79,7 +79,8 @@ TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }
void TfLiteTensorDataFree(TfLiteTensor* t) {
- if (t->allocation_type == kTfLiteDynamic) {
+ if (t->allocation_type == kTfLiteDynamic ||
+ t->allocation_type == kTfLitePersistentRo) {
free(t->data.raw);
}
t->data.raw = NULL;
@@ -172,7 +173,8 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
}
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
- if (tensor->allocation_type != kTfLiteDynamic) {
+ if (tensor->allocation_type != kTfLiteDynamic &&
+ tensor->allocation_type != kTfLitePersistentRo) {
return;
}
// TODO(b/145340303): Tensor data should be aligned.
@@ -205,6 +207,8 @@ const char* TfLiteTypeGetName(TfLiteType type) {
return "BOOL";
case kTfLiteComplex64:
return "COMPLEX64";
+ case kTfLiteComplex128:
+ return "COMPLEX128";
case kTfLiteString:
return "STRING";
case kTfLiteFloat16:
diff --git a/code/lib/tfmicro/tensorflow/lite/c/common.h b/code/lib/tfmicro/tensorflow/lite/c/common.h
index 12ddf994..e04e1a12 100644
--- a/code/lib/tfmicro/tensorflow/lite/c/common.h
+++ b/code/lib/tfmicro/tensorflow/lite/c/common.h
@@ -29,6 +29,9 @@ limitations under the License.
// TfLiteDelegate - allows delegation of nodes to alternative backends.
//
// Some abstractions in this file are created and managed by Interpreter.
+//
+// NOTE: The order of values in these structs are "semi-ABI stable". New values
+// should be added only to the end of structs and never reordered.
#ifndef TENSORFLOW_LITE_C_COMMON_H_
#define TENSORFLOW_LITE_C_COMMON_H_
@@ -43,8 +46,18 @@ extern "C" {
typedef enum TfLiteStatus {
kTfLiteOk = 0,
+
+ // Generally referring to an error in the runtime (i.e. interpreter)
kTfLiteError = 1,
- kTfLiteDelegateError = 2
+
+ // Generally referring to an error from a TfLiteDelegate itself.
+ kTfLiteDelegateError = 2,
+
+ // Generally referring to an error in applying a delegate due to
+ // incompatibility between runtime and delegate, e.g., this error is returned
+ // when trying to apply a TfLite delegate onto a model graph that's already
+ // immutable.
+ kTfLiteApplicationError = 3
} TfLiteStatus;
// The list of external context types known to TF Lite. This list exists solely
@@ -55,7 +68,7 @@ typedef enum TfLiteExternalContextType {
kTfLiteEigenContext = 0, // include eigen_support.h to use.
kTfLiteGemmLowpContext = 1, // include gemm_support.h to use.
kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support.
- kTfLiteCpuBackendContext = 3, // include cpu_backend_support.h to use.
+ kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use.
kTfLiteMaxExternalContexts = 4
} TfLiteExternalContextType;
@@ -83,8 +96,9 @@ typedef struct TfLiteIntArray {
int size;
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
-#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
- __GNUC_MINOR__ >= 1
+#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
+ __GNUC_MINOR__ >= 1) || \
+ defined(HEXAGON) || (__clang_major__ == 7 && __clang_minor__ == 1)
int data[0];
#else
int data[];
@@ -122,6 +136,7 @@ typedef struct TfLiteFloatArray {
int size;
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
+// This also applies to the toolchain used for Qualcomm Hexagon DSPs.
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
__GNUC_MINOR__ >= 1
float data[0];
@@ -200,6 +215,7 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
// the current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
+// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
#define TF_LITE_ENSURE_EQ(context, a, b) \
do { \
if ((a) != (b)) { \
@@ -219,6 +235,17 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
} \
} while (0)
+#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \
+ do { \
+ auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \
+ if (delta > epsilon) { \
+ TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \
+                         __FILE__, __LINE__, #a, #b, static_cast<double>(a), \
+                         static_cast<double>(b));                            \
+ return kTfLiteError; \
+ } \
+ } while (0)
+
#define TF_LITE_ENSURE_OK(context, status) \
do { \
const TfLiteStatus s = (status); \
@@ -227,11 +254,32 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
} \
} while (0)
+// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
+// library.
+#ifdef SWIG
+#define TFL_CAPI_EXPORT
+#else
+#if defined(_WIN32)
+#ifdef TFL_COMPILE_LIBRARY
+#define TFL_CAPI_EXPORT __declspec(dllexport)
+#else
+#define TFL_CAPI_EXPORT __declspec(dllimport)
+#endif // TFL_COMPILE_LIBRARY
+#else
+#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
+#endif // _WIN32
+#endif // SWIG
+
// Single-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex64 {
float re, im; // real and imaginary parts, respectively.
} TfLiteComplex64;
+// Double-precision complex data type compatible with the C99 definition.
+typedef struct TfLiteComplex128 {
+ double re, im; // real and imaginary parts, respectively.
+} TfLiteComplex128;
+
// Half precision data type compatible with the C99 definition.
typedef struct TfLiteFloat16 {
uint16_t data;
@@ -251,6 +299,7 @@ typedef enum {
kTfLiteInt8 = 9,
kTfLiteFloat16 = 10,
kTfLiteFloat64 = 11,
+ kTfLiteComplex128 = 12,
} TfLiteType;
// Return the name of a given type, for error reporting purposes.
@@ -307,26 +356,39 @@ typedef union TfLitePtrUnion {
int64_t* i64;
float* f;
TfLiteFloat16* f16;
+ double* f64;
char* raw;
const char* raw_const;
uint8_t* uint8;
bool* b;
int16_t* i16;
TfLiteComplex64* c64;
+ TfLiteComplex128* c128;
int8_t* int8;
/* Only use this member. */
void* data;
} TfLitePtrUnion;
-// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
-// data (or data externally allocated). kTfLiteArenaRw is arena allocated
-// data. kTfLiteDynamic is for tensors that are allocated during evaluation.
+// Memory allocation strategies.
+// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated.
+// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence,
+// and available during eval.
+// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and
+// only available during eval.
+// * kTfLiteDynamic: Allocated during eval, or for string tensors.
+// * kTfLitePersistentRo: Allocated and populated during prepare. This is
+// useful for tensors that can be computed during prepare and treated
+// as constant inputs for downstream ops (also in prepare).
+// * kTfLiteCustom: Custom memory allocation provided by the user. See
+// TfLiteCustomAllocation below.
typedef enum TfLiteAllocationType {
kTfLiteMemNone = 0,
kTfLiteMmapRo,
kTfLiteArenaRw,
kTfLiteArenaRwPersistent,
kTfLiteDynamic,
+ kTfLitePersistentRo,
+ kTfLiteCustom,
} TfLiteAllocationType;
// The delegates should use zero or positive integers to represent handles.
@@ -359,8 +421,18 @@ typedef struct TfLiteSparsity {
int dim_metadata_size;
} TfLiteSparsity;
-// An tensor in the interpreter system which is a wrapper around a buffer of
+// Defines a custom memory allocation not owned by the runtime.
+// `data` should be aligned to kDefaultTensorAlignment defined in
+// lite/util.h. (Currently 64 bytes)
+// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage.
+typedef struct TfLiteCustomAllocation {
+ void* data;
+ size_t bytes;
+} TfLiteCustomAllocation;
+
+// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
+#ifndef TF_LITE_STATIC_MEMORY
typedef struct TfLiteTensor {
// The data type specification for data stored in `data`. This affects
// what member of `data` union should be used.
@@ -426,31 +498,6 @@ typedef struct TfLiteTensor {
const TfLiteIntArray* dims_signature;
} TfLiteTensor;
-#ifndef TF_LITE_STATIC_MEMORY
-// Free data memory of tensor `t`.
-void TfLiteTensorDataFree(TfLiteTensor* t);
-
-// Free quantization data.
-void TfLiteQuantizationFree(TfLiteQuantization* quantization);
-
-// Free sparsity parameters.
-void TfLiteSparsityFree(TfLiteSparsity* sparsity);
-
-// Free memory of tensor `t`.
-void TfLiteTensorFree(TfLiteTensor* t);
-
-// Set all of a tensor's fields (and free any previously allocated data).
-void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
- TfLiteQuantizationParams quantization, char* buffer,
- size_t size, TfLiteAllocationType allocation_type,
- const void* allocation, bool is_variable,
- TfLiteTensor* tensor);
-
-// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
-// types other than kTfLiteDynamic will be ignored.
-void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
-#endif // TF_LITE_STATIC_MEMORY
-
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
@@ -487,6 +534,130 @@ typedef struct TfLiteNode {
// WARNING: This is an experimental interface that is subject to change.
struct TfLiteDelegate* delegate;
} TfLiteNode;
+#else // defined(TF_LITE_STATIC_MEMORY)?
+// NOTE: This flag is opt-in only at compile time.
+//
+// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
+// contains only the minimum fields required to initialize and prepare a micro
+// inference graph. The fields in this struct have been ordered from
+// largest-to-smallest for optimal struct sizeof.
+//
+// This struct does not use:
+// - allocation
+// - buffer_handle
+// - data_is_stale
+// - delegate
+// - dims_signature
+// - name
+// - sparsity
+typedef struct TfLiteTensor {
+ // TODO(b/155784997): Consider consolidating these quantization fields:
+ // Quantization information. Replaces params field above.
+ TfLiteQuantization quantization;
+
+ // Quantization information.
+ TfLiteQuantizationParams params;
+
+ // A union of data pointers. The appropriate type should be used for a typed
+ // tensor based on `type`.
+ TfLitePtrUnion data;
+
+ // A pointer to a structure representing the dimensionality interpretation
+ // that the buffer should have. NOTE: the product of elements of `dims`
+ // and the element datatype size should be equal to `bytes` below.
+ TfLiteIntArray* dims;
+
+ // The number of bytes required to store the data of this Tensor. I.e.
+ // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
+ // type is kTfLiteFloat32 and dims = {3, 2} then
+ // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
+ size_t bytes;
+
+ // The data type specification for data stored in `data`. This affects
+ // what member of `data` union should be used.
+ TfLiteType type;
+
+ // How memory is mapped
+ // kTfLiteMmapRo: Memory mapped read only.
+ // i.e. weights
+ // kTfLiteArenaRw: Arena allocated read write memory
+ // (i.e. temporaries, outputs).
+ TfLiteAllocationType allocation_type;
+
+ // True if the tensor is a variable.
+ bool is_variable;
+} TfLiteTensor;
+
+// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
+// only the minimum fields required to represent a node.
+//
+// This struct does not use:
+// - delegate
+// - intermediates
+// - temporaries
+typedef struct TfLiteNode {
+ // Inputs to this node expressed as indices into the simulator's tensors.
+ TfLiteIntArray* inputs;
+
+ // Outputs to this node expressed as indices into the simulator's tensors.
+ TfLiteIntArray* outputs;
+
+ // Opaque data provided by the node implementer through `Registration.init`.
+ void* user_data;
+
+ // Opaque data provided to the node if the node is a builtin. This is usually
+ // a structure defined in builtin_op_data.h
+ void* builtin_data;
+
+ // Custom initial data. This is the opaque data provided in the flatbuffer.
+ // WARNING: This is an experimental interface that is subject to change.
+ const void* custom_initial_data;
+ int custom_initial_data_size;
+} TfLiteNode;
+#endif // TF_LITE_STATIC_MEMORY
+
+// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
+// of information required for a kernel to run during TfLiteRegistration::Eval.
+// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM
+// builds with this flag by default internally.
+typedef struct TfLiteEvalTensor {
+ // A union of data pointers. The appropriate type should be used for a typed
+ // tensor based on `type`.
+ TfLitePtrUnion data;
+
+ // A pointer to a structure representing the dimensionality interpretation
+ // that the buffer should have.
+ TfLiteIntArray* dims;
+
+ // The data type specification for data stored in `data`. This affects
+ // what member of `data` union should be used.
+ TfLiteType type;
+} TfLiteEvalTensor;
+
+#ifndef TF_LITE_STATIC_MEMORY
+// Free data memory of tensor `t`.
+void TfLiteTensorDataFree(TfLiteTensor* t);
+
+// Free quantization data.
+void TfLiteQuantizationFree(TfLiteQuantization* quantization);
+
+// Free sparsity parameters.
+void TfLiteSparsityFree(TfLiteSparsity* sparsity);
+
+// Free memory of tensor `t`.
+void TfLiteTensorFree(TfLiteTensor* t);
+
+// Set all of a tensor's fields (and free any previously allocated data).
+void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
+ TfLiteQuantizationParams quantization, char* buffer,
+ size_t size, TfLiteAllocationType allocation_type,
+ const void* allocation, bool is_variable,
+ TfLiteTensor* tensor);
+
+// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
+// types other than kTfLiteDynamic will be ignored.
+void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
+#endif // TF_LITE_STATIC_MEMORY
// WARNING: This is an experimental interface that is subject to change.
//
@@ -578,12 +749,11 @@ typedef struct TfLiteContext {
void* profiler;
// Allocate persistent buffer which has the same life time as the interpreter.
+ // Returns nullptr on failure.
// The memory is allocated from heap for TFL, and from tail in TFLM.
- // If *ptr is not nullptr, the pointer will be reallocated.
- // This method is only available in Prepare stage.
+ // This method is only available in Init or Prepare stage.
// WARNING: This is an experimental interface that is subject to change.
- TfLiteStatus (*AllocatePersistentBuffer)(struct TfLiteContext* ctx,
- size_t bytes, void** ptr);
+ void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
// Allocate a buffer which will be deallocated right after invoke phase.
// The memory is allocated from heap in TFL, and from volatile arena in TFLM.
@@ -638,6 +808,18 @@ typedef struct TfLiteContext {
TfLiteStatus (*PreviewDelegatePartitioning)(
struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions);
+
+ // Returns a TfLiteTensor struct for a given index.
+ // WARNING: This is an experimental interface that is subject to change.
+ // WARNING: This method may not be available on all platforms.
+ TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
+ int tensor_idx);
+
+ // Returns a TfLiteEvalTensor struct for a given index.
+ // WARNING: This is an experimental interface that is subject to change.
+ // WARNING: This method may not be available on all platforms.
+ TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
+ int tensor_idx);
} TfLiteContext;
typedef struct TfLiteRegistration {
@@ -712,7 +894,26 @@ typedef enum TfLiteDelegateFlags {
//
// If the delegate isn't capable to handle dynamic tensors, this flag need
// to be set to false.
- kTfLiteDelegateFlagsAllowDynamicTensors = 1
+ kTfLiteDelegateFlagsAllowDynamicTensors = 1,
+
+ // This flag can be used by delegates (that allow dynamic tensors) to ensure
+ // applicable tensor shapes are automatically propagated in the case of tensor
+ // resizing.
+ // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
+ // of a delegate kernel will have correct shapes before its Prepare() method
+ // is called. The runtime leverages TFLite builtin ops in the original
+ // execution plan to propagate shapes.
+ //
+ // A few points to note:
+ // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
+ // false, this one is redundant since the delegate kernels are re-initialized
+ // every time tensors are resized.
+ // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
+ // work is required to prepare the original execution plan.
+ // 3. This flag requires that the original execution plan only have ops with
+ // valid registrations (and not 'dummy' custom ops like with Flex).
+ // WARNING: This feature is experimental and subject to change.
+ kTfLiteDelegateFlagsRequirePropagatedShapes = 2
} TfLiteDelegateFlags;
// WARNING: This is an experimental interface that is subject to change.
@@ -731,8 +932,9 @@ typedef struct TfLiteDelegate {
struct TfLiteDelegate* delegate);
// Copy the data from delegate buffer handle into raw memory of the given
- // 'tensor'. This cannot be null. The delegate is allowed to allocate the raw
- // bytes as long as it follows the rules for kTfLiteDynamic tensors.
+ // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
+ // long as it follows the rules for kTfLiteDynamic tensors, in which case this
+ // cannot be null.
TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
struct TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
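
// Illustrative sketch, not part of the vendored patch: with the signature change
// to AllocatePersistentBuffer above, Init/Prepare-stage code now checks the
// returned pointer directly instead of passing a void** out-parameter. The
// OpDataSketch struct and function name are assumptions for illustration.
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/c/common.h"

struct OpDataSketch {
  int32_t output_multiplier;
  int output_shift;
};

void* InitSketch(TfLiteContext* context, const char* /*buffer*/, size_t /*length*/) {
  // Lives as long as the interpreter; returns nullptr if the arena is exhausted.
  return context->AllocatePersistentBuffer(context, sizeof(OpDataSketch));
}
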
diff --git a/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc b/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
index 2325513d..16118d41 100644
--- a/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
+++ b/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -15,10 +15,15 @@ limitations under the License.
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
-#include
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
@@ -57,6 +62,17 @@ class SafeBuiltinDataAllocator {
BuiltinDataAllocator* allocator_;
};
+// All the Parse functions take some pointers as params and this function has
+// the common DCHECKs to catch if any of those are nullptr.
+void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ TFLITE_DCHECK(op != nullptr);
+ TFLITE_DCHECK(error_reporter != nullptr);
+ TFLITE_DCHECK(allocator != nullptr);
+ TFLITE_DCHECK(builtin_data != nullptr);
+}
+
// Copies the contents from the flatbuffer int vector `flatbuffer` into the
// int array `buffer`. `flat_vector` and `buffer` represent the same
// configuration operation for a given operation.
@@ -85,87 +101,41 @@ TfLiteStatus FlatBufferIntVectorToArray(
return kTfLiteOk;
}
-} // namespace
-
-TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
- ErrorReporter* error_reporter) {
- switch (tensor_type) {
- case TensorType_FLOAT16:
- *type = kTfLiteFloat16;
- return kTfLiteOk;
- case TensorType_FLOAT32:
- *type = kTfLiteFloat32;
- return kTfLiteOk;
- case TensorType_FLOAT64:
- *type = kTfLiteFloat64;
- return kTfLiteOk;
- case TensorType_INT16:
- *type = kTfLiteInt16;
- return kTfLiteOk;
- case TensorType_INT32:
- *type = kTfLiteInt32;
- return kTfLiteOk;
- case TensorType_UINT8:
- *type = kTfLiteUInt8;
- return kTfLiteOk;
- case TensorType_INT8:
- *type = kTfLiteInt8;
- return kTfLiteOk;
- case TensorType_INT64:
- *type = kTfLiteInt64;
- return kTfLiteOk;
- case TensorType_STRING:
- *type = kTfLiteString;
- return kTfLiteOk;
- case TensorType_BOOL:
- *type = kTfLiteBool;
- return kTfLiteOk;
- case TensorType_COMPLEX64:
- *type = kTfLiteComplex64;
- return kTfLiteOk;
- default:
- *type = kTfLiteNoType;
- TF_LITE_REPORT_ERROR(error_reporter,
- "Unsupported data type %d in tensor\n", tensor_type);
- return kTfLiteError;
+// Converts the flatbuffer activation to what is used at runtime.
+TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
+ switch (activation) {
+ case ActivationFunctionType_NONE:
+ return kTfLiteActNone;
+ case ActivationFunctionType_RELU:
+ return kTfLiteActRelu;
+ case ActivationFunctionType_RELU_N1_TO_1:
+ return kTfLiteActReluN1To1;
+ case ActivationFunctionType_RELU6:
+ return kTfLiteActRelu6;
+ case ActivationFunctionType_TANH:
+ return kTfLiteActTanh;
+ case ActivationFunctionType_SIGN_BIT:
+ return kTfLiteActSignBit;
}
+ return kTfLiteActNone;
}
-// Parse the appropriate data out of the op.
-//
-// This handles builtin data explicitly as there are flatbuffer schemas.
-// If it returns kTfLiteOk, it passes the data out with `builtin_data`, which
-// need to be released by calling `free`.`
-// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
-TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
- ErrorReporter* error_reporter,
- BuiltinDataAllocator* allocator, void** builtin_data) {
- auto parse_padding = [](Padding padding) {
- switch (padding) {
- case Padding_SAME:
- return kTfLitePaddingSame;
- case Padding_VALID:
- return kTfLitePaddingValid;
- }
- return kTfLitePaddingUnknown;
- };
- auto parse_activation = [](ActivationFunctionType activation) {
- switch (activation) {
- case ActivationFunctionType_NONE:
- return kTfLiteActNone;
- case ActivationFunctionType_RELU:
- return kTfLiteActRelu;
- case ActivationFunctionType_RELU_N1_TO_1:
- return kTfLiteActRelu1;
- case ActivationFunctionType_RELU6:
- return kTfLiteActRelu6;
- case ActivationFunctionType_TANH:
- return kTfLiteActTanh;
- case ActivationFunctionType_SIGN_BIT:
- return kTfLiteActSignBit;
- }
- return kTfLiteActNone;
- };
+// Converts the flatbuffer padding enum to what is used at runtime.
+TfLitePadding ConvertPadding(Padding padding) {
+ switch (padding) {
+ case Padding_SAME:
+ return kTfLitePaddingSame;
+ case Padding_VALID:
+ return kTfLitePaddingValid;
+ }
+ return kTfLitePaddingUnknown;
+}
+
+#ifndef TF_LITE_STATIC_MEMORY
+TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
auto parseLSHProjectionType = [](LSHProjectionType type) {
switch (type) {
case LSHProjectionType_SPARSE:
@@ -191,22 +161,247 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
SafeBuiltinDataAllocator safe_allocator(allocator);
*builtin_data = nullptr;
switch (op_type) {
- case BuiltinOperator_CONV_2D: {
-      auto params = safe_allocator.Allocate<TfLiteConvParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (auto* conv_params = op->builtin_options_as_Conv2DOptions()) {
- params->padding = parse_padding(conv_params->padding());
- params->stride_width = conv_params->stride_w();
- params->stride_height = conv_params->stride_h();
- params->activation =
- parse_activation(conv_params->fused_activation_function());
-
- params->dilation_width_factor = conv_params->dilation_w_factor();
- params->dilation_height_factor = conv_params->dilation_h_factor();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
+ case BuiltinOperator_ABS: {
+ return ParseAbs(op, error_reporter, allocator, builtin_data);
}
+
+ case BuiltinOperator_ADD: {
+ return ParseAdd(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_ARG_MAX: {
+ return ParseArgMax(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_ARG_MIN: {
+ return ParseArgMin(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_AVERAGE_POOL_2D: {
+ return ParsePool(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CEIL: {
+ return ParseCeil(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CONCATENATION: {
+ return ParseConcatenation(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_CONV_2D: {
+ return ParseConv2D(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_DEPTHWISE_CONV_2D: {
+ return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_DEQUANTIZE: {
+ return ParseDequantize(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_FLOOR: {
+ return ParseFloor(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_FULLY_CONNECTED: {
+ return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_GREATER: {
+ return ParseGreater(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_GREATER_EQUAL: {
+ return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_HARD_SWISH: {
+ return ParseHardSwish(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_L2_NORMALIZATION: {
+ return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_L2_POOL_2D: {
+ return ParsePool(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LESS: {
+ return ParseLess(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LESS_EQUAL: {
+ return ParseLessEqual(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOG: {
+ return ParseLog(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGICAL_AND: {
+ return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGICAL_NOT: {
+ return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGICAL_OR: {
+ return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_LOGISTIC: {
+ return ParseLogistic(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MAXIMUM: {
+ return ParseMaximum(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MAX_POOL_2D: {
+ return ParsePool(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MEAN: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MINIMUM: {
+ return ParseMinimum(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_MUL: {
+ return ParseMul(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_NEG: {
+ return ParseNeg(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_NOT_EQUAL: {
+ return ParseNotEqual(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PACK: {
+ return ParsePack(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PAD: {
+ return ParsePad(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PADV2: {
+ return ParsePadV2(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_PRELU: {
+ return ParsePrelu(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_QUANTIZE: {
+ return ParseQuantize(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_ANY: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_MAX: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_MIN: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_REDUCE_PROD: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RELU: {
+ return ParseRelu(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RELU6: {
+ return ParseRelu6(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RESHAPE: {
+ return ParseReshape(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RESIZE_BILINEAR: {
+ return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
+ return ParseResizeNearestNeighbor(op, error_reporter, allocator,
+ builtin_data);
+ }
+
+ case BuiltinOperator_ROUND: {
+ return ParseRound(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_RSQRT: {
+ return ParseRsqrt(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SHAPE: {
+ return ParseShape(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SIN: {
+ return ParseSin(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SOFTMAX: {
+ return ParseSoftmax(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SPLIT: {
+ return ParseSplit(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SPLIT_V: {
+ return ParseSplitV(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SQRT: {
+ return ParseSqrt(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SQUARE: {
+ return ParseSquare(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_STRIDED_SLICE: {
+ return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SUB: {
+ return ParseSub(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SUM: {
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_SVDF: {
+ return ParseSvdf(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_TANH: {
+ return ParseTanh(op, error_reporter, allocator, builtin_data);
+ }
+
+ case BuiltinOperator_UNPACK: {
+ return ParseUnpack(op, error_reporter, allocator, builtin_data);
+ }
+
case BuiltinOperator_CAST: {
auto params = safe_allocator.Allocate<TfLiteCastParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -231,61 +426,13 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_AVERAGE_POOL_2D:
- case BuiltinOperator_MAX_POOL_2D:
- case BuiltinOperator_L2_POOL_2D: {
- auto params = safe_allocator.Allocate<TfLitePoolParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* pool_params = op->builtin_options_as_Pool2DOptions()) {
- params->padding = parse_padding(pool_params->padding());
- params->stride_width = pool_params->stride_w();
- params->stride_height = pool_params->stride_h();
- params->filter_width = pool_params->filter_width();
- params->filter_height = pool_params->filter_height();
- params->activation =
- parse_activation(pool_params->fused_activation_function());
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_DEPTHWISE_CONV_2D: {
- auto params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* conv_params =
- op->builtin_options_as_DepthwiseConv2DOptions()) {
- params->padding = parse_padding(conv_params->padding());
- params->stride_width = conv_params->stride_w();
- params->stride_height = conv_params->stride_h();
- params->depth_multiplier = conv_params->depth_multiplier();
- params->activation =
- parse_activation(conv_params->fused_activation_function());
-
- params->dilation_width_factor = conv_params->dilation_w_factor();
- params->dilation_height_factor = conv_params->dilation_h_factor();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_SVDF: {
- auto params = safe_allocator.Allocate<TfLiteSVDFParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* svdf_params = op->builtin_options_as_SVDFOptions()) {
- params->rank = svdf_params->rank();
- params->activation =
- parse_activation(svdf_params->fused_activation_function());
- params->asymmetric_quantize_inputs =
- svdf_params->asymmetric_quantize_inputs();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sequence_rnn_params =
op->builtin_options_as_SequenceRNNOptions()) {
params->activation =
- parse_activation(sequence_rnn_params->fused_activation_function());
+ ConvertActivation(sequence_rnn_params->fused_activation_function());
params->time_major = sequence_rnn_params->time_major();
params->asymmetric_quantize_inputs =
sequence_rnn_params->asymmetric_quantize_inputs();
@@ -299,7 +446,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_sequence_rnn_params =
op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
- params->activation = parse_activation(
+ params->activation = ConvertActivation(
bidi_sequence_rnn_params->fused_activation_function());
params->time_major = bidi_sequence_rnn_params->time_major();
params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
@@ -314,7 +461,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
params->activation =
- parse_activation(rnn_params->fused_activation_function());
+ ConvertActivation(rnn_params->fused_activation_function());
params->asymmetric_quantize_inputs =
rnn_params->asymmetric_quantize_inputs();
}
@@ -332,104 +479,16 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_FULLY_CONNECTED: {
- auto params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* fully_connected_params =
- op->builtin_options_as_FullyConnectedOptions()) {
- params->activation = parse_activation(
- fully_connected_params->fused_activation_function());
- params->keep_num_dims = fully_connected_params->keep_num_dims();
- params->asymmetric_quantize_inputs =
- fully_connected_params->asymmetric_quantize_inputs();
- switch (fully_connected_params->weights_format()) {
- case FullyConnectedOptionsWeightsFormat_DEFAULT:
- params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
- break;
- case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
- params->weights_format =
- kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
- break;
- default:
- TF_LITE_REPORT_ERROR(error_reporter,
- "Unhandled fully-connected weights format.");
- return kTfLiteError;
- }
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
+
case BuiltinOperator_HASHTABLE_LOOKUP:
// no-op.
return kTfLiteOk;
- case BuiltinOperator_SOFTMAX: {
- auto params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* softmax_params =
- op->builtin_options_as_SoftmaxOptions()) {
- params->beta = softmax_params->beta();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_CONCATENATION: {
- auto params = safe_allocator.Allocate<TfLiteConcatenationParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* concatenation_params =
- op->builtin_options_as_ConcatenationOptions()) {
- params->activation =
- parse_activation(concatenation_params->fused_activation_function());
- params->axis = concatenation_params->axis();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_MUL: {
- auto params = safe_allocator.Allocate<TfLiteMulParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_MulOptions()) {
- params->activation =
- parse_activation(schema_params->fused_activation_function());
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_ADD: {
- auto params = safe_allocator.Allocate<TfLiteAddParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_AddOptions()) {
- params->activation =
- parse_activation(schema_params->fused_activation_function());
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_DIV: {
auto params = safe_allocator.Allocate<TfLiteDivParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
params->activation =
- parse_activation(schema_params->fused_activation_function());
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_SUB: {
- auto params = safe_allocator.Allocate<TfLiteSubParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_SubOptions()) {
- params->activation =
- parse_activation(schema_params->fused_activation_function());
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_L2_NORMALIZATION: {
- auto params = safe_allocator.Allocate<TfLiteL2NormParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_L2NormOptions()) {
- params->activation =
- parse_activation(schema_params->fused_activation_function());
+ ConvertActivation(schema_params->fused_activation_function());
}
*builtin_data = params.release();
return kTfLiteOk;
@@ -452,7 +511,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
params->activation =
- parse_activation(lstm_params->fused_activation_function());
+ ConvertActivation(lstm_params->fused_activation_function());
params->cell_clip = lstm_params->cell_clip();
params->proj_clip = lstm_params->proj_clip();
switch (lstm_params->kernel_type()) {
@@ -485,7 +544,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
if (const auto* seq_lstm_params =
op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
params->activation =
- parse_activation(seq_lstm_params->fused_activation_function());
+ ConvertActivation(seq_lstm_params->fused_activation_function());
params->cell_clip = seq_lstm_params->cell_clip();
params->proj_clip = seq_lstm_params->proj_clip();
params->time_major = seq_lstm_params->time_major();
@@ -502,7 +561,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
if (const auto* bidi_lstm_params =
op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
params->activation =
- parse_activation(bidi_lstm_params->fused_activation_function());
+ ConvertActivation(bidi_lstm_params->fused_activation_function());
params->cell_clip = bidi_lstm_params->cell_clip();
params->proj_clip = bidi_lstm_params->proj_clip();
params->merge_outputs = bidi_lstm_params->merge_outputs();
@@ -513,51 +572,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_RESIZE_BILINEAR: {
- auto params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params =
- op->builtin_options_as_ResizeBilinearOptions()) {
- params->align_corners = schema_params->align_corners();
- params->half_pixel_centers = schema_params->half_pixel_centers();
- } else {
- // Some older models did not populate the ResizeBilinearOptions field in
- // the flatbuffer, so ensure it's set to a sensible default.
- params->align_corners = false;
- params->half_pixel_centers = false;
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
- auto params =
- safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params =
- op->builtin_options_as_ResizeNearestNeighborOptions()) {
- params->align_corners = schema_params->align_corners();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_RESHAPE: {
- auto params = safe_allocator.Allocate<TfLiteReshapeParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_ReshapeOptions()) {
- auto* new_shape = schema_params->new_shape();
- // TODO(b/147203660): We need to figure out when dynamic reshape
- // (new_shape is a tensor) happens, why the option is not a nullptr.
- // But nonetheless, we should only copy when new_shape is not a nullptr.
- if (new_shape) {
- TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
- sizeof(params->shape), new_shape, params->shape, error_reporter,
- "reshape"));
- params->num_dimensions = new_shape->size();
- }
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_SKIP_GRAM: {
auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -601,83 +615,20 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_MEAN:
- case BuiltinOperator_REDUCE_MAX:
- case BuiltinOperator_REDUCE_MIN:
- case BuiltinOperator_REDUCE_PROD:
- case BuiltinOperator_REDUCE_ANY:
- case BuiltinOperator_SUM: {
- auto params = safe_allocator.Allocate<TfLiteReducerParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_ReducerOptions()) {
- params->keep_dims = schema_params->keep_dims();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_SPLIT: {
- auto params = safe_allocator.Allocate<TfLiteSplitParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_SplitOptions()) {
- params->num_splits = schema_params->num_splits();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_SPLIT_V: {
- auto params = safe_allocator.Allocate<TfLiteSplitVParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_SplitVOptions()) {
- params->num_splits = schema_params->num_splits();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
+
case BuiltinOperator_SQUEEZE: {
auto params = safe_allocator.Allocate<TfLiteSqueezeParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) {
const auto* squeeze_dims = schema_params->squeeze_dims();
- TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
- sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
- error_reporter, "squeeze"));
- params->num_squeeze_dims = squeeze_dims->size();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_STRIDED_SLICE: {
- auto params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params =
- op->builtin_options_as_StridedSliceOptions()) {
- params->begin_mask = schema_params->begin_mask();
- params->end_mask = schema_params->end_mask();
- params->ellipsis_mask = schema_params->ellipsis_mask();
- params->new_axis_mask = schema_params->new_axis_mask();
- params->shrink_axis_mask = schema_params->shrink_axis_mask();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_ARG_MAX: {
- auto params = safe_allocator.Allocate<TfLiteArgMaxParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_ArgMaxOptions()) {
- TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
- &params->output_type,
- error_reporter));
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_ARG_MIN: {
- auto params = safe_allocator.Allocate<TfLiteArgMinParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_ArgMinOptions()) {
- TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
- &params->output_type,
- error_reporter));
+ if (squeeze_dims != nullptr) {
+ TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
+ sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
+ error_reporter, "squeeze"));
+ params->num_squeeze_dims = squeeze_dims->size();
+ } else {
+ params->num_squeeze_dims = 0;
+ }
}
*builtin_data = params.release();
return kTfLiteOk;
@@ -687,7 +638,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* transpose_conv_params =
op->builtin_options_as_TransposeConvOptions()) {
- params->padding = parse_padding(transpose_conv_params->padding());
+ params->padding = ConvertPadding(transpose_conv_params->padding());
params->stride_width = transpose_conv_params->stride_w();
params->stride_height = transpose_conv_params->stride_h();
}
@@ -704,26 +655,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_SHAPE: {
- auto params = safe_allocator.Allocate<TfLiteShapeParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* schema_params = op->builtin_options_as_ShapeOptions()) {
- TF_LITE_ENSURE_STATUS(ConvertTensorType(
- schema_params->out_type(), &params->out_type, error_reporter));
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
- case BuiltinOperator_PACK: {
- auto params = safe_allocator.Allocate<TfLitePackParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* pack_params = op->builtin_options_as_PackOptions()) {
- params->values_count = pack_params->values_count();
- params->axis = pack_params->axis();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
TF_LITE_REPORT_ERROR(error_reporter,
@@ -752,16 +683,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
- case BuiltinOperator_UNPACK: {
- auto params = safe_allocator.Allocate<TfLiteUnpackParams>();
- TF_LITE_ENSURE(error_reporter, params != nullptr);
- if (const auto* unpack_params = op->builtin_options_as_UnpackOptions()) {
- params->num = unpack_params->num();
- params->axis = unpack_params->axis();
- }
- *builtin_data = params.release();
- return kTfLiteOk;
- }
case BuiltinOperator_LEAKY_RELU: {
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
@@ -840,8 +761,27 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
+ case BuiltinOperator_CALL_ONCE: {
+ auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* call_once_params =
+ op->builtin_options_as_CallOnceOptions()) {
+ params->init_subgraph_index = call_once_params->init_subgraph_index();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
+ case BuiltinOperator_CUMSUM: {
+ auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+ if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
+ params->exclusive = cumsum_params->exclusive();
+ params->reverse = cumsum_params->reverse();
+ }
+ *builtin_data = params.release();
+ return kTfLiteOk;
+ }
// Below are the ops with no builtin_data structure.
- case BuiltinOperator_ABS:
case BuiltinOperator_BATCH_TO_SPACE_ND:
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
// ok for now, since there is no call implementation either.
@@ -849,52 +789,24 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_CONCAT_EMBEDDINGS:
case BuiltinOperator_COS:
case BuiltinOperator_CUSTOM:
- case BuiltinOperator_DEQUANTIZE:
case BuiltinOperator_ELU:
case BuiltinOperator_EMBEDDING_LOOKUP:
case BuiltinOperator_EQUAL:
case BuiltinOperator_EXP:
case BuiltinOperator_EXPAND_DIMS:
- case BuiltinOperator_CEIL:
- case BuiltinOperator_FLOOR:
- case BuiltinOperator_GREATER:
- case BuiltinOperator_GREATER_EQUAL:
- case BuiltinOperator_HARD_SWISH:
- case BuiltinOperator_LESS:
- case BuiltinOperator_LESS_EQUAL:
- case BuiltinOperator_LOG:
- case BuiltinOperator_LOGISTIC:
case BuiltinOperator_LOG_SOFTMAX:
case BuiltinOperator_MATRIX_DIAG:
case BuiltinOperator_MATRIX_SET_DIAG:
- case BuiltinOperator_MAXIMUM:
- case BuiltinOperator_MINIMUM:
- case BuiltinOperator_NEG:
- case BuiltinOperator_NOT_EQUAL:
- case BuiltinOperator_PAD:
- case BuiltinOperator_PADV2:
- case BuiltinOperator_PRELU:
- case BuiltinOperator_RELU:
- case BuiltinOperator_RELU6:
case BuiltinOperator_RELU_N1_TO_1:
- case BuiltinOperator_ROUND:
- case BuiltinOperator_RSQRT:
case BuiltinOperator_SELECT:
case BuiltinOperator_SELECT_V2:
- case BuiltinOperator_SIN:
case BuiltinOperator_SLICE:
case BuiltinOperator_SPACE_TO_BATCH_ND:
- case BuiltinOperator_SQRT:
- case BuiltinOperator_TANH:
case BuiltinOperator_TILE:
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_POW:
- case BuiltinOperator_LOGICAL_OR:
- case BuiltinOperator_LOGICAL_AND:
- case BuiltinOperator_LOGICAL_NOT:
case BuiltinOperator_FLOOR_DIV:
- case BuiltinOperator_SQUARE:
case BuiltinOperator_ZEROS_LIKE:
case BuiltinOperator_FILL:
case BuiltinOperator_FLOOR_MOD:
@@ -905,15 +817,999 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_GATHER_ND:
case BuiltinOperator_WHERE:
case BuiltinOperator_RANK:
- case BuiltinOperator_QUANTIZE:
case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
case BuiltinOperator_SCATTER_ND:
case BuiltinOperator_DENSIFY:
case BuiltinOperator_SEGMENT_SUM:
+ case BuiltinOperator_BROADCAST_TO:
return kTfLiteOk;
+ case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
+ return kTfLiteError;
}
return kTfLiteError;
} // NOLINT[readability/fn_size]
+#endif // !defined(TF_LITE_STATIC_MEMORY)
+} // namespace
+
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
+ ErrorReporter* error_reporter) {
+ switch (tensor_type) {
+ case TensorType_FLOAT16:
+ *type = kTfLiteFloat16;
+ return kTfLiteOk;
+ case TensorType_FLOAT32:
+ *type = kTfLiteFloat32;
+ return kTfLiteOk;
+ case TensorType_FLOAT64:
+ *type = kTfLiteFloat64;
+ return kTfLiteOk;
+ case TensorType_INT16:
+ *type = kTfLiteInt16;
+ return kTfLiteOk;
+ case TensorType_INT32:
+ *type = kTfLiteInt32;
+ return kTfLiteOk;
+ case TensorType_UINT8:
+ *type = kTfLiteUInt8;
+ return kTfLiteOk;
+ case TensorType_INT8:
+ *type = kTfLiteInt8;
+ return kTfLiteOk;
+ case TensorType_INT64:
+ *type = kTfLiteInt64;
+ return kTfLiteOk;
+ case TensorType_STRING:
+ *type = kTfLiteString;
+ return kTfLiteOk;
+ case TensorType_BOOL:
+ *type = kTfLiteBool;
+ return kTfLiteOk;
+ case TensorType_COMPLEX64:
+ *type = kTfLiteComplex64;
+ return kTfLiteOk;
+ case TensorType_COMPLEX128:
+ *type = kTfLiteComplex128;
+ return kTfLiteOk;
+ default:
+ *type = kTfLiteNoType;
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "Unsupported data type %d in tensor\n", tensor_type);
+ return kTfLiteError;
+ }
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteAddParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const AddOptions* schema_params = op->builtin_options_as_AddOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->pot_scale_int16 = schema_params->pot_scale_int16();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteArgMaxParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteArgMaxParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
+
+ if (schema_params != nullptr) {
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(
+ schema_params->output_type(), &params->output_type, error_reporter));
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteArgMinParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteArgMinParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
+
+ if (schema_params != nullptr) {
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(
+ schema_params->output_type(), &params->output_type, error_reporter));
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseConcatenation(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteConcatenationParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteConcatenationParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ConcatenationOptions* schema_params =
+ op->builtin_options_as_ConcatenationOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->axis = schema_params->axis();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteConvParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteConvParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
+
+ if (schema_params != nullptr) {
+ params->padding = ConvertPadding(schema_params->padding());
+ params->stride_width = schema_params->stride_w();
+ params->stride_height = schema_params->stride_h();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+
+ params->dilation_width_factor = schema_params->dilation_w_factor();
+ params->dilation_height_factor = schema_params->dilation_h_factor();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+ std::unique_ptr<TfLiteDepthwiseConvParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const DepthwiseConv2DOptions* schema_params =
+ op->builtin_options_as_DepthwiseConv2DOptions();
+
+ if (schema_params != nullptr) {
+ params->padding = ConvertPadding(schema_params->padding());
+ params->stride_width = schema_params->stride_w();
+ params->stride_height = schema_params->stride_h();
+ params->depth_multiplier = schema_params->depth_multiplier();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+
+ params->dilation_width_factor = schema_params->dilation_w_factor();
+ params->dilation_height_factor = schema_params->dilation_h_factor();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseFullyConnected(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+ std::unique_ptr<TfLiteFullyConnectedParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const FullyConnectedOptions* schema_params =
+ op->builtin_options_as_FullyConnectedOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->keep_num_dims = schema_params->keep_num_dims();
+ params->asymmetric_quantize_inputs =
+ schema_params->asymmetric_quantize_inputs();
+
+ switch (schema_params->weights_format()) {
+ case FullyConnectedOptionsWeightsFormat_DEFAULT:
+ params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
+ break;
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
+ params->weights_format =
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
+ break;
+ default:
+ TF_LITE_REPORT_ERROR(error_reporter,
+ "Unhandled fully-connected weights format.");
+ return kTfLiteError;
+ }
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseL2Normalization(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteL2NormParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteL2NormParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteMulParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const MulOptions* schema_params = op->builtin_options_as_MulOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLitePackParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLitePackParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const PackOptions* schema_params = op->builtin_options_as_PackOptions();
+
+ if (schema_params != nullptr) {
+ params->values_count = schema_params->values_count();
+ params->axis = schema_params->axis();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLitePoolParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLitePoolParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
+
+ if (schema_params != nullptr) {
+ params->padding = ConvertPadding(schema_params->padding());
+ params->stride_width = schema_params->stride_w();
+ params->stride_height = schema_params->stride_h();
+ params->filter_width = schema_params->filter_width();
+ params->filter_height = schema_params->filter_height();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
+ BuiltinDataAllocator*, void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+ std::unique_ptr<TfLiteReducerParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteReducerParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
+
+ if (schema_params != nullptr) {
+ params->keep_dims = schema_params->keep_dims();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+ std::unique_ptr<TfLiteReshapeParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteReshapeParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
+
+ if (schema_params != nullptr) {
+ const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
+ if (new_shape != nullptr) {
+ TF_LITE_ENSURE_STATUS(
+ FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
+ params->shape, error_reporter, "reshape"));
+ params->num_dimensions = new_shape->size();
+ } else {
+ // TODO(b/157480169) TODO(b/147203660): We should either return
+ // kTfLiteError or fill in some reasonable defaults in the params struct.
+ // We are not doing so until we better understand the ramifications of
+ // changing the legacy behavior.
+ }
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseResizeBilinear(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteResizeBilinearParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ResizeBilinearOptions* schema_params =
+ op->builtin_options_as_ResizeBilinearOptions();
+
+ if (schema_params != nullptr) {
+ params->align_corners = schema_params->align_corners();
+ params->half_pixel_centers = schema_params->half_pixel_centers();
+ } else {
+ params->align_corners = false;
+ params->half_pixel_centers = false;
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteResizeNearestNeighborParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ResizeNearestNeighborOptions* schema_params =
+ op->builtin_options_as_ResizeNearestNeighborOptions();
+
+ if (schema_params != nullptr) {
+ params->align_corners = schema_params->align_corners();
+ params->half_pixel_centers = schema_params->half_pixel_centers();
+ } else {
+ params->align_corners = false;
+ params->half_pixel_centers = false;
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteShapeParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteShapeParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();
+
+ if (schema_params != nullptr) {
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
+ &params->out_type, error_reporter));
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteSoftmaxParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
+
+ if (schema_params != nullptr) {
+ params->beta = schema_params->beta();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteSplitParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteSplitParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
+
+ if (schema_params != nullptr) {
+ params->num_splits = schema_params->num_splits();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+
+ std::unique_ptr<TfLiteSplitVParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteSplitVParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();
+
+ if (schema_params != nullptr) {
+ params->num_splits = schema_params->num_splits();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseStridedSlice(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteStridedSliceParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const StridedSliceOptions* schema_params =
+ op->builtin_options_as_StridedSliceOptions();
+
+ if (schema_params != nullptr) {
+ params->begin_mask = schema_params->begin_mask();
+ params->end_mask = schema_params->end_mask();
+ params->ellipsis_mask = schema_params->ellipsis_mask();
+ params->new_axis_mask = schema_params->new_axis_mask();
+ params->shrink_axis_mask = schema_params->shrink_axis_mask();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteSubParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SubOptions* schema_params = op->builtin_options_as_SubOptions();
+
+ if (schema_params != nullptr) {
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->pot_scale_int16 = schema_params->pot_scale_int16();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+ // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+ std::unique_ptr<TfLiteSVDFParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
+ params = safe_allocator.Allocate<TfLiteSVDFParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
+ if (schema_params != nullptr) {
+ params->rank = schema_params->rank();
+ params->activation =
+ ConvertActivation(schema_params->fused_activation_function());
+ params->asymmetric_quantize_inputs =
+ schema_params->asymmetric_quantize_inputs();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+// We have this parse function instead of directly returning kTfLiteOk from the
+// switch-case in ParseOpData because this function is used as part of the
+// selective registration for the OpResolver implementation in micro.
+TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
+ void**) {
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
+
+ SafeBuiltinDataAllocator safe_allocator(allocator);
+  std::unique_ptr<TfLiteUnpackParams,
+                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
+      params = safe_allocator.Allocate<TfLiteUnpackParams>();
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
+
+ const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
+
+ if (schema_params != nullptr) {
+ params->num = schema_params->num();
+ params->axis = schema_params->axis();
+ } else {
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
+ // reasonable defaults in the params struct. We are not doing so until we
+    // better understand the ramifications of changing the legacy behavior.
+ }
+
+ *builtin_data = params.release();
+ return kTfLiteOk;
+}
+
+TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data) {
+// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
+// Micro not have the ParseOpData function at all. This would require splitting
+// the current file into two separate files, one of which defines the
+// ParseOpData function and the other that defines the operator specific parse
+// functions (e.g. ParseAdd).
+//
+// Such a split was attempted but was not worth the effort at the time because
+// of the following reasons:
+// * We could either duplicate the functions and the SafeBuiltinDataAllocator
+// class in the anonymous namespace of this file, or attempt to make a common
+// library with these helper functions and class.
+// * Making a common library with a separate build target was not feasible as
+//   it introduced circular dependencies due to the ErrorReporter, and a common
+//   .cc and .h within the same api build target also causes circular
+//   dependencies due to the BuiltinDataAllocator class.
+// * If all the builtin operators were to have their own parse functions, or we
+// were ok with some amount of code duplication, then this split of the .cc
+// files would be a lot more feasible.
+#ifdef TF_LITE_STATIC_MEMORY
+ TF_LITE_REPORT_ERROR(
+ error_reporter,
+ "ParseOpData is unsupported on TfLiteMicro, please use the operator "
+ "specific parse functions (e.g. ParseAdd etc.).\n");
+ return kTfLiteError;
+#else
+ return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
+ builtin_data);
+#endif
+}
} // namespace tflite
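
The comments above spell out why each builtin keeps its own parse function even when it only returns kTfLiteOk: TF Lite Micro registers parsers per operator so that pulling in one kernel does not link the parsing code for every other one, and ParseOpData itself is compiled out under TF_LITE_STATIC_MEMORY. A minimal sketch of that per-operator dispatch follows; the ParserEntry table and ParseSelected helper are illustrative only, not the actual micro resolver API.

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

// Hypothetical parser table: only the operators listed here (and therefore
// only their parse functions) end up linked into the binary.
struct ParserEntry {
  tflite::BuiltinOperator op;
  TfLiteStatus (*parse)(const tflite::Operator*, tflite::ErrorReporter*,
                        tflite::BuiltinDataAllocator*, void**);
};

constexpr ParserEntry kParsers[] = {
    {tflite::BuiltinOperator_SVDF, tflite::ParseSvdf},
    {tflite::BuiltinOperator_SOFTMAX, tflite::ParseSoftmax},
    {tflite::BuiltinOperator_TANH, tflite::ParseTanh},
};

TfLiteStatus ParseSelected(tflite::BuiltinOperator op_type,
                           const tflite::Operator* op,
                           tflite::ErrorReporter* reporter,
                           tflite::BuiltinDataAllocator* allocator,
                           void** builtin_data) {
  for (const ParserEntry& entry : kParsers) {
    if (entry.op == op_type) {
      return entry.parse(op, reporter, allocator, builtin_data);
    }
  }
  return kTfLiteError;  // The operator was not selectively registered.
}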
diff --git a/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h b/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h
index d774afe8..13680997 100644
--- a/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h
+++ b/code/lib/tfmicro/tensorflow/lite/core/api/flatbuffer_conversions.h
@@ -19,9 +19,12 @@ limitations under the License.
// flatbuffer serialization format into in-memory values that are used by the
// runtime API and interpreter.
+#include <cstddef>
+#include <new>
+#include <type_traits>
+
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
@@ -42,7 +45,7 @@ class BuiltinDataAllocator {
// platform targets support that properly.
   static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
- return new (allocated_memory) T;
+ return new (allocated_memory) T();
}
virtual ~BuiltinDataAllocator() {}
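
The change from "new (allocated_memory) T" to "new (allocated_memory) T()" value-initializes the allocated params struct, so any field a parse function never sets reads as zero rather than garbage. For illustration, a minimal heap-backed implementation of this interface might look like the sketch below, assuming the base class exposes Allocate(size, alignment_hint) and Deallocate(data) as pure virtuals; the MallocDataAllocator name and its use of malloc/free are assumptions, not the allocator TF Lite ships.

#include <cstdlib>

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

// Hypothetical allocator: hands out heap memory for builtin data structs.
class MallocDataAllocator : public tflite::BuiltinDataAllocator {
 public:
  void* Allocate(size_t size, size_t alignment_hint) override {
    // Ignores the alignment hint; malloc's alignment is sufficient for the
    // builtin param structs allocated through this interface.
    return std::malloc(size);
  }
  void Deallocate(void* data) override { std::free(data); }
};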
@@ -66,6 +69,196 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
ErrorReporter* error_reporter);
+TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseConcatenation(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseFullyConnected(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseGreaterEqual(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseL2Normalization(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseResizeBilinear(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseStridedSlice(const Operator* op,
+ ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator,
+ void** builtin_data);
+
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
+ BuiltinDataAllocator* allocator, void** builtin_data);
+
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
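
All of these entry points follow the same pattern: they allocate the corresponding TfLite*Params struct through the allocator, copy fields out of the flatbuffer options table, and hand the struct back through builtin_data. A usage sketch, assuming the caller already has the flatbuffer Operator, an ErrorReporter, and an allocator such as the MallocDataAllocator sketched earlier:

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

// Sketch: parse one SVDF operator's options and read back its rank.
TfLiteStatus ReadSvdfRank(const tflite::Operator* op,
                          tflite::ErrorReporter* reporter,
                          tflite::BuiltinDataAllocator* allocator,
                          int* rank) {
  void* builtin_data = nullptr;
  TfLiteStatus status =
      tflite::ParseSvdf(op, reporter, allocator, &builtin_data);
  if (status != kTfLiteOk) return status;

  auto* params = static_cast<TfLiteSVDFParams*>(builtin_data);
  *rank = params->rank;
  allocator->Deallocate(builtin_data);  // This sketch owns the allocation.
  return kTfLiteOk;
}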
diff --git a/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc b/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc
index 6424071f..c5dffb63 100644
--- a/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc
+++ b/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.cc
@@ -15,6 +15,11 @@ limitations under the License.
#include "tensorflow/lite/core/api/op_resolver.h"
+#include "flatbuffers/flatbuffers.h" // from @flatbuffers
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/schema/schema_utils.h"
+
namespace tflite {
TfLiteStatus GetRegistrationFromOpCode(
@@ -22,7 +27,7 @@ TfLiteStatus GetRegistrationFromOpCode(
ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
TfLiteStatus status = kTfLiteOk;
*registration = nullptr;
- auto builtin_code = opcode->builtin_code();
+ auto builtin_code = GetBuiltinCode(opcode);
int version = opcode->version();
if (builtin_code > BuiltinOperator_MAX ||
diff --git a/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.h b/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.h
index 1294b7b8..b6a8171d 100644
--- a/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.h
+++ b/code/lib/tfmicro/tensorflow/lite/core/api/op_resolver.h
@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
+#include
+
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/schema/schema_generated.h"
@@ -32,6 +34,16 @@ class OpResolver {
/// Finds the op registration of a custom operator by op name.
virtual const TfLiteRegistration* FindOp(const char* op,
int version) const = 0;
+
+ // Returns optional delegates for resolving and handling ops in the flatbuffer
+ // model. This may be used in addition to the standard TfLiteRegistration
+ // lookup for graph resolution.
+ using TfLiteDelegatePtrVector =
+      std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
+ virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
+ return TfLiteDelegatePtrVector();
+ }
+
virtual ~OpResolver() {}
};
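
A resolver that wants the runtime to try a delegate in addition to the usual registration lookup can override the new hook. A sketch under stated assumptions: the CreateMyDelegate/DeleteMyDelegate factory functions are placeholders presumed to exist elsewhere, and the FindOp bodies are stubbed out to keep the example short.

#include "tensorflow/lite/core/api/op_resolver.h"

// Hypothetical delegate factory functions, assumed to be defined elsewhere.
extern TfLiteDelegate* CreateMyDelegate(int num_threads);
extern void DeleteMyDelegate(TfLiteDelegate* delegate);

class MyOpResolver : public tflite::OpResolver {
 public:
  const TfLiteRegistration* FindOp(tflite::BuiltinOperator,
                                   int) const override {
    return nullptr;  // Stubbed; a real resolver returns registrations here.
  }
  const TfLiteRegistration* FindOp(const char*, int) const override {
    return nullptr;
  }

  // Offers one delegate to the runtime; ownership travels in the unique_ptr
  // with a custom deleter, matching TfLiteDelegatePtrVector.
  TfLiteDelegatePtrVector GetDelegates(int num_threads) const override {
    TfLiteDelegatePtrVector delegates;
    delegates.emplace_back(CreateMyDelegate(num_threads), DeleteMyDelegate);
    return delegates;
  }
};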
diff --git a/code/lib/tfmicro/tensorflow/lite/core/api/profiler.h b/code/lib/tfmicro/tensorflow/lite/core/api/profiler.h
new file mode 100644
index 00000000..897efbe1
--- /dev/null
+++ b/code/lib/tfmicro/tensorflow/lite/core/api/profiler.h
@@ -0,0 +1,194 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_
+#define TENSORFLOW_LITE_CORE_API_PROFILER_H_
+
+#include <cstdint>
+
+namespace tflite {
+
+// A simple utility for enabling profiled event tracing in TensorFlow Lite.
+class Profiler {
+ public:
+  // As a given Profiler instance might only be interested in certain event
+  // types, each event type value is defined as a power of two so that a
+  // Profiler can use bitwise operations to determine whether an event should
+  // be recorded or not.
+ enum class EventType {
+ // Default event type, the metadata field has no special significance.
+ DEFAULT = 1,
+
+ // The event is an operator invocation and the event_metadata field is the
+    // index of the operator node.
+ OPERATOR_INVOKE_EVENT = 2,
+
+ // The event is an invocation for an internal operator of a TFLite delegate.
+    // The event_metadata field is the index of the operator node that is
+    // specific to the delegate.
+ DELEGATE_OPERATOR_INVOKE_EVENT = 4,
+
+ // The event is a recording of runtime instrumentation such as the overall
+ // TFLite runtime status, the TFLite delegate status (if a delegate
+ // is applied), and the overall model inference latency etc.
+ // Note, the delegate status and overall status are stored as separate
+ // event_metadata fields. In particular, the delegate status is encoded
+ // as DelegateStatus::full_status().
+ GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8,
+ };
+
+ virtual ~Profiler() {}
+
+ // Signals the beginning of an event and returns a handle to the profile
+ // event. The `event_metadata1` and `event_metadata2` have different
+ // interpretations based on the actual Profiler instance and the `event_type`.
+ // For example, as for the 'SubgraphAwareProfiler' defined in
+ // lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT,
+ // `event_metadata1` represents the index of a TFLite node, and
+ // `event_metadata2` represents the index of the subgraph that this event
+ // comes from.
+ virtual uint32_t BeginEvent(const char* tag, EventType event_type,
+ int64_t event_metadata1,
+ int64_t event_metadata2) = 0;
+  // Similar to the above, but `event_metadata2` defaults to 0.
+ uint32_t BeginEvent(const char* tag, EventType event_type,
+ int64_t event_metadata) {
+ return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0);
+ }
+
+  // Signals an end to the specified profile event with 'event_metadata's. This
+ // is useful when 'event_metadata's are not available when the event begins
+ // or when one wants to overwrite the 'event_metadata's set at the beginning.
+ virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1,
+ int64_t event_metadata2) {}
+ // Signals an end to the specified profile event.
+ virtual void EndEvent(uint32_t event_handle) = 0;
+
+ // Appends an event of type 'event_type' with 'tag' and 'event_metadata'
+  // which started at 'start' and ended at 'end'.
+  // Note:
+  // In cases where ProfileSummarizer and tensorflow::StatsCalculator are used,
+  // they assume the value is in "usec"; if subclasses do not report usec,
+  // then the values are not meaningful.
+  // TODO(karimnosseir): Revisit and make the function more clear.
+ void AddEvent(const char* tag, EventType event_type, uint64_t start,
+ uint64_t end, int64_t event_metadata) {
+ AddEvent(tag, event_type, start, end, event_metadata,
+ /*event_metadata2*/ 0);
+ }
+
+ virtual void AddEvent(const char* tag, EventType event_type, uint64_t start,
+ uint64_t end, int64_t event_metadata1,
+ int64_t event_metadata2) {}
+
+ protected:
+ friend class ScopedProfile;
+};
+
+// Adds a profile event to `profiler` that begins with the construction
+// of the object and ends when the object goes out of scope.
+// The lifetime of tag should be at least the lifetime of `profiler`.
+// `profiler` may be null, in which case nothing is profiled.
+class ScopedProfile {
+ public:
+ ScopedProfile(Profiler* profiler, const char* tag,
+ Profiler::EventType event_type = Profiler::EventType::DEFAULT,
+ int64_t event_metadata = 0)
+ : profiler_(profiler), event_handle_(0) {
+ if (profiler) {
+ event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata);
+ }
+ }
+
+ ~ScopedProfile() {
+ if (profiler_) {
+ profiler_->EndEvent(event_handle_);
+ }
+ }
+
+ protected:
+ Profiler* profiler_;
+ uint32_t event_handle_;
+};
+
+class ScopedOperatorProfile : public ScopedProfile {
+ public:
+ ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
+ : ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
+ static_cast(node_index)) {}
+};
+
+class ScopedDelegateOperatorProfile : public ScopedProfile {
+ public:
+ ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag,
+ int node_index)
+ : ScopedProfile(profiler, tag,
+ Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT,
+ static_cast(node_index)) {}
+};
+
+class ScopedRuntimeInstrumentationProfile : public ScopedProfile {
+ public:
+ ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag)
+ : ScopedProfile(
+ profiler, tag,
+ Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {}
+
+ void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) {
+ if (profiler_) {
+ delegate_status_ = delegate_status;
+ interpreter_status_ = interpreter_status;
+ }
+ }
+
+ ~ScopedRuntimeInstrumentationProfile() {
+ if (profiler_) {
+ profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_);
+ }
+ }
+
+ private:
+ int64_t delegate_status_;
+ int64_t interpreter_status_;
+};
+
+} // namespace tflite
+
+#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr
+#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr)
+
+#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \
+ tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
+ (profiler), (tag))
+
+#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
+ tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
+ (profiler), (tag), (node_index))
+
+#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \
+ tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \
+ _profile_, __COUNTER__)((profiler), (tag), (node_index))
+
+#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \
+ profiler, tag, delegate_status, interpreter_status) \
+ do { \
+    if (profiler) { \
+ const auto handle = profiler->BeginEvent( \
+ tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \
+ delegate_status, interpreter_status); \
+ profiler->EndEvent(handle); \
+ } \
+ } while (false);
+
+#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_
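
To make the Profiler interface concrete, the sketch below implements a bare-bones profiler and opens a scoped event with one of the macros defined above; the LoggingProfiler class is purely illustrative and not one of the profilers TF Lite provides.

#include <cstdint>
#include <cstdio>

#include "tensorflow/lite/core/api/profiler.h"

// Hypothetical profiler: logs event boundaries and returns sequential handles.
class LoggingProfiler : public tflite::Profiler {
 public:
  uint32_t BeginEvent(const char* tag, EventType event_type,
                      int64_t /*event_metadata1*/,
                      int64_t /*event_metadata2*/) override {
    std::printf("begin '%s' type=%d\n", tag, static_cast<int>(event_type));
    return next_handle_++;
  }
  void EndEvent(uint32_t event_handle) override {
    std::printf("end handle=%u\n", static_cast<unsigned>(event_handle));
  }

 private:
  uint32_t next_handle_ = 0;
};

void RunWithProfiling() {
  LoggingProfiler profiler;
  // Begins a DEFAULT event here; EndEvent fires when the scope closes.
  TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(&profiler, "Invoke");
}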
diff --git a/code/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc b/code/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc
index d8d6fc46..3aac16b6 100644
--- a/code/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc
+++ b/code/lib/tfmicro/tensorflow/lite/core/api/tensor_utils.cc
@@ -17,6 +17,8 @@ limitations under the License.
 #include <string.h>
+#include "tensorflow/lite/c/common.h"
+
namespace tflite {
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
diff --git a/code/lib/tfmicro/tensorflow/lite/kernels/internal/common.h b/code/lib/tfmicro/tensorflow/lite/kernels/internal/common.h
index c1db3587..662a1864 100644
--- a/code/lib/tfmicro/tensorflow/lite/kernels/internal/common.h
+++ b/code/lib/tfmicro/tensorflow/lite/kernels/internal/common.h
@@ -55,9 +55,12 @@ inline void GetActivationMinMax(FusedActivationFunctionType ac,
}
}
-inline float ActivationFunctionWithMinMax(float x, float output_activation_min,
- float output_activation_max) {
- return std::min(std::max(x, output_activation_min), output_activation_max);
+template <typename T>
+inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
+ T output_activation_max) {
+ using std::max;
+ using std::min;
+ return min(max(x, output_activation_min), output_activation_max);
}
// Legacy function, left for compatibility only.
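
Templating ActivationFunctionWithMinMax lets quantized kernels clamp accumulators in their own integer type instead of detouring through float. A usage sketch under that assumption (not code from this patch):

#include <cstdint>

#include "tensorflow/lite/kernels/internal/common.h"

// Sketch: clamp an int32 accumulator to the int8 output activation range.
inline int8_t ClampToInt8(int32_t acc) {
  const int32_t clamped = tflite::ActivationFunctionWithMinMax<int32_t>(
      acc, /*output_activation_min=*/-128, /*output_activation_max=*/127);
  return static_cast<int8_t>(clamped);
}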
@@ -135,23 +138,24 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
#endif
}
-inline int32 MultiplyByQuantizedMultiplierSmallerThanOneExp(
- int32 x, int32 quantized_multiplier, int left_shift) {
+inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
+ int32_t x, int32_t quantized_multiplier, int left_shift) {
using gemmlowp::RoundingDivideByPOT;
using gemmlowp::SaturatingRoundingDoublingHighMul;
return RoundingDivideByPOT(
SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
}
-inline int32 MultiplyByQuantizedMultiplierGreaterThanOne(
- int32 x, int32 quantized_multiplier, int left_shift) {
+inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
+ int32_t x, int32_t quantized_multiplier, int left_shift) {
using gemmlowp::SaturatingRoundingDoublingHighMul;
return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
quantized_multiplier);
}
-inline int32 MultiplyByQuantizedMultiplier(int32 x, int32 quantized_multiplier,
- int shift) {
+inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
+ int32_t quantized_multiplier,
+ int shift) {
using gemmlowp::RoundingDivideByPOT;
using gemmlowp::SaturatingRoundingDoublingHighMul;
int left_shift = shift > 0 ? shift : 0;
@@ -161,16 +165,16 @@ inline int32 MultiplyByQuantizedMultiplier(int32 x, int32 quantized_multiplier,
right_shift);
}
-inline int32 MultiplyByQuantizedMultiplier(int64_t x,
- int32 quantized_multiplier,
- int shift) {
+inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
+ int32_t quantized_multiplier,
+ int shift) {
// Inputs:
// - quantized_multiplier has fixed point at bit 31
// - shift is -31 to +7 (negative for right shift)
//
// Assumptions: The following input ranges are assumed
  // - quantize_scale>=0 (the usual range is (1<<30) to (1<<31)-1)
- // - scaling is chosen so final scaled result fits in int32
+ // - scaling is chosen so final scaled result fits in int32_t
// - input x is in the range -(1<<47) <= x < (1<<47)
assert(quantized_multiplier >= 0);
assert(shift >= -31 && shift < 8);
@@ -215,9 +219,9 @@ inline int CountLeadingSignBits(T integer_input) {
   using U = typename std::make_unsigned<T>::type;
return integer_input >= 0
              ? CountLeadingZeros(static_cast<U>(integer_input)) - 1
- : integer_input != std::numeric_limits