diff --git a/.gitmodules b/.gitmodules index 6c7c1d4f..e2909c41 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,5 +1,5 @@ -[submodule "code/components/esp32-camera-master"] - path = code/components/esp32-camera-master +[submodule "code/components/esp32-camera"] + path = code/components/esp32-camera url = https://github.com/espressif/esp32-camera.git [submodule "code/components/esp-nn"] path = code/components/esp-nn diff --git a/code/components/esp32-camera b/code/components/esp32-camera new file mode 160000 index 00000000..de025b8f --- /dev/null +++ b/code/components/esp32-camera @@ -0,0 +1 @@ +Subproject commit de025b8f40e45b4b264b2cdd385000d151d8986a diff --git a/code/components/jomjol_controlcamera/CMakeLists.txt b/code/components/jomjol_controlcamera/CMakeLists.txt index ec401f60..9f1a3927 100644 --- a/code/components/jomjol_controlcamera/CMakeLists.txt +++ b/code/components/jomjol_controlcamera/CMakeLists.txt @@ -4,6 +4,6 @@ list(APPEND EXTRA_COMPONENT_DIRS $ENV{IDF_PATH}/examples/common_components/proto idf_component_register(SRCS ${app_sources} INCLUDE_DIRS "." - REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc nvs_flash jomjol_fileserver_ota jomjol_controlGPIO) + REQUIRES esp32-camera esp_http_server jomjol_logfile jomjol_image_proc nvs_flash jomjol_fileserver_ota jomjol_controlGPIO) diff --git a/code/components/jomjol_tfliteclass/CMakeLists.txt b/code/components/jomjol_tfliteclass/CMakeLists.txt index 7c2c5594..e0dea9c5 100644 --- a/code/components/jomjol_tfliteclass/CMakeLists.txt +++ b/code/components/jomjol_tfliteclass/CMakeLists.txt @@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*) idf_component_register(SRCS ${app_sources} INCLUDE_DIRS "." - REQUIRES jomjol_image_proc jomjol_logfile esp_http_server esp32-camera-master jomjol_controlcamera jomjol_flowcontroll jomjol_helper) + REQUIRES jomjol_image_proc jomjol_logfile esp_http_server esp32-camera jomjol_controlcamera jomjol_flowcontroll jomjol_helper) diff --git a/code/components/tflite-lib/CMakeLists.txt b/code/components/tflite-lib/CMakeLists.txt deleted file mode 100644 index e09fd092..00000000 --- a/code/components/tflite-lib/CMakeLists.txt +++ /dev/null @@ -1,82 +0,0 @@ -## TODO: GLOB is not a good way to collect files. 
Use explicit file list instead - -cmake_minimum_required(VERSION 3.5) - -set(tflite_dir "${CMAKE_CURRENT_SOURCE_DIR}/tensorflow/lite") -set(tfmicro_dir "${tflite_dir}/micro") -set(tfmicro_frontend_dir "${tflite_dir}/experimental/microfrontend/lib") -set(tfmicro_kernels_dir "${tfmicro_dir}/kernels") - -file(GLOB srcs_micro - "${tfmicro_dir}/*.cc" - "${tfmicro_dir}/*.c") - -file(GLOB src_micro_frontend - "${tfmicro_frontend_dir}/*.c" - "${tfmicro_frontend_dir}/*.cc") -file(GLOB srcs_kernels - "${tfmicro_kernels_dir}/*.c" - "${tfmicro_kernels_dir}/*.cc") - -# remove sources which will be provided by esp_nn -list(REMOVE_ITEM srcs_kernels - "${tfmicro_kernels_dir}/add.cc" - "${tfmicro_kernels_dir}/conv.cc" - "${tfmicro_kernels_dir}/depthwise_conv.cc" - "${tfmicro_kernels_dir}/fully_connected.cc" - "${tfmicro_kernels_dir}/mul.cc" - "${tfmicro_kernels_dir}/pooling.cc" - "${tfmicro_kernels_dir}/softmax.cc") - -FILE(GLOB esp_nn_kernels - "${tfmicro_kernels_dir}/esp_nn/*.cc") - -set(lib_srcs - "${srcs_micro}" - "${srcs_kernels}" - "${esp_nn_kernels}" - "${src_micro_frontend}" - "${tflite_dir}/kernels/kernel_util.cc" - "${tflite_dir}/micro/memory_planner/greedy_memory_planner.cc" - "${tflite_dir}/micro/memory_planner/linear_memory_planner.cc" - "${tflite_dir}/micro/arena_allocator/non_persistent_arena_buffer_allocator.cc" - "${tflite_dir}/micro/arena_allocator/persistent_arena_buffer_allocator.cc" - "${tflite_dir}/micro/arena_allocator/recording_single_arena_buffer_allocator.cc" - "${tflite_dir}/micro/arena_allocator/single_arena_buffer_allocator.cc" - "${tflite_dir}/c/common.cc" - "${tflite_dir}/core/api/error_reporter.cc" - "${tflite_dir}/core/api/flatbuffer_conversions.cc" - "${tflite_dir}/core/api/op_resolver.cc" - "${tflite_dir}/core/api/tensor_utils.cc" - "${tflite_dir}/kernels/internal/quantization_util.cc" - "${tflite_dir}/schema/schema_utils.cc") - -set(priv_req esp-nn) - -# include component requirements which were introduced after IDF version 4.1 -if("${IDF_VERSION_MAJOR}.${IDF_VERSION_MINOR}" VERSION_GREATER "4.1") - list(APPEND priv_req esp_timer driver) -endif() - -idf_component_register( - SRCS "${lib_srcs}" - INCLUDE_DIRS "." 
"third_party/gemmlowp" - "third_party/flatbuffers/include" - "third_party/ruy" - "third_party/kissfft" - REQUIRES ${pub_req} - PRIV_REQUIRES ${priv_req}) - -# Reduce the level of paranoia to be able to compile TF sources -target_compile_options(${COMPONENT_LIB} PRIVATE - -Wno-maybe-uninitialized - -Wno-missing-field-initializers - -Wno-error=sign-compare - -Wno-error=double-promotion - -DESP_NN # enables ESP-NN optimizations by Espressif - -Wno-type-limits) - -target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -Wno-nonnull) -target_compile_options(${COMPONENT_LIB} PRIVATE $<$: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -Wno-return-type -Wno-strict-aliasing -std=gnu++14 >) -target_compile_options(${COMPONENT_LIB} INTERFACE $<$>:-DTF_LITE_STATIC_MEMORY>) -target_link_libraries(${COMPONENT_LIB} PRIVATE -lm) diff --git a/code/components/tflite-lib/tensorflow/lite/builtin_ops.h b/code/components/tflite-lib/tensorflow/lite/builtin_ops.h deleted file mode 100644 index 33707308..00000000 --- a/code/components/tflite-lib/tensorflow/lite/builtin_ops.h +++ /dev/null @@ -1,194 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_ -#define TENSORFLOW_LITE_BUILTIN_OPS_H_ - -// DO NOT EDIT MANUALLY: This file is automatically generated by -// `schema/builtin_ops_header/generator.cc`. - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// The enum for builtin operators. -// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special -// ops which are not real built-in ops. 
-typedef enum { - kTfLiteBuiltinAdd = 0, - kTfLiteBuiltinAveragePool2d = 1, - kTfLiteBuiltinConcatenation = 2, - kTfLiteBuiltinConv2d = 3, - kTfLiteBuiltinDepthwiseConv2d = 4, - kTfLiteBuiltinDepthToSpace = 5, - kTfLiteBuiltinDequantize = 6, - kTfLiteBuiltinEmbeddingLookup = 7, - kTfLiteBuiltinFloor = 8, - kTfLiteBuiltinFullyConnected = 9, - kTfLiteBuiltinHashtableLookup = 10, - kTfLiteBuiltinL2Normalization = 11, - kTfLiteBuiltinL2Pool2d = 12, - kTfLiteBuiltinLocalResponseNormalization = 13, - kTfLiteBuiltinLogistic = 14, - kTfLiteBuiltinLshProjection = 15, - kTfLiteBuiltinLstm = 16, - kTfLiteBuiltinMaxPool2d = 17, - kTfLiteBuiltinMul = 18, - kTfLiteBuiltinRelu = 19, - kTfLiteBuiltinReluN1To1 = 20, - kTfLiteBuiltinRelu6 = 21, - kTfLiteBuiltinReshape = 22, - kTfLiteBuiltinResizeBilinear = 23, - kTfLiteBuiltinRnn = 24, - kTfLiteBuiltinSoftmax = 25, - kTfLiteBuiltinSpaceToDepth = 26, - kTfLiteBuiltinSvdf = 27, - kTfLiteBuiltinTanh = 28, - kTfLiteBuiltinConcatEmbeddings = 29, - kTfLiteBuiltinSkipGram = 30, - kTfLiteBuiltinCall = 31, - kTfLiteBuiltinCustom = 32, - kTfLiteBuiltinEmbeddingLookupSparse = 33, - kTfLiteBuiltinPad = 34, - kTfLiteBuiltinUnidirectionalSequenceRnn = 35, - kTfLiteBuiltinGather = 36, - kTfLiteBuiltinBatchToSpaceNd = 37, - kTfLiteBuiltinSpaceToBatchNd = 38, - kTfLiteBuiltinTranspose = 39, - kTfLiteBuiltinMean = 40, - kTfLiteBuiltinSub = 41, - kTfLiteBuiltinDiv = 42, - kTfLiteBuiltinSqueeze = 43, - kTfLiteBuiltinUnidirectionalSequenceLstm = 44, - kTfLiteBuiltinStridedSlice = 45, - kTfLiteBuiltinBidirectionalSequenceRnn = 46, - kTfLiteBuiltinExp = 47, - kTfLiteBuiltinTopkV2 = 48, - kTfLiteBuiltinSplit = 49, - kTfLiteBuiltinLogSoftmax = 50, - kTfLiteBuiltinDelegate = 51, - kTfLiteBuiltinBidirectionalSequenceLstm = 52, - kTfLiteBuiltinCast = 53, - kTfLiteBuiltinPrelu = 54, - kTfLiteBuiltinMaximum = 55, - kTfLiteBuiltinArgMax = 56, - kTfLiteBuiltinMinimum = 57, - kTfLiteBuiltinLess = 58, - kTfLiteBuiltinNeg = 59, - kTfLiteBuiltinPadv2 = 60, - kTfLiteBuiltinGreater = 61, - kTfLiteBuiltinGreaterEqual = 62, - kTfLiteBuiltinLessEqual = 63, - kTfLiteBuiltinSelect = 64, - kTfLiteBuiltinSlice = 65, - kTfLiteBuiltinSin = 66, - kTfLiteBuiltinTransposeConv = 67, - kTfLiteBuiltinSparseToDense = 68, - kTfLiteBuiltinTile = 69, - kTfLiteBuiltinExpandDims = 70, - kTfLiteBuiltinEqual = 71, - kTfLiteBuiltinNotEqual = 72, - kTfLiteBuiltinLog = 73, - kTfLiteBuiltinSum = 74, - kTfLiteBuiltinSqrt = 75, - kTfLiteBuiltinRsqrt = 76, - kTfLiteBuiltinShape = 77, - kTfLiteBuiltinPow = 78, - kTfLiteBuiltinArgMin = 79, - kTfLiteBuiltinFakeQuant = 80, - kTfLiteBuiltinReduceProd = 81, - kTfLiteBuiltinReduceMax = 82, - kTfLiteBuiltinPack = 83, - kTfLiteBuiltinLogicalOr = 84, - kTfLiteBuiltinOneHot = 85, - kTfLiteBuiltinLogicalAnd = 86, - kTfLiteBuiltinLogicalNot = 87, - kTfLiteBuiltinUnpack = 88, - kTfLiteBuiltinReduceMin = 89, - kTfLiteBuiltinFloorDiv = 90, - kTfLiteBuiltinReduceAny = 91, - kTfLiteBuiltinSquare = 92, - kTfLiteBuiltinZerosLike = 93, - kTfLiteBuiltinFill = 94, - kTfLiteBuiltinFloorMod = 95, - kTfLiteBuiltinRange = 96, - kTfLiteBuiltinResizeNearestNeighbor = 97, - kTfLiteBuiltinLeakyRelu = 98, - kTfLiteBuiltinSquaredDifference = 99, - kTfLiteBuiltinMirrorPad = 100, - kTfLiteBuiltinAbs = 101, - kTfLiteBuiltinSplitV = 102, - kTfLiteBuiltinUnique = 103, - kTfLiteBuiltinCeil = 104, - kTfLiteBuiltinReverseV2 = 105, - kTfLiteBuiltinAddN = 106, - kTfLiteBuiltinGatherNd = 107, - kTfLiteBuiltinCos = 108, - kTfLiteBuiltinWhere = 109, - kTfLiteBuiltinRank = 110, - kTfLiteBuiltinElu = 111, - 
kTfLiteBuiltinReverseSequence = 112, - kTfLiteBuiltinMatrixDiag = 113, - kTfLiteBuiltinQuantize = 114, - kTfLiteBuiltinMatrixSetDiag = 115, - kTfLiteBuiltinRound = 116, - kTfLiteBuiltinHardSwish = 117, - kTfLiteBuiltinIf = 118, - kTfLiteBuiltinWhile = 119, - kTfLiteBuiltinNonMaxSuppressionV4 = 120, - kTfLiteBuiltinNonMaxSuppressionV5 = 121, - kTfLiteBuiltinScatterNd = 122, - kTfLiteBuiltinSelectV2 = 123, - kTfLiteBuiltinDensify = 124, - kTfLiteBuiltinSegmentSum = 125, - kTfLiteBuiltinBatchMatmul = 126, - kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127, - kTfLiteBuiltinCumsum = 128, - kTfLiteBuiltinCallOnce = 129, - kTfLiteBuiltinBroadcastTo = 130, - kTfLiteBuiltinRfft2d = 131, - kTfLiteBuiltinConv3d = 132, - kTfLiteBuiltinImag = 133, - kTfLiteBuiltinReal = 134, - kTfLiteBuiltinComplexAbs = 135, - kTfLiteBuiltinHashtable = 136, - kTfLiteBuiltinHashtableFind = 137, - kTfLiteBuiltinHashtableImport = 138, - kTfLiteBuiltinHashtableSize = 139, - kTfLiteBuiltinReduceAll = 140, - kTfLiteBuiltinConv3dTranspose = 141, - kTfLiteBuiltinVarHandle = 142, - kTfLiteBuiltinReadVariable = 143, - kTfLiteBuiltinAssignVariable = 144, - kTfLiteBuiltinBroadcastArgs = 145, - kTfLiteBuiltinRandomStandardNormal = 146, - kTfLiteBuiltinBucketize = 147, - kTfLiteBuiltinRandomUniform = 148, - kTfLiteBuiltinMultinomial = 149, - kTfLiteBuiltinGelu = 150, - kTfLiteBuiltinDynamicUpdateSlice = 151, - kTfLiteBuiltinRelu0To1 = 152, - kTfLiteBuiltinUnsortedSegmentProd = 153, - kTfLiteBuiltinUnsortedSegmentMax = 154, - kTfLiteBuiltinUnsortedSegmentSum = 155, - kTfLiteBuiltinAtan2 = 156, - kTfLiteBuiltinUnsortedSegmentMin = 157, - kTfLiteBuiltinSign = 158, -} TfLiteBuiltinOperator; - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus -#endif // TENSORFLOW_LITE_BUILTIN_OPS_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/c/common.cc b/code/components/tflite-lib/tensorflow/lite/c/common.cc deleted file mode 100644 index f10d3bfe..00000000 --- a/code/components/tflite-lib/tensorflow/lite/c/common.cc +++ /dev/null @@ -1,307 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/c/common.h" - -#include "tensorflow/lite/c/c_api_types.h" -#ifdef TF_LITE_TENSORFLOW_PROFILER -#include "tensorflow/lite/tensorflow_profiler_logger.h" -#endif - -#ifndef TF_LITE_STATIC_MEMORY -#include <stdlib.h> -#include <string.h> -#endif // TF_LITE_STATIC_MEMORY - -extern "C" { - -size_t TfLiteIntArrayGetSizeInBytes(int size) { - static TfLiteIntArray dummy; - - size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size; -#if defined(_MSC_VER) - // Context for why this is needed is in http://b/189926408#comment21 - computed_size -= sizeof(dummy.data[0]); -#endif - return computed_size; -} - -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) { - if (a == b) return 1; - if (a == nullptr || b == nullptr) return 0; - return TfLiteIntArrayEqualsArray(a, b->size, b->data); -} - -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]) { - if (a == nullptr) return (b_size == 0); - if (a->size != b_size) return 0; - int i = 0; - for (; i < a->size; i++) - if (a->data[i] != b_data[i]) return 0; - return 1; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteIntArray* TfLiteIntArrayCreate(int size) { - size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size); - if (alloc_size <= 0) return nullptr; - TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); - if (!ret) return ret; - ret->size = size; - return ret; -} - -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) { - if (!src) return nullptr; - TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size); - if (ret) { - memcpy(ret->data, src->data, src->size * sizeof(int)); - } - return ret; -} - -void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); } - -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteFloatArrayGetSizeInBytes(int size) { - static TfLiteFloatArray dummy; - - int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size; -#if defined(_MSC_VER) - // Context for why this is needed is in http://b/189926408#comment21 - computed_size -= sizeof(dummy.data[0]); -#endif - return computed_size; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteFloatArray* TfLiteFloatArrayCreate(int size) { - TfLiteFloatArray* ret = - (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size)); - ret->size = size; - return ret; -} - -void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); } - -void TfLiteTensorDataFree(TfLiteTensor* t) { - if (t->allocation_type == kTfLiteDynamic || - t->allocation_type == kTfLitePersistentRo) { - if (t->data.raw) { -#ifdef TF_LITE_TENSORFLOW_PROFILER - tflite::OnTfLiteTensorDealloc(t); -#endif - free(t->data.raw); - } - } - t->data.raw = nullptr; -} - -void TfLiteQuantizationFree(TfLiteQuantization* quantization) { - if (quantization->type == kTfLiteAffineQuantization) { - TfLiteAffineQuantization* q_params = - (TfLiteAffineQuantization*)(quantization->params); - if (q_params->scale) { - TfLiteFloatArrayFree(q_params->scale); - q_params->scale = nullptr; - } - if (q_params->zero_point) { - TfLiteIntArrayFree(q_params->zero_point); - q_params->zero_point = nullptr; - } - free(q_params); - } - quantization->params = nullptr; - quantization->type = kTfLiteNoQuantization; -} - -void TfLiteSparsityFree(TfLiteSparsity* sparsity) { - if (sparsity == nullptr) { - return; - } - - if (sparsity->traversal_order) { - TfLiteIntArrayFree(sparsity->traversal_order); - sparsity->traversal_order = nullptr; - } - - if (sparsity->block_map) { - TfLiteIntArrayFree(sparsity->block_map); -
sparsity->block_map = nullptr; - } - - if (sparsity->dim_metadata) { - int i = 0; - for (; i < sparsity->dim_metadata_size; i++) { - TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i]; - if (metadata.format == kTfLiteDimSparseCSR) { - TfLiteIntArrayFree(metadata.array_segments); - metadata.array_segments = nullptr; - TfLiteIntArrayFree(metadata.array_indices); - metadata.array_indices = nullptr; - } - } - free(sparsity->dim_metadata); - sparsity->dim_metadata = nullptr; - } - - free(sparsity); -} - -void TfLiteTensorFree(TfLiteTensor* t) { - TfLiteTensorDataFree(t); - if (t->dims) TfLiteIntArrayFree(t->dims); - t->dims = nullptr; - - if (t->dims_signature) { - TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature); - } - t->dims_signature = nullptr; - - TfLiteQuantizationFree(&t->quantization); - TfLiteSparsityFree(t->sparsity); - t->sparsity = nullptr; -} - -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor) { - TfLiteTensorFree(tensor); - tensor->type = type; - tensor->name = name; - tensor->dims = dims; - tensor->params = quantization; - tensor->data.raw = buffer; - tensor->bytes = size; - tensor->allocation_type = allocation_type; - tensor->allocation = allocation; - tensor->is_variable = is_variable; - - tensor->quantization.type = kTfLiteNoQuantization; - tensor->quantization.params = nullptr; -} - -TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) { - if (!src || !dst) return kTfLiteOk; - if (src->bytes != dst->bytes) return kTfLiteError; - if (src == dst) return kTfLiteOk; - - dst->type = src->type; - if (dst->dims) TfLiteIntArrayFree(dst->dims); - dst->dims = TfLiteIntArrayCopy(src->dims); - memcpy(dst->data.raw, src->data.raw, src->bytes); - dst->buffer_handle = src->buffer_handle; - dst->data_is_stale = src->data_is_stale; - dst->delegate = src->delegate; - - return kTfLiteOk; -} - -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) { - if (tensor->allocation_type != kTfLiteDynamic && - tensor->allocation_type != kTfLitePersistentRo) { - return; - } - // TODO(b/145340303): Tensor data should be aligned. 
- if (!tensor->data.raw) { - tensor->data.raw = (char*)malloc(num_bytes); -#ifdef TF_LITE_TENSORFLOW_PROFILER - tflite::OnTfLiteTensorAlloc(tensor, num_bytes); -#endif - } else if (num_bytes > tensor->bytes) { -#ifdef TF_LITE_TENSORFLOW_PROFILER - tflite::OnTfLiteTensorDealloc(tensor); -#endif - tensor->data.raw = (char*)realloc(tensor->data.raw, num_bytes); -#ifdef TF_LITE_TENSORFLOW_PROFILER - tflite::OnTfLiteTensorAlloc(tensor, num_bytes); -#endif - } - tensor->bytes = num_bytes; -} -#endif // TF_LITE_STATIC_MEMORY - -const char* TfLiteTypeGetName(TfLiteType type) { - switch (type) { - case kTfLiteNoType: - return "NOTYPE"; - case kTfLiteFloat32: - return "FLOAT32"; - case kTfLiteUInt16: - return "UINT16"; - case kTfLiteInt16: - return "INT16"; - case kTfLiteInt32: - return "INT32"; - case kTfLiteUInt32: - return "UINT32"; - case kTfLiteUInt8: - return "UINT8"; - case kTfLiteInt8: - return "INT8"; - case kTfLiteInt64: - return "INT64"; - case kTfLiteUInt64: - return "UINT64"; - case kTfLiteBool: - return "BOOL"; - case kTfLiteComplex64: - return "COMPLEX64"; - case kTfLiteComplex128: - return "COMPLEX128"; - case kTfLiteString: - return "STRING"; - case kTfLiteFloat16: - return "FLOAT16"; - case kTfLiteFloat64: - return "FLOAT64"; - case kTfLiteResource: - return "RESOURCE"; - case kTfLiteVariant: - return "VARIANT"; - } - return "Unknown type"; -} - -TfLiteDelegate TfLiteDelegateCreate() { return TfLiteDelegate{}; } - -struct TfLiteOpaqueDelegateStruct* TfLiteOpaqueDelegateCreate( - const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder) { - if (!opaque_delegate_builder) return nullptr; - - TfLiteDelegate* result = new TfLiteDelegate{}; - result->opaque_delegate_builder = new TfLiteOpaqueDelegateBuilder{}; - *(result->opaque_delegate_builder) = *opaque_delegate_builder; - - return reinterpret_cast<struct TfLiteOpaqueDelegateStruct*>(result); -} - -void TfLiteOpaqueDelegateDelete( - const struct TfLiteOpaqueDelegateStruct* opaque_delegate) { - if (!opaque_delegate) return; - - const TfLiteDelegate* tflite_delegate = - reinterpret_cast<const TfLiteDelegate*>(opaque_delegate); - delete tflite_delegate->opaque_delegate_builder; - delete tflite_delegate; -} - -} // extern "C" diff --git a/code/components/tflite-lib/tensorflow/lite/c/common.h b/code/components/tflite-lib/tensorflow/lite/c/common.h deleted file mode 100644 index f60b65ed..00000000 --- a/code/components/tflite-lib/tensorflow/lite/c/common.h +++ /dev/null @@ -1,1095 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This file defines common C types and APIs for implementing operations, -// delegates and other constructs in TensorFlow Lite. The actual operations and -// delegates can be defined using C++, but the interface between the interpreter -// and the operations are C.
-// -// Summary of abstractions -// TF_LITE_ENSURE - Self-sufficient error checking -// TfLiteStatus - Status reporting -// TfLiteIntArray - stores tensor shapes (dims), -// TfLiteContext - allows an op to access the tensors -// TfLiteTensor - tensor (a multidimensional array) -// TfLiteNode - a single node or operation -// TfLiteRegistration - the implementation of a conceptual operation. -// TfLiteDelegate - allows delegation of nodes to alternative backends. -// -// Some abstractions in this file are created and managed by Interpreter. -// -// NOTE: The order of values in these structs are "semi-ABI stable". New values -// should be added only to the end of structs and never reordered. - -#ifndef TENSORFLOW_LITE_C_COMMON_H_ -#define TENSORFLOW_LITE_C_COMMON_H_ - -#include <stdbool.h> -#include <stddef.h> -#include <stdint.h> - -#include "tensorflow/lite/c/c_api_types.h" // IWYU pragma: export - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// The list of external context types known to TF Lite. This list exists solely -// to avoid conflicts and to ensure ops can share the external contexts they -// need. Access to the external contexts is controlled by one of the -// corresponding support files. -typedef enum TfLiteExternalContextType { - kTfLiteEigenContext = 0, // include eigen_support.h to use. - kTfLiteGemmLowpContext = 1, // include gemm_support.h to use. - kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support. - kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use. - kTfLiteMaxExternalContexts = 4 -} TfLiteExternalContextType; - -// Forward declare so dependent structs and methods can reference these types -// prior to the struct definitions. -struct TfLiteContext; -struct TfLiteDelegate; -struct TfLiteRegistration; -struct TfLiteOpaqueDelegateStruct; -struct TfLiteOpaqueDelegateBuilder; - -// An external context is a collection of information unrelated to the TF Lite -// framework, but useful to a subset of the ops. TF Lite knows very little -// about the actual contexts, but it keeps a list of them, and is able to -// refresh them if configurations like the number of recommended threads -// change. -typedef struct TfLiteExternalContext { - TfLiteExternalContextType type; - TfLiteStatus (*Refresh)(struct TfLiteContext* context); -} TfLiteExternalContext; - -#define kTfLiteOptionalTensor (-1) - -// Fixed size list of integers. Used for dimensions and inputs/outputs tensor -// indices -typedef struct TfLiteIntArray { - int size; - -#if defined(_MSC_VER) - // Context for why this is needed is in http://b/189926408#comment21 - int data[1]; -#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1) || \ - defined(HEXAGON) || \ - (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) - // gcc 6.1+ have a bug where flexible members aren't properly handled - // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c - int data[0]; -#else - int data[]; -#endif -} TfLiteIntArray; - -// Given the size (number of elements) in a TfLiteIntArray, calculate its size -// in bytes. -size_t TfLiteIntArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteIntArrayFree(). -TfLiteIntArray* TfLiteIntArrayCreate(int size); -#endif - -// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
-int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); - -// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a copy of an array passed as `src`. -// You are expected to free memory with TfLiteIntArrayFree -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); - -// Free memory of array `a`. -void TfLiteIntArrayFree(TfLiteIntArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Fixed size list of floats. Used for per-channel quantization. -typedef struct TfLiteFloatArray { - int size; -#if defined(_MSC_VER) - // Context for why this is needed is in http://b/189926408#comment21 - float data[1]; -#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1) || \ - defined(HEXAGON) || \ - (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) - // gcc 6.1+ have a bug where flexible members aren't properly handled - // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c - float data[0]; -#else - float data[]; -#endif -} TfLiteFloatArray; - -// Given the size (number of elements) in a TfLiteFloatArray, calculate its size -// in bytes. -int TfLiteFloatArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteFloatArrayFree(). -TfLiteFloatArray* TfLiteFloatArrayCreate(int size); - -// Free memory of array `a`. -void TfLiteFloatArrayFree(TfLiteFloatArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Since we must not depend on any libraries, define a minimal subset of -// error macros while avoiding names that have pre-conceived meanings like -// assert and check. - -// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than -// calling the context->ReportError function directly, so that message strings -// can be stripped out if the binary size needs to be severely optimized. -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) \ - do { \ - (context)->ReportError((context), __VA_ARGS__); \ - } while (false) - -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \ - do { \ - if ((context) != nullptr) { \ - (context)->ReportError((context), __VA_ARGS__); \ - } \ - } while (false) -#else // TF_LITE_STRIP_ERROR_STRINGS -#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) -#define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) -#endif // TF_LITE_STRIP_ERROR_STRINGS - -// Check whether value is true, and if not return kTfLiteError from -// the current function (and report the error string msg). -#define TF_LITE_ENSURE_MSG(context, value, msg) \ - do { \ - if (!(value)) { \ - TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \ - return kTfLiteError; \ - } \ - } while (0) - -// Check whether the value `a` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. 
-#define TF_LITE_ENSURE(context, a) \ - do { \ - if (!(a)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ - __LINE__, #a); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_STATUS(a) \ - do { \ - const TfLiteStatus s = (a); \ - if (s != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Check whether the value `a == b` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -// `a` and `b` may be evaluated more than once, so no side effects or -// extremely expensive computations should be done. -// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. -#define TF_LITE_ENSURE_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \ - __LINE__, #a, #b, (a), (b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \ - __LINE__, #a, #b, TfLiteTypeGetName(a), \ - TfLiteTypeGetName(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \ - do { \ - auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \ - if (delta > epsilon) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \ - __FILE__, __LINE__, #a, #b, static_cast<double>(a), \ - static_cast<double>(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_OK(context, status) \ - do { \ - const TfLiteStatus s = (status); \ - if ((s) != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Single-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex64 { - float re, im; // real and imaginary parts, respectively. -} TfLiteComplex64; - -// Double-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex128 { - double re, im; // real and imaginary parts, respectively. -} TfLiteComplex128; - -// Half precision data type compatible with the C99 definition. -typedef struct TfLiteFloat16 { - uint16_t data; -} TfLiteFloat16; - -// Return the name of a given type, for error reporting purposes. -const char* TfLiteTypeGetName(TfLiteType type); - -// SupportedQuantizationTypes. -typedef enum TfLiteQuantizationType { - // No quantization. - kTfLiteNoQuantization = 0, - // Affine quantization (with support for per-channel quantization). - // Corresponds to TfLiteAffineQuantization. - kTfLiteAffineQuantization = 1, -} TfLiteQuantizationType; - -// Structure specifying the quantization used by the tensor, if-any. -typedef struct TfLiteQuantization { - // The type of quantization held by params. - TfLiteQuantizationType type; - // Holds an optional reference to a quantization param structure. The actual - // type depends on the value of the `type` field (see the comment there for - // the values and corresponding types). - void* params; -} TfLiteQuantization; - -// Parameters for asymmetric quantization across a dimension (i.e per output -// channel quantization). -// quantized_dimension specifies which dimension the scales and zero_points -// correspond to.
-// For a particular value in quantized_dimension, quantized values can be -// converted back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteAffineQuantization { - TfLiteFloatArray* scale; - TfLiteIntArray* zero_point; - int32_t quantized_dimension; -} TfLiteAffineQuantization; - -/* A union of pointers that points to memory for a given tensor. */ -typedef union TfLitePtrUnion { - /* Do not access these members directly, if possible, use - * GetTensorData<TYPE>(tensor) instead, otherwise only access .data, as other - * members are deprecated. */ - int32_t* i32; - uint32_t* u32; - int64_t* i64; - uint64_t* u64; - float* f; - TfLiteFloat16* f16; - double* f64; - char* raw; - const char* raw_const; - uint8_t* uint8; - bool* b; - int16_t* i16; - uint16_t* ui16; - TfLiteComplex64* c64; - TfLiteComplex128* c128; - int8_t* int8; - /* Only use this member. */ - void* data; -} TfLitePtrUnion; - -// Memory allocation strategies. -// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. -// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, -// and available during eval. -// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and -// only available during eval. -// * kTfLiteDynamic: Allocated during eval, or for string tensors. -// * kTfLitePersistentRo: Allocated and populated during prepare. This is -// useful for tensors that can be computed during prepare and treated -// as constant inputs for downstream ops (also in prepare). -// * kTfLiteCustom: Custom memory allocation provided by the user. See -// TfLiteCustomAllocation below. -typedef enum TfLiteAllocationType { - kTfLiteMemNone = 0, - kTfLiteMmapRo, - kTfLiteArenaRw, - kTfLiteArenaRwPersistent, - kTfLiteDynamic, - kTfLitePersistentRo, - kTfLiteCustom, -} TfLiteAllocationType; - -// The delegates should use zero or positive integers to represent handles. -// -1 is reserved from unallocated status. -typedef int TfLiteBufferHandle; -enum { - kTfLiteNullBufferHandle = -1, -}; - -// Storage format of each dimension in a sparse tensor. -typedef enum TfLiteDimensionType { - kTfLiteDimDense = 0, - kTfLiteDimSparseCSR, -} TfLiteDimensionType; - -// Metadata to encode each dimension in a sparse tensor. -typedef struct TfLiteDimensionMetadata { - TfLiteDimensionType format; - int dense_size; - TfLiteIntArray* array_segments; - TfLiteIntArray* array_indices; -} TfLiteDimensionMetadata; - -// Parameters used to encode a sparse tensor. For detailed explanation of each -// field please refer to lite/schema/schema.fbs. -typedef struct TfLiteSparsity { - TfLiteIntArray* traversal_order; - TfLiteIntArray* block_map; - TfLiteDimensionMetadata* dim_metadata; - int dim_metadata_size; -} TfLiteSparsity; - -// Defines a custom memory allocation not owned by the runtime. -// `data` should be aligned to kDefaultTensorAlignment defined in -// lite/util.h. (Currently 64 bytes) -// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. -typedef struct TfLiteCustomAllocation { - void* data; - size_t bytes; -} TfLiteCustomAllocation; - -// The flags used in `Interpreter::SetCustomAllocationForTensor`. -// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. -typedef enum TfLiteCustomAllocationFlags { - kTfLiteCustomAllocationFlagsNone = 0, - // Skips checking whether allocation.data points to an aligned buffer as - // expected by the TFLite runtime. - // NOTE: Setting this flag can cause crashes when calling Invoke().
- // Use with caution. - kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, -} TfLiteCustomAllocationFlags; - -// A tensor in the interpreter system which is a wrapper around a buffer of -// data including a dimensionality (or NULL if not currently defined). -#ifndef TF_LITE_STATIC_MEMORY -typedef struct TfLiteTensor { - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - // Quantization information. - TfLiteQuantizationParams params; - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // An opaque pointer to a tflite::MMapAllocation - const void* allocation; - - // Null-terminated name of this tensor. - const char* name; - - // The delegate which knows how to handle `buffer_handle`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; - - // An integer buffer handle that can be handled by `delegate`. - // The value is valid only when delegate is not null. - // WARNING: This is an experimental interface that is subject to change. - TfLiteBufferHandle buffer_handle; - - // If the delegate uses its own buffer (e.g. GPU memory), the delegate is - // responsible to set data_is_stale to true. - // `delegate->CopyFromBufferHandle` can be called to copy the data from - // delegate buffer. - // WARNING: This is an // experimental interface that is subject to change. - bool data_is_stale; - - // True if the tensor is a variable. - bool is_variable; - - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Parameters used to encode a sparse tensor. - // This is optional. The field is NULL if a tensor is dense. - // WARNING: This is an experimental interface that is subject to change. - TfLiteSparsity* sparsity; - - // Optional. Encodes shapes with unknown dimensions with -1. This field is - // only populated when unknown dimensions exist in a read-write tensor (i.e. - // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and - // `dims_signature` contains [1, -1, -1, 3]). Note that this field only - // exists when TF_LITE_STATIC_MEMORY is not defined. - const TfLiteIntArray* dims_signature; -} TfLiteTensor; - -// A structure representing an instance of a node. -// This structure only exhibits the inputs, outputs, user defined data and some -// node properties (like statefulness), not other features like the type. -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. 
- TfLiteIntArray* outputs; - - // intermediate tensors to this node expressed as indices into the simulator's - // tensors. - TfLiteIntArray* intermediates; - - // Temporary tensors uses during the computations. This usually contains no - // tensors, but ops are allowed to change that if they need scratch space of - // any sort. - TfLiteIntArray* temporaries; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; - - // The pointer to the delegate. This is non-null only when the node is - // created by calling `interpreter.ModifyGraphWithDelegate`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; - - // Whether this op might have side effect (e.g. stateful op). - bool might_have_side_effect; -} TfLiteNode; -#else // defined(TF_LITE_STATIC_MEMORY)? -// NOTE: This flag is opt-in only at compile time. -// -// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct -// contains only the minimum fields required to initialize and prepare a micro -// inference graph. The fields in this struct have been ordered from -// largest-to-smallest for optimal struct sizeof. -// -// This struct does not use: -// - allocation -// - buffer_handle -// - data_is_stale -// - delegate -// - dims_signature -// - name -// - sparsity -typedef struct TfLiteTensor { - // TODO(b/155784997): Consider consolidating these quantization fields: - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Quantization information. - TfLiteQuantizationParams params; - - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - - // True if the tensor is a variable. - bool is_variable; -} TfLiteTensor; - -// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains -// only the minimum fields required to represent a node. -// -// This struct does not use: -// - delegate -// - intermediates -// - temporaries -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. 
- TfLiteIntArray* outputs; - - // intermediate tensors to this node expressed as indices into the simulator's - // tensors. - TfLiteIntArray* intermediates; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; -} TfLiteNode; -#endif // TF_LITE_STATIC_MEMORY - -// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount -// of information required for a kernel to run during TfLiteRegistration::Eval. -// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM -// builds with this flag by default internally. -typedef struct TfLiteEvalTensor { - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. - TfLiteIntArray* dims; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; -} TfLiteEvalTensor; - -#ifndef TF_LITE_STATIC_MEMORY -// Free data memory of tensor `t`. -void TfLiteTensorDataFree(TfLiteTensor* t); - -// Free quantization data. -void TfLiteQuantizationFree(TfLiteQuantization* quantization); - -// Free sparsity parameters. -void TfLiteSparsityFree(TfLiteSparsity* sparsity); - -// Free memory of tensor `t`. -void TfLiteTensorFree(TfLiteTensor* t); - -// Set all of a tensor's fields (and free any previously allocated data). -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor); - -// Copies the contents of 'src' in 'dst'. -// Function does nothing if either 'src' or 'dst' is passed as nullptr and -// return kTfLiteOk. -// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size. -// Note function copies contents, so it won't create new data pointer -// or change allocation type. -// All Tensor related properties will be copied from 'src' to 'dst' like -// quantization, sparsity, ... -TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst); - -// Resize the allocated data of a (dynamic) tensor. Tensors with allocation -// types other than kTfLiteDynamic will be ignored. -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); -#endif // TF_LITE_STATIC_MEMORY - -// WARNING: This is an experimental interface that is subject to change. -// -// Currently, TfLiteDelegateParams has to be allocated in a way that it's -// trivially destructable. It will be stored as `builtin_data` field in -// `TfLiteNode` of the delegate node. -// -// See also the `CreateDelegateParams` function in `interpreter.cc` details. -typedef struct TfLiteDelegateParams { - struct TfLiteDelegate* delegate; - TfLiteIntArray* nodes_to_replace; - TfLiteIntArray* input_tensors; - TfLiteIntArray* output_tensors; -} TfLiteDelegateParams; - -typedef struct TfLiteContext { - // Number of tensors in the context. 
- size_t tensors_size; - - // The execution plan contains a list of the node indices in execution - // order. execution_plan->size is the current number of nodes. And, - // execution_plan->data[0] is the first node that needs to be run. - // TfLiteDelegates can traverse the current execution plan by iterating - // through each member of this array and using GetNodeAndRegistration() to - // access details about a node. i.e. - // - // TfLiteIntArray* execution_plan; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan)); - // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) { - // int node_index = execution_plan->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, &reg); - // } - // Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime. - // Future calls to GetExecutionPlan invalidates earlier outputs. The following - // code snippet shows the issue of such an invocation pattern. After calling - // CheckNode, subsequent access to `plan_1st` is undefined. - // - // void CheckNode(const TfLiteNode* node) { - // ... - // TfLiteIntArray* plan_2nd; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd)); - // ... - // } - // - // TfLiteIntArray* plan_1st; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st)); - // for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) { - // int node_index = plan_1st->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, &reg); - // CheckNode(node); - // } - // - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context, - TfLiteIntArray** execution_plan); - - // An array of tensors in the interpreter context (of length `tensors_size`) - TfLiteTensor* tensors; - - // opaque full context ptr (an opaque c++ data structure) - void* impl_; - - // Request memory pointer be resized. Updates dimensions on the tensor. - // NOTE: ResizeTensor takes ownership of newSize. - TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor, - TfLiteIntArray* new_size); - // Request that an error be reported with format string msg. - void (*ReportError)(struct TfLiteContext*, const char* msg, ...); - - // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If - // non-null, the value pointed to by `first_new_tensor_index` will be set to - // the index of the first new tensor. - TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add, - int* first_new_tensor_index); - - // Get a Tensor node by node_index. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetNodeAndRegistration)( - struct TfLiteContext*, int node_index, TfLiteNode** node, - struct TfLiteRegistration** registration); - - // Replace ops with one or more stub delegate operations. This function - // does not take ownership of `nodes_to_replace`. - TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)( - struct TfLiteContext*, struct TfLiteRegistration registration, - const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate); - - // Number of threads that are recommended to subsystems like gemmlowp and - // eigen. - int recommended_num_threads; - - // Access external contexts by type. - // WARNING: This is an experimental interface that is subject to change.
- TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*, - TfLiteExternalContextType); - // Set the value of a external context. Does not take ownership of the - // pointer. - // WARNING: This is an experimental interface that is subject to change. - void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType, - TfLiteExternalContext*); - - // Flag for allowing float16 precision for FP32 calculation. - // default: false. - // WARNING: This is an experimental API and subject to change. - bool allow_fp32_relax_to_fp16; - - // Pointer to the op-level profiler, if set; nullptr otherwise. - void* profiler; - - // Allocate persistent buffer which has the same life time as the interpreter. - // Returns nullptr on failure. - // The memory is allocated from heap for TFL, and from tail in TFLM. - // This method is only available in Init or Prepare stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes); - - // Allocate a buffer which will be deallocated right after invoke phase. - // The memory is allocated from heap in TFL, and from volatile arena in TFLM. - // This method is only available in invoke stage. - // NOTE: If possible use RequestScratchBufferInArena method to avoid memory - // allocation during inference time. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes, - void** ptr); - - // Request a scratch buffer in the arena through static memory planning. - // This method is only available in Prepare stage and the buffer is allocated - // by the interpreter between Prepare and Eval stage. In Eval stage, - // GetScratchBuffer API can be used to fetch the address. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx, - size_t bytes, int* buffer_idx); - - // Get the scratch buffer pointer. - // This method is only available in Eval stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx); - - // Resize the memory pointer of the `tensor`. This method behaves the same as - // `ResizeTensor`, except that it makes a copy of the shape array internally - // so the shape array could be deallocated right afterwards. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx, - TfLiteTensor* tensor, int dims, - const int* shape); - - // This method provides a preview of post-delegation partitioning. Each - // TfLiteDelegateParams in the referenced array corresponds to one instance of - // the delegate kernel. - // Example usage: - // - // TfLiteIntArray* nodes_to_replace = ...; - // TfLiteDelegateParams* params_array; - // int num_partitions = 0; - // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( - // context, delegate, nodes_to_replace, &params_array, &num_partitions)); - // for (int idx = 0; idx < num_partitions; idx++) { - // const auto& partition_params = params_array[idx]; - // ... - // } - // - // NOTE: The context owns the memory referenced by partition_params_array. It - // will be cleared with another call to PreviewDelegateParitioning, or after - // TfLiteDelegateParams::Prepare returns. - // - // WARNING: This is an experimental interface that is subject to change.
- TfLiteStatus (*PreviewDelegatePartitioning)( - struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace, - TfLiteDelegateParams** partition_params_array, int* num_partitions); - - // Returns a TfLiteTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context, - int tensor_idx); - - // Returns a TfLiteEvalTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context, - int tensor_idx); - - // Retrieves named metadata buffer from the TFLite model. - // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer - // Model: that is, there exists a `metadata` entry with given `name` string. - // (see TFLite's schema.fbs). - // The corresponding `buffer` information is populated in `ptr` & `bytes`. - // The data from `ptr` is valid for the lifetime of the Interpreter. - // - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context, - const char* name, const char** ptr, - size_t* bytes); -} TfLiteContext; - -// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration` -// for C API which doesn't use internal types (such as `TfLiteContext`) but only -// uses stable API types (such as `TfLiteOpaqueContext`). The purpose of each -// field is the exactly the same as with `TfLiteRegistration`. -typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal; - -typedef struct TfLiteRegistration { - // Initializes the op from serialized data. - // Called only *once* for the lifetime of the op, so any one-time allocations - // should be made here (unless they depend on tensor sizes). - // - // If a built-in op: - // `buffer` is the op's params data (TfLiteLSTMParams*). - // `length` is zero. - // If custom op: - // `buffer` is the op's `custom_options`. - // `length` is the size of the buffer. - // - // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer - // or an instance of a struct). - // - // The returned pointer will be stored with the node in the `user_data` field, - // accessible within prepare and invoke functions below. - // NOTE: if the data is already in the desired format, simply implement this - // function to return `nullptr` and implement the free function to be a no-op. - void* (*init)(TfLiteContext* context, const char* buffer, size_t length); - - // The pointer `buffer` is the data previously returned by an init invocation. - void (*free)(TfLiteContext* context, void* buffer); - - // prepare is called when the inputs this node depends on have been resized. - // context->ResizeTensor() can be called to request output tensors to be - // resized. - // Can be called multiple times for the lifetime of the op. - // - // Returns kTfLiteOk on success. - TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); - - // Execute the node (should read node->inputs and output to node->outputs). - // Returns kTfLiteOk on success. - TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); - - // profiling_string is called during summarization of profiling information - // in order to group executions together. 
Providing a value here will cause a - // given op to appear multiple times is the profiling report. This is - // particularly useful for custom ops that can perform significantly - // different calculations depending on their `user-data`. - const char* (*profiling_string)(const TfLiteContext* context, - const TfLiteNode* node); - - // Builtin codes. If this kernel refers to a builtin this is the code - // of the builtin. This is so we can do marshaling to other frameworks like - // NN API. - // Note: It is the responsibility of the registration binder to set this - // properly. - int32_t builtin_code; - - // Custom op name. If the op is a builtin, this will be null. - // Note: It is the responsibility of the registration binder to set this - // properly. - // WARNING: This is an experimental interface that is subject to change. - const char* custom_name; - - // The version of the op. - // Note: It is the responsibility of the registration binder to set this - // properly. - int version; - - // The external version of `TfLiteRegistration`. Since we can't use internal - // types (such as `TfLiteContext`) for C API to maintain ABI stability. - // C API user will provide `TfLiteRegistrationExternal` to implement custom - // ops. We keep it inside of `TfLiteRegistration` and use it to route - // callbacks properly. - TfLiteRegistrationExternal* registration_external; -} TfLiteRegistration; - -// Old version of `TfLiteRegistration` to maintain binary backward -// compatibility. -// WARNING: This structure is deprecated / not an official part of the API. -// It should be only used for binary backward compatibility. -typedef struct TfLiteRegistration_V1 { - void* (*init)(TfLiteContext* context, const char* buffer, size_t length); - void (*free)(TfLiteContext* context, void* buffer); - TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); - TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); - const char* (*profiling_string)(const TfLiteContext* context, - const TfLiteNode* node); - int32_t builtin_code; - const char* custom_name; - int version; -} TfLiteRegistration_V1; - -// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the -// values should be 1, 2, 4, 8, ...etc. -typedef enum TfLiteDelegateFlags { - kTfLiteDelegateFlagsNone = 0, - // The flag is set if the delegate can handle dynamic sized tensors. - // For example, the output shape of a `Resize` op with non-constant shape - // can only be inferred when the op is invoked. - // In this case, the Delegate is responsible for calling - // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling - // `ResizeTensor` when invoking the op. - // - // If the delegate isn't capable to handle dynamic tensors, this flag need - // to be set to false. - kTfLiteDelegateFlagsAllowDynamicTensors = 1, - - // This flag can be used by delegates (that allow dynamic tensors) to ensure - // applicable tensor shapes are automatically propagated in the case of tensor - // resizing. - // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors - // of a delegate kernel will have correct shapes before its Prepare() method - // is called. The runtime leverages TFLite builtin ops in the original - // execution plan to propagate shapes. - // - // A few points to note: - // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is - // false, this one is redundant since the delegate kernels are re-initialized - // every time tensors are resized. - // 2. 
Enabling this flag adds some overhead to AllocateTensors(), since extra - // work is required to prepare the original execution plan. - // 3. This flag requires that the original execution plan only have ops with - // valid registrations (and not 'dummy' custom ops like with Flex). - // WARNING: This feature is experimental and subject to change. - kTfLiteDelegateFlagsRequirePropagatedShapes = 2 -} TfLiteDelegateFlags; - -// WARNING: This is an experimental interface that is subject to change. -typedef struct TfLiteDelegate { - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for deallocating this when it is destroyed. - void* data_; - - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. - TfLiteStatus (*Prepare)(TfLiteContext* context, - struct TfLiteDelegate* delegate); - - // Copy the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. - TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Copy the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. - TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Free the Delegate Buffer Handle. Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. - void (*FreeBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle* handle); - - // Bitmask flags. See the comments in `TfLiteDelegateFlags`. - int64_t flags; - - // The opaque delegate builder associated with this object. If set then the - // TF Lite runtime will give precedence to this field. E.g. instead of - // invoking 'Prepare' via the function pointer inside the 'TfLiteDelegate' - // object, the runtime will first check if the corresponding function - // pointer inside 'opaque_delegate_builder' is set and if so invoke that. - // - // If this field is non-null, then the 'Prepare' field (of the - // 'TfLiteDelegate') should be null. - struct TfLiteOpaqueDelegateBuilder* opaque_delegate_builder; -} TfLiteDelegate; - -// Build a 'null' delegate, with all the fields properly set to their default -// values. -TfLiteDelegate TfLiteDelegateCreate(void); - -// `TfLiteOpaqueDelegateBuilder` is used for constructing -// `TfLiteOpaqueDelegateStruct`, see `TfLiteOpaqueDelegateCreate` below. Note: -// This struct is not ABI stable. -// -// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects should -// be brace-initialized, so that all fields (including any that might be added -// in the future) get zero-initialized. 
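// For illustration, a conforming setup might look like the following sketch
// (`MyDelegatePrepare` and `my_state` are hypothetical names):
//
//   TfLiteOpaqueDelegateBuilder builder = {};  // brace-init zeroes all fields
//   builder.data = &my_state;
//   builder.Prepare = MyDelegatePrepare;
//   struct TfLiteOpaqueDelegateStruct* delegate =
//       TfLiteOpaqueDelegateCreate(&builder);
//   ...
//   TfLiteOpaqueDelegateDelete(delegate);
//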
The purpose of each field is exactly -// the same as with `TfLiteDelegate`. -// -// WARNING: This is an experimental interface that is subject to change. -typedef struct TfLiteOpaqueDelegateBuilder { - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for deallocating this when it is destroyed. - void* data; - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. - TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context, // NOLINT - struct TfLiteOpaqueDelegateStruct* delegate, - void* data); - // Copies the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. - TfLiteStatus (*CopyFromBufferHandle)( // NOLINT - TfLiteOpaqueContext* context, struct TfLiteOpaqueDelegateStruct* delegate, - void* data, TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); - // Copies the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. - TfLiteStatus (*CopyToBufferHandle)( // NOLINT - TfLiteOpaqueContext* context, struct TfLiteOpaqueDelegateStruct* delegate, - void* data, TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); - // Frees the Delegate Buffer Handle. Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. - void (*FreeBufferHandle)(TfLiteOpaqueContext* context, // NOLINT - struct TfLiteOpaqueDelegateStruct* delegate, - void* data, TfLiteBufferHandle* handle); - // Bitmask flags. See the comments in `TfLiteDelegateFlags`. - int64_t flags; -} TfLiteOpaqueDelegateBuilder; - -// Creates an opaque delegate and returns its address. The opaque delegate will -// behave according to the provided 'opaque_delegate_builder'. The lifetime of -// the fields within the 'opaque_delegate_builder' must outlive any interaction -// between the runtime and the returned 'TfLiteOpaqueDelegateStruct'. The -// returned address should be passed to 'TfLiteOpaqueDelegateDelete' for -// deletion. If 'opaque_delegate_builder' is a null pointer, then a null -// pointer will be returned. -struct TfLiteOpaqueDelegateStruct* TfLiteOpaqueDelegateCreate( - const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder); - -// Deletes the provided opaque 'delegate'. This function has no effect if the -// 'delegate' is a null pointer. -void TfLiteOpaqueDelegateDelete( - const struct TfLiteOpaqueDelegateStruct* delegate); - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus -#endif // TENSORFLOW_LITE_C_COMMON_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/context_util.h b/code/components/tflite-lib/tensorflow/lite/context_util.h deleted file mode 100644 index ed42cc73..00000000 --- a/code/components/tflite-lib/tensorflow/lite/context_util.h +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-/// \file
-/// This provides a few C++ helpers that are useful for manipulating C
-/// structures in C++.
-#ifndef TENSORFLOW_LITE_CONTEXT_UTIL_H_
-#define TENSORFLOW_LITE_CONTEXT_UTIL_H_
-
-#include <stddef.h>
-
-#include "tensorflow/lite/c/common.h"
-
-namespace tflite {
-
-/// Provides a range iterable wrapper for TfLiteIntArray* (C lists) that TfLite
-/// C api uses.
-// Can't use the google array_view, since we can't depend on even
-// absl for embedded device reasons.
-class TfLiteIntArrayView {
- public:
-  /// Construct a view of a TfLiteIntArray*. Note, `int_array` should be
-  /// non-null and this view does not take ownership of it.
-  explicit TfLiteIntArrayView(const TfLiteIntArray* int_array)
-      : int_array_(int_array) {}
-
-  TfLiteIntArrayView(const TfLiteIntArrayView&) = default;
-  TfLiteIntArrayView& operator=(const TfLiteIntArrayView& rhs) = default;
-
-  typedef const int* const_iterator;
-  const_iterator begin() const { return int_array_->data; }
-  const_iterator end() const { return &int_array_->data[int_array_->size]; }
-  size_t size() const { return end() - begin(); }
-  int operator[](size_t pos) const { return int_array_->data[pos]; }
-
- private:
-  const TfLiteIntArray* int_array_;
-};
-
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_CONTEXT_UTIL_H_
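For context, this just-deleted helper is what lets kernel code range-iterate TFLite's C-style `TfLiteIntArray*` lists. A minimal usage sketch (assuming a valid `TfLiteNode* node` inside a kernel):

    // Walk a node's input tensor indices without manual index arithmetic.
    for (int tensor_idx : tflite::TfLiteIntArrayView(node->inputs)) {
      // ... look up or validate context->tensors[tensor_idx] ...
    }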
diff --git a/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc b/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc deleted file mode 100644 index 37d7661c..00000000 --- a/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.cc +++ /dev/null @@ -1,2469 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
-
-#include <cstddef>
-#include <cstdint>
-#include <memory>
-
-#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-
-namespace tflite {
-
-namespace {
-
-// Utility class for safely allocating POD data. This is useful for avoiding
-// leaks in cases where op params are allocated but fail to propagate to the
-// parsed op data (e.g., when model parameters are invalid).
-class SafeBuiltinDataAllocator {
- public:
-  class BuiltinDataDeleter {
-   public:
-    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
-        : allocator_(allocator) {}
-
-    void operator()(void* data) { allocator_->Deallocate(data); }
-
-   private:
-    BuiltinDataAllocator* allocator_;
-  };
-
-  template <typename T>
-  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;
-
-  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
-      : allocator_(allocator) {}
-
-  template <typename T>
-  BuiltinDataPtr<T> Allocate() {
-    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
-                             BuiltinDataDeleter(allocator_));
-  }
-
- private:
-  BuiltinDataAllocator* allocator_;
-};
-
-// All the Parse functions take some pointers as params and this function has
-// the common DCHECKs to catch if any of those are nullptr.
-void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
-                             BuiltinDataAllocator* allocator,
-                             void** builtin_data) {
-  TFLITE_DCHECK(op != nullptr);
-  TFLITE_DCHECK(error_reporter != nullptr);
-  TFLITE_DCHECK(allocator != nullptr);
-  TFLITE_DCHECK(builtin_data != nullptr);
-}
-
-// Copies the contents of the flatbuffer int vector `flat_vector` into the
-// int array `buffer`. Both describe the same configuration for a given
-// operation.
-TfLiteStatus FlatBufferIntVectorToArray(
-    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
-    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
-  if (!flat_vector) {
-    TF_LITE_REPORT_ERROR(error_reporter,
-                         "Input array not provided for operation '%s'.\n",
-                         op_name);
-    return kTfLiteError;
-  } else {
-    size_t num_dimensions = flat_vector->size();
-    if (num_dimensions > max_size_of_buffer / sizeof(int)) {
-      TF_LITE_REPORT_ERROR(
-          error_reporter,
-          "Found too many dimensions in the input array of operation '%s'.\n",
-          op_name);
-      return kTfLiteError;
-    } else {
-      for (size_t i = 0; i < num_dimensions; ++i) {
-        buffer[i] = flat_vector->Get(i);
-      }
-    }
-  }
-  return kTfLiteOk;
-}
-
-// Converts the flatbuffer activation to what is used at runtime.
-TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
-  switch (activation) {
-    case ActivationFunctionType_NONE:
-      return kTfLiteActNone;
-    case ActivationFunctionType_RELU:
-      return kTfLiteActRelu;
-    case ActivationFunctionType_RELU_N1_TO_1:
-      return kTfLiteActReluN1To1;
-    case ActivationFunctionType_RELU6:
-      return kTfLiteActRelu6;
-    case ActivationFunctionType_TANH:
-      return kTfLiteActTanh;
-    case ActivationFunctionType_SIGN_BIT:
-      return kTfLiteActSignBit;
-  }
-  return kTfLiteActNone;
-}
-
-// Converts the flatbuffer padding enum to what is used at runtime.
-TfLitePadding ConvertPadding(Padding padding) {
-  switch (padding) {
-    case Padding_SAME:
-      return kTfLitePaddingSame;
-    case Padding_VALID:
-      return kTfLitePaddingValid;
-  }
-  return kTfLitePaddingUnknown;
-}
-
-// Converts the flatbuffer mirror padding enum to what is used at runtime.
-TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) { - switch (padding) { - case MirrorPadMode_REFLECT: - return kTfLiteMirrorPaddingReflect; - case MirrorPadMode_SYMMETRIC: - return kTfLiteMirrorPaddingSymmetric; - } - return kTfLiteMirrorPaddingUnknown; -} - -#ifndef TF_LITE_STATIC_MEMORY -TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - auto parseLSHProjectionType = [](LSHProjectionType type) { - switch (type) { - case LSHProjectionType_SPARSE: - return kTfLiteLshProjectionSparse; - case LSHProjectionType_DENSE: - return kTfLiteLshProjectionDense; - default: - return kTfLiteLshProjectionUnknown; - } - }; - auto parseCombinerType = [](CombinerType type) { - switch (type) { - case CombinerType_MEAN: - return kTfLiteCombinerTypeMean; - case CombinerType_SQRTN: - return kTfLiteCombinerTypeSqrtn; - case CombinerType_SUM: - default: - return kTfLiteCombinerTypeSum; - } - }; - - SafeBuiltinDataAllocator safe_allocator(allocator); - *builtin_data = nullptr; - switch (op_type) { - case BuiltinOperator_ABS: { - return ParseAbs(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ADD: { - return ParseAdd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ADD_N: { - return ParseAddN(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ARG_MAX: { - return ParseArgMax(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ARG_MIN: { - return ParseArgMin(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ASSIGN_VARIABLE: { - return ParseAssignVariable(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_AVERAGE_POOL_2D: { - return ParsePool(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_BATCH_MATMUL: { - return ParseBatchMatMul(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_BATCH_TO_SPACE_ND: { - return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_BROADCAST_ARGS: { - return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_BROADCAST_TO: { - return ParseBroadcastTo(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CALL_ONCE: { - return ParseCallOnce(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CEIL: { - return ParseCeil(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CONCATENATION: { - return ParseConcatenation(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CONV_2D: { - return ParseConv2D(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CUMSUM: { - return ParseCumsum(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_DEPTH_TO_SPACE: { - return ParseDepthToSpace(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_DEPTHWISE_CONV_2D: { - return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_DEQUANTIZE: { - return ParseDequantize(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_DIV: { - return ParseDiv(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ELU: { - return ParseElu(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_EXP: { - return ParseExp(op, error_reporter, allocator, 
builtin_data); - } - - case BuiltinOperator_EXPAND_DIMS: { - return ParseExpandDims(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FILL: { - return ParseFill(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FLOOR: { - return ParseFloor(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FLOOR_DIV: { - return ParseFloorDiv(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FLOOR_MOD: { - return ParseFloorMod(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FULLY_CONNECTED: { - return ParseFullyConnected(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_GATHER_ND: { - return ParseGatherNd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_GREATER: { - return ParseGreater(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_GREATER_EQUAL: { - return ParseGreaterEqual(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_HARD_SWISH: { - return ParseHardSwish(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_L2_NORMALIZATION: { - return ParseL2Normalization(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_L2_POOL_2D: { - return ParsePool(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LEAKY_RELU: { - return ParseLeakyRelu(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LESS: { - return ParseLess(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LESS_EQUAL: { - return ParseLessEqual(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOG: { - return ParseLog(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGICAL_AND: { - return ParseLogicalAnd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGICAL_NOT: { - return ParseLogicalNot(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGICAL_OR: { - return ParseLogicalOr(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGISTIC: { - return ParseLogistic(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOG_SOFTMAX: { - return ParseLogSoftmax(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LSTM: { - return ParseLSTM(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MAXIMUM: { - return ParseMaximum(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MAX_POOL_2D: { - return ParsePool(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MIRROR_PAD: { - return ParseMirrorPad(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MEAN: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MINIMUM: { - return ParseMinimum(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MUL: { - return ParseMul(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_NEG: { - return ParseNeg(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_NOT_EQUAL: { - return ParseNotEqual(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PACK: { - return ParsePack(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PAD: { - return ParsePad(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PADV2: 
{ - return ParsePadV2(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_POW: { - return ParsePow(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PRELU: { - return ParsePrelu(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_QUANTIZE: { - return ParseQuantize(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_READ_VARIABLE: { - return ParseReadVariable(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_ANY: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_ALL: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_MAX: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_MIN: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_PROD: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RELU: { - return ParseRelu(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RELU6: { - return ParseRelu6(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RESHAPE: { - return ParseReshape(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RESIZE_BILINEAR: { - return ParseResizeBilinear(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: { - return ParseResizeNearestNeighbor(op, error_reporter, allocator, - builtin_data); - } - - case BuiltinOperator_ROUND: { - return ParseRound(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RSQRT: { - return ParseRsqrt(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SELECT_V2: { - return ParseSelectV2(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SHAPE: { - return ParseShape(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SIN: { - return ParseSin(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SOFTMAX: { - return ParseSoftmax(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPACE_TO_BATCH_ND: { - return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPACE_TO_DEPTH: { - return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPLIT: { - return ParseSplit(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPLIT_V: { - return ParseSplitV(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SQRT: { - return ParseSqrt(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SQUARE: { - return ParseSquare(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SQUARED_DIFFERENCE: { - return ParseSquaredDifference(op, error_reporter, allocator, - builtin_data); - } - - case BuiltinOperator_SQUEEZE: { - return ParseSqueeze(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_STRIDED_SLICE: { - return ParseStridedSlice(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SUB: { - return ParseSub(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SUM: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SVDF: { - return ParseSvdf(op, 
error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_TANH: { - return ParseTanh(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_TRANSPOSE_CONV: { - return ParseTransposeConv(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_UNPACK: { - return ParseUnpack(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_VAR_HANDLE: { - return ParseVarHandle(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ZEROS_LIKE: { - return ParseZerosLike(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CAST: { - return ParseCast(op, error_reporter, allocator, builtin_data); - } - case BuiltinOperator_LSH_PROJECTION: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* lshParams = - op->builtin_options_as_LSHProjectionOptions()) { - params->type = parseLSHProjectionType(lshParams->type()); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* sequence_rnn_params = - op->builtin_options_as_SequenceRNNOptions()) { - params->activation = - ConvertActivation(sequence_rnn_params->fused_activation_function()); - params->time_major = sequence_rnn_params->time_major(); - params->asymmetric_quantize_inputs = - sequence_rnn_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bidi_sequence_rnn_params = - op->builtin_options_as_BidirectionalSequenceRNNOptions()) { - params->activation = ConvertActivation( - bidi_sequence_rnn_params->fused_activation_function()); - params->time_major = bidi_sequence_rnn_params->time_major(); - params->merge_outputs = bidi_sequence_rnn_params->merge_outputs(); - params->asymmetric_quantize_inputs = - bidi_sequence_rnn_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_RNN: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) { - params->activation = - ConvertActivation(rnn_params->fused_activation_function()); - params->asymmetric_quantize_inputs = - rnn_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* embedding_params = - op->builtin_options_as_EmbeddingLookupSparseOptions()) { - params->combiner = parseCombinerType(embedding_params->combiner()); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - - case BuiltinOperator_HASHTABLE_LOOKUP: - // no-op. 
- return kTfLiteOk; - - case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_LocalResponseNormalizationOptions()) { - params->radius = schema_params->radius(); - params->bias = schema_params->bias(); - params->alpha = schema_params->alpha(); - params->beta = schema_params->beta(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: { - return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator, - builtin_data); - } - case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bidi_lstm_params = - op->builtin_options_as_BidirectionalSequenceLSTMOptions()) { - params->activation = - ConvertActivation(bidi_lstm_params->fused_activation_function()); - params->cell_clip = bidi_lstm_params->cell_clip(); - params->proj_clip = bidi_lstm_params->proj_clip(); - params->merge_outputs = bidi_lstm_params->merge_outputs(); - params->time_major = bidi_lstm_params->time_major(); - params->asymmetric_quantize_inputs = - bidi_lstm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_SKIP_GRAM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* skip_gram_params = - op->builtin_options_as_SkipGramOptions()) { - params->ngram_size = skip_gram_params->ngram_size(); - params->max_skip_size = skip_gram_params->max_skip_size(); - params->include_all_ngrams = skip_gram_params->include_all_ngrams(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - - case BuiltinOperator_GATHER: { - return ParseGather(op, error_reporter, allocator, builtin_data); - } - case BuiltinOperator_SPARSE_TO_DENSE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* sparse_to_dense_params = - op->builtin_options_as_SparseToDenseOptions()) { - params->validate_indices = sparse_to_dense_params->validate_indices(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_DELEGATE: { - TF_LITE_REPORT_ERROR(error_reporter, - "DELEGATE op shouldn't exist in model."); - return kTfLiteError; - } - case BuiltinOperator_FAKE_QUANT: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_FakeQuantOptions()) { - params->min = schema_params->min(); - params->max = schema_params->max(); - params->num_bits = schema_params->num_bits(); - params->narrow_range = schema_params->narrow_range(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_ONE_HOT: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) { - params->axis = schema_params->axis(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_UNIQUE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - const auto* unique_params = op->builtin_options_as_UniqueOptions(); - if (unique_params != nullptr) { - params->index_out_type = - unique_params->idx_out_type() == tflite::TensorType_INT64 - ? 
TfLiteType::kTfLiteInt64 - : TfLiteType::kTfLiteInt32; - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_REVERSE_SEQUENCE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* reverse_seq_params = - op->builtin_options_as_ReverseSequenceOptions()) { - params->seq_dim = reverse_seq_params->seq_dim(); - params->batch_dim = reverse_seq_params->batch_dim(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_IF: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* if_params = op->builtin_options_as_IfOptions()) { - params->then_subgraph_index = if_params->then_subgraph_index(); - params->else_subgraph_index = if_params->else_subgraph_index(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_WHILE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* while_params = op->builtin_options_as_WhileOptions()) { - params->cond_subgraph_index = while_params->cond_subgraph_index(); - params->body_subgraph_index = while_params->body_subgraph_index(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_CONV_3D: - case BuiltinOperator_CONV_3D_TRANSPOSE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) { - params->padding = ConvertPadding(conv3d_params->padding()); - params->activation = - ConvertActivation(conv3d_params->fused_activation_function()); - params->stride_depth = conv3d_params->stride_d(); - params->stride_height = conv3d_params->stride_h(); - params->stride_width = conv3d_params->stride_w(); - params->dilation_depth_factor = conv3d_params->dilation_d_factor(); - params->dilation_height_factor = conv3d_params->dilation_h_factor(); - params->dilation_width_factor = conv3d_params->dilation_w_factor(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_HASHTABLE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* hashtable_params = - op->builtin_options_as_HashtableOptions()) { - params->table_id = hashtable_params->table_id(); - TF_LITE_ENSURE_STATUS(ConvertTensorType( - hashtable_params->key_dtype(), ¶ms->key_dtype, error_reporter)); - TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(), - ¶ms->value_dtype, - error_reporter)); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_MULTINOMIAL: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* multinomial_params = - op->builtin_options_as_RandomOptions()) { - params->seed = multinomial_params->seed(); - params->seed2 = multinomial_params->seed2(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_RANDOM_STANDARD_NORMAL: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* random_std_normal_params = - op->builtin_options_as_RandomOptions()) { - params->seed = random_std_normal_params->seed(); - params->seed2 = random_std_normal_params->seed2(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_BUCKETIZE: { - auto params = safe_allocator.Allocate(); - 
TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bucketize_params = - op->builtin_options_as_BucketizeOptions()) { - const flatbuffers::Vector* boundaries = - bucketize_params->boundaries(); - if (boundaries == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter, - "boundaries array not provided for operation 'bucketize'.\n"); - return kTfLiteError; - } - params->num_boundaries = boundaries->size(); - if (boundaries->data() == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "boundaries.data() returned nullptr for " - "operation 'bucketize'.\n"); - return kTfLiteError; - } - params->boundaries = boundaries->data(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_RANDOM_UNIFORM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* random_uniform_params = - op->builtin_options_as_RandomOptions()) { - params->seed = random_uniform_params->seed(); - params->seed2 = random_uniform_params->seed2(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_GELU: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) { - params->approximate = gelu_params->approximate(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - // Below are the ops with no builtin_data structure. - // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are - // ok for now, since there is no call implementation either. - case BuiltinOperator_CALL: - case BuiltinOperator_COMPLEX_ABS: - case BuiltinOperator_CONCAT_EMBEDDINGS: - case BuiltinOperator_COS: - case BuiltinOperator_CUSTOM: - case BuiltinOperator_DENSIFY: - case BuiltinOperator_DYNAMIC_UPDATE_SLICE: - case BuiltinOperator_EMBEDDING_LOOKUP: - case BuiltinOperator_EQUAL: - case BuiltinOperator_HASHTABLE_FIND: - case BuiltinOperator_HASHTABLE_IMPORT: - case BuiltinOperator_HASHTABLE_SIZE: - case BuiltinOperator_IMAG: - case BuiltinOperator_MATRIX_DIAG: - case BuiltinOperator_MATRIX_SET_DIAG: - case BuiltinOperator_NON_MAX_SUPPRESSION_V4: - case BuiltinOperator_NON_MAX_SUPPRESSION_V5: - case BuiltinOperator_RELU_N1_TO_1: - case BuiltinOperator_RELU_0_TO_1: - case BuiltinOperator_SCATTER_ND: - case BuiltinOperator_SELECT: - case BuiltinOperator_SLICE: - case BuiltinOperator_TILE: - case BuiltinOperator_TOPK_V2: - case BuiltinOperator_TRANSPOSE: - case BuiltinOperator_RANGE: - case BuiltinOperator_RANK: - case BuiltinOperator_REAL: - case BuiltinOperator_RFFT2D: - case BuiltinOperator_SEGMENT_SUM: - case BuiltinOperator_REVERSE_V2: - case BuiltinOperator_UNSORTED_SEGMENT_MAX: - case BuiltinOperator_UNSORTED_SEGMENT_MIN: - case BuiltinOperator_UNSORTED_SEGMENT_PROD: - case BuiltinOperator_UNSORTED_SEGMENT_SUM: - case BuiltinOperator_ATAN2: - case BuiltinOperator_SIGN: - case BuiltinOperator_WHERE: - return kTfLiteOk; - case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES: - return kTfLiteError; - } - return kTfLiteError; -} // NOLINT[readability/fn_size] -#endif // !defined(TF_LITE_STATIC_MEMORY) -} // namespace - -TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, - ErrorReporter* error_reporter) { - switch (tensor_type) { - case TensorType_FLOAT16: - *type = kTfLiteFloat16; - return kTfLiteOk; - case TensorType_FLOAT32: - *type = kTfLiteFloat32; - return kTfLiteOk; - case TensorType_FLOAT64: - *type = kTfLiteFloat64; - return kTfLiteOk; - case TensorType_INT16: 
- *type = kTfLiteInt16; - return kTfLiteOk; - case TensorType_UINT16: - *type = kTfLiteUInt16; - return kTfLiteOk; - case TensorType_INT32: - *type = kTfLiteInt32; - return kTfLiteOk; - case TensorType_UINT32: - *type = kTfLiteUInt32; - return kTfLiteOk; - case TensorType_UINT8: - *type = kTfLiteUInt8; - return kTfLiteOk; - case TensorType_INT8: - *type = kTfLiteInt8; - return kTfLiteOk; - case TensorType_INT64: - *type = kTfLiteInt64; - return kTfLiteOk; - case TensorType_UINT64: - *type = kTfLiteUInt64; - return kTfLiteOk; - case TensorType_STRING: - *type = kTfLiteString; - return kTfLiteOk; - case TensorType_BOOL: - *type = kTfLiteBool; - return kTfLiteOk; - case TensorType_COMPLEX64: - *type = kTfLiteComplex64; - return kTfLiteOk; - case TensorType_COMPLEX128: - *type = kTfLiteComplex128; - return kTfLiteOk; - case TensorType_RESOURCE: - *type = kTfLiteResource; - return kTfLiteOk; - case TensorType_VARIANT: - *type = kTfLiteVariant; - return kTfLiteOk; - default: - *type = kTfLiteNoType; - TF_LITE_REPORT_ERROR(error_reporter, - "Unsupported data type %d in tensor\n", tensor_type); - return kTfLiteError; - } -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const AddOptions* schema_params = op->builtin_options_as_AddOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->pot_scale_int16 = schema_params->pot_scale_int16(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - return kTfLiteOk; -} - -TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions(); - - if (schema_params != nullptr) { - TF_LITE_ENSURE_STATUS(ConvertTensorType( - schema_params->output_type(), ¶ms->output_type, error_reporter)); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions(); - - if (schema_params != nullptr) { - TF_LITE_ENSURE_STATUS(ConvertTensorType( - schema_params->output_type(), ¶ms->output_type, error_reporter)); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) { - params->adj_x = bmm_params->adj_x(); - params->adj_y = bmm_params->adj_y(); - params->asymmetric_quantize_inputs = - bmm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
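// (For context, "selective registration" works because a micro application
// only references the parse functions for the ops it actually registers.
// A typical setup, sketched here with an arbitrary pair of builtins:
//
//   static tflite::MicroMutableOpResolver<2> resolver;
//   resolver.AddFullyConnected();
//   resolver.AddSoftmax();
//
// Each Add* call wires in the matching ParseXxx function, so parsers for
// unregistered ops can be dead-stripped by the linker.)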
-TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const CallOnceOptions* schema_params = - op->builtin_options_as_CallOnceOptions(); - - if (schema_params != nullptr) { - params->init_subgraph_index = schema_params->init_subgraph_index(); - - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_CastOptions()) { - TF_LITE_ENSURE_STATUS(ConvertTensorType( - schema_params->in_data_type(), ¶ms->in_data_type, error_reporter)); - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(), - ¶ms->out_data_type, - error_reporter)); - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseConcatenation(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ConcatenationOptions* schema_params = - op->builtin_options_as_ConcatenationOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->axis = schema_params->axis(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions(); - - if (schema_params != nullptr) { - params->padding = ConvertPadding(schema_params->padding()); - params->stride_width = schema_params->stride_w(); - params->stride_height = schema_params->stride_h(); - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - - params->dilation_width_factor = schema_params->dilation_w_factor(); - params->dilation_height_factor = schema_params->dilation_h_factor(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) { - params->exclusive = cumsum_params->exclusive(); - params->reverse = cumsum_params->reverse(); - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseDepthToSpace(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions(); - if (schema_params != nullptr) { - params->block_size = schema_params->block_size(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseDepthwiseConv2D(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const DepthwiseConv2DOptions* schema_params = - op->builtin_options_as_DepthwiseConv2DOptions(); - - if (schema_params != nullptr) { - params->padding = ConvertPadding(schema_params->padding()); - params->stride_width = schema_params->stride_w(); - params->stride_height = schema_params->stride_h(); - params->depth_multiplier = schema_params->depth_multiplier(); - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - - params->dilation_width_factor = schema_params->dilation_w_factor(); - params->dilation_height_factor = schema_params->dilation_h_factor(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_DivOptions()) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
-                             BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                       void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseFullyConnected(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-
-  std::unique_ptr<TfLiteFullyConnectedParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const FullyConnectedOptions* schema_params =
-      op->builtin_options_as_FullyConnectedOptions();
-
-  if (schema_params != nullptr) {
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-    params->keep_num_dims = schema_params->keep_num_dims();
-    params->asymmetric_quantize_inputs =
-        schema_params->asymmetric_quantize_inputs();
-
-    switch (schema_params->weights_format()) {
-      case FullyConnectedOptionsWeightsFormat_DEFAULT:
-        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
-        break;
-      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
-        params->weights_format =
-            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
-        break;
-      default:
-        TF_LITE_REPORT_ERROR(error_reporter,
-                             "Unhandled fully-connected weights format.");
-        return kTfLiteError;
-    }
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
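For orientation, this is roughly how a caller drives one of these parse functions. The allocator below is a hypothetical heap-backed sketch (real integrations use the interpreter's allocator or the micro arena instead of malloc/free); only the ParseFullyConnected call itself reflects the API in this file:

  struct MallocDataAllocator : public tflite::BuiltinDataAllocator {
    void* Allocate(size_t size, size_t alignment_hint) override {
      return malloc(size);
    }
    void Deallocate(void* data) override { free(data); }
  };

  void* builtin_data = nullptr;
  MallocDataAllocator allocator;
  TF_LITE_ENSURE_STATUS(tflite::ParseFullyConnected(
      op, error_reporter, &allocator, &builtin_data));
  const auto* fc_params =
      static_cast<const TfLiteFullyConnectedParams*>(builtin_data);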
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  auto params = safe_allocator.Allocate<TfLiteGatherParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-  params->axis = 0;
-  params->batch_dims = 0;
-  if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
-    params->axis = gather_params->axis();
-    params->batch_dims = gather_params->batch_dims();
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
-                          BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
-                               BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
-                            BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
-                     BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteIfParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const IfOptions* schema_params = op->builtin_options_as_IfOptions();
-
-  if (schema_params != nullptr) {
-    params->then_subgraph_index = schema_params->then_subgraph_index();
-    params->else_subgraph_index = schema_params->else_subgraph_index();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseL2Normalization(const Operator* op,
-                                  ErrorReporter* error_reporter,
-                                  BuiltinDataAllocator* allocator,
-                                  void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteL2NormParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteL2NormParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
-
-  if (schema_params != nullptr) {
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-  if (const auto* leaky_relu_params =
-          op->builtin_options_as_LeakyReluOptions()) {
-    params->alpha = leaky_relu_params->alpha();
-  }
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                       void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
-                            BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                      void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
-                             BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
-                             BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
-                            BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
-                             BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-  if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
-    params->activation =
-        ConvertActivation(lstm_params->fused_activation_function());
-    params->cell_clip = lstm_params->cell_clip();
-    params->proj_clip = lstm_params->proj_clip();
-    switch (lstm_params->kernel_type()) {
-      case LSTMKernelType_FULL:
-        params->kernel_type = kTfLiteLSTMFullKernel;
-        break;
-      case LSTMKernelType_BASIC:
-        params->kernel_type = kTfLiteLSTMBasicKernel;
-        break;
-      default:
-        TF_LITE_REPORT_ERROR(error_reporter, "Unhandled LSTM kernel type: %d",
-                             lstm_params->kernel_type());
-        return kTfLiteError;
-    }
-    params->asymmetric_quantize_inputs =
-        lstm_params->asymmetric_quantize_inputs();
-  } else {
-    TF_LITE_REPORT_ERROR(error_reporter, "No valid LSTM builtin options exist");
-    return kTfLiteError;
-  }
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
-                          BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
-                          BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteMirrorPaddingParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const MirrorPadOptions* schema_params =
-      op->builtin_options_as_MirrorPadOptions();
-
-  if (schema_params != nullptr) {
-    params->mode = ConvertMirrorPadding(schema_params->mode());
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
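ConvertMirrorPadding above maps the schema enum onto kTfLiteMirrorPaddingReflect or kTfLiteMirrorPaddingSymmetric; the two modes differ only in whether the edge element itself is repeated. A worked one-dimensional example:

  // Padding [1, 2, 3] with two elements on the left:
  //   REFLECT   -> [3, 2, 1, 2, 3]   (mirror excludes the edge value)
  //   SYMMETRIC -> [2, 1, 1, 2, 3]   (mirror includes the edge value)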
-TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteMulParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const MulOptions* schema_params = op->builtin_options_as_MulOptions();
-
-  if (schema_params != nullptr) {
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                      void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLitePackParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLitePackParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const PackOptions* schema_params = op->builtin_options_as_PackOptions();
-
-  if (schema_params != nullptr) {
-    params->values_count = schema_params->values_count();
-    params->axis = schema_params->axis();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                      void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLitePoolParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLitePoolParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
-
-  if (schema_params != nullptr) {
-    params->padding = ConvertPadding(schema_params->padding());
-    params->stride_width = schema_params->stride_w();
-    params->stride_height = schema_params->stride_h();
-    params->filter_width = schema_params->filter_width();
-    params->filter_height = schema_params->filter_height();
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                      void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,
-                               BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator,
-                          void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-
-  std::unique_ptr<TfLiteReducerParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteReducerParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
-
-  if (schema_params != nullptr) {
-    params->keep_dims = schema_params->keep_dims();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                       void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator,
-                          void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-
-  std::unique_ptr<TfLiteReshapeParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteReshapeParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
-
-  if (schema_params != nullptr) {
-    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
-    if (new_shape != nullptr) {
-      TF_LITE_ENSURE_STATUS(
-          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
-                                     params->shape, error_reporter, "reshape"));
-      params->num_dimensions = new_shape->size();
-    } else {
-      // TODO(b/157480169) TODO(b/147203660): We should either return
-      // kTfLiteError or fill in some reasonable defaults in the params struct.
-      // We are not doing so until we better understand the ramifications of
-      // changing the legacy behavior.
-    }
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
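For context on the size check above: FlatBufferIntVectorToArray copies new_shape into a fixed-size field, which is why a model whose reshape target has more than eight dimensions is rejected at parse time. The target struct, as declared in tensorflow/lite/c/builtin_op_data.h (quoted here for reference, not part of this diff):

  typedef struct {
    int shape[8];  // TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT == 8
    int num_dimensions;
  } TfLiteReshapeParams;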
-TfLiteStatus ParseResizeBilinear(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteResizeBilinearParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const ResizeBilinearOptions* schema_params =
-      op->builtin_options_as_ResizeBilinearOptions();
-
-  if (schema_params != nullptr) {
-    params->align_corners = schema_params->align_corners();
-    params->half_pixel_centers = schema_params->half_pixel_centers();
-  } else {
-    params->align_corners = false;
-    params->half_pixel_centers = false;
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
-                                        ErrorReporter* error_reporter,
-                                        BuiltinDataAllocator* allocator,
-                                        void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteResizeNearestNeighborParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const ResizeNearestNeighborOptions* schema_params =
-      op->builtin_options_as_ResizeNearestNeighborOptions();
-
-  if (schema_params != nullptr) {
-    params->align_corners = schema_params->align_corners();
-    params->half_pixel_centers = schema_params->half_pixel_centers();
-  } else {
-    params->align_corners = false;
-    params->half_pixel_centers = false;
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*,
-                           BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data) {
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteShapeParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteShapeParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();
-
-  if (schema_params != nullptr) {
-    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
-                                            &params->out_type, error_reporter));
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                      void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                        void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator,
-                          void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSoftmaxParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
-
-  if (schema_params != nullptr) {
-    params->beta = schema_params->beta();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
-                                 BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSpaceToDepth(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSpaceToDepthParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();
-  if (schema_params != nullptr) {
-    params->block_size = schema_params->block_size();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSplitParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSplitParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
-
-  if (schema_params != nullptr) {
-    params->num_splits = schema_params->num_splits();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-
-  std::unique_ptr<TfLiteSplitVParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSplitVParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();
-
-  if (schema_params != nullptr) {
-    params->num_splits = schema_params->num_splits();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
-                                             ErrorReporter* error_reporter,
-                                             BuiltinDataAllocator* allocator,
-                                             void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  auto params =
-      safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-  if (const auto* seq_lstm_params =
-          op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
-    params->activation =
-        ConvertActivation(seq_lstm_params->fused_activation_function());
-    params->cell_clip = seq_lstm_params->cell_clip();
-    params->proj_clip = seq_lstm_params->proj_clip();
-    params->time_major = seq_lstm_params->time_major();
-    params->asymmetric_quantize_inputs =
-        seq_lstm_params->asymmetric_quantize_inputs();
-  }
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator,
-                          void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-
-  std::unique_ptr<TfLiteSqueezeParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSqueezeParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();
-
-  if (schema_params != nullptr) {
-    const auto* squeeze_dims = schema_params->squeeze_dims();
-    if (squeeze_dims != nullptr) {
-      TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
-          sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
-          error_reporter, "squeeze"));
-      params->num_squeeze_dims = squeeze_dims->size();
-    } else {
-      params->num_squeeze_dims = 0;
-    }
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
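A worked example of the squeeze_dims semantics parsed above: squeezing a tensor of shape [1, 2, 1, 3] with squeeze_dims = {0, 2} removes both singleton axes and yields shape [2, 3], while an empty squeeze_dims (num_squeeze_dims == 0) asks the kernel to drop every dimension of size 1.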
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                       void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSquare(const Operator*, ErrorReporter*,
-                         BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*,
-                                    BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseStridedSlice(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteStridedSliceParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const StridedSliceOptions* schema_params =
-      op->builtin_options_as_StridedSliceOptions();
-
-  if (schema_params != nullptr) {
-    params->begin_mask = schema_params->begin_mask();
-    params->end_mask = schema_params->end_mask();
-    params->ellipsis_mask = schema_params->ellipsis_mask();
-    params->new_axis_mask = schema_params->new_axis_mask();
-    params->shrink_axis_mask = schema_params->shrink_axis_mask();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSubParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const SubOptions* schema_params = op->builtin_options_as_SubOptions();
-
-  if (schema_params != nullptr) {
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-    params->pot_scale_int16 = schema_params->pot_scale_int16();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteSVDFParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteSVDFParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
-  if (schema_params != nullptr) {
-    params->rank = schema_params->rank();
-    params->activation =
-        ConvertActivation(schema_params->fused_activation_function());
-    params->asymmetric_quantize_inputs =
-        schema_params->asymmetric_quantize_inputs();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
-                       void**) {
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
-                            BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseTransposeConv(const Operator* op,
-                                ErrorReporter* error_reporter,
-                                BuiltinDataAllocator* allocator,
-                                void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteTransposeConvParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-  const TransposeConvOptions* transpose_conv_params =
-      op->builtin_options_as_TransposeConvOptions();
-  if (transpose_conv_params != nullptr) {
-    params->padding = ConvertPadding(transpose_conv_params->padding());
-    params->stride_width = transpose_conv_params->stride_w();
-    params->stride_height = transpose_conv_params->stride_h();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteUnpackParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteUnpackParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
-
-  if (schema_params != nullptr) {
-    params->num = schema_params->num();
-    params->axis = schema_params->axis();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
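A worked example of the two fields parsed above: UNPACK with num = 4 and axis = 0 splits a [4, 3] tensor into four [3] tensors; with num = 3 and axis = 1 it instead yields three [4] tensors.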
-TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteVarHandleParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteVarHandleParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const VarHandleOptions* schema_params =
-      op->builtin_options_as_VarHandleOptions();
-
-  if (schema_params != nullptr) {
-    if (schema_params->container()) {
-      params->container = schema_params->container()->c_str();
-    }
-    if (schema_params->shared_name()) {
-      params->shared_name = schema_params->shared_name()->c_str();
-    }
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data) {
-  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
-
-  SafeBuiltinDataAllocator safe_allocator(allocator);
-  std::unique_ptr<TfLiteWhileParams,
-                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
-      params = safe_allocator.Allocate<TfLiteWhileParams>();
-  TF_LITE_ENSURE(error_reporter, params != nullptr);
-
-  const WhileOptions* schema_params = op->builtin_options_as_WhileOptions();
-
-  if (schema_params != nullptr) {
-    params->cond_subgraph_index = schema_params->cond_subgraph_index();
-    params->body_subgraph_index = schema_params->body_subgraph_index();
-  } else {
-    // TODO(b/157480169): We should either return kTfLiteError or fill in some
-    // reasonable defaults in the params struct. We are not doing so until we
-    // better understand the ramifications of changing the legacy behavior.
-  }
-
-  *builtin_data = params.release();
-  return kTfLiteOk;
-}
-
-// We have this parse function instead of directly returning kTfLiteOk from the
-// switch-case in ParseOpData because this function is used as part of the
-// selective registration for the OpResolver implementation in micro.
-TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
-                            BuiltinDataAllocator*, void**) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
-                         ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data) {
-// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
-// Micro not have the ParseOpData function at all. This would require splitting
-// the current file into two separate files, one of which defines the
-// ParseOpData function and the other that defines the operator specific parse
-// functions (e.g. ParseAdd).
-//
-// Such a split was attempted but was not worth the effort at the time because
-// of the following reasons:
-//  * We could either duplicate the functions and the SafeBuiltinDataAllocator
-//    class in the anonymous namespace of this file, or attempt to make a common
-//    library with these helper functions and class.
-//  * Making a common library with a separate build target was not feasible as
-//    it introduced circular dependencies due to the ErrorReporter, and a common
-//    .cc and .h within the same api build target also caused circular
-//    dependencies due to the BuiltinDataAllocator class.
-//  * If all the builtin operators were to have their own parse functions, or we
-//    were ok with some amount of code duplication, then this split of the .cc
-//    files would be a lot more feasible.
-#ifdef TF_LITE_STATIC_MEMORY
-  TF_LITE_REPORT_ERROR(
-      error_reporter,
-      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
-      "specific parse functions (e.g. ParseAdd etc.).\n");
-  return kTfLiteError;
-#else
-  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
-                           builtin_data);
-#endif
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h b/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h
deleted file mode 100644
index c7653f01..00000000
--- a/code/components/tflite-lib/tensorflow/lite/core/api/flatbuffer_conversions.h
+++ /dev/null
@@ -1,412 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
-#define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
-
-// These functions transform codes and data structures that are defined in the
-// flatbuffer serialization format into in-memory values that are used by the
-// runtime API and interpreter.
-
-#include <cstddef>
-#include <new>
-#include <type_traits>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-
-namespace tflite {
-
-// Interface class for builtin data allocations.
-class BuiltinDataAllocator {
- public:
-  virtual void* Allocate(size_t size, size_t alignment_hint) = 0;
-  virtual void Deallocate(void* data) = 0;
-
-  // Allocate a structure, but make sure it is a POD structure that doesn't
-  // require constructors to run. The reason we do this, is that Interpreter's
-  // C extension part will take ownership so destructors will not be run during
-  // deallocation.
-  template <typename T>
-  T* AllocatePOD() {
-    // TODO(b/154346074): Change this to is_trivially_destructible when all
-    // platform targets support that properly.
-    static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
-    void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
-    return new (allocated_memory) T();
-  }
-
-  virtual ~BuiltinDataAllocator() {}
-};
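The static_assert in AllocatePOD is what keeps the placement-new safe: builtin data is later released through Deallocate() with no destructor call, so only trivially destructible types may pass. An illustrative pair (these types are made up for the example):

  struct BadParams { std::string name; };     // non-POD: ~BadParams() would
                                              // never run, so this fails to
                                              // compile inside AllocatePOD.
  struct OkParams { int shape[8]; int n; };   // POD: safe in raw storage.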
-
-// Parse the appropriate data out of the op.
-//
-// This handles builtin data explicitly as there are flatbuffer schemas.
-// If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
-// calling function has to pass in an allocator object, and this allocator
-// will be called to reserve space for the output data. If the calling
-// function's allocator reserves memory on the heap, then it's the calling
-// function's responsibility to free it.
-// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
-TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
-                         ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-// Converts the tensor data type used in the flat buffer to the representation
-// used by the runtime.
-TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
-                               ErrorReporter* error_reporter);
-
-TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseAssignVariable(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data);
-
-TfLiteStatus ParseBatchMatMul(const Operator* op,
-                              ErrorReporter* error_reporter,
-                              BuiltinDataAllocator* allocator,
-                              void** builtin_data);
-
-TfLiteStatus ParseBatchToSpaceNd(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data);
-
-TfLiteStatus ParseBroadcastArgs(const Operator* op,
-                                ErrorReporter* error_reporter,
-                                BuiltinDataAllocator* allocator,
-                                void** builtin_data);
-
-TfLiteStatus ParseBroadcastTo(const Operator* op,
-                              ErrorReporter* error_reporter,
-                              BuiltinDataAllocator* allocator,
-                              void** builtin_data);
-
-TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseConcatenation(const Operator* op,
-                                ErrorReporter* error_reporter,
-                                BuiltinDataAllocator* allocator,
-                                void** builtin_data);
-
-TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseDepthToSpace(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data);
-
-TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
-                                  ErrorReporter* error_reporter,
-                                  BuiltinDataAllocator* allocator,
-                                  void** builtin_data);
-
-TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
-                             BuiltinDataAllocator* allocator,
-                             void** builtin_data);
-
-TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseElu(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseExpandDims(const Operator* op, ErrorReporter* error_reporter,
-                             BuiltinDataAllocator* allocator,
-                             void** builtin_data);
-
-TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseFullyConnected(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data);
-
-TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseGatherNd(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseGreaterEqual(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data);
-
-TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
-                     BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseL2Normalization(const Operator* op,
-                                  ErrorReporter* error_reporter,
-                                  BuiltinDataAllocator* allocator,
-                                  void** builtin_data);
-
-TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
-                             BuiltinDataAllocator* allocator,
-                             void** builtin_data);
-
-TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
-                             BuiltinDataAllocator* allocator,
-                             void** builtin_data);
-
-TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter,
-                             BuiltinDataAllocator* allocator,
-                             void** builtin_data);
-
-TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParsePow(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseReadVariable(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data);
-
-TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseResizeBilinear(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data);
-
-TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
-                                        ErrorReporter* error_reporter,
-                                        BuiltinDataAllocator* allocator,
-                                        void** builtin_data);
-
-TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSelectV2(const Operator* op, ErrorReporter* error_reporter,
-                           BuiltinDataAllocator* allocator,
-                           void** builtin_data);
-
-TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSlice(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSpaceToBatchNd(const Operator* op,
-                                 ErrorReporter* error_reporter,
-                                 BuiltinDataAllocator* allocator,
-                                 void** builtin_data);
-
-TfLiteStatus ParseSpaceToDepth(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data);
-
-TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
-                          BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSquaredDifference(const Operator* op,
-                                    ErrorReporter* error_reporter,
-                                    BuiltinDataAllocator* allocator,
-                                    void** builtin_data);
-
-TfLiteStatus ParseStridedSlice(const Operator* op,
-                               ErrorReporter* error_reporter,
-                               BuiltinDataAllocator* allocator,
-                               void** builtin_data);
-
-TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
-                      BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
-                       BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseTranspose(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseTransposeConv(const Operator* op,
-                                ErrorReporter* error_reporter,
-                                BuiltinDataAllocator* allocator,
-                                void** builtin_data);
-
-TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
-                         BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,
-                                             ErrorReporter* error_reporter,
-                                             BuiltinDataAllocator* allocator,
-                                             void** builtin_data);
-
-TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,
-                        BuiltinDataAllocator* allocator, void** builtin_data);
-
-TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter,
-                            BuiltinDataAllocator* allocator,
-                            void** builtin_data);
-
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc b/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc
deleted file mode 100644
index f1e781b2..00000000
--- a/code/components/tflite-lib/tensorflow/lite/experimental/microfrontend/lib/kiss_fft_int16.cc
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <stdint.h>
-
-#include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h"
-
-#define FIXED_POINT 16
-namespace kissfft_fixed16 {
-#include "kiss_fft.c"
-#include "tools/kiss_fftr.c"
-}  // namespace kissfft_fixed16
-#undef FIXED_POINT
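The deleted kiss_fft_int16.cc uses a classic single-source trick: the kiss_fft C files are compiled once per precision, each time inside a distinct namespace, so several variants can be linked into one binary without symbol clashes. Its floating-point counterpart (a sibling kiss_fft_float.cc in the same directory, sketched here from memory rather than quoted from this diff) simply omits the FIXED_POINT define:

  #include "tensorflow/lite/experimental/microfrontend/lib/kiss_fft_common.h"

  // No FIXED_POINT define, so kiss_fft.c builds its floating-point variant.
  namespace kissfft_float {
  #include "kiss_fft.c"
  #include "tools/kiss_fftr.c"
  }  // namespace kissfft_float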
FIXED_POINT diff --git a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h deleted file mode 100644 index 81fcd63e..00000000 --- a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/hard_swish.h +++ /dev/null @@ -1,168 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ - -#include - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -inline int16_t SaturatingLeftShift(int16_t value, int amount) { - int64_t result = static_cast(value) * (1 << amount); - result = std::min(result, std::numeric_limits::max()); - result = std::max(result, std::numeric_limits::min()); - return result; -} - -// Similar to ARM instruction SQDMULH. -// Similar to gemmlowp::SaturatingRoundingDoublingHighMul except -// rounding to zero instead of to nearest (SQRDMULH). -inline std::int16_t SaturatingDoublingHighMul(std::int16_t a, std::int16_t b) { - bool overflow = a == b && a == std::numeric_limits::min(); - std::int32_t a_32(a); - std::int32_t b_32(b); - std::int32_t ab_32 = a_32 * b_32; - std::int16_t ab_x2_high16 = static_cast((ab_32) / (1 << 15)); - return overflow ? std::numeric_limits::max() : ab_x2_high16; -} - -template -inline void HardSwish(const RuntimeShape& input_shape, const T* input_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("ReferenceHardSwish/Float"); - auto matching_size = MatchingFlatSize(input_shape, output_shape); - const T* in_end = input_data + matching_size; - for (; input_data < in_end; input_data++, output_data++) { - const float in = *input_data; - *output_data = - in * std::min(static_cast(6), std::max(static_cast(0), in + 3)) / - 6; - } -} - -template -inline void HardSwish(const HardSwishParams& params, - const RuntimeShape& input_shape, const T* input_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("ReferenceHardSwish/Quantized"); - - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - const int16_t input_value = input_data[i] - params.input_zero_point; - // Left-shift as much as we can without overflow/saturation to put - // significant bits in the high bits of our 16-bit fixedpoint values, so - // that fixed-point approximate computations below are as accurate as - // possible. - const int16_t input_value_on_hires_input_scale = input_value * (1 << 7); - // Compute the input value on essentially the output scale, just not - // right-shifted yet. 
This is the value that we'll use in the (x >= +3) - // case, and that in the general case we'll multiply against the "relu-ish" - // fixed-point multiplier in [0, 1]. - const int16_t input_value_on_preshift_output_scale = - gemmlowp::SaturatingRoundingDoublingHighMul( - input_value_on_hires_input_scale, - params.output_multiplier_fixedpoint_int16); - // Now compute the "relu-ish multiplier". In the (-3 <= x <= +3) case, that - // is just an affine rescaling of x from [-3, 3] to [0, 1]. In the general - // case, it is just that plus saturation at the boundaries of [-3, 3]. - // First, we rescale from [-3, 3] to [-1, 1], saturating. - // That is done by rescaling the input value with a fixed-point multiplier - // (reluish_multiplier_fixedpoint) and bit-shift such that we represent - // that input value on the scale where the real value 3.0f is represented - // by the quantized value 32768. (+32768 is actually not representable as - // int16_t, so this saturates at +32767, and that is seen empirically to be - // a negligible contribution to numerical error/bias). - // - // This code is careful to correctly implement any magnitude of multiplier, - // involving either a right shift or a left shift, with correct saturation - // behavior in the left-shift case. This forces this code to be more - // complicated, but is necessary for real applications: a partially - // trained quantized MobileNet v3-small model that motivated this code - // exhibits some large [min, max] range boundaries, of the order of - // magnitude of 10 or 100 depending on layers. - // - // The next few lines are basically just an ordinary - // MultiplyByQuantizedMultiplier, except that we are more careful here - // about the fine details of saturation when left-shifting, because here - // overflow in left-shift is a common case, not an anomaly as - // MultiplyByQuantizedMultiplier assumes. - int16_t reluish_value = input_value_on_hires_input_scale; - // Shift left, saturating, as much as we can while ensuring that this - // saturation will not contribute to the result. That is, left shift amount - // reduced by 1. - if (params.reluish_multiplier_exponent > 0) { - reluish_value = SaturatingLeftShift( - reluish_value, params.reluish_multiplier_exponent - 1); - } - // Apply the fixed-point multiplier, dividing the value by a divisor - // ranging in [1, 2]. - reluish_value = gemmlowp::SaturatingRoundingDoublingHighMul( - reluish_value, params.reluish_multiplier_fixedpoint_int16); - // Apply the last bit of left-shift. Thus, in the left-shifting case, if - // any saturation affects the result, it is happening here --- any - // saturation having occurred above is overwritten here, not affecting the - // result. - if (params.reluish_multiplier_exponent > 0) { - reluish_value = SaturatingLeftShift(reluish_value, 1); - } - // Shift right, in the right-shifting case. - if (params.reluish_multiplier_exponent < 0) { - reluish_value = gemmlowp::RoundingDivideByPOT( - reluish_value, -params.reluish_multiplier_exponent); - } - // At this point we have rescaled the value into a 16bit fixedpoint - // reluish_value in [-1, 1]. - // We now convert that to a 16bit fixedpoint value in [0, 1]. - reluish_value = (reluish_value + (1 << 15)) >> 1; - // Use of SaturatingDoublingHighMul here is important to cancel the biases - // from the above SaturatingRoundingDoublingHighMul. 
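A quick numeric anchor for the Q15 arithmetic this comment block leans on (illustrative, not part of the deleted header): an int16_t value q stands for the real number q / 32768, and SaturatingDoublingHighMul computes roughly (a * b) >> 15, i.e. the product of the two represented reals, saturated at the int16 limits.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int16_t half = 1 << 14;                 // 16384 represents 0.5 in Q15
  const int32_t ab = int32_t{half} * half;      // 268435456
  const int16_t prod = static_cast<int16_t>(ab / (1 << 15));  // 8192
  assert(prod == 8192);  // 0.5 * 0.5 == 0.25, i.e. 8192 / 32768
  return 0;
}
```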
- // - // On a partially trained MobileNet-v3-small, - // - // | bias on | ImageNet - // | quantized | Top-1 - // Operation used here | values | accuracy (50k) - // --------------------------------------+------------+----------- - // SaturatingDoublingHighMul | -0.0024 | 58.920 - // SaturatingRoundingDoublingHighMul | -0.0067 | 58.064 - // - // In activations_test, this is covered by this testcase: - // QuantizedActivationsOpTest.HardSwishBias - // - const int16_t preshift_output_value = SaturatingDoublingHighMul( - reluish_value, input_value_on_preshift_output_scale); - // We were so far operating on the pre-shift output scale. Now we finally - // apply that output shift, arriving at the final output scale. - int16_t output_value = gemmlowp::RoundingDivideByPOT( - preshift_output_value, -params.output_multiplier_exponent); - output_value += params.output_zero_point; - output_value = - std::min(output_value, std::numeric_limits::max()); - output_value = - std::max(output_value, std::numeric_limits::min()); - output_data[i] = output_value; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h deleted file mode 100644 index 53197732..00000000 --- a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/mul.h +++ /dev/null @@ -1,214 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ - -#include -#include - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { - -namespace reference_ops { - -// Element-wise mul that can often be used for inner loop of broadcast Mul as -// well as the non-broadcast Mul. 
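The MulElementwise routine that follows consumes params.output_multiplier and params.output_shift without showing where they come from. For quantized Mul the real rescale factor is (s1 * s2) / s_out, folded offline into a Q31 mantissa plus a power-of-two exponent; TFLite keeps that folding in quantization_util.h as QuantizeMultiplier. A minimal sketch of the idea, with illustrative naming:

```cpp
#include <cmath>
#include <cstdint>

// Hedged sketch: derive the fixed-point multiplier MulElementwise applies.
// For Mul, real_multiplier = (s1 * s2) / s_out.
void QuantizeMultiplierSketch(double real_multiplier,
                              int32_t* quantized_multiplier, int* shift) {
  if (real_multiplier == 0.0) {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  // Split into mantissa in [0.5, 1) and exponent: real = q * 2^shift.
  const double q = std::frexp(real_multiplier, shift);
  // Represent the mantissa as a Q31 fixed-point value.
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // Rounding carried over; renormalize.
    q_fixed /= 2;
    ++*shift;
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
```

At runtime, MultiplyByQuantizedMultiplier applies the Q31 mantissa with a saturating doubling high multiply and then the rounding power-of-two shift, which is exactly what the kernel below does per element.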
-inline void MulElementwise(int size, const ArithmeticParams& params, - const uint8_t* input1_data, - const uint8_t* input2_data, uint8_t* output_data) { - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[i] = static_cast(clamped_output); - } -} - -template -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const T* input1_data, - const RuntimeShape& input2_shape, const T* input2_data, - const RuntimeShape& output_shape, T* output_data) { - T output_activation_min; - T output_activation_max; - GetActivationParams(params, &output_activation_min, &output_activation_max); - - const int flat_size = - MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] * input2_data[i], output_activation_min, - output_activation_max); - } -} - -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const std::complex* input1_data, - const RuntimeShape& input2_shape, - const std::complex* input2_data, - const RuntimeShape& output_shape, - std::complex* output_data) { - const int flat_size = - MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = input1_data[i] * input2_data[i]; - } -} - -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const uint8_t* input1_data, - const RuntimeShape& input2_shape, const uint8_t* input2_data, - const RuntimeShape& output_shape, uint8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - const int flat_size = - MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); - - MulElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void BroadcastMul4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const uint8_t* input1_data, - const RuntimeShape& input2_shape, - const uint8_t* input2_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = std::min( - params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - 
output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); - } - } - } - } -} - -template -void BroadcastMul4DSlow(const ArithmeticParams& params, - const RuntimeShape& unextended_input1_shape, - const T* input1_data, - const RuntimeShape& unextended_input2_shape, - const T* input2_data, - const RuntimeShape& unextended_output_shape, - T* output_data) { - T output_activation_min; - T output_activation_max; - GetActivationParams(params, &output_activation_min, &output_activation_max); - - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - output_data[Offset(output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] * - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - output_activation_min, output_activation_max); - } - } - } - } -} - -inline void BroadcastMul4DSlow(const ArithmeticParams& params, - const RuntimeShape& unextended_input1_shape, - const std::complex* input1_data, - const RuntimeShape& unextended_input2_shape, - const std::complex* input2_data, - const RuntimeShape& unextended_output_shape, - std::complex* output_data) { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - - for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - output_data[Offset(output_shape, b, y, x, c)] = - input1_data[SubscriptToIndex(desc1, b, y, x, c)] * - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - } - } - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/select.h b/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/select.h deleted file mode 100644 index 82b6097c..00000000 --- 
a/code/components/tflite-lib/tensorflow/lite/kernels/internal/reference/select.h +++ /dev/null @@ -1,151 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ - -#include - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -template -void Select(const RuntimeShape& input_condition_shape, - const D* input_condition_data, const RuntimeShape& input_x_shape, - const T* input_x_data, const RuntimeShape& input_y_shape, - const T* input_y_data, const RuntimeShape& output_shape, - T* output_data) { - ruy::profiler::ScopeLabel label("Select"); - int64_t flatsize; - // Allow select operator executions on mixed scalar tensors and one element - // tensors. - if (input_condition_shape.FlatSize() == 1 && input_x_shape.FlatSize() == 1 && - input_y_shape.FlatSize() == 1 && output_shape.FlatSize() == 1) { - flatsize = 1; - } else { - flatsize = MatchingFlatSize(input_condition_shape, input_x_shape, - input_y_shape, output_shape); - } - for (int64_t i = 0; i < flatsize; ++i) { - output_data[i] = - input_condition_data[i] ? input_x_data[i] : input_y_data[i]; - } -} - -template -void RankOneSelect(const RuntimeShape& input_condition_shape, - const D* input_condition_data, - const RuntimeShape& input_x_shape, const T* input_x_data, - const RuntimeShape& input_y_shape, const T* input_y_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("Select/RankOneSelect"); - const int64_t outer_size = input_condition_shape.FlatSize(); - int64_t inner_size; - if (input_condition_shape.DimensionsCount() == 0) { - inner_size = MatchingFlatSize(input_x_shape, input_y_shape, output_shape); - } else { - TFLITE_DCHECK_EQ( - MatchingDim(input_x_shape, 0, input_y_shape, 0, output_shape, 0), - outer_size); - inner_size = - MatchingFlatSizeSkipDim(input_x_shape, 0, input_y_shape, output_shape); - } - - int64_t offset = 0; - for (int64_t i = 0; i < outer_size; i++) { - const T* input_data = input_condition_data[i] ? 
input_x_data : input_y_data; - memcpy(output_data + offset, input_data + offset, inner_size * sizeof(T)); - offset += inner_size; - } -} - -template -void BroadcastSelect5DSlow(const RuntimeShape& input_condition_shape, - const D* input_condition_data, - const RuntimeShape& input_x_shape, - const T* input_x_data, - const RuntimeShape& input_y_shape, - const T* input_y_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("Select/BroadcastSelectSlow"); - TFLITE_DCHECK_LE(input_condition_shape.DimensionsCount(), 5); - TFLITE_DCHECK_LE(input_x_shape.DimensionsCount(), 5); - TFLITE_DCHECK_LE(input_y_shape.DimensionsCount(), 5); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 5); - - NdArrayDesc<5> desc_condition; - NdArrayDesc<5> desc_x; - NdArrayDesc<5> desc_y; - NdArrayDesc<5> desc_output; - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(5, output_shape); - CopyDimsToDesc(extended_output_shape, &desc_output); - NdArrayDescsForElementwiseBroadcast(input_condition_shape, input_x_shape, - input_y_shape, &desc_condition, &desc_x, - &desc_y); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest - // stride, typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for - // the best cache behavior. - for (int n = 0; n < desc_output.extents[0]; ++n) { - int out_idx_n = desc_output.extents[1] * n; - int cond_idx_n = desc_condition.strides[0] * n; - int in_idx1_n = desc_x.strides[0] * n; - int in_idx2_n = desc_y.strides[0] * n; - for (int b = 0; b < desc_output.extents[1]; ++b) { - int out_idx_b = (out_idx_n + b) * desc_output.extents[2]; - int cond_idx_b = cond_idx_n + desc_condition.strides[1] * b; - int in_idx1_b = in_idx1_n + desc_x.strides[1] * b; - int in_idx2_b = in_idx2_n + desc_y.strides[1] * b; - for (int y = 0; y < desc_output.extents[2]; ++y) { - int out_idx_y = (out_idx_b + y) * desc_output.extents[3]; - int cond_idx_y = cond_idx_b + desc_condition.strides[2] * y; - int in_idx1_y = in_idx1_b + desc_x.strides[2] * y; - int in_idx2_y = in_idx2_b + desc_y.strides[2] * y; - for (int x = 0; x < desc_output.extents[3]; ++x) { - int out_idx = (out_idx_y + x) * desc_output.extents[4]; - int cond_idx = cond_idx_y + desc_condition.strides[3] * x; - int in_idx1 = in_idx1_y + desc_x.strides[3] * x; - int in_idx2 = in_idx2_y + desc_y.strides[3] * x; - for (int c = 0; c < desc_output.extents[4]; ++c) { - output_data[out_idx] = input_condition_data[cond_idx] - ? input_x_data[in_idx1] - : input_y_data[in_idx2]; - out_idx++; - cond_idx += desc_condition.strides[4]; - in_idx1 += desc_x.strides[4]; - in_idx2 += desc_y.strides[4]; - } - } - } - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc b/code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc deleted file mode 100644 index df792264..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/all_ops_resolver.cc +++ /dev/null @@ -1,122 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/all_ops_resolver.h" - -#include "tensorflow/lite/micro/kernels/micro_ops.h" - -namespace tflite { - -AllOpsResolver::AllOpsResolver() { - // Please keep this list of Builtin Operators in alphabetical order. - AddAbs(); - AddAdd(); - AddAddN(); - AddArgMax(); - AddArgMin(); - AddAssignVariable(); - AddAveragePool2D(); - AddBatchToSpaceNd(); - AddBroadcastArgs(); - AddBroadcastTo(); - AddCallOnce(); - AddCast(); - AddCeil(); - AddCircularBuffer(); - AddConcatenation(); - AddConv2D(); - AddCos(); - AddCumSum(); - AddDepthToSpace(); - AddDepthwiseConv2D(); - AddDequantize(); - AddDetectionPostprocess(); - AddDiv(); - AddElu(); - AddEqual(); - AddEthosU(); - AddExp(); - AddExpandDims(); - AddFill(); - AddFloor(); - AddFloorDiv(); - AddFloorMod(); - AddFullyConnected(); - AddGather(); - AddGatherNd(); - AddGreater(); - AddGreaterEqual(); - AddHardSwish(); - AddIf(); - AddL2Normalization(); - AddL2Pool2D(); - AddLeakyRelu(); - AddLess(); - AddLessEqual(); - AddLog(); - AddLogicalAnd(); - AddLogicalNot(); - AddLogicalOr(); - AddLogistic(); - AddMaxPool2D(); - AddMaximum(); - AddMean(); - AddMinimum(); - AddMirrorPad(); - AddMul(); - AddNeg(); - AddNotEqual(); - AddPack(); - AddPad(); - AddPadV2(); - AddPrelu(); - AddQuantize(); - AddReadVariable(); - AddReduceMax(); - AddRelu(); - AddRelu6(); - AddReshape(); - AddResizeBilinear(); - AddResizeNearestNeighbor(); - AddRound(); - AddRsqrt(); - AddSelectV2(); - AddShape(); - AddSin(); - AddSlice(); - AddSoftmax(); - AddSpaceToBatchNd(); - AddSpaceToDepth(); - AddSplit(); - AddSplitV(); - AddSqrt(); - AddSquare(); - AddSquaredDifference(); - AddSqueeze(); - AddStridedSlice(); - AddSub(); - AddSum(); - AddSvdf(); - AddTanh(); - AddTranspose(); - AddTransposeConv(); - AddUnidirectionalSequenceLSTM(); - AddUnpack(); - AddVarHandle(); - AddWhile(); - AddZerosLike(); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.h deleted file mode 100644 index e2e5d23b..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/add.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
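The constructor above registers every builtin operator, which is convenient but links all kernels into the binary; projects that care about flash usually switch to MicroMutableOpResolver with only the ops their model needs. A hedged usage sketch, assuming the MicroInterpreter API of roughly this TFLM vintage (the exact constructor signature has shifted between releases, so treat this as illustrative):

```cpp
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"

constexpr size_t kArenaSize = 16 * 1024;  // sized per model; illustrative
static uint8_t tensor_arena[kArenaSize];

void RunModel(const void* model_data) {
  const tflite::Model* model = tflite::GetModel(model_data);
  tflite::AllOpsResolver resolver;  // every builtin kernel is available
  tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                       kArenaSize);
  if (interpreter.AllocateTensors() != kTfLiteOk) return;
  interpreter.Invoke();
}
```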
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -extern const int kAddInputTensor1; -extern const int kAddInputTensor2; -extern const int kAddOutputTensor; - -struct OpDataAdd { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; - - // Used only for float evals: - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, - TfLiteTensor* output, OpDataAdd* data); - -TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node); - -// Generic must define registration function. -TfLiteRegistration Register_ADD(); - -#if defined(CMSIS_NN) -TfLiteRegistration Register_ADD_INT8(); - -TfLiteRegistration Register_ADD_INT16(); -#else -// Fallback registration -inline TfLiteRegistration Register_ADD_INT8() { return Register_ADD(); } - -inline TfLiteRegistration Register_ADD_INT16() { return Register_ADD(); } -#endif -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_n.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_n.cc deleted file mode 100644 index 35336681..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/add_n.cc +++ /dev/null @@ -1,214 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/add_n.h" - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor0 = 0; -constexpr int kOutputTensor = 0; - -constexpr int kAddNIntegerShift = 20; - -// only used with INT8 tensors -struct OpData { - int32_t output_activation_min; - int32_t output_activation_max; - int32_t input_offset; - int32_t output_offset; - int32_t input_multiplier; - int32_t output_multiplier; - int input_shift; - int output_shift; - int left_shift; - int scratch_index; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { - int num_inputs = NumInputs(node); - TF_LITE_ENSURE(context, num_inputs >= 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* input_tensor_first = - micro_context->AllocateTempInputTensor(node, kInputTensor0); - TF_LITE_ENSURE(context, input_tensor_first != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - // Check that all tensors have the same shape and type. - TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_tensor_first->type); - for (int i = kInputTensor0 + 1; i < num_inputs; ++i) { - TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, i); - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input)); - TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type); - - // Check that all INT8 input tensors have the same zero-point and scale. 
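The zero-point/scale check above is what keeps the INT8 path below simple: with every input on one shared scale, ADD_N reduces to dequantize, sum, requantize, and the multiplier/shift machinery that follows is a fixed-point rendering of that. A hedged float-reference of the contract (names illustrative, not from this file):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Float reference for the INT8 ADD_N path prepared below, valid only because
// every input was just verified to share scale s_in / zero-point zp_in.
int8_t AddNReference(const int8_t* q, int num_inputs,
                     float s_in, int zp_in, float s_out, int zp_out) {
  float acc = 0.0f;
  for (int i = 0; i < num_inputs; ++i) {
    acc += s_in * (q[i] - zp_in);  // dequantize each input and accumulate
  }
  const long v = zp_out + std::lround(acc / s_out);  // requantize
  return static_cast<int8_t>(std::min(127L, std::max(-128L, v)));
}
```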
- if (input_tensor_first->type == kTfLiteInt8) { - TF_LITE_ENSURE(context, input_tensor_first->params.zero_point == - input->params.zero_point); - TF_LITE_ENSURE(context, - input_tensor_first->params.scale == input->params.scale); - } - - micro_context->DeallocateTempTfLiteTensor(input); - } - - if (output->type == kTfLiteFloat32) { - // Allocate scratch buffer space for pointer to each tensor's data - // and store the scratch buffer index in the node's user_data - int scratch_index; - size_t scratch_size = sizeof(float*) * num_inputs; - TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( - context, scratch_size, &scratch_index)); - node->user_data = - reinterpret_castuser_data)>(scratch_index); - } else if (output->type == kTfLiteInt8) { - node->user_data = - context->AllocatePersistentBuffer(context, sizeof(OpData)); - OpData* data = static_cast(node->user_data); - - // Allocate scratch buffer space for pointer to each tensor's data - // and store the scratch buffer index in OpData - size_t scratch_size = sizeof(int8_t*) * num_inputs; - TF_LITE_ENSURE_OK( - context, context->RequestScratchBufferInArena(context, scratch_size, - &data->scratch_index)); - - // 8bit -> 8bit general quantized path, with general rescalings - data->input_offset = -input_tensor_first->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = kAddNIntegerShift; - const double twice_max_input_scale = - 2 * static_cast(input_tensor_first->params.scale); - const double real_input_multiplier = - static_cast(input_tensor_first->params.scale) / - twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / - ((1 << data->left_shift) * static_cast(output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input_multiplier, &data->input_multiplier, &data->input_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, kTfLiteActNone, output, &data->output_activation_min, - &data->output_activation_max)); - } else { - MicroPrintf("ADD_N only supports FLOAT32 and INT8, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - micro_context->DeallocateTempTfLiteTensor(input_tensor_first); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - return CalculateOpData(context, node); -} - -template -inline const T** CopyInputsToScratchBuffer(TfLiteContext* context, - TfLiteNode* node, - const int scratch_index) { - int num_inputs = NumInputs(node); - void* scratch_buffer = context->GetScratchBuffer(context, scratch_index); - const T** all_inputs = static_cast(scratch_buffer); - for (int i = 0; i < num_inputs; i++) { - const TfLiteEvalTensor* next_input = - tflite::micro::GetEvalInput(context, node, kInputTensor0 + i); - all_inputs[i] = tflite::micro::GetTensorData(next_input); - } - - return all_inputs; -} - -template -void EvalAddN(TfLiteContext* context, TfLiteNode* node, - TfLiteEvalTensor* output) { - int num_inputs = NumInputs(node); - - int scratch_index = - static_cast(reinterpret_cast(node->user_data)); - const T** all_inputs = - CopyInputsToScratchBuffer(context, node, scratch_index); - - reference_ops::AddN(tflite::micro::GetTensorShape(output), num_inputs, - all_inputs, tflite::micro::GetTensorData(output)); -} - -template -void EvalAddNQuantized(TfLiteContext* context, TfLiteNode* 
node, - TfLiteEvalTensor* output) { - int num_inputs = NumInputs(node); - - OpData* data = static_cast(node->user_data); - const T** all_inputs = - CopyInputsToScratchBuffer(context, node, data->scratch_index); - - ArithmeticParams params; - params.left_shift = data->left_shift; - params.input1_offset = data->input_offset; - params.input1_multiplier = data->input_multiplier; - params.input1_shift = data->input_shift; - params.output_offset = data->output_offset; - params.output_multiplier = data->output_multiplier; - params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, data->output_activation_max, - ¶ms); - - reference_ops::AddN(params, tflite::micro::GetTensorShape(output), num_inputs, - all_inputs, tflite::micro::GetTensorData(output)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - if (output->type == kTfLiteFloat32) { - EvalAddN(context, node, output); - } else if (output->type == kTfLiteInt8) { - EvalAddNQuantized(context, node, output); - } else { - MicroPrintf("ADD_N only supports FLOAT32 and INT8, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_ADD_N() { - return tflite::micro::RegisterOp(nullptr, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/arg_min_max.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/arg_min_max.cc deleted file mode 100644 index d06b94a6..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/arg_min_max.cc +++ /dev/null @@ -1,115 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/arg_min_max.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace arg_min_max { - -constexpr int kInputTensor = 0; -constexpr int kAxis = 1; -constexpr int kOutputTensor = 0; - -template -inline void ArgMinMaxHelper(const RuntimeShape& input1_shape, - const T1* input1_data, const T3* input2_data, - const RuntimeShape& output_shape, T2* output_data, - bool is_arg_max) { - if (is_arg_max) { - reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Greater()); - } else { - reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Less()); - } -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* axis = - tflite::micro::GetEvalInput(context, node, kAxis); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - -#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ - ArgMinMaxHelper(tflite::micro::GetTensorShape(input), \ - tflite::micro::GetTensorData(input), \ - tflite::micro::GetTensorData(axis), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output), \ - is_arg_max) - if (axis->type == kTfLiteInt32) { - if (output->type == kTfLiteInt32) { - switch (input->type) { - case kTfLiteFloat32: - TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); - break; - case kTfLiteInt8: - TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); - break; - default: - MicroPrintf( - "Only float32, uint8_t and int8_t are " - "supported currently, got %s.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } else { - MicroPrintf("Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - MicroPrintf("Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(axis->type)); - return kTfLiteError; - } - -#undef TF_LITE_ARG_MIN_MAX - - return kTfLiteOk; -} - -TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) { - return Eval(context, node, false); -} - -TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { - return Eval(context, node, true); -} - -} // namespace arg_min_max - -TfLiteRegistration Register_ARG_MAX() { - return tflite::micro::RegisterOp(nullptr, nullptr, arg_min_max::ArgMaxEval); -} - -TfLiteRegistration Register_ARG_MIN() { - return tflite::micro::RegisterOp(nullptr, nullptr, arg_min_max::ArgMinEval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/batch_to_space_nd.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/batch_to_space_nd.cc deleted file mode 100644 index eebf7c68..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/batch_to_space_nd.cc +++ /dev/null @@ -1,111 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { - -namespace { - -constexpr int kInputTensor = 0; -constexpr int kBlockShapeTensor = 1; -constexpr int kCropsTensor = 2; -constexpr int kOutputTensor = 0; - -// Currently, only 3D NHC and 4D NHWC input/output op_context are supported. -// In case of 3D input, it will be extended to 3D NHWC by adding W=1. -// The 4D array need to have exactly 2 spatial dimensions. -// TODO(b/149952582): Support arbitrary dimension in SpaceToBatchND. -const int kInputOutputMinDimensionNum = 3; -const int kInputOutputMaxDimensionNum = 4; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - MicroContext* micro_context = GetMicroContext(context); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kInputTensor); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, input != nullptr && output != nullptr); - - TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); - TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum); - TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum); - TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* block_shape = - tflite::micro::GetEvalInput(context, node, kBlockShapeTensor); - const TfLiteEvalTensor* crops = - tflite::micro::GetEvalInput(context, node, kCropsTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { // Already know in/out types are same. 
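For orientation before the Eval dispatch: BATCH_TO_SPACE_ND folds the batch dimension back into spatial blocks and then crops the result. Under the 4D NHWC restriction enforced by Prepare above, the shape relation is as follows (a sketch following the general TFLite operator semantics, not code from the deleted file):

```cpp
// Illustrative shape computation for 4D NHWC BATCH_TO_SPACE_ND.
// crops is laid out as {top, bottom, left, right}.
struct Shape4 { int n, h, w, c; };

Shape4 BatchToSpaceOutputShape(Shape4 in, int block_h, int block_w,
                               const int crops[4]) {
  Shape4 out;
  out.n = in.n / (block_h * block_w);            // batch shrinks by the block
  out.h = in.h * block_h - crops[0] - crops[1];  // spatial dims grow, less crop
  out.w = in.w * block_w - crops[2] - crops[3];
  out.c = in.c;                                  // depth unchanged
  return out;
}
```

For example, an input of shape [4, 1, 1, 1] with block shape [2, 2] and zero crops yields an output of shape [1, 2, 2, 1].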
- case kTfLiteFloat32: - reference_ops::BatchToSpaceND( - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(block_shape), - tflite::micro::GetTensorData(block_shape), - tflite::micro::GetTensorShape(crops), - tflite::micro::GetTensorData(crops), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt8: - reference_ops::BatchToSpaceND( - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(block_shape), - tflite::micro::GetTensorData(block_shape), - tflite::micro::GetTensorShape(crops), - tflite::micro::GetTensorData(crops), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - default: - MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), - input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace. - -TfLiteRegistration Register_BATCH_TO_SPACE_ND() { - return tflite::micro::RegisterOp(nullptr, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.cc deleted file mode 100644 index a45a8d26..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/circular_buffer.cc +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/circular_buffer.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/flatbuffer_utils.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -/* - * The circular buffer custom operator is used to implement strided streaming - * convolutions on TFLite Micro. Each time this operator is invoked, it checks - * whether or not to run, based on a predetermined stride in time. If the op - * runs, it inserts the input into the end of the output buffer and shifts the - * output values towards the start of the buffer. It discards the oldest value - * in the output buffer. - * - * Input: [, , , ] - * - * After shifting: - * Output: [, , , ] - * - * We make some assumptions in this custom operator: - * - Input shape must be [1, 1, 1, depth] - * - Output shape must be [1, num_slots, 1, depth] - * - Input and output types must match. - * - Input and output quantization params must be identical. 
- */ -namespace tflite { - -void* CircularBufferInit(TfLiteContext* context, const char* buffer, - size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - OpDataCircularBuffer* op_data = static_cast( - context->AllocatePersistentBuffer(context, sizeof(OpDataCircularBuffer))); - - if (buffer != nullptr && length > 0) { - const uint8_t* buffer_t = reinterpret_cast(buffer); - tflite::FlexbufferWrapper wrapper(buffer_t, length); - op_data->cycles_max = wrapper.ElementAsInt32(kCircularBufferCyclesMaxIndex); - } else { - op_data->cycles_max = 0; - } - - return op_data; -} - -// Shifts buffer over by the output depth, and write new input to end of buffer. -// num_slots is the number of samples stored in the output buffer. -// depth is the size of each sample. -void EvalInt8(const int8_t* input, int num_slots, int depth, int8_t* output) { - memmove(output, &output[depth], (num_slots - 1) * depth); - memcpy(&output[(num_slots - 1) * depth], input, depth); -} - -TfLiteStatus CircularBufferEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kCircularBufferInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kCircularBufferOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - OpDataCircularBuffer* data = - reinterpret_cast(node->user_data); - - int num_slots = output->dims->data[1]; - int depth = output->dims->data[2] * output->dims->data[3]; - - if (input->type == kTfLiteInt8) { - EvalInt8(tflite::micro::GetTensorData(input), num_slots, depth, - tflite::micro::GetTensorData(output)); - } else { - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - if (--data->cycles_until_run != 0) { - // Signal the interpreter to end current run if the delay before op invoke - // has not been reached. - // TODO(b/149795762): Add kTfLiteAbort to TfLiteStatus enum. - return static_cast(kTfLiteAbort); - } - - data->cycles_until_run = data->cycles_max; - - return kTfLiteOk; -} - -TfLiteRegistration* Register_CIRCULAR_BUFFER() { - static TfLiteRegistration r = tflite::micro::RegisterOp( - CircularBufferInit, CircularBufferPrepare, CircularBufferEval); - return &r; -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/comparisons.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/comparisons.cc deleted file mode 100644 index 409373fb..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/comparisons.cc +++ /dev/null @@ -1,617 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
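One detail from circular_buffer.cc above deserves a standalone illustration: EvalInt8 realizes the "shift and append" with a memmove/memcpy pair over raw int8 bytes. A self-contained sketch of the same update (illustrative naming; int8_t elements are one byte, so byte counts equal element counts):

```cpp
#include <cstdint>
#include <cstring>

// With num_slots = 3 and depth = 2:
//   output {a0,a1, b0,b1, c0,c1} + input {d0,d1} -> {b0,b1, c0,c1, d0,d1}
void ShiftRing(const int8_t* input, int num_slots, int depth, int8_t* output) {
  // Slide slots 1..num_slots-1 down by one slot, discarding the oldest.
  std::memmove(output, &output[depth], (num_slots - 1) * depth);
  // Append the newest sample at the tail.
  std::memcpy(&output[(num_slots - 1) * depth], input, depth);
}
```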
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/comparisons.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace comparisons { -namespace { - -struct OpData { - ComparisonParams params; -}; - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteBool: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -// TODO(renjieliu): Refactor the logic to avoid duplications. -TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteBool: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowNotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowGreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowGreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowLessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowLessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - MicroPrintf("Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - MicroContext* micro_context = GetMicroContext(context); - - TfLiteTensor* input1 = - micro_context->AllocateTempInputTensor(node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - TfLiteTensor* input2 = - micro_context->AllocateTempInputTensor(node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - - if (input1->type == kTfLiteInt8) { - auto input1_offset = -input1->params.zero_point; - auto input2_offset = -input2->params.zero_point; - const int kLeftShift = 8; - - int32_t input1_multiplier; - int input1_shift; - QuantizeMultiplierSmallerThanOneExp( - static_cast(input1->params.scale), &input1_multiplier, - &input1_shift); - int32_t input2_multiplier; - int input2_shift; - QuantizeMultiplierSmallerThanOneExp( - static_cast(input2->params.scale), &input2_multiplier, - &input2_shift); - - data->params.left_shift = kLeftShift; - data->params.input1_offset = input1_offset; - data->params.input1_multiplier = input1_multiplier; - data->params.input1_shift = input1_shift; - data->params.input2_offset = input2_offset; - data->params.input2_multiplier = input2_multiplier; - data->params.input2_shift = input2_shift; - } - - micro_context->DeallocateTempTfLiteTensor(input1); - micro_context->DeallocateTempTfLiteTensor(input2); - - return kTfLiteOk; -} - -} // namespace comparisons - -TfLiteRegistration Register_EQUAL() { - return tflite::micro::RegisterOp(comparisons::Init, comparisons::Prepare, - comparisons::EqualEval); -} - -TfLiteRegistration Register_NOT_EQUAL() { - return tflite::micro::RegisterOp(comparisons::Init, comparisons::Prepare, - comparisons::NotEqualEval); -} - -TfLiteRegistration Register_GREATER() { - return tflite::micro::RegisterOp(comparisons::Init, comparisons::Prepare, - comparisons::GreaterEval); -} - -TfLiteRegistration Register_GREATER_EQUAL() { - return tflite::micro::RegisterOp(comparisons::Init, comparisons::Prepare, - comparisons::GreaterEqualEval); -} - -TfLiteRegistration Register_LESS() { - return tflite::micro::RegisterOp(comparisons::Init, comparisons::Prepare, - comparisons::LessEval); -} - -TfLiteRegistration Register_LESS_EQUAL() { - return tflite::micro::RegisterOp(comparisons::Init, comparisons::Prepare, - comparisons::LessEqualEval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/concatenation.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/concatenation.cc deleted file mode 100644 index 8b4d68d9..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/concatenation.cc 
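The int8 branches above cannot compare raw quantized values directly when the two inputs carry different scales or zero points, which is why the Prepare() shown above precomputes left_shift = 8 plus a multiplier/shift pair per input via QuantizeMultiplierSmallerThanOneExp. Below is a minimal standalone sketch of the idea behind the *WithScaling comparisons, using doubles instead of TFLite's fixed-point helpers; all names here are illustrative, not part of the library.

#include <cstdint>
#include <cstdio>

// Illustrative sketch only: compare two int8-quantized values that use
// different quantization parameters, mirroring what EqualWithScaling does.
// TFLite Micro performs this in fixed point (left_shift + quantized
// multipliers); doubles are used here for clarity.
struct QuantParams {
  double scale;
  int32_t zero_point;
};

// Dequantize both operands into the real domain, then compare. The inputs
// below are chosen so both products round to exactly 0.5, so operator== is
// safe in this demonstration.
bool QuantizedEqual(int8_t a, QuantParams pa, int8_t b, QuantParams pb) {
  const double real_a = pa.scale * (a - pa.zero_point);
  const double real_b = pb.scale * (b - pb.zero_point);
  return real_a == real_b;
}

int main() {
  // The real value 0.5 represented under two different quantizations.
  QuantParams pa{0.05, 0};    // 0.5 -> q = 10
  QuantParams pb{0.025, 20};  // 0.5 -> q = 40
  printf("%d\n", QuantizedEqual(10, pa, 40, pb));  // prints 1
  return 0;
}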
+++ /dev/null @@ -1,262 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/concatenation.h" - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace concatenation { - -constexpr int kMaxInputNum = 10; // Maximum number of input tensors -constexpr int kOutputTensor = 0; - -struct OpData { - ConcatenationParams params; -}; - -// Handles negative axis index, coerces to positive index value. -inline int CalculatePositiveAxis(int axis, const TfLiteTensor* output_tensor) { - if (axis >= 0) { - return axis; - } else { - return NumDimensions(output_tensor) + axis; - } -} - -// The following functions are helpers to get tensor data in the format that the -// reference op implementation expects. They provide the same functionality as -// class VectorOfTensors and class VectorOfQuantizedTensors in TFLite. - -// Gets shapes from a list of tensors. -inline void GetAllInputTensorShapes(const TfLiteContext* context, - const TfLiteNode* node, - RuntimeShape all_shapes[kMaxInputNum]) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); - RuntimeShape shape = tflite::micro::GetTensorShape(t); - all_shapes[i].ReplaceWith(shape.DimensionsCount(), shape.DimsData()); - } -} - -// Get shape pointers from a list of shapes. -inline void GetShapesPointers(const RuntimeShape* shapes, size_t num, - const RuntimeShape* pointers[]) { - for (size_t i = 0; i < num; ++i) { - pointers[i] = &shapes[i]; - } -} - -// Gets data pointers from a list of tensors. 
-template -inline void GetAllInputTensorData(const TfLiteContext* context, - const TfLiteNode* node, - T* all_data[kMaxInputNum]) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); - all_data[i] = tflite::micro::GetTensorData(t); - } -} - -template -void EvalUnquantized(TfLiteContext* context, TfLiteNode* node) { - // Collect the shapes and data pointer of input tensors - RuntimeShape inputs_shape[kMaxInputNum]; - const RuntimeShape* inputs_shape_ptr[kMaxInputNum]; - const data_type* inputs_data[kMaxInputNum]; - GetAllInputTensorShapes(context, node, inputs_shape); - GetShapesPointers(inputs_shape, node->inputs->size, inputs_shape_ptr); - GetAllInputTensorData(context, node, inputs_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - reference_ops::Concatenation(data->params, inputs_shape_ptr, inputs_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - // This function only checks the types. Additional shape validations are - // performed in the reference implementation called during Eval(). - const TfLiteConcatenationParams* params = - reinterpret_cast(node->builtin_data); - - MicroContext* micro_context = GetMicroContext(context); - - TfLiteTensor* input_tensor = micro_context->AllocateTempInputTensor(node, 0); - TF_LITE_ENSURE(context, input_tensor != nullptr); - TfLiteType input_type = input_tensor->type; - TfLiteTensor* output_tensor = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output_tensor != nullptr); - TfLiteType output_type = output_tensor->type; - - micro_context->DeallocateTempTfLiteTensor(input_tensor); - micro_context->DeallocateTempTfLiteTensor(output_tensor); - - // Check activation and input type - TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); - TF_LITE_ENSURE(context, - input_type == kTfLiteFloat32 || input_type == kTfLiteInt8 || - input_type == kTfLiteInt16 || input_type == kTfLiteInt32 || - input_type == kTfLiteInt64 || input_type == kTfLiteBool); - - // Output type must match input type - TF_LITE_ENSURE_EQ(context, output_type, input_type); - - // This implementation does not support large number of input tensors - const int num_inputs = NumInputs(node); - TF_LITE_ENSURE(context, num_inputs <= kMaxInputNum); - - // Shapes with dimensions >4 are not yet supported with static allocation. - for (int i = 0; i < num_inputs; ++i) { - TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, i); - TF_LITE_ENSURE(context, input != nullptr); - int num_dimensions = NumDimensions(input); - - if (num_dimensions > RuntimeShape::kMaxSmallSize) { - MicroPrintf( - "Op Concatenation does not currently support num dimensions > %d " - "Tensor has %d dimensions.", - RuntimeShape::kMaxSmallSize, num_dimensions); - return kTfLiteError; - } - micro_context->DeallocateTempTfLiteTensor(input); - } - - // Calculate OpData. 
-  TFLITE_DCHECK(node->user_data != nullptr);
-  OpData* data = static_cast<OpData*>(node->user_data);
-
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  switch (output_type) {  // Already know in/out types are same.
-    case kTfLiteBool:
-    case kTfLiteFloat32:
-    case kTfLiteInt16:
-    case kTfLiteInt32:
-    case kTfLiteInt64: {
-      data->params.axis = CalculatePositiveAxis(params->axis, output);
-      data->params.inputs_count = node->inputs->size;
-      break;
-    }
-    case kTfLiteInt8: {
-      data->params.axis = CalculatePositiveAxis(params->axis, output);
-      data->params.inputs_count = node->inputs->size;
-
-      float* input_scales =
-          reinterpret_cast<float*>(context->AllocatePersistentBuffer(
-              context, node->inputs->size * sizeof(float)));
-
-      int32_t* input_zero_points =
-          reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
-              context, node->inputs->size * sizeof(int32_t)));
-
-      // Allocate persistent scale and zero point buffers.
-      // Store input scale and zero point values in OpParams:
-      for (int i = 0; i < node->inputs->size; ++i) {
-        TfLiteTensor* t = micro_context->AllocateTempInputTensor(node, i);
-        TF_LITE_ENSURE(context, t != nullptr);
-        input_scales[i] = t->params.scale;
-        input_zero_points[i] = t->params.zero_point;
-        micro_context->DeallocateTempTfLiteTensor(t);
-      }
-
-      data->params.input_scale = input_scales;
-      data->params.input_zeropoint = input_zero_points;
-      data->params.output_zeropoint = output->params.zero_point;
-      data->params.output_scale = output->params.scale;
-      break;
-    }
-    default:
-      MicroPrintf("Op Concatenation does not currently support Type '%s'.",
-                  TfLiteTypeGetName(output_type));
-      return kTfLiteError;
-  }
-
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* output_tensor =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  TF_LITE_ENSURE(context, output_tensor != nullptr);
-  TfLiteType output_type = output_tensor->type;
-
-  switch (output_type) {  // Already know in/out types are same.
-    case kTfLiteFloat32:
-      EvalUnquantized<float>(context, node);
-      break;
-    case kTfLiteInt32:
-      EvalUnquantized<int32_t>(context, node);
-      break;
-    case kTfLiteInt8:
-      EvalUnquantized<int8_t>(context, node);
-      break;
-    case kTfLiteInt64:
-      EvalUnquantized<int64_t>(context, node);
-      break;
-    case kTfLiteInt16:
-      EvalUnquantized<int16_t>(context, node);
-      break;
-    case kTfLiteBool:
-      EvalUnquantized<bool>(context, node);
-      break;
-
-    default:
-      MicroPrintf("Op Concatenation does not currently support Type '%s'.",
-                  TfLiteTypeGetName(output_type));
-      return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace concatenation
-
-TfLiteRegistration Register_CONCATENATION() {
-  return tflite::micro::RegisterOp(concatenation::Init, concatenation::Prepare,
-                                   concatenation::Eval);
-}
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/cumsum.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/cumsum.cc
deleted file mode 100644
index 751654fe..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/cumsum.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
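The concatenation kernel deleted above coerces a negative axis with CalculatePositiveAxis() and then walks per-input shape and data pointers before handing everything to reference_ops::Concatenation. Below is a self-contained sketch of those semantics for two row-major 2-D float buffers; the helpers are illustrative, not the TFLite API.

#include <cstdio>
#include <vector>

// Illustrative: negative axes count from the back, exactly like
// CalculatePositiveAxis() in the deleted kernel.
int PositiveAxis(int axis, int num_dims) {
  return axis >= 0 ? axis : num_dims + axis;
}

// Concatenate two 2-D row-major matrices along `axis` (0 = rows, 1 = cols).
// For axis 0 the column counts must match; for axis 1 the row counts must.
std::vector<float> Concat2D(const std::vector<float>& a, int ar, int ac,
                            const std::vector<float>& b, int br, int bc,
                            int axis) {
  std::vector<float> out;
  if (axis == 0) {  // stack rows: simply append the second buffer
    out = a;
    out.insert(out.end(), b.begin(), b.end());
  } else {          // axis == 1: splice the rows together
    for (int r = 0; r < ar; ++r) {
      out.insert(out.end(), a.begin() + r * ac, a.begin() + (r + 1) * ac);
      out.insert(out.end(), b.begin() + r * bc, b.begin() + (r + 1) * bc);
    }
  }
  return out;
}

int main() {
  std::vector<float> a = {1, 2, 3, 4};  // 2x2
  std::vector<float> b = {5, 6};        // 2x1
  int axis = PositiveAxis(-1, 2);       // -1 -> 1 for rank-2 tensors
  std::vector<float> c = Concat2D(a, 2, 2, b, 2, 1, axis);
  for (float v : c) printf("%g ", v);   // 1 2 5 3 4 6
  return 0;
}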
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/cumsum.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kAxisTensor = 1; -constexpr int kOutputTensor = 0; - -constexpr int kCumSumIntegerShift = 20; - -// only used with INT8 tensors -struct OpData { - int32_t output_activation_min; - int32_t output_activation_max; - int32_t input_offset; - int32_t output_offset; - int32_t input_multiplier; - int32_t output_multiplier; - int input_shift; - int output_shift; - int left_shift; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - MicroContext* micro_context = GetMicroContext(context); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kInputTensor); - TfLiteTensor* axis = - micro_context->AllocateTempInputTensor(node, kAxisTensor); - - TF_LITE_ENSURE(context, - input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, axis->type, kTfLiteInt32); - - TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); - - TF_LITE_ENSURE(context, NumDimensions(input) >= 1); - - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE(context, HaveSameShapes(input, output)); - - if (output->type == kTfLiteInt8) { - node->user_data = - context->AllocatePersistentBuffer(context, sizeof(OpData)); - OpData* data = static_cast(node->user_data); - - // 8bit -> 8bit general quantized path, with general rescalings - data->input_offset = -input->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = kCumSumIntegerShift; - const double twice_max_input_scale = - 2 * static_cast(input->params.scale); - const double real_input_multiplier = - static_cast(input->params.scale) / twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / - ((1 << data->left_shift) * static_cast(output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input_multiplier, &data->input_multiplier, &data->input_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, kTfLiteActNone, output, &data->output_activation_min, - &data->output_activation_max)); - } - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(axis); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - return CalculateOpData(context, node); -} - -TfLiteStatus 
Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  const TfLiteEvalTensor* axis_tensor =
-      tflite::micro::GetEvalInput(context, node, kAxisTensor);
-
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  auto* cs_params = static_cast<TfLiteCumsumParams*>(node->builtin_data);
-  auto input_shape = tflite::micro::GetTensorShape(input);
-
-  int32_t axis = *tflite::micro::GetTensorData<int32_t>(axis_tensor);
-  if (axis < 0) axis += input_shape.DimensionsCount();
-
-  if (axis < 0 || axis >= input_shape.DimensionsCount()) {
-    MicroPrintf("CUMSUM Invalid axis: %d", axis);
-    return kTfLiteError;
-  }
-
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      reference_ops::CumSum(tflite::micro::GetTensorData<float>(input),
-                            input_shape, axis, cs_params->exclusive,
-                            cs_params->reverse,
-                            tflite::micro::GetTensorData<float>(output));
-      return kTfLiteOk;
-    } break;
-
-    case kTfLiteInt8: {
-      auto* data = static_cast<OpData*>(node->user_data);
-      ArithmeticParams params;
-      params.left_shift = data->left_shift;
-      params.input1_offset = data->input_offset;
-      params.input1_multiplier = data->input_multiplier;
-      params.input1_shift = data->input_shift;
-      params.output_offset = data->output_offset;
-      params.output_multiplier = data->output_multiplier;
-      params.output_shift = data->output_shift;
-      SetActivationParams(data->output_activation_min,
-                          data->output_activation_max, &params);
-      reference_ops::CumSum(params,
-                            tflite::micro::GetTensorData<int8_t>(input),
-                            input_shape, axis, cs_params->exclusive,
-                            cs_params->reverse,
-                            tflite::micro::GetTensorData<int8_t>(output));
-      return kTfLiteOk;
-    } break;
-
-    default: {
-      MicroPrintf("CUMSUM only supports FLOAT32 and INT8, got %s.",
-                  TfLiteTypeGetName(output->type));
-      return kTfLiteError;
-    }
-  }
-
-  return kTfLiteError;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_CUMSUM() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/depth_to_space.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/depth_to_space.cc
deleted file mode 100644
index 4dda7192..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/depth_to_space.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
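For reference, the CUMSUM kernel deleted above normalizes a possibly negative axis against the input rank and supports exclusive and reverse variants; the int8 path reuses the ArithmeticParams requantization machinery. Below is a minimal 1-D sketch of the flag semantics only, independent of reference_ops::CumSum (the helper name is illustrative).

#include <cstdio>
#include <vector>

// Illustrative sketch of CUMSUM's exclusive/reverse flags on a 1-D array.
std::vector<float> CumSum1D(const std::vector<float>& in, bool exclusive,
                            bool reverse) {
  const int n = static_cast<int>(in.size());
  std::vector<float> out(n);
  float acc = 0.f;
  for (int k = 0; k < n; ++k) {
    const int i = reverse ? n - 1 - k : k;   // walk backwards when reversed
    out[i] = exclusive ? acc : acc + in[i];  // exclusive: sum of prior items
    acc += in[i];
  }
  return out;
}

int main() {
  std::vector<float> x = {1, 2, 3, 4};
  for (float v : CumSum1D(x, false, false)) printf("%g ", v);  // 1 3 6 10
  printf("\n");
  for (float v : CumSum1D(x, true, false)) printf("%g ", v);   // 0 1 3 6
  printf("\n");
  for (float v : CumSum1D(x, false, true)) printf("%g ", v);   // 10 9 7 4
  return 0;
}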
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/depth_to_space.h" - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -// input/output tensor shape rank associations -constexpr int kBatchRank = 0; -constexpr int kHeightRank = 1; -constexpr int kWidthRank = 2; -constexpr int kDepthRank = 3; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { - auto* params = - reinterpret_cast(node->builtin_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - MicroContext* micro_context = GetMicroContext(context); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); - - auto data_type = output->type; - TF_LITE_ENSURE(context, - data_type == kTfLiteFloat32 || data_type == kTfLiteInt8); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - const int block_size = params->block_size; - TF_LITE_ENSURE(context, block_size > 0); - const int input_height = input->dims->data[kHeightRank]; - const int input_width = input->dims->data[kWidthRank]; - const int input_channels = input->dims->data[kDepthRank]; - int output_height = input_height * block_size; - int output_width = input_width * block_size; - int output_channels = input_channels / block_size / block_size; - - TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size); - TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size); - TF_LITE_ENSURE_EQ(context, input_channels, - output_channels * block_size * block_size); - - // We must update the output tensor dimensions. - // The dims storage is expected to be the same area in memory - // for both TfLiteTensor and TfLiteEvalTensor. This is important - // because TfLiteTensor in the MicroInterpreter is a temporary - // allocation. For the KernelRunner interpreter, TfLiteEvalTensor - // is a temporary allocation. We must therefore relocate the dims - // from the FlatBuffer to the persistant storage arena. 
- TfLiteEvalTensor* output_eval = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( - context, output, output_eval)); - output->dims->data[kBatchRank] = input->dims->data[kBatchRank]; - output->dims->data[kHeightRank] = output_height; - output->dims->data[kWidthRank] = output_width; - output->dims->data[kDepthRank] = output_channels; - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - return CalculateOpData(context, node); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = - reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - tflite::DepthToSpaceParams op_params; - op_params.block_size = static_cast(params->block_size); - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: - reference_ops::DepthToSpace(op_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt8: - reference_ops::DepthToSpace(op_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - default: - MicroPrintf("DEPTH_TO_SPACE only supports FLOAT32 and INT8, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DEPTH_TO_SPACE() { - return tflite::micro::RegisterOp(nullptr, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/depthwise_conv.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/depthwise_conv.cc deleted file mode 100644 index c2ed8892..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/depthwise_conv.cc +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
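The DEPTH_TO_SPACE Prepare() deleted above derives the output shape from block_size: height and width are multiplied by it, channels are divided by its square, and the batch dimension is untouched; the ENSURE checks then verify the same relations in reverse. A tiny sketch of that shape bookkeeping follows (the struct and helper are illustrative, not TFLite types).

#include <cstdio>

// Illustrative sketch of the DEPTH_TO_SPACE shape math checked in the
// deleted Prepare(): N stays, H and W scale up by block, C shrinks by
// block * block.
struct Shape4 { int n, h, w, c; };

Shape4 DepthToSpaceShape(Shape4 in, int block) {
  return {in.n, in.h * block, in.w * block, in.c / (block * block)};
}

int main() {
  Shape4 in{1, 8, 8, 16};
  Shape4 out = DepthToSpaceShape(in, 2);  // -> 1 x 16 x 16 x 4
  printf("%d x %d x %d x %d\n", out.n, out.h, out.w, out.c);
  return 0;
}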
-==============================================================================*/
-
-#include "tensorflow/lite/micro/kernels/depthwise_conv.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/padding.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpDataConv));
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-
-  auto& params =
-      *(reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data));
-  const OpDataConv& data = *(static_cast<const OpDataConv*>(node->user_data));
-
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor);
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor);
-  const TfLiteEvalTensor* filter =
-      tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor);
-  const TfLiteEvalTensor* bias =
-      (NumInputs(node) == 3)
-          ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
-          : nullptr;
-
-  switch (input->type) {  // Already know in/out types are same.
-    case kTfLiteFloat32: {
-      tflite::reference_ops::DepthwiseConv(
-          DepthwiseConvParamsFloat(params, data),
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<float>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<float>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetOptionalTensorData<float>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<float>(output));
-      break;
-    }
-    case kTfLiteInt8: {
-      reference_integer_ops::DepthwiseConvPerChannel(
-          DepthwiseConvParamsQuantized(params, data),
-          data.per_channel_output_multiplier, data.per_channel_output_shift,
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<int8_t>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetOptionalTensorData<int32_t>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-      break;
-    }
-    default:
-      MicroPrintf("Type %s (%d) not supported.",
-                  TfLiteTypeGetName(input->type), input->type);
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_DEPTHWISE_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, DepthwiseConvPrepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/div.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/div.cc
deleted file mode 100644
index 34bf6d7c..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/div.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
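The depthwise convolution Eval() deleted above only dispatches: float inputs go to a reference loop, int8 inputs to the per-channel quantized path, and the bias tensor is optional (present only when the node has three inputs). Below is a reduced float sketch of the math being dispatched to, assuming stride 1, VALID padding and depth_multiplier = 1; it mirrors the semantics, not reference_ops::DepthwiseConv itself, and all names are illustrative.

#include <cstdio>

// Illustrative depthwise conv: each channel is convolved with its own
// kh x kw filter slice, never mixed with other channels. Layout is HWC.
void DepthwiseConv2D(const float* in, int ih, int iw, int c,
                     const float* filt, int kh, int kw,
                     const float* bias, float* out) {
  const int oh = ih - kh + 1, ow = iw - kw + 1;
  for (int y = 0; y < oh; ++y)
    for (int x = 0; x < ow; ++x)
      for (int ch = 0; ch < c; ++ch) {
        float acc = bias ? bias[ch] : 0.f;  // bias is optional, as above
        for (int ky = 0; ky < kh; ++ky)
          for (int kx = 0; kx < kw; ++kx)
            acc += in[((y + ky) * iw + (x + kx)) * c + ch] *
                   filt[(ky * kw + kx) * c + ch];
        out[(y * ow + x) * c + ch] = acc;
      }
}

int main() {
  // 3x3 single-channel input, 2x2 filter of ones: each output sums a window.
  const float in[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const float filt[4] = {1, 1, 1, 1};
  float out[4];
  DepthwiseConv2D(in, 3, 3, 1, filt, 2, 2, nullptr, out);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 12 16 24 28
  return 0;
}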
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/div.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpDataDiv { - // Parameters used in the quantized paths where the output is 8bit - int32_t input1_zero_point; - int32_t input2_zero_point; - int32_t output_zero_point; - int32_t output_activation_min; - int32_t output_activation_max; - - // Parameters used in all quantized paths - int32_t output_multiplier; - int output_shift; -}; - -TfLiteStatus CalculateOpDataDiv(TfLiteContext* context, TfLiteTensor* input1, - TfLiteTensor* input2, TfLiteTensor* output, - TfLiteDivParams* params, OpDataDiv* data) { - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); - - if (output->type == kTfLiteInt8) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - const double real_multiplier = static_cast( - input1->params.scale / (input2->params.scale * output->params.scale)); - QuantizeMultiplier(real_multiplier, &data->output_multiplier, - &data->output_shift); - data->input1_zero_point = input1->params.zero_point; - data->input2_zero_point = input2->params.zero_point; - data->output_zero_point = output->params.zero_point; - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpDataDiv)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* input1 = - micro_context->AllocateTempInputTensor(node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - TfLiteTensor* input2 = - micro_context->AllocateTempInputTensor(node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - OpDataDiv* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - TF_LITE_ENSURE_STATUS( - CalculateOpDataDiv(context, input1, input2, output, params, data)); - - micro_context->DeallocateTempTfLiteTensor(input1); - 
micro_context->DeallocateTempTfLiteTensor(input2); - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params, - const OpDataDiv* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params = {}; - -#define TF_LITE_DIV(type, opname, data_type) \ - data_type output_activation_min, output_activation_max; \ - CalculateActivationRange(params->activation, &output_activation_min, \ - &output_activation_max); \ - SetActivationParams(output_activation_min, output_activation_max, \ - &op_params); \ - type::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - - bool requires_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (requires_broadcast) { - TF_LITE_DIV(reference_ops, BroadcastDivSlow, float); - } else { - TF_LITE_DIV(reference_ops, Div, float); - } -#undef TF_LITE_DIV -} - -TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteDivParams* params, const OpDataDiv* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params = {}; - -#define TF_LITE_DIV(type, opname, dtype) \ - type::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - - if (input1->type == kTfLiteInt8 && input2->type == kTfLiteInt8 && - output->type == kTfLiteInt8) { - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - op_params.input1_offset = -data->input1_zero_point; - op_params.input2_offset = -data->input2_zero_point; - op_params.output_offset = data->output_zero_point; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - - bool requires_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (requires_broadcast) { - TF_LITE_DIV(reference_ops, BroadcastDivSlow, int8_t); - } else { - TF_LITE_DIV(reference_ops, Div, int8_t); - } -#undef TF_LITE_DIV - } else { - MicroPrintf("Unsupported combination of input and output types in DIV."); - return kTfLiteError; - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = static_cast(node->builtin_data); - TFLITE_DCHECK(node->user_data != nullptr); - auto* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - if (output->type == kTfLiteFloat32) { - EvalDiv(context, node, params, data, input1, input2, output); - } else if (output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalQuantized(context, node, params, data, - 
input1, input2, output)); - } else { - MicroPrintf( - "DIV only supports FLOAT32, quantized INT8 " - "now, got type %s (%d).", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DIV() { - return tflite::micro::RegisterOp(Init, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/elementwise.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/elementwise.cc deleted file mode 100644 index bb3c6545..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/elementwise.cc +++ /dev/null @@ -1,429 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace elementwise { -namespace { - -constexpr int kAbsNameId = 0; -constexpr int kRsrqtNameId = 1; - -const int kElementwiseInputTensor = 0; -const int kElementwiseOutputTensor = 0; - -struct OpDataAbsRsqrt { - int32_t multiplier; - int shift; - int input_offset; - int output_offset; - bool needs_rescale; - TfLiteQuantizationType input_quantization_type; - TfLiteType input_type; -}; - -bool IsNumericSupportedType(const TfLiteType type) { - return type == kTfLiteFloat32; -} - -bool IsLogicalSupportedType(const TfLiteType type) { - return type == kTfLiteBool; -} - -bool IsAbsSupportedType(const TfLiteType type) { - return type == kTfLiteFloat32 || type == kTfLiteInt8 || type == kTfLiteInt16; -} - -bool IsRsqrtSupportedType(const TfLiteType type) { - return type == kTfLiteFloat32 || type == kTfLiteInt8; -} - -inline void SetAbsOutputMultiplier(const float input_scale, - const float output_scale, - int32_t* multiplier, int* shift) { - QuantizeMultiplier(static_cast(input_scale / output_scale), - multiplier, shift); -} - -inline void SetRsqrtOutputMultiplier(const float input_scale, - const float output_scale, - int32_t* multiplier, int* shift) { - const double scale = - 1. 
/ static_cast((std::sqrt(input_scale) * output_scale)); - QuantizeMultiplier(scale, multiplier, shift); -} - -typedef bool (*IsSupportedType)(TfLiteType); -template -TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kElementwiseInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kElementwiseOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (!IsSupportedType(input->type)) { - MicroPrintf("Input data type %s (%d) is not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -typedef bool (*IsSupportedType)(TfLiteType); -template -TfLiteStatus PrepareAbsRsqrt(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (!IsSupportedType(input->type)) { - MicroPrintf("Input data type %s (%d) is not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - auto* op_data = static_cast(node->user_data); - op_data->input_type = input->type; - - // For int16 type input, we support both quantized and non-quantized - // evaluation. 
- if (op_nameid == kAbsNameId) { - op_data->input_quantization_type = input->quantization.type; - } - - if (input->type == kTfLiteInt8 || - (input->type == kTfLiteInt16 && - input->quantization.type != kTfLiteNoQuantization)) { - TF_LITE_ENSURE_EQ(context, input->quantization.type, - kTfLiteAffineQuantization); - TF_LITE_ENSURE_EQ(context, output->quantization.type, - kTfLiteAffineQuantization); - const auto* input_params = - reinterpret_cast(input->quantization.params); - const auto* output_params = reinterpret_cast( - output->quantization.params); - TF_LITE_ENSURE(context, input_params != nullptr); - TF_LITE_ENSURE(context, input_params->scale != nullptr); - TF_LITE_ENSURE(context, input_params->scale->size > 0); - TF_LITE_ENSURE(context, input_params->zero_point->size > 0); - TF_LITE_ENSURE(context, output_params != nullptr); - TF_LITE_ENSURE(context, output_params->scale != nullptr); - TF_LITE_ENSURE(context, output_params->scale->size > 0); - TF_LITE_ENSURE(context, output_params->zero_point->size > 0); - op_data->input_offset = input_params->zero_point->data[0]; - op_data->output_offset = output_params->zero_point->data[0]; - if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, op_data->input_offset, 0); - TF_LITE_ENSURE_EQ(context, op_data->output_offset, 0); - } - const float input_scale = input_params->scale->data[0]; - const float output_scale = output_params->scale->data[0]; - op_data->needs_rescale = input_scale != output_scale; - if (op_nameid == kAbsNameId && op_data->needs_rescale) { - SetAbsOutputMultiplier(input_scale, output_scale, &op_data->multiplier, - &op_data->shift); - } else if (op_nameid == kRsrqtNameId) { - SetRsqrtOutputMultiplier(input_scale, output_scale, &op_data->multiplier, - &op_data->shift); - } - } - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -template -inline TfLiteStatus EvalImplQuantized( - TfLiteContext* context, TfLiteNode* node, - T func(TfLiteContext*, TfLiteNode*, T), - TfLiteStatus validate_input_func(TfLiteContext*, TfLiteNode*, T), - TfLiteType expected_type) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); - const size_t num_elements = ElementCount(*input->dims); - const T* in_data = tflite::micro::GetTensorData(input); - T* out_data = tflite::micro::GetTensorData(output); - for (size_t i = 0; i < num_elements; ++i) { - if (validate_input_func) { - TF_LITE_ENSURE_OK(context, - validate_input_func(context, node, in_data[i])); - } - out_data[i] = func(context, node, in_data[i]); - } - return kTfLiteOk; -} - -template -inline T AbsHelper(T i) { - return std::abs(i); -} - -template -inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, - T func(T), TfLiteStatus validate_input_func(T), - TfLiteType expected_type) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); - const size_t num_elements = ElementCount(*input->dims); - const T* in_data = tflite::micro::GetTensorData(input); - T* out_data = tflite::micro::GetTensorData(output); - for (size_t i = 0; i < num_elements; ++i) { - if (validate_input_func) { - TF_LITE_ENSURE_OK(context, validate_input_func(in_data[i])); - } - out_data[i] = 
func(in_data[i]); - } - return kTfLiteOk; -} - -inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, - float float_func(float)) { - return EvalImpl(context, node, float_func, - /*validate_input_func=*/nullptr, kTfLiteFloat32); -} - -inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, - - bool bool_func(bool)) { - return EvalImpl(context, node, bool_func, - /*validate_input_func=*/nullptr, kTfLiteBool); -} - -void* ElementWiseAbsRsqrtInit(TfLiteContext* context, const char* buffer, - size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpDataAbsRsqrt)); -} - -template -inline T AbsEvalQuantized(TfLiteContext* context, TfLiteNode* node, T i) { - const auto* op_data = static_cast(node->user_data); - const int kMin = std::numeric_limits::min(); - const int kMax = std::numeric_limits::max(); - - const int32_t value = std::abs(i - op_data->input_offset); - if (!op_data->needs_rescale) { - return static_cast( - std::min(std::max(static_cast(value + op_data->output_offset), - static_cast(kMin)), - static_cast(kMax))); - } - - const int32_t output = tflite::MultiplyByQuantizedMultiplier( - value, op_data->multiplier, op_data->shift) + - op_data->output_offset; - return static_cast(std::min( - std::max(static_cast(output), static_cast(kMin)), - static_cast(kMax))); -} - -template -inline T RsqrtEvalQuantized(TfLiteContext* context, TfLiteNode* node, T i) { - const auto* op_data = static_cast(node->user_data); - const int kMin = std::numeric_limits::min(); - const int kMax = std::numeric_limits::max(); - - const int32_t value = (i - op_data->input_offset); - const int32_t kShift = 20; // Shift to keep value integer. - if (value == 0) { - // Assume that any value close to 0 represents the max output value. - return static_cast(kMax); - } - int32_t inv_sqrt_multiplier; - int inv_sqrt_shift; - GetInvSqrtQuantizedMultiplierExp(value, kReverseShift, &inv_sqrt_multiplier, - &inv_sqrt_shift); - const int32_t data = tflite::MultiplyByQuantizedMultiplier( - static_cast(1), inv_sqrt_multiplier, inv_sqrt_shift + kShift); - const int32_t output = - tflite::MultiplyByQuantizedMultiplier(data, op_data->multiplier, - op_data->shift - kShift) + - op_data->output_offset; - return static_cast(std::min( - std::max(static_cast(output), static_cast(kMin)), - static_cast(kMax))); -} - -template -TfLiteStatus validate_input_func(TfLiteContext* context, TfLiteNode* node, - T i) { - const auto* op_data = static_cast(node->user_data); - - TF_LITE_ENSURE_MSG(context, i >= op_data->input_offset, - "Rsqrt is only defined for positive values"); - return static_cast(kTfLiteOk); -} - -TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { - OpDataAbsRsqrt* op_data = reinterpret_cast(node->user_data); - TfLiteType type = op_data->input_type; - TfLiteQuantizationType input_quantization_type = - op_data->input_quantization_type; - TfLiteStatus eval_result; - - switch (type) { - case kTfLiteFloat32: - eval_result = EvalNumeric(context, node, std::abs); - break; - case kTfLiteInt8: - eval_result = - EvalImplQuantized(context, node, AbsEvalQuantized, - /*validate_input_func=*/nullptr, type); - break; - case kTfLiteInt16: - eval_result = - input_quantization_type == kTfLiteNoQuantization - ? 
EvalImpl(context, node, AbsHelper, - /*validate_input_func=*/nullptr, type) - : EvalImplQuantized(context, node, AbsEvalQuantized, - /*validate_input_func=*/nullptr, - type); - break; - default: - MicroPrintf("Current data type %s is not supported.", - TfLiteTypeGetName(type)); - return kTfLiteError; - break; - } - return eval_result; -} - -TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::sin); -} - -TfLiteStatus CosEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::cos); -} - -TfLiteStatus LogEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::log); -} - -TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::sqrt); -} - -TfLiteStatus RsqrtEval(TfLiteContext* context, TfLiteNode* node) { - const auto* op_data = static_cast(node->user_data); - TfLiteType type = op_data->input_type; - switch (type) { - case kTfLiteFloat32: - return EvalImpl( - context, node, [](float f) { return 1.f / std::sqrt(f); }, - /*validate_input_func=*/nullptr, type); - case kTfLiteInt8: - return EvalImplQuantized(context, node, - elementwise::RsqrtEvalQuantized, - elementwise::validate_input_func, type); - - default: - MicroPrintf("Current data type %s is not supported.", - TfLiteTypeGetName(type)); - return kTfLiteError; - } -} - -TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, [](float f) { return f * f; }); -} - -TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { - return EvalLogical(context, node, [](bool v) { return !v; }); -} - -} // namespace -} // namespace elementwise - -TfLiteRegistration Register_ABS() { - return tflite::micro::RegisterOp( - elementwise::ElementWiseAbsRsqrtInit, - elementwise::PrepareAbsRsqrt, - elementwise::AbsEval); -} - -TfLiteRegistration Register_SIN() { - return tflite::micro::RegisterOp( - nullptr, elementwise::GenericPrepare, - elementwise::SinEval); -} - -TfLiteRegistration Register_COS() { - return tflite::micro::RegisterOp( - nullptr, elementwise::GenericPrepare, - elementwise::CosEval); -} - -TfLiteRegistration Register_LOG() { - return tflite::micro::RegisterOp( - nullptr, elementwise::GenericPrepare, - elementwise::LogEval); -} - -TfLiteRegistration Register_SQRT() { - return tflite::micro::RegisterOp( - nullptr, elementwise::GenericPrepare, - elementwise::SqrtEval); -} - -TfLiteRegistration Register_RSQRT() { - return tflite::micro::RegisterOp( - elementwise::ElementWiseAbsRsqrtInit, - elementwise::PrepareAbsRsqrt, - elementwise::RsqrtEval); -} - -TfLiteRegistration Register_SQUARE() { - return tflite::micro::RegisterOp( - nullptr, elementwise::GenericPrepare, - elementwise::SquareEval); -} - -TfLiteRegistration Register_LOGICAL_NOT() { - return tflite::micro::RegisterOp( - nullptr, elementwise::GenericPrepare, - elementwise::LogicalNotEval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/elu.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/elu.cc deleted file mode 100644 index 7d1169d1..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/elu.cc +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
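All of the elementwise kernels deleted above reduce to one pattern: EvalImpl()/EvalNumeric() apply a scalar function pointer across the flat tensor buffer, and each Register_*() merely picks the scalar function. A standalone sketch of that dispatch follows; ApplyElementwise is an illustrative stand-in, not the TFLite helper.

#include <cmath>
#include <cstdio>

// Illustrative sketch of the EvalNumeric/EvalImpl pattern: every float
// elementwise op (SIN, COS, SQRT, SQUARE, ...) is one scalar function
// applied across the flat buffer.
template <typename T>
void ApplyElementwise(const T* in, T* out, int n, T (*func)(T)) {
  for (int i = 0; i < n; ++i) out[i] = func(in[i]);
}

int main() {
  const float in[4] = {0.f, 1.f, 4.f, 9.f};
  float out[4];
  // A captureless lambda converts to the required function pointer.
  ApplyElementwise<float>(in, out, 4, [](float f) { return std::sqrt(f); });
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 0 1 2 3
  return 0;
}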
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/elu.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/elu.cc
deleted file mode 100644
index 7d1169d1..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/elu.cc
+++ /dev/null
@@ -1,150 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/elu.h"
-
-#include <algorithm>
-#include <limits>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/cppmath.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-// Input/output tensor index.
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-// OLD-TODO(b/142762739): We should figure out a multi-threading plan for most
-// of the activation ops below.
-
-struct OpData {
-  int8_t table[256];
-};
-
-using TransformFunc = float (*)(float);
-
-template <typename T>
-void PopulateLookupTable(const TfLiteTensor* input, const TfLiteTensor* output,
-                         const TransformFunc transform, OpData* data) {
-  if (sizeof(T) != 1) {
-    MicroPrintf("Lookup table valid only for 8bit");
-    TFLITE_ABORT;
-  }
-
-  const float inverse_scale = 1 / output->params.scale;
-  int32_t maxval = std::numeric_limits<T>::max();
-  int32_t minval = std::numeric_limits<T>::min();
-  for (int32_t val = minval; val <= maxval; ++val) {
-    const float dequantized =
-        input->params.scale * (val - input->params.zero_point);
-    const float transformed = transform(dequantized);
-    const float rescaled = TfLiteRound(transformed * inverse_scale);
-    const int32_t quantized =
-        static_cast<int32_t>(rescaled + output->params.zero_point);
-    data->table[static_cast<uint8_t>(static_cast<T>(val))] =
-        static_cast<T>(std::max(std::min(maxval, quantized), minval));
-  }
-}
-
-// OLD-TODO(b/143696793): move this to optimized_ops.
-void EvalUsingLookupTable(const OpData* data, const TfLiteEvalTensor* input,
-                          TfLiteEvalTensor* output) {
-  const int size = MatchingFlatSize(tflite::micro::GetTensorShape(input),
-                                    tflite::micro::GetTensorShape(output));
-  int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
-  const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
-
-  for (int i = 0; i < size; ++i) {
-    output_data[i] = data->table[static_cast<uint8_t>(input_data[i])];
-  }
-}
-
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  // Use LUT to handle quantized elu path.
-  if (input->type == kTfLiteInt8) {
-    OpData* data = static_cast<OpData*>(node->user_data);
-    TransformFunc transform = [](float value) {
-      return value < 0.0f ? std::exp(value) - 1.0f : value;
-    };
-    PopulateLookupTable<int8_t>(input, output, transform, data);
-  }
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-void* EluInit(TfLiteContext* context, const char* buffer, size_t length) {
-  // This is a builtin op, so we don't use the contents in 'buffer', if any.
-  // Instead, we allocate a new object to carry information from Prepare() to
-  // Eval().
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpData));
-}
-
-TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
-  return CalculateOpData(context, node);
-}
-
-TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      reference_ops::Elu(tflite::micro::GetTensorShape(input),
-                         tflite::micro::GetTensorData<float>(input),
-                         tflite::micro::GetTensorShape(output),
-                         tflite::micro::GetTensorData<float>(output));
-      return kTfLiteOk;
-    }
-    case kTfLiteInt8: {
-      const OpData* data = static_cast<const OpData*>(node->user_data);
-      EvalUsingLookupTable(data, input, output);
-      return kTfLiteOk;
-    }
-    default:
-      MicroPrintf("ELU only supports float32 and int8 currently, got %s.",
                  TfLiteTypeGetName(input->type));
-      return kTfLiteError;
-  }
-}
-
-}  // namespace
-
-TfLiteRegistration Register_ELU() {
-  return tflite::micro::RegisterOp(EluInit, EluPrepare, EluEval);
-}
-
-}  // namespace tflite
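The deleted ELU kernel relies on the standard 8-bit LUT trick: an int8 tensor can only take 256 distinct values, so Prepare() evaluates the activation once per possible value and Eval() degenerates to one table lookup per element. A self-contained sketch of the same idea (illustrative only; the quantization parameters are made-up):

// Illustrative standalone version of the ELU lookup-table construction.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int8_t table[256];

void BuildEluTable(float in_scale, int in_zero, float out_scale,
                   int out_zero) {
  for (int32_t val = -128; val <= 127; ++val) {
    const float x = in_scale * (val - in_zero);         // dequantize
    const float y = x < 0.0f ? std::exp(x) - 1.0f : x;  // ELU
    const int32_t q =
        static_cast<int32_t>(std::lround(y / out_scale)) + out_zero;
    table[static_cast<uint8_t>(static_cast<int8_t>(val))] =
        static_cast<int8_t>(std::min(127, std::max(-128, q)));  // clamp
  }
}

int main() {
  BuildEluTable(0.05f, 0, 0.05f, 0);
  int8_t x = -40;  // represents -2.0; ELU(-2.0) ~= -0.865 -> ~ -17
  std::printf("%d\n", table[static_cast<uint8_t>(x)]);
}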
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/conv.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/conv.cc
deleted file mode 100644
index b442e8ed..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/conv.cc
+++ /dev/null
@@ -1,343 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/kernels/conv.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/conv.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/padding.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-#include <esp_timer.h>
-
-#if ESP_NN
-#include <esp_nn.h>
-#endif
-
-long long conv_total_time = 0;
-
-namespace tflite {
-namespace {
-
-struct NodeData {
-  OpDataConv op_data;
-#if ESP_NN
-  int buffer_idx;
-#endif
-};
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(NodeData));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-
-  NodeData* data = static_cast<NodeData*>(node->user_data);
-  const auto& params =
-      *(static_cast<const TfLiteConvParams*>(node->builtin_data));
-
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kConvInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* filter =
-      micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
-  TF_LITE_ENSURE(context, filter != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  const int input_width = input->dims->data[2];
-  const int input_height = input->dims->data[1];
-  const int filter_width = filter->dims->data[2];
-  const int filter_height = filter->dims->data[1];
-  const int output_width = output->dims->data[2];
-  const int output_height = output->dims->data[1];
-
-  // Dynamically allocate per-channel quantization parameters.
-  const int num_channels = filter->dims->data[kConvQuantizedDimension];
-  data->op_data.per_channel_output_multiplier =
-      static_cast<int32_t*>(context->AllocatePersistentBuffer(
-          context, num_channels * sizeof(int32_t)));
-  data->op_data.per_channel_output_shift =
-      static_cast<int32_t*>(context->AllocatePersistentBuffer(
-          context, num_channels * sizeof(int32_t)));
-
-  // All per-channel quantized tensors need valid zero point and scale arrays.
-  if (input->type == kTfLiteInt8) {
-    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
-                      kTfLiteAffineQuantization);
-
-    const auto* affine_quantization =
-        static_cast<TfLiteAffineQuantization*>(filter->quantization.params);
-    TFLITE_DCHECK(affine_quantization != nullptr);
-    TFLITE_DCHECK(affine_quantization->scale != nullptr);
-    TFLITE_DCHECK(affine_quantization->zero_point != nullptr);
-
-    TF_LITE_ENSURE(context,
-                   affine_quantization->scale->size == 1 ||
-                       affine_quantization->scale->size ==
-                           filter->dims->data[kConvQuantizedDimension]);
-    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
-                      affine_quantization->zero_point->size);
-  }
-
-  TF_LITE_ENSURE_STATUS(CalculateOpDataConv(
-      context, node, params, input_width, input_height, filter_width,
-      filter_height, output_width, output_height, input->type,
-      &data->op_data));
-
-#if ESP_NN
-  if (input->type == kTfLiteInt8) {
-    data_dims_t input_dims = {
-        .width = input_width, .height = input_height,
-        .channels = input->dims->data[3], 1
-    };
-    data_dims_t output_dims = {
-        .width = output_width, .height = output_height,
-        .channels = output->dims->data[3], 1
-    };
-    data_dims_t filter_dims = {.width = filter_width, .height = filter_height,
-                               0, 0};
-    conv_params_t conv_params = {
-        .in_offset = 0, .out_offset = 0,
-        .stride = {params.stride_width, params.stride_height},
-        .padding = {data->op_data.padding.width, data->op_data.padding.height},
-        .dilation = {0, 0}, .activation = {-128, 127}
-    };
-
-    int scratch_buf_size = esp_nn_get_conv_scratch_size(
-        &input_dims, &filter_dims, &output_dims, &conv_params);
-    if (scratch_buf_size > 0) {
-      TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
-          context, scratch_buf_size, &data->buffer_idx));
-    } else {
-      data->buffer_idx = -1;
-    }
-  }
-#endif
-
-  micro_context->DeallocateTempTfLiteTensor(output);
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(filter);
-
-  return kTfLiteOk;
-}
-
-#if ESP_NN
-// Fixed-point per-channel-quantization convolution Int8 function wrapper.
-inline void EvalQuantizedPerChannel(
-    TfLiteContext* context, TfLiteNode* node, const TfLiteConvParams& params,
-    const NodeData& data, const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias,
-    TfLiteEvalTensor* output) {
-  const int dilation_width_factor = params.dilation_width_factor;
-  const int dilation_height_factor = params.dilation_height_factor;
-
-  if (dilation_width_factor == 1 && dilation_height_factor == 1) {
-    // Get parameters.
-    RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter);
-    RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
-    RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
-    RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);
-
-    const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
-    int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
-
-    const int32_t input_offset = -data.op_data.input_zero_point;
-    const int32_t output_offset = data.op_data.output_zero_point;
-    const int stride_width = params.stride_width;
-    const int stride_height = params.stride_height;
-    const int pad_width = data.op_data.padding.width;
-    const int pad_height = data.op_data.padding.height;
-
-    const int input_height = input_shape.Dims(1);
-    const int input_width = input_shape.Dims(2);
-    const int filter_height = filter_shape.Dims(1);
-    const int filter_width = filter_shape.Dims(2);
-    const int output_height = output_shape.Dims(1);
-    const int output_width = output_shape.Dims(2);
-
-    // Set min and max value of the output.
-    const int32_t activation_min = data.op_data.output_activation_min;
-    const int32_t activation_max = data.op_data.output_activation_max;
-
-    // Consistency check.
-    TFLITE_DCHECK_LE(activation_min, activation_max);
-    TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
-    TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
-    TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
-    const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
-    const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
-    const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
-
-    if (tflite::micro::GetTensorData<int32_t>(bias)) {
-      TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
-    }
-
-    void* scratch_buf = NULL;
-    if (data.buffer_idx > -1) {
-      scratch_buf = context->GetScratchBuffer(context, data.buffer_idx);
-    }
-    esp_nn_set_conv_scratch_buf(scratch_buf);
-
-    const int input_size = input_width * input_height * input_depth;
-    const int output_size = output_width * output_height * output_depth;
-
-    data_dims_t input_dims = {
-        .width = input_width, .height = input_height,
-        .channels = input_depth, 1
-    };
-    data_dims_t output_dims = {
-        .width = output_width, .height = output_height,
-        .channels = output_depth, 1
-    };
-    data_dims_t filter_dims = {.width = filter_width, .height = filter_height,
-                               0, 0};
-    conv_params_t conv_params = {
-        .in_offset = input_offset, .out_offset = output_offset,
-        .stride = {stride_width, stride_height},
-        .padding = {pad_width, pad_height},
-        .dilation = {0, 0},
-        .activation = {activation_min, activation_max}
-    };
-    quant_data_t quant_data = {
-        .shift = data.op_data.per_channel_output_shift,
-        .mult = data.op_data.per_channel_output_multiplier
-    };
-
-    for (int i_batch = 0; i_batch < batch_size; i_batch++) {
-      esp_nn_conv_s8(&input_dims, input_data + i_batch * input_size,
-                     &filter_dims,
-                     tflite::micro::GetTensorData<int8_t>(filter),
-                     tflite::micro::GetTensorData<int32_t>(bias),
-                     &output_dims, output_data + i_batch * output_size,
-                     &conv_params, &quant_data);
-    }
-  } else {
-    reference_integer_ops::ConvPerChannel(
-        ConvParamsQuantized(params, data.op_data),
-        data.op_data.per_channel_output_multiplier,
-        data.op_data.per_channel_output_shift,
-        tflite::micro::GetTensorShape(input),
-        tflite::micro::GetTensorData<int8_t>(input),
-        tflite::micro::GetTensorShape(filter),
-        tflite::micro::GetTensorData<int8_t>(filter),
-        tflite::micro::GetTensorShape(bias),
-        tflite::micro::GetTensorData<int32_t>(bias),
-        tflite::micro::GetTensorShape(output),
-        tflite::micro::GetTensorData<int8_t>(output));
-  }
-}
-#endif
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kConvInputTensor);
-  const TfLiteEvalTensor* filter =
-      tflite::micro::GetEvalInput(context, node, kConvWeightsTensor);
-  const TfLiteEvalTensor* bias =
-      (NumInputs(node) == 3)
-          ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor)
-          : nullptr;
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kConvOutputTensor);
-
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-  const auto& params =
-      *(reinterpret_cast<TfLiteConvParams*>(node->builtin_data));
-  TFLITE_DCHECK(node->user_data != nullptr);
-  const auto& data = *(static_cast<const NodeData*>(node->user_data));
-
-  TF_LITE_ENSURE_EQ(context, input->type, output->type);
-  TF_LITE_ENSURE_MSG(context, input->type == filter->type,
-                     "Hybrid models are not supported on TFLite Micro.");
-
-  long long start_time = esp_timer_get_time();
-  switch (input->type) {  // Already know in/out types are same.
-    case kTfLiteFloat32: {
-      tflite::reference_ops::Conv(
-          ConvParamsFloat(params, data.op_data),
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<float>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<float>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<float>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<float>(output),
-          tflite::micro::GetTensorShape(nullptr), nullptr);
-      break;
-    }
-    case kTfLiteInt8: {
-#if ESP_NN
-      EvalQuantizedPerChannel(context, node, params, data, input, filter,
-                              bias, output);
-#else
-      reference_integer_ops::ConvPerChannel(
-          ConvParamsQuantized(params, data.op_data),
-          data.op_data.per_channel_output_multiplier,
-          data.op_data.per_channel_output_shift,
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<int8_t>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<int32_t>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-#endif
-      break;
-    }
-    case kTfLiteUInt8: {
-      // EvalQuantized
-      reference_ops::Conv(ConvParamsQuantized(params, data.op_data),
-                          tflite::micro::GetTensorShape(input),
-                          tflite::micro::GetTensorData<uint8_t>(input),
-                          tflite::micro::GetTensorShape(filter),
-                          tflite::micro::GetTensorData<uint8_t>(filter),
-                          tflite::micro::GetTensorShape(bias),
-                          tflite::micro::GetTensorData<int32_t>(bias),
-                          tflite::micro::GetTensorShape(output),
-                          tflite::micro::GetTensorData<uint8_t>(output),
-                          tflite::micro::GetTensorShape(nullptr), nullptr,
-                          nullptr);
-      break;
-    }
-    default:
-      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
-                         TfLiteTypeGetName(input->type), input->type);
-      return kTfLiteError;
-  }
-  long long time_this_instance = esp_timer_get_time() - start_time;
-  conv_total_time += time_this_instance;
-  // printf("time this instance: %llu\n", time_this_instance / 1000);
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
-}
-
-}  // namespace tflite
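The per_channel_output_multiplier/shift arrays that Prepare() fills above encode one real-valued rescale factor (input_scale * filter_scale[c] / output_scale) per output channel as a Q31 multiplier plus a power-of-two shift. The sketch below is a simplified stand-in for TFLite's QuantizeMultiplier(), shown for a single channel with made-up scales (illustrative only, not part of the deleted file):

// Illustrative: represent a real rescale factor as (Q31 multiplier, shift).
#include <cmath>
#include <cstdint>
#include <cstdio>

void QuantizeMultiplierSketch(double real, int32_t* mult, int* shift) {
  if (real == 0.0) { *mult = 0; *shift = 0; return; }
  const double q = std::frexp(real, shift);  // real = q * 2^shift, q in [0.5,1)
  int64_t m = static_cast<int64_t>(std::lround(q * (1LL << 31)));
  if (m == (1LL << 31)) {  // rounding can push q up to exactly 1.0
    m /= 2;
    ++*shift;
  }
  *mult = static_cast<int32_t>(m);
}

int main() {
  // effective scale = input_scale * filter_scale[c] / output_scale
  const double scale = 0.02 * 0.005 / 0.1;
  int32_t mult; int shift;
  QuantizeMultiplierSketch(scale, &mult, &shift);
  std::printf("mult=%ld shift=%d\n", static_cast<long>(mult), shift);
}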
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/depthwise_conv.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/depthwise_conv.cc
deleted file mode 100644
index 41a2bff7..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/depthwise_conv.cc
+++ /dev/null
@@ -1,345 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/kernels/depthwise_conv.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
-#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/padding.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-#include <esp_timer.h>
-
-#if ESP_NN
-#include <esp_nn.h>
-#endif
-
-long long dc_total_time = 0;
-
-namespace tflite {
-namespace {
-
-struct NodeData {
-  OpDataConv op_data;
-#if ESP_NN
-  int buffer_idx;
-#endif
-};
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(NodeData));
-}
-
-#if ESP_NN
-inline void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
-                                    const TfLiteDepthwiseConvParams& params,
-                                    const NodeData& data,
-                                    const TfLiteEvalTensor* input,
-                                    const TfLiteEvalTensor* filter,
-                                    const TfLiteEvalTensor* bias,
-                                    TfLiteEvalTensor* output) {
-  const int dilation_width_factor = params.dilation_width_factor;
-  const int dilation_height_factor = params.dilation_height_factor;
-
-  if (dilation_width_factor == 1 && dilation_height_factor == 1) {
-    // Get parameters.
-    RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
-    RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter);
-    RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
-    RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias);
-
-    TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
-    TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
-    TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
-
-    const int8_t* input_data = tflite::micro::GetTensorData<int8_t>(input);
-    int8_t* output_data = tflite::micro::GetTensorData<int8_t>(output);
-
-    const int depth_multiplier = params.depth_multiplier;
-    const int32_t input_offset = -data.op_data.input_zero_point;
-    const int32_t output_offset = data.op_data.output_zero_point;
-    const int stride_width = params.stride_width;
-    const int stride_height = params.stride_height;
-    const int pad_width = data.op_data.padding.width;
-    const int pad_height = data.op_data.padding.height;
-
-    const int input_height = input_shape.Dims(1);
-    const int input_width = input_shape.Dims(2);
-    const int input_depth = input_shape.Dims(3);
-    const int filter_height = filter_shape.Dims(1);
-    const int filter_width = filter_shape.Dims(2);
-    const int output_height = output_shape.Dims(1);
-    const int output_width = output_shape.Dims(2);
-
-    // Set min and max value of the output.
-    const int32_t activation_min = data.op_data.output_activation_min;
-    const int32_t activation_max = data.op_data.output_activation_max;
-
-    // Consistency check.
-    TFLITE_DCHECK_LE(activation_min, activation_max);
-    const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
-    const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
-
-    TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
-    if (tflite::micro::GetTensorData<int32_t>(bias)) {
-      TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
-    }
-
-    const int input_size = input_width * input_height * input_depth;
-    const int output_size = output_width * output_height * output_depth;
-    void* scratch_buf = NULL;
-    if (data.buffer_idx > -1) {
-      scratch_buf = context->GetScratchBuffer(context, data.buffer_idx);
-    }
-
-    esp_nn_set_depthwise_conv_scratch_buf(scratch_buf);
-
-    data_dims_t input_dims = {
-        .width = input_width, .height = input_height,
-        .channels = input_depth, 1
-    };
-    data_dims_t output_dims = {
-        .width = output_width, .height = output_height,
-        .channels = output_depth, 1
-    };
-    data_dims_t filter_dims = {.width = filter_width, .height = filter_height,
-                               0, 0};
-    dw_conv_params_t conv_params = {
-        .in_offset = input_offset, .out_offset = output_offset,
-        .ch_mult = depth_multiplier,
-        .stride = {stride_width, stride_height},
-        .padding = {pad_width, pad_height}, .dilation = {0, 0},
-        .activation = {activation_min, activation_max}
-    };
-    quant_data_t quant_data = {
-        .shift = data.op_data.per_channel_output_shift,
-        .mult = data.op_data.per_channel_output_multiplier
-    };
-
-    for (int i_batch = 0; i_batch < batch_size; i_batch++) {
-      esp_nn_depthwise_conv_s8(&input_dims, input_data + i_batch * input_size,
                               &filter_dims,
-                               tflite::micro::GetTensorData<int8_t>(filter),
-                               tflite::micro::GetTensorData<int32_t>(bias),
-                               &output_dims,
-                               output_data + i_batch * output_size,
-                               &conv_params, &quant_data);
-    }
-  } else {
-    reference_integer_ops::DepthwiseConvPerChannel(
-        DepthwiseConvParamsQuantized(params, data.op_data),
-        data.op_data.per_channel_output_multiplier,
-        data.op_data.per_channel_output_shift,
-        tflite::micro::GetTensorShape(input),
-        tflite::micro::GetTensorData<int8_t>(input),
-        tflite::micro::GetTensorShape(filter),
-        tflite::micro::GetTensorData<int8_t>(filter),
-        tflite::micro::GetTensorShape(bias),
-        tflite::micro::GetTensorData<int32_t>(bias),
-        tflite::micro::GetTensorShape(output),
-        tflite::micro::GetTensorData<int8_t>(output));
-  }
-}
-#endif
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-
-  NodeData* data = static_cast<NodeData*>(node->user_data);
-  const TfLiteDepthwiseConvParams& params =
-      *(static_cast<const TfLiteDepthwiseConvParams*>(node->builtin_data));
-
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kConvInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* filter =
-      micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
-  TF_LITE_ENSURE(context, filter != nullptr);
-  TfLiteTensor* bias =
-      micro_context->AllocateTempInputTensor(node, kConvBiasTensor);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  const int input_width = input->dims->data[2];
-  const int input_height = input->dims->data[1];
-  const int filter_width = filter->dims->data[2];
-  const int filter_height = filter->dims->data[1];
-  const int output_width = output->dims->data[2];
-  const int output_height = output->dims->data[1];
-
-  // Dynamically allocate per-channel quantization parameters.
-  const int num_channels =
-      filter->dims->data[kDepthwiseConvQuantizedDimension];
-  data->op_data.per_channel_output_multiplier =
-      static_cast<int32_t*>(context->AllocatePersistentBuffer(
-          context, num_channels * sizeof(int32_t)));
-  data->op_data.per_channel_output_shift =
-      static_cast<int32_t*>(context->AllocatePersistentBuffer(
-          context, num_channels * sizeof(int32_t)));
-
-  // All per-channel quantized tensors need valid zero point and scale arrays.
-  if (input->type == kTfLiteInt8) {
-    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
-                      kTfLiteAffineQuantization);
-
-    const auto* affine_quantization =
-        static_cast<TfLiteAffineQuantization*>(filter->quantization.params);
-    TFLITE_DCHECK(affine_quantization != nullptr);
-    TFLITE_DCHECK(affine_quantization->scale != nullptr);
-    TFLITE_DCHECK(affine_quantization->zero_point != nullptr);
-
-    TF_LITE_ENSURE(
-        context, affine_quantization->scale->size == 1 ||
-                     affine_quantization->scale->size ==
-                         filter->dims->data[kDepthwiseConvQuantizedDimension]);
-
-    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
-                      affine_quantization->zero_point->size);
-  }
-
-  TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv(
-      context, node, params, input_width, input_height, filter_width,
-      filter_height, output_width, output_height, input->type,
-      &data->op_data));
-
-#if ESP_NN
-  if (input->type == kTfLiteInt8) {
-    data_dims_t input_dims = {
-        .width = input_width, .height = input_height,
-        .channels = input->dims->data[3], 1
-    };
-    data_dims_t output_dims = {
-        .width = output_width, .height = output_height,
-        .channels = output->dims->data[3], 1
-    };
-    data_dims_t filter_dims = {.width = filter_width, .height = filter_height,
-                               0, 0};
-    dw_conv_params_t conv_params = {
-        .in_offset = 0, .out_offset = 0,
-        .ch_mult = params.depth_multiplier,
-        .stride = {params.stride_width, params.stride_height},
-        .padding = {data->op_data.padding.width, data->op_data.padding.height},
-        .dilation = {0, 0}, .activation = {-128, 127}
-    };
-
-    int scratch_buf_size = esp_nn_get_depthwise_conv_scratch_size(
-        &input_dims, &filter_dims, &output_dims, &conv_params);
-    if (scratch_buf_size > 0) {
-      TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
-          context, scratch_buf_size, &data->buffer_idx));
-    } else {
-      data->buffer_idx = -1;
-    }
-  }
-#endif
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(filter);
-  micro_context->DeallocateTempTfLiteTensor(bias);
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-
-  auto& params =
-      *(reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data));
-  const NodeData& data = *(static_cast<const NodeData*>(node->user_data));
-
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor);
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor);
-  const TfLiteEvalTensor* filter =
-      tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor);
-  const TfLiteEvalTensor* bias =
-      (NumInputs(node) == 3)
-          ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor)
-          : nullptr;
-
-  long long start_time = esp_timer_get_time();
-  switch (input->type) {  // Already know in/out types are same.
-    case kTfLiteFloat32:
-      tflite::reference_ops::DepthwiseConv(
-          DepthwiseConvParamsFloat(params, data.op_data),
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<float>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<float>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<float>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<float>(output));
-      break;
-    case kTfLiteInt8:
-#if ESP_NN
-      EvalQuantizedPerChannel(context, node, params, data, input, filter,
-                              bias, output);
-#else
-      reference_integer_ops::DepthwiseConvPerChannel(
-          DepthwiseConvParamsQuantized(params, data.op_data),
-          data.op_data.per_channel_output_multiplier,
-          data.op_data.per_channel_output_shift,
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<int8_t>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<int32_t>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-#endif
-      break;
-    case kTfLiteUInt8:
-      // EvalQuantized(context, node, params, &data, input, filter, bias,
-      // output);
-      reference_ops::DepthwiseConv(
-          DepthwiseConvParamsQuantized(params, data.op_data),
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<uint8_t>(input),
-          tflite::micro::GetTensorShape(filter),
-          tflite::micro::GetTensorData<uint8_t>(filter),
-          tflite::micro::GetTensorShape(bias),
-          tflite::micro::GetTensorData<int32_t>(bias),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<uint8_t>(output));
-      break;
-    default:
-      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
-                         TfLiteTypeGetName(input->type), input->type);
-      return kTfLiteError;
-  }
-  long long time_this_instance = esp_timer_get_time() - start_time;
-  dc_total_time += time_this_instance;
-  // printf("time this instance: %llu\n", time_this_instance / 1000);
-
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_DEPTHWISE_CONV_2D() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
-}
-
-}  // namespace tflite
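For reference, the shape contract that the TFLITE_DCHECKs in the deleted depthwise kernel enforce: the filter is laid out [1, H, W, output_depth], and output_depth must equal input_depth * depth_multiplier. A trivial sketch with made-up numbers (not part of the deleted file):

// Illustrative: the depthwise-conv shape relationship checked above.
#include <cassert>
#include <cstdio>

int main() {
  const int input_depth = 8;
  const int depth_multiplier = 2;  // params.depth_multiplier
  const int filter_dims[4] = {1, 3, 3, input_depth * depth_multiplier};
  const int output_depth = filter_dims[3];
  assert(output_depth == input_depth * depth_multiplier);
  std::printf("each input channel yields %d output channel(s); "
              "output depth = %d\n", depth_multiplier, output_depth);
}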
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/softmax.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/softmax.cc
deleted file mode 100644
index df0da908..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/esp_nn/softmax.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/kernels/softmax.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/softmax.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-#include <esp_timer.h>
-
-#if ESP_NN
-#include <esp_nn.h>
-#endif
-
-long long softmax_total_time = 0;
-
-namespace tflite {
-namespace {
-// Softmax parameter data that persists in user_data
-const int kInt16LUTArraySize = 513;
-
-struct NodeData {
-  SoftmaxParams op_data;
-#if ESP_NN
-  int buffer_idx;
-#endif
-};
-
-static void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(NodeData));
-}
-
-void SoftmaxQuantized(TfLiteContext* context, const TfLiteEvalTensor* input,
-                      TfLiteEvalTensor* output, const NodeData* data) {
-  if (input->type == kTfLiteInt8) {
-    if (output->type == kTfLiteInt16) {
-      tflite::reference_ops::Softmax(
-          data->op_data, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int16_t>(output));
-    } else {
-#if ESP_NN
-      const int32_t input_beta_multiplier = data->op_data.input_multiplier;
-      const int32_t input_beta_left_shift = data->op_data.input_left_shift;
-      const int diff_min = data->op_data.diff_min;
-      const RuntimeShape input_shape = tflite::micro::GetTensorShape(input);
-      const RuntimeShape output_shape = tflite::micro::GetTensorShape(output);
-      const int trailing_dim = input_shape.DimensionsCount() - 1;
-      const int outer_size =
-          MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
-      const int depth =
-          MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
-      const int8_t* in_ptr = tflite::micro::GetTensorData<int8_t>(input);
-      int8_t* out_ptr = tflite::micro::GetTensorData<int8_t>(output);
-      void* scratch_buf = NULL;
-      if (data->buffer_idx > -1) {
-        scratch_buf = context->GetScratchBuffer(context, data->buffer_idx);
-      }
-      esp_nn_set_softmax_scratch_buf(scratch_buf);
-      esp_nn_softmax_s8(in_ptr, outer_size, depth, input_beta_multiplier,
-                        input_beta_left_shift, diff_min, out_ptr);
-#else
-      tflite::reference_ops::Softmax(
-          data->op_data, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-#endif
-    }
-  } else {
-    tflite::reference_ops::SoftmaxInt16(
-        data->op_data, tflite::micro::GetTensorShape(input),
-        tflite::micro::GetTensorData<int16_t>(input),
-        tflite::micro::GetTensorShape(output),
-        tflite::micro::GetTensorData<int16_t>(output));
-  }
-}
-
-static TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
-  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
-
-  TFLITE_DCHECK(node->user_data != nullptr);
-  NodeData data = *static_cast<NodeData*>(node->user_data);
-
-  long long start_time = esp_timer_get_time();
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      tflite::reference_ops::Softmax(
-          data.op_data, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<float>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<float>(output));
-    } break;
-    case kTfLiteInt8:
-    case kTfLiteInt16: {
-      SoftmaxQuantized(context, input, output, &data);
-    } break;
-    default:
-      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
-                         TfLiteTypeGetName(input->type), input->type);
-      return kTfLiteError;
-  }
-  softmax_total_time += esp_timer_get_time() - start_time;
-  return kTfLiteOk;
-}
-
-static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
-  TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  TF_LITE_ENSURE(context, node->user_data != nullptr);
-  NodeData* data = static_cast<NodeData*>(node->user_data);
-  // Only allocate LUTs for kTfLiteInt16 data type
-  if (input->type == kTfLiteInt16) {
-    void* raw_exp_lut = context->AllocatePersistentBuffer(
-        context, sizeof(int16_t) * kInt16LUTArraySize);
-    TF_LITE_ENSURE(context, raw_exp_lut != nullptr);
-    data->op_data.exp_lut = reinterpret_cast<int16_t*>(raw_exp_lut);
-    void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer(
-        context, sizeof(int16_t) * kInt16LUTArraySize);
-    TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr);
-    data->op_data.one_over_one_plus_x_lut =
-        reinterpret_cast<int16_t*>(one_over_one_plus_x_lut);
-  }
-
-  if (output->type == kTfLiteInt16) {
-    TF_LITE_ENSURE(context,
-                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
-  } else {
-    TF_LITE_ENSURE_EQ(context, input->type, output->type);
-  }
-
-  // Populate LUT if required
-  if (input->type == kTfLiteInt16) {
-    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
-    // exp LUT only used on negative values
-    // we consider exp(-10.0) is insignificant to accumulation
-    gen_lut<float, int16_t, int16_t>(
-        [](float value) { return std::exp(value); }, -10.0f, 0.0f, -1.0f, 1.0f,
-        data->op_data.exp_lut);
-    gen_lut<float, int16_t, int16_t>(
-        [](float value) { return 1.0f / (1.0f + value); }, 0.0f, 1.0f, -1.0f,
-        1.0f, data->op_data.one_over_one_plus_x_lut);
-    data->op_data.zero_point = output->params.zero_point;
-    data->op_data.scale = output->params.scale;
-  }
-
-  auto* params = static_cast<TfLiteSoftmaxParams*>(node->builtin_data);
-  auto ret_val =
-      CalculateSoftmaxParams(context, input, output, params, &data->op_data);
-
-#if ESP_NN
-  if (output->type == kTfLiteInt8 && input->type == kTfLiteInt8) {
-    const int32_t input_width = input->dims->data[1];
-    const int32_t input_height = input->dims->data[2];
-    int scratch_buf_size = esp_nn_get_softmax_scratch_size(input_width,
-                                                           input_height);
-    if (scratch_buf_size > 0) {
-      TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
-          context, scratch_buf_size, &data->buffer_idx));
-    }
-  }
-#endif
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return ret_val;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_SOFTMAX() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
-}
-
-}  // namespace tflite
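The int8 path above hands esp_nn_softmax_s8 a flattened (outer_size x depth) view of the tensor: softmax runs over the last dimension only, and all leading dimensions are plain repetition. A small sketch of that flattening (illustrative only; the shape is made-up):

// Illustrative: how an N-D tensor collapses to softmax rows.
#include <cstdio>

int main() {
  const int dims[4] = {1, 4, 4, 10};  // e.g. [batch, h, w, classes]
  const int trailing_dim = 3;
  int outer_size = 1;
  for (int i = 0; i < trailing_dim; ++i) outer_size *= dims[i];
  const int depth = dims[trailing_dim];
  std::printf("outer_size=%d depth=%d\n", outer_size, depth);  // 16 rows of 10
}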
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/exp.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/exp.cc
deleted file mode 100644
index 64de090e..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/exp.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/exp.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-
-namespace tflite {
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
-  TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
-  TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes);
-  TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size);
-  for (int i = 0; i < output->dims->size; ++i) {
-    TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]);
-  }
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input),
-                                   tflite::micro::GetTensorShape(output));
-
-  if (input->type == kTfLiteFloat32) {
-    reference_ops::Exp(tflite::micro::GetTensorData<float>(input),
-                       static_cast<size_t>(flat_size),
-                       tflite::micro::GetTensorData<float>(output));
-  } else {
-    MicroPrintf("Type %s (%d) currently not supported by Exp.",
-                TfLiteTypeGetName(input->type), input->type);
-    return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-}  // namespace
-
-TfLiteRegistration Register_EXP() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/expand_dims.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/expand_dims.cc
deleted file mode 100644
index 85e3e37c..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/expand_dims.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_utils.h"
-
-namespace tflite {
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kAxisTensor = 1;
-constexpr int kOutputTensor = 0;
-
-TfLiteStatus GetAxisValueFromTensor(TfLiteContext* context,
-                                    const TfLiteTensor* axis,
-                                    int32_t* axis_value) {
-  const int axis_dims = (tflite::GetTensorShape(axis)).DimensionsCount();
-  if (axis_dims > 1) {
-    MicroPrintf("Axis must have only one element for Expand_Dims, got %d.",
-                axis_dims);
-    return kTfLiteError;
-  }
-
-  if (kTfLiteInt32 == (axis->type)) {
-    const int32_t* axis_ptr = tflite::GetTensorData<int32_t>(axis);
-    *axis_value = axis_ptr[0];
-    return kTfLiteOk;
-  } else {
-    MicroPrintf("Axis type %s (%d) not supported by Expand_Dims.",
-                TfLiteTypeGetName(axis->type), axis->type);
-    return kTfLiteError;
-  }
-}
-
-// Verifies that the output tensor's dimension shape is equivalent to inserting
-// a dimension of length 1 at the dimension index axis of input's shape as
-// defined in https://www.tensorflow.org/api_docs/python/tf/expand_dims.
-TfLiteStatus VerifyTensorDim(TfLiteContext* context, const TfLiteTensor* input,
-                             const TfLiteTensor* axis_tensor,
-                             const TfLiteTensor* output) {
-  int32_t axis_value = 0;
-  TF_LITE_ENSURE_OK(context,
-                    GetAxisValueFromTensor(context, axis_tensor, &axis_value));
-
-  tflite::RuntimeShape input_shape = tflite::GetTensorShape(input);
-  if (axis_value < 0) {
-    axis_value = input_shape.DimensionsCount() + 1 + axis_value;
-  }
-  TF_LITE_ENSURE(context, axis_value <= input_shape.DimensionsCount());
-
-  // TFLM only supports fixed-dimension tensors and assumes that the output
-  // shape is fully specified in the model. As such, TFLM directly uses the
-  // pointer to the dimension array in the model buffer.
-  tflite::RuntimeShape output_shape = tflite::GetTensorShape(output);
-
-  TF_LITE_ENSURE(context, output_shape.DimensionsCount() ==
-                              input_shape.DimensionsCount() + 1);
-  for (int i = 0; i < output_shape.DimensionsCount(); ++i) {
-    if (i < axis_value) {
-      TF_LITE_ENSURE(context, output_shape.Dims(i) == input_shape.Dims(i));
-    } else if (i == axis_value) {
-      TF_LITE_ENSURE(context, output_shape.Dims(i) == 1);
-    } else {
-      TF_LITE_ENSURE(context, output_shape.Dims(i) == input_shape.Dims(i - 1));
-    }
-  }
-  return kTfLiteOk;
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* axis =
-      micro_context->AllocateTempInputTensor(node, kAxisTensor);
-  TF_LITE_ENSURE(context, axis != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  output->type = input->type;
-  if (IsDynamicTensor(axis)) {
-    MicroPrintf("DynamicTensor is not yet supported by Expand_Dims.");
-    return kTfLiteError;
-  }
-  TF_LITE_ENSURE_OK(context, VerifyTensorDim(context, input, axis, output));
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(axis);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-template <typename T>
-void memCopyN(T* out, const T* in, const int num_elements) {
-  for (int i = 0; i < num_elements; ++i) {
-    out[i] = in[i];
-  }
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  const int flat_size = ElementCount(*input->dims);
-
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      memCopyN(tflite::micro::GetTensorData<float>(output),
-               tflite::micro::GetTensorData<float>(input), flat_size);
-    } break;
-    case kTfLiteInt8: {
-      memCopyN(tflite::micro::GetTensorData<int8_t>(output),
-               tflite::micro::GetTensorData<int8_t>(input), flat_size);
-    } break;
-    default:
-      MicroPrintf(
-          "Expand_Dims only currently supports int8 and float32, got %d.",
-          input->type);
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-}  // namespace
-
-TfLiteRegistration Register_EXPAND_DIMS() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
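The negative-axis handling in the deleted VerifyTensorDim() normalizes axis against input rank + 1, so axis -1 appends the new unit dimension at the end of the output shape. A quick sketch of that normalization (illustrative only; shapes are made-up):

// Illustrative: Expand_Dims negative-axis normalization.
#include <cstdio>

int main() {
  const int input_rank = 3;  // e.g. input shape [2, 3, 4]
  const int axes[] = {0, 2, -1, -4};
  for (int axis : axes) {
    const int a = axis < 0 ? input_rank + 1 + axis : axis;
    std::printf("axis %d -> output dim index %d of %d\n", axis, a,
                input_rank + 1);
  }
}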
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/fill.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/fill.cc
deleted file mode 100644
index 1191eba8..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/fill.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/fill.h"
-
-#include <stdint.h>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-
-namespace {
-
-template <typename T>
-TfLiteStatus EnsureEqImpl(TfLiteContext* context, const TfLiteIntArray* array,
-                          const TfLiteTensor* tensor) {
-  for (int i = 0; i < array->size; ++i) {
-    TF_LITE_ENSURE_EQ(context, array->data[i], GetTensorData<T>(tensor)[i]);
-  }
-  return kTfLiteOk;
-}
-
-// Ensure the equality of an int array and a tensor, which must be
-// one-dimensional and of an integer type.
-TfLiteStatus EnsureEq(TfLiteContext* context, const TfLiteIntArray* array,
-                      const TfLiteTensor* tensor) {
-  TF_LITE_ENSURE_EQ(context, NumDimensions(tensor), 1);
-  const auto tensor_len = tensor->dims->data[0];
-  TF_LITE_ENSURE_EQ(context, array->size, tensor_len);
-
-  switch (tensor->type) {
-    case kTfLiteInt8:
-      return EnsureEqImpl<int8_t>(context, array, tensor);
-    case kTfLiteInt16:
-      return EnsureEqImpl<int16_t>(context, array, tensor);
-    case kTfLiteInt32:
-      return EnsureEqImpl<int32_t>(context, array, tensor);
-    case kTfLiteInt64:
-      return EnsureEqImpl<int64_t>(context, array, tensor);
-    default:
-      MicroPrintf("cannot compare int array to tensor of type %d.",
-                  tensor->type);
-      return kTfLiteError;
-  }
-}
-
-constexpr int kDimsTensor = 0;
-constexpr int kValueTensor = 1;
-constexpr int kOutputTensor = 0;
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  // Ensure inputs and outputs exist.
-  TfLiteTensor* dims =
-      micro_context->AllocateTempInputTensor(node, kDimsTensor);
-  TF_LITE_ENSURE(context, dims != nullptr);
-  TfLiteTensor* value =
-      micro_context->AllocateTempInputTensor(node, kValueTensor);
-  TF_LITE_ENSURE(context, value != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  // The value tensor must be a scalar.
-  TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0);
-
-  // The value type and output type must match.
-  TF_LITE_ENSURE_EQ(context, value->type, output->type);
-
-  // The dimension of the output tensor is known in model already.
-  TFLITE_DCHECK(output->dims != nullptr);
-
-  if (dims->data.data != nullptr) {
-    // When the dims tensor is specified in model already (i.e. is not an
-    // activation tensor), the dims tensor must match the output tensor shape.
-    // As a byproduct, ensures the dims tensor is of an integer type.
-    TF_LITE_ENSURE_OK(context, EnsureEq(context, output->dims, dims));
-  }
-
-  micro_context->DeallocateTempTfLiteTensor(dims);
-  micro_context->DeallocateTempTfLiteTensor(value);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-template <typename T>
-void FillImpl(const TfLiteEvalTensor* value, TfLiteEvalTensor* output) {
-  reference_ops::Fill(
-      micro::GetTensorShape(value), micro::GetTensorData<T>(value),
-      micro::GetTensorShape(output), micro::GetTensorData<T>(output));
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* value =
-      micro::GetEvalInput(context, node, kValueTensor);
-  TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor);
-
-  switch (value->type) {
-    case kTfLiteFloat32:
-      FillImpl<float>(value, output);
-      break;
-    case kTfLiteInt32:
-      FillImpl<int32_t>(value, output);
-      break;
-    case kTfLiteInt8:
-      FillImpl<int8_t>(value, output);
-      break;
-    default:
-      MicroPrintf("Fill only currently supports float32 for input 1, got %s.",
-                  TfLiteTypeGetName(value->type));
-      return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_FILL() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/floor_div.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/floor_div.cc
deleted file mode 100644
index d8a96734..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/floor_div.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/floor_div.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_utils.h"
-
-namespace tflite {
-namespace {
-
-// Input/output tensor index.
-constexpr int kInputTensor1 = 0;
-constexpr int kInputTensor2 = 1;
-constexpr int kOutputTensor = 0;
-
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  TfLiteTensor* input1 =
-      micro_context->AllocateTempInputTensor(node, kInputTensor1);
-  TF_LITE_ENSURE(context, input1 != nullptr);
-  TfLiteTensor* input2 =
-      micro_context->AllocateTempInputTensor(node, kInputTensor2);
-  TF_LITE_ENSURE(context, input2 != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
-  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type);
-
-  micro_context->DeallocateTempTfLiteTensor(input1);
-  micro_context->DeallocateTempTfLiteTensor(input2);
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return CalculateOpData(context, node);
-}
-
-template <typename T>
-TfLiteStatus EvalFloorDiv(TfLiteContext* context,
-                          const TfLiteEvalTensor* input1,
-                          const TfLiteEvalTensor* input2,
-                          TfLiteEvalTensor* output) {
-  const T* denominator_data = tflite::micro::GetTensorData<T>(input2);
-
-  // Validate the denominator.
-  for (int i = 0; i < tflite::ElementCount(*input2->dims); ++i) {
-    if (std::equal_to<T>()(denominator_data[i], 0)) {
-      MicroPrintf("Division by 0");
-      return kTfLiteError;
-    }
-  }
-
-  bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2);
-
-  if (requires_broadcast) {
-    reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>(
-        tflite::micro::GetTensorShape(input1),
-        tflite::micro::GetTensorData<T>(input1),
-        tflite::micro::GetTensorShape(input2), denominator_data,
-        tflite::micro::GetTensorShape(output),
-        tflite::micro::GetTensorData<T>(output), reference_ops::FloorDiv<T>);
-  } else {
-    reference_ops::BinaryFunction<T, T, T>(
-        tflite::micro::GetTensorShape(input1),
-        tflite::micro::GetTensorData<T>(input1),
-        tflite::micro::GetTensorShape(input2), denominator_data,
-        tflite::micro::GetTensorShape(output),
-        tflite::micro::GetTensorData<T>(output), reference_ops::FloorDiv<T>);
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input1 =
-      tflite::micro::GetEvalInput(context, node, kInputTensor1);
-  const TfLiteEvalTensor* input2 =
-      tflite::micro::GetEvalInput(context, node, kInputTensor2);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  switch (input1->type) {
-    case kTfLiteFloat32: {
-      return EvalFloorDiv<float>(context, input1, input2, output);
-    }
-    default: {
-      MicroPrintf("Type '%s' is not supported by FLOOR_DIV.",
-                  TfLiteTypeGetName(input1->type));
-      return kTfLiteError;
-    }
-  }
-}
-
-}  // namespace
-
-TfLiteRegistration Register_FLOOR_DIV() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
-}
-
-}  // namespace tflite
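Worth noting why FLOOR_DIV delegates to reference_ops::FloorDiv rather than plain division: C++ division truncates toward zero, while floor division rounds toward negative infinity, so the two differ whenever the operands have opposite signs. A minimal illustration (not part of the deleted file):

// Illustrative: truncating division vs. floor division.
#include <cmath>
#include <cstdio>

int main() {
  const float num = -7.0f, den = 2.0f;
  std::printf("trunc: %d, floor_div: %d\n",
              static_cast<int>(num / den),               // -3
              static_cast<int>(std::floor(num / den)));  // -4
}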
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/floor_mod.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/reference/binary_function.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -// OLD-TODO(b/117523611): We should factor out a binary_op and put binary ops -// there. -namespace tflite { -namespace { - -// Input/output tensor index. -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -// OLD-TODO(b/117912880): Support quantization. - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TfLiteTensor* input1 = - micro_context->AllocateTempInputTensor(node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - TfLiteTensor* input2 = - micro_context->AllocateTempInputTensor(node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); - - micro_context->DeallocateTempTfLiteTensor(input1); - micro_context->DeallocateTempTfLiteTensor(input2); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - return nullptr; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - return CalculateOpData(context, node); -} - -template -TfLiteStatus EvalFloorMod(TfLiteContext* context, bool requires_broadcast, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - const T* denominator_data = tflite::micro::GetTensorData(input2); - - if (requires_broadcast) { - reference_ops::BroadcastBinaryFunction4DSlow( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), denominator_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), reference_ops::FloorMod); - } else { - reference_ops::BinaryFunction( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), denominator_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), reference_ops::FloorMod); - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - 
const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - - switch (input1->type) { - case kTfLiteFloat32: { - return EvalFloorMod(context, requires_broadcast, input1, input2, - output); - } - default: { - MicroPrintf("Type '%s' is not supported by FLOOR_MOD.", - TfLiteTypeGetName(input1->type)); - return kTfLiteError; - } - } -} - -} // namespace - -TfLiteRegistration Register_FLOOR_MOD() { - return tflite::micro::RegisterOp(Init, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/fully_connected.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/fully_connected.cc deleted file mode 100644 index 82d87284..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/fully_connected.cc +++ /dev/null @@ -1,158 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/fully_connected.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, - sizeof(OpDataFullyConnected)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* data = static_cast(node->user_data); - const auto params = - static_cast(node->builtin_data); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kFullyConnectedInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* filter = micro_context->AllocateTempInputTensor( - node, kFullyConnectedWeightsTensor); - TF_LITE_ENSURE(context, filter != nullptr); - TfLiteTensor* bias = - micro_context->AllocateTempInputTensor(node, kFullyConnectedBiasTensor); - TfLiteTensor* output = micro_context->AllocateTempOutputTensor( - node, kFullyConnectedOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, 
input->type, output->type); - - TF_LITE_ENSURE_OK(context, CalculateOpDataFullyConnected( - context, params->activation, input->type, - input, filter, bias, output, data)); - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(filter); - if (bias != nullptr) { - micro_context->DeallocateTempTfLiteTensor(bias); - } - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto* params = - static_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const auto& data = - *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. - switch (input->type) { - case kTfLiteFloat32: { - const float* bias_data = - nullptr != bias ? tflite::micro::GetTensorData(bias) : nullptr; - - tflite::reference_ops::FullyConnected( - FullyConnectedParamsFloat(params->activation), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), bias_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - - case kTfLiteInt8: { - const int32_t* bias_data = - nullptr != bias ? tflite::micro::GetTensorData(bias) - : nullptr; - - tflite::reference_integer_ops::FullyConnected( - FullyConnectedParamsQuantized(data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), bias_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - - case kTfLiteInt16: { - const int64_t* bias_data = - nullptr != bias ? tflite::micro::GetTensorData(bias) - : nullptr; - - tflite::reference_integer_ops::FullyConnected( - FullyConnectedParamsQuantized(data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), bias_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - - default: { - MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), - input->type); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_FULLY_CONNECTED() { - return tflite::micro::RegisterOp(Init, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/gather.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/gather.cc deleted file mode 100644 index ec4e2e5a..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/gather.cc +++ /dev/null @@ -1,223 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
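// [Editor's note] The FULLY_CONNECTED Eval above pairs each input type with a
// fixed bias type: float32 inputs take float bias, int8 inputs take int32
// bias, and int16 inputs take int64 bias, because accumulation happens at the
// wider width. The float path computes
//   out[b][o] = act(sum_i in[b][i] * w[o][i] + bias[o])
// with weights laid out row-major as [out_dim, in_dim]. A minimal standalone
// sketch of the float math (not the TFLM reference op; activation omitted):
#include <cstddef>

void FullyConnectedFloat(const float* input, const float* weights,
                         const float* bias, float* output,
                         size_t batches, size_t in_dim, size_t out_dim) {
  for (size_t b = 0; b < batches; ++b) {
    for (size_t o = 0; o < out_dim; ++o) {
      float acc = (bias != nullptr) ? bias[o] : 0.0f;  // bias is optional
      for (size_t i = 0; i < in_dim; ++i) {
        acc += input[b * in_dim + i] * weights[o * in_dim + i];
      }
      output[b * out_dim + o] = acc;  // activation clamp omitted for brevity
    }
  }
}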
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kInputPositions = 1; -constexpr int kOutputTensor = 0; - -template -TfLiteStatus Gather(const TfLiteGatherParams* params, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* coords, TfLiteEvalTensor* output) { - const InputT* input_data = tflite::micro::GetTensorData(input); - const CoordsT* coords_data = tflite::micro::GetTensorData(coords); - InputT* output_data = tflite::micro::GetTensorData(output); - const TfLiteIntArray* input_dims = input->dims; - const int input_dims_size = input_dims->size; - int axis = params->axis; - if (axis < 0) { - axis += input_dims_size; - } - TFLITE_DCHECK_GE(axis, 0); - TFLITE_DCHECK_LT(axis, input_dims_size); - - int batch_dims = params->batch_dims; - // batch_dims should be in range: [-rank(coords), rank(coords)]. - // Negative batch_dims is added with rank of coords. 
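// [Editor's note] The axis/batch_dims normalization in the Gather code above
// follows the usual Python-style negative-index convention. A tiny
// standalone illustration:
#include <cassert>

int NormalizeAxis(int axis, int rank) {
  if (axis < 0) {
    axis += rank;  // e.g. axis = -1 with rank = 4 becomes axis = 3
  }
  assert(axis >= 0 && axis < rank);
  return axis;
}
// For GATHER on a rank-4 input, axis = -1 and axis = 3 address the same
// dimension; batch_dims is normalized the same way against rank(coords).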
- const TfLiteIntArray* coords_dims = coords->dims; - const int coords_dims_size = coords_dims->size; - if (batch_dims < 0) { - batch_dims += coords_dims_size; - } - TFLITE_DCHECK_GE(batch_dims, 0); - TFLITE_DCHECK_LT(batch_dims, input_dims_size); - TFLITE_DCHECK_LE(batch_dims, coords_dims_size); - TFLITE_DCHECK_GE(axis, batch_dims); - for (int i = 0; i < batch_dims; ++i) { - TFLITE_DCHECK_EQ(input_dims->data[i], coords_dims->data[i]); - } - - const int axis_size = input_dims->data[axis]; - - int batch_size = 1; - for (int i = 0; i < batch_dims; ++i) { - batch_size *= input_dims->data[i]; - } - int outer_size = 1; - for (int i = batch_dims; i < axis; ++i) { - outer_size *= input_dims->data[i]; - } - int inner_size = 1; - for (int i = axis + 1; i < input_dims_size; ++i) { - inner_size *= input_dims->data[i]; - } - int coord_size = 1; - for (int i = batch_dims; i < coords_dims_size; ++i) { - coord_size *= coords_dims->data[i]; - } - - for (int batch = 0; batch < batch_size; ++batch) { - for (int outer = 0; outer < outer_size; ++outer) { - for (int coord = 0; coord < coord_size; ++coord) { - TFLITE_DCHECK_GE(coords_data[coord], 0); - TFLITE_DCHECK_LT(coords_data[coord], axis_size); - std::memcpy(output_data + - (((batch * outer_size) + outer) * coord_size + coord) * - inner_size, - input_data + (((batch * outer_size) + outer) * axis_size + - coords_data[batch * coord_size + coord]) * - inner_size, - sizeof(InputT) * inner_size); - } - } - } - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const auto* params = - reinterpret_cast(node->builtin_data); - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* coords = - micro_context->AllocateTempInputTensor(node, kInputPositions); - TF_LITE_ENSURE(context, coords != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - switch (coords->type) { - case kTfLiteInt32: - break; - default: - MicroPrintf("Positions of type '%s' are not supported by gather.", - TfLiteTypeGetName(coords->type)); - return kTfLiteError; - break; - } - - // Assign to output the input type. - output->type = input->type; - - // Check conditions for different types. - switch (input->type) { - case kTfLiteFloat32: - case kTfLiteInt8: - break; - default: - MicroPrintf("Type '%s' is not supported by gather.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - break; - } - - int axis = params->axis; - if (axis < 0) { - axis += NumDimensions(input); - } - TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input)); - - int batch_dims = params->batch_dims; - // batch_dims should be in range: [-rank(coords), rank(coords)]. - // Negative batch_dims is added with rank of coords. - if (batch_dims < 0) { - batch_dims += NumDimensions(coords); - } - TF_LITE_ENSURE(context, batch_dims <= axis); - TF_LITE_ENSURE(context, 0 <= batch_dims && batch_dims < NumDimensions(input)); - TF_LITE_ENSURE(context, batch_dims <= NumDimensions(coords)); - for (int i = 0; i < batch_dims; ++i) { - TF_LITE_ENSURE_EQ(context, input->dims->data[i], coords->dims->data[i]); - } - - // GATHER updates the output tensor dimensions, but TfLiteTensor in the - // MicroInterpreter is a temporary allocation. 
We must therefore relocate the
-  // dims from the FlatBuffer to the persistent storage arena.
-  TfLiteEvalTensor* output_eval =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy(
-                                 context, output, output_eval));
-
-  TfLiteIntArray* output_shape = output->dims;
-  output_shape->size =
-      NumDimensions(input) + NumDimensions(coords) - 1 - batch_dims;
-  int output_index = 0;
-  for (int i = 0; i < axis; ++i) {
-    output_shape->data[output_index++] = input->dims->data[i];
-  }
-  for (int i = batch_dims; i < coords->dims->size; ++i) {
-    output_shape->data[output_index++] = coords->dims->data[i];
-  }
-  for (int i = axis + 1; i < input->dims->size; ++i) {
-    output_shape->data[output_index++] = input->dims->data[i];
-  }
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(coords);
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const auto* params =
-      reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data);
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  const TfLiteEvalTensor* coords =
-      tflite::micro::GetEvalInput(context, node, kInputPositions);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  if (coords->type == kTfLiteInt32) {
-    switch (input->type) {
-      case kTfLiteFloat32:
-        return Gather<float, int32_t>(params, input, coords, output);
-        break;
-      case kTfLiteInt8:
-        return Gather<int8_t, int32_t>(params, input, coords, output);
-        break;
-      default:
-        MicroPrintf("Type '%s' is not supported by gather.",
-                    TfLiteTypeGetName(input->type));
-        return kTfLiteError;
-        break;
-    }
-  }
-  return kTfLiteOk;
-}
-}  // namespace
-
-TfLiteRegistration Register_GATHER() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/gather_nd.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/gather_nd.cc
deleted file mode 100644
index 5bb4dd84..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/gather_nd.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
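// [Editor's note] The GATHER output shape assembled in Prepare above is
//   input.shape[:axis] + coords.shape[batch_dims:] + input.shape[axis+1:]
// Worked example (illustrative values only): input shape {3, 4, 5}, coords
// shape {2}, axis = 1, batch_dims = 0 gives output shape {3, 2, 5} — the
// axis-1 extent (4) is replaced by the number of gathered indices (2).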
-==============================================================================*/ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -constexpr int kParams = 0; -constexpr int kIndices = 1; -constexpr int kOutputTensor = 0; -constexpr int MAX_INDICES_ND = 5; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TfLiteTensor* params = micro_context->AllocateTempInputTensor(node, kParams); - TF_LITE_ENSURE(context, params != nullptr); - TfLiteTensor* indices = - micro_context->AllocateTempInputTensor(node, kIndices); - TF_LITE_ENSURE(context, indices != nullptr); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - switch (params->type) { - case kTfLiteFloat32: - case kTfLiteInt8: - break; - default: - MicroPrintf("Params of type '%s' are not supported by gather_nd.", - TfLiteTypeGetName(params->type)); - return kTfLiteError; - break; - } - switch (indices->type) { - case kTfLiteInt32: - break; - default: - MicroPrintf("Indices of type '%s' are not supported by gather_nd.", - TfLiteTypeGetName(indices->type)); - return kTfLiteError; - } - - const int params_rank = NumDimensions(params); - const int indices_rank = NumDimensions(indices); - const int indices_nd = SizeOfDimension(indices, indices_rank - 1); - if (params_rank < 1) { - MicroPrintf("Params must be at least a vector."); - return kTfLiteError; - } - if (indices_rank < 1) { - MicroPrintf("Indices must be at least a vector."); - return kTfLiteError; - } - if (indices_nd > params_rank) { - MicroPrintf("Index innermost dimension length must be <= params rank."); - return kTfLiteError; - } - if (indices_nd > MAX_INDICES_ND) { - MicroPrintf("Index innermost dimension length must not exceed %d.", - MAX_INDICES_ND); - return kTfLiteError; - } - - // Assign to output the input type. - output->type = params->type; - - // TFLM gather_nd does not create the output tensor, but it needs to ensure - // that the output shape is correct. 
The result shape is - // indices.shape[:-1] + params.shape[indices.shape[-1]:] - TfLiteIntArray* output_shape = output->dims; - int output_index = 0; - for (int i = 0; i < indices_rank - 1; ++i) { - output_shape->data[output_index++] = indices->dims->data[i]; - } - for (int i = indices_nd; i < params_rank; ++i) { - output_shape->data[output_index++] = params->dims->data[i]; - } - output_shape->size = output_index; - - micro_context->DeallocateTempTfLiteTensor(params); - micro_context->DeallocateTempTfLiteTensor(indices); - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -template -TfLiteStatus GatherNd(const TfLiteEvalTensor* params, - const TfLiteEvalTensor* indices, - TfLiteEvalTensor* output) { - const int indices_dims = indices->dims->size; - const int indices_nd = indices->dims->data[indices_dims - 1]; - const int params_dims = params->dims->size; - const IndicesT* index_data = tflite::micro::GetTensorData(indices); - const ParamsT* param_data = tflite::micro::GetTensorData(params); - ParamsT* output_data = tflite::micro::GetTensorData(output); - - int n_slices = 1; - for (int i = 0; i < indices_dims - 1; ++i) { - n_slices *= indices->dims->data[i]; - } - - // If indices[-1] == params.rank, fetch single elements. - // If indices[-1] < params.rank, fetch slices. - int slice_size = 1; - for (int i = indices_nd; i < params_dims; ++i) { - slice_size *= params->dims->data[i]; - } - - int params_flat_size = ElementCount(*params->dims); - int remain_flat_size = params_flat_size; - - // Number of elements per dimension - int dims_to_count[MAX_INDICES_ND]; - for (int i = 0; i < indices_nd; ++i) { - dims_to_count[i] = remain_flat_size / params->dims->data[i]; - remain_flat_size = dims_to_count[i]; - } - - for (int i = 0; i < n_slices; ++i) { - int from_pos = 0; - for (int j = 0; j < indices_nd; ++j) { - int offset = i * indices_nd + j; - IndicesT index = index_data[offset]; - from_pos += index * dims_to_count[j]; - } - if (from_pos < 0 || from_pos + slice_size > params_flat_size) { - return kTfLiteError; - } - std::memcpy(output_data + i * slice_size, param_data + from_pos, - sizeof(ParamsT) * slice_size); - } - return kTfLiteOk; -} - -template -TfLiteStatus EvalGatherNd(TfLiteContext* context, - const TfLiteEvalTensor* params, - const TfLiteEvalTensor* indices, - TfLiteEvalTensor* output) { - TfLiteStatus status = kTfLiteError; - switch (params->type) { - case kTfLiteFloat32: - status = GatherNd(params, indices, output); - break; - case kTfLiteInt8: - status = GatherNd(params, indices, output); - break; - default: - MicroPrintf("Params type '%s' are not supported by gather_nd.", - TfLiteTypeGetName(params->type)); - return kTfLiteError; - } - if (status != kTfLiteOk) { - MicroPrintf("gather_nd index out of bounds"); - } - return status; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* params = - tflite::micro::GetEvalInput(context, node, kParams); - const TfLiteEvalTensor* indices = - tflite::micro::GetEvalInput(context, node, kIndices); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (indices->type) { - case kTfLiteInt32: - return EvalGatherNd(context, params, indices, output); - break; - default: - MicroPrintf("Indices of type '%s' are not supported by gather_nd.", - TfLiteTypeGetName(indices->type)); - return kTfLiteError; - } -} -} // namespace - -TfLiteRegistration Register_GATHER_ND() { - return tflite::micro::RegisterOp(nullptr, Prepare, Eval); -} - -} // 
namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_runner.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_runner.cc deleted file mode 100644 index 7e61ef29..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_runner.cc +++ /dev/null @@ -1,122 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/kernel_runner.h" - -#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" -#include "tensorflow/lite/micro/micro_arena_constants.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/test_helpers.h" - -namespace tflite { -namespace micro { - -// TODO(b/161841696): Consider moving away from global arena buffers: -constexpr int KernelRunner::kKernelRunnerBufferSize_; -uint8_t KernelRunner::kKernelRunnerBuffer_[]; - -void ClearBufferApi(TfLiteContext* context_) { - context_->GetScratchBuffer = nullptr; - context_->GetExternalContext = nullptr; - context_->AllocatePersistentBuffer = nullptr; - context_->RequestScratchBufferInArena = nullptr; -} - -KernelRunner::KernelRunner(const TfLiteRegistration& registration, - TfLiteTensor* tensors, int tensors_size, - TfLiteIntArray* inputs, TfLiteIntArray* outputs, - void* builtin_data, TfLiteIntArray* intermediates) - : registration_(registration), - allocator_(SingleArenaBufferAllocator::Create(GetMicroErrorReporter(), - kKernelRunnerBuffer_, - kKernelRunnerBufferSize_)), - mock_micro_graph_(allocator_), - fake_micro_context_(tensors, allocator_, &mock_micro_graph_) { - // Prepare TfLiteContext: - context_.impl_ = static_cast(&fake_micro_context_); - context_.ReportError = MicroContextReportOpError; - context_.recommended_num_threads = 1; - context_.GetTensor = MicroContextGetTensor; - context_.GetEvalTensor = MicroContextGetEvalTensor; - tflite::micro::ClearBufferApi(&context_); - context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; - - context_.recommended_num_threads = 0; - - // Prepare TfLiteNode: - node_.inputs = inputs; - node_.outputs = outputs; - node_.builtin_data = builtin_data; - node_.intermediates = intermediates; -} - -bool KernelRunner::ValidateTempBufferDeallocated() { - return fake_micro_context_.IsAllTempTfLiteTensorDeallocated(); -} - -TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, - size_t length) { - if (registration_.init) { - tflite::micro::ClearBufferApi(&context_); - context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; - node_.user_data = registration_.init(&context_, init_data, length); - } - - TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); - - if (registration_.prepare) { - tflite ::micro::ClearBufferApi(&context_); - context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; - context_.RequestScratchBufferInArena = - 
MicroContextRequestScratchBufferInArena; - context_.GetExternalContext = MicroContextGetExternalContext; - TF_LITE_ENSURE_STATUS(registration_.prepare(&context_, &node_)); - } - - TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); - - return kTfLiteOk; -} - -TfLiteStatus KernelRunner::Invoke() { - tflite::micro::ClearBufferApi(&context_); - context_.GetScratchBuffer = MicroContextGetScratchBuffer; - - if (registration_.invoke == nullptr) { - MicroPrintf("TfLiteRegistration missing invoke function pointer!"); - return kTfLiteError; - } - - TF_LITE_ENSURE_STATUS(registration_.invoke(&context_, &node_)); - - TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); - - return kTfLiteOk; -} - -TfLiteStatus KernelRunner::Free() { - tflite::micro::ClearBufferApi(&context_); - context_.GetScratchBuffer = MicroContextGetScratchBuffer; - - if (registration_.free == nullptr) { - MicroPrintf("TfLiteRegistration missing free function pointer!"); - return kTfLiteError; - } - - registration_.free(&context_, node_.user_data); - return kTfLiteOk; -} -} // namespace micro -} // namespace tflite \ No newline at end of file diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_runner.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_runner.h deleted file mode 100644 index c7d53c3a..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_runner.h +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_RUNNER_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_RUNNER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" -#include "tensorflow/lite/micro/fake_micro_context.h" -#include "tensorflow/lite/micro/mock_micro_graph.h" - -namespace tflite { -namespace micro { - -// Helper class to perform a simulated kernel (i.e. TfLiteRegistration) -// lifecycle (init, prepare, invoke). All internal allocations are handled by -// this class. Simply pass in the registration, list of required tensors, inputs -// array, outputs array, and any pre-builtin data. Calling Invoke() will -// automatically walk the kernel and outputs will be ready on the TfLiteTensor -// output provided during construction. -class KernelRunner { - public: - KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, - int tensors_size, TfLiteIntArray* inputs, - TfLiteIntArray* outputs, void* builtin_data, - TfLiteIntArray* intermediates = nullptr); - - // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any - // exceptions will be DebugLog'd and returned as a status code. 
- TfLiteStatus InitAndPrepare(const char* init_data = nullptr, - size_t length = 0); - - // Calls init, prepare, and invoke on a given TfLiteRegistration pointer. - // After successful invoke, results will be available in the output tensor as - // passed into the constructor of this class. - TfLiteStatus Invoke(); - - // Calls Free on a given TfLiteRegistration pointer(if it's implemented). - // After successful Free, kTfLiteOk status will be returned. If Free is not - // implemented for a given kernel kTfLiteError will be returned. - TfLiteStatus Free(); - - // Returns a pointer to the internal MockMicroGraph which KernelRunner uses - // to stub out MicroGraph methods and track invocations on each subgraph. - MockMicroGraph* GetMockGraph() { return &mock_micro_graph_; } - - // Returns true if all temp buffer in tests are deallocated. - // TODO(b/209453859): move this function to private after deallocation checks - // are enabled for all kernel tests. - bool ValidateTempBufferDeallocated(); - - private: - static constexpr int kKernelRunnerBufferSize_ = 10000; - static uint8_t kKernelRunnerBuffer_[kKernelRunnerBufferSize_]; - - TfLiteContext context_ = {}; - TfLiteNode node_ = {}; - const TfLiteRegistration& registration_; - - SingleArenaBufferAllocator* allocator_; - MockMicroGraph mock_micro_graph_; - FakeMicroContext fake_micro_context_; -}; - -} // namespace micro -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_RUNNER_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_util.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_util.cc deleted file mode 100644 index a8e88d30..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_util.cc +++ /dev/null @@ -1,260 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
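// [Editor's sketch] Typical use of the KernelRunner test helper deleted
// above (tensor/array setup is elided and the variable names here are
// illustrative):
//
//   TfLiteRegistration registration = tflite::Register_FULLY_CONNECTED();
//   tflite::micro::KernelRunner runner(registration, tensors, tensors_size,
//                                      inputs_array, outputs_array,
//                                      /*builtin_data=*/&params);
//   TF_LITE_ENSURE_STATUS(runner.InitAndPrepare());
//   TF_LITE_ENSURE_STATUS(runner.Invoke());
//   // Results are now in the output TfLiteTensor passed in via `tensors`.
//
// Note how InitAndPrepare() and Invoke() each reset the TfLiteContext buffer
// API before enabling only the calls legal in that phase, so a kernel that
// calls, say, RequestScratchBufferInArena() during Invoke() fails fast
// instead of silently misbehaving.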
-==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -namespace tflite { -namespace micro { - -namespace { - -int ValidateTensorIndexing(const TfLiteContext* context, int index, - int max_size, const int* tensor_indices) { - if (index >= 0 && index < max_size) { - const int tensor_index = tensor_indices[index]; - if (tensor_index != kTfLiteOptionalTensor) { - return tensor_index; - } - } - return -1; -} - -} // namespace - -TfLiteRegistration RegisterOp( - void* (*init)(TfLiteContext* context, const char* buffer, size_t length), - TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node), - TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), - void (*free)(TfLiteContext* context, void* buffer)) { - return {/*init=*/init, - /*free=*/free, - /*prepare=*/prepare, - /*invoke=*/invoke, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0, - /*registration_external=*/nullptr}; -} - -// Returns a mutable tensor for a given input index. is_variable must be checked -// during prepare when the full TfLiteTensor is available. -TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - const int tensor_index = ValidateTensorIndexing( - context, index, node->inputs->size, node->inputs->data); - - if (tensor_index < 0) { - return nullptr; - } - - return context->GetEvalTensor(context, node->inputs->data[index]); -} - -// Returns the TfLiteEvalTensor struct for a given input index in a node. -const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - return GetMutableEvalInput(context, node, index); -} - -// Returns the TfLiteEvalTensor struct for a given output index in a node. -TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->outputs->data[index]); -} - -bool HaveSameShapes(const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2) { - TFLITE_DCHECK(input1 != nullptr); - TFLITE_DCHECK(input2 != nullptr); - return TfLiteIntArrayEqual(input1->dims, input2->dims); -} - -const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { - if (tensor == nullptr || tensor->dims == nullptr) { - return RuntimeShape(); - } - TfLiteIntArray* dims = tensor->dims; - const int dims_size = dims->size; - const int32_t* dims_data = reinterpret_cast(dims->data); - return RuntimeShape(dims_size, dims_data); -} - -PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -// Relocate tensor dims from FlatBuffer to the persistent storage arena. -// The old dims data is copied to the new storage area. -// The tensor and eval_tensor must be the same tensor. -// Only use during Prepare phase. 
-TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context,
-                                              TfLiteTensor* tensor,
-                                              TfLiteEvalTensor* eval_tensor) {
-  TF_LITE_ENSURE(context, tensor != nullptr);
-  TF_LITE_ENSURE(context, eval_tensor != nullptr);
-  TF_LITE_ENSURE(context, context->AllocatePersistentBuffer != nullptr);
-  int ranks = tensor->dims->size;
-  size_t alloc_size = TfLiteIntArrayGetSizeInBytes(ranks);
-  TfLiteIntArray* new_dims = static_cast<TfLiteIntArray*>(
-      context->AllocatePersistentBuffer(context, alloc_size));
-  TfLiteIntArray* old_dims = tensor->dims;
-  new_dims->size = ranks;
-  tensor->dims = new_dims;
-  eval_tensor->dims = new_dims;
-  for (int i = 0; i < ranks; i++) {
-    new_dims->data[i] = old_dims->data[i];
-  }
-
-  return kTfLiteOk;
-}
-
-// Verify that both tensors have the same type and size, then return the size
-// of both tensors in bytes if they are the same, or -1 if they are different.
-size_t ValidateAndGetTensorSizes(const TfLiteEvalTensor* tensor1,
-                                 const TfLiteEvalTensor* tensor2) {
-  TFLITE_DCHECK(tensor1->type == tensor2->type);
-  size_t tensor1_size = 0;
-  size_t tensor2_size = 0;
-  TfLiteEvalTensorByteLength(tensor1, &tensor1_size);
-  TfLiteEvalTensorByteLength(tensor2, &tensor2_size);
-  return (tensor1_size == tensor2_size) ? tensor1_size : -1;
-}
-
-TfLiteStatus CopyOpInputsToOpOutputs(TfLiteContext* context, TfLiteNode* node) {
-  TF_LITE_ENSURE(context, node->inputs->size == node->outputs->size);
-  for (int i = 0; i < node->inputs->size; i++) {
-    const TfLiteEvalTensor* input =
-        tflite::micro::GetEvalInput(context, node, i);
-    TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i);
-    int bytes = ValidateAndGetTensorSizes(input, output);
-    TF_LITE_ENSURE(context, bytes >= 0);
-    memcpy(output->data.raw, input->data.raw, bytes);
-  }
-  return kTfLiteOk;
-}
-
-// Args:
-//   1. int8_t tensor_data - int8_t buffer of unknown size whose data you'd
-//      like to print
-//   2. int n_bytes - a small int representing the number of bytes you want to
-//      print to debug output. It should always be <= tensor_data's size.
-//   3. prefix - optional message you'd like to print before printing bytes
-//
-// Purpose:
//   Takes in the parameters above and prints n_bytes bytes from the
-//   tensor_data buffer. This can be used to debug the output of a model and
-//   its op.
- -void PrintNBytes(const int8_t* tensor_data, int n_bytes, const char* prefix) { - if (prefix != nullptr) { - MicroPrintf("%s", prefix); - } - - for (int i = 0; i < n_bytes; ++i) { - MicroPrintf(" %x", tensor_data[i]); - } - MicroPrintf("\n"); -} - -// same as the PrintNBytes above but the buffer needs to be extracted out of the -// TfLiteEvalTensor* -void PrintNBytes(const TfLiteEvalTensor* tensor, int n_bytes, - const char* prefix) { - const int8_t* tensor_data = tflite::micro::GetTensorData(tensor); - PrintNBytes(tensor_data, n_bytes, prefix); -} - -// same as the PrintNBytes above but the buffer needs to be extracted out of the -// TfLiteEvalTensor* -void PrintNBytes(const TfLiteTensor* tensor, int n_bytes, const char* prefix) { - const int8_t* tensor_data = tflite::GetTensorData(tensor); - PrintNBytes(tensor_data, n_bytes, prefix); -} - -TfLiteStatus CopyOpInputsToSubgraphInputs(TfLiteContext* context, - TfLiteNode* node, - MicroGraph* graph_info, - int subgraph_idx, - int first_tensor_idx) { - TF_LITE_ENSURE(context, - static_cast(node->inputs->size - first_tensor_idx) == - graph_info->NumSubgraphInputs(subgraph_idx)); - for (int i = 0; i < node->inputs->size - first_tensor_idx; i++) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, i + first_tensor_idx); - TfLiteEvalTensor* subgraph_input = - graph_info->GetSubgraphInput(subgraph_idx, i); - int bytes = ValidateAndGetTensorSizes(input, subgraph_input); - TF_LITE_ENSURE(context, bytes >= 0); - memcpy(subgraph_input->data.raw, input->data.raw, bytes); - } - return kTfLiteOk; -} - -TfLiteStatus CopyOpOutputsToSubgraphInputs(TfLiteContext* context, - TfLiteNode* node, - MicroGraph* graph_info, - int subgraph_idx) { - TF_LITE_ENSURE(context, static_cast(node->outputs->size) == - graph_info->NumSubgraphInputs(subgraph_idx)); - for (int i = 0; i < node->outputs->size; i++) { - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); - TfLiteEvalTensor* subgraph_input = - graph_info->GetSubgraphInput(subgraph_idx, i); - int bytes = ValidateAndGetTensorSizes(output, subgraph_input); - TF_LITE_ENSURE(context, bytes >= 0); - memcpy(subgraph_input->data.raw, output->data.raw, bytes); - } - return kTfLiteOk; -} - -TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context, - TfLiteNode* node, - MicroGraph* graph_info, - int subgraph_idx) { - TF_LITE_ENSURE(context, static_cast(node->outputs->size) == - graph_info->NumSubgraphOutputs(subgraph_idx)); - for (int i = 0; i < node->outputs->size; i++) { - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); - TfLiteEvalTensor* subgraph_output = - graph_info->GetSubgraphOutput(subgraph_idx, i); - int bytes = ValidateAndGetTensorSizes(output, subgraph_output); - TF_LITE_ENSURE(context, bytes >= 0); - memcpy(output->data.raw, subgraph_output->data.raw, bytes); - } - return kTfLiteOk; -} - -} // namespace micro -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_util.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_util.h deleted file mode 100644 index 6ac1cb36..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/kernel_util.h +++ /dev/null @@ -1,138 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
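// [Editor's note] A small usage sketch for the PrintNBytes debug helpers
// defined above (the tensor name is illustrative):
//
//   // Dump the first 8 bytes of a kernel's output while debugging:
//   tflite::micro::PrintNBytes(output_eval_tensor, 8, "conv output: ");
//
// Each byte is emitted through MicroPrintf as space-separated hex, followed
// by a newline.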
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_
-#define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_
-
-#include <cstdint>
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/micro/micro_context.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-
-namespace tflite {
-namespace micro {
-
-TfLiteRegistration RegisterOp(
-    void* (*init)(TfLiteContext* context, const char* buffer, size_t length),
-    TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node),
-    TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node),
-    void (*free)(TfLiteContext* context, void* buffer) = nullptr);
-
-// Prints out n bytes in an int8_t buffer as hex
-void PrintNBytes(const int8_t* tensor_data, int n_bytes,
-                 const char* prefix = nullptr);
-
-// Prints out the n bytes in a TfLiteEvalTensor as hex
-void PrintNBytes(const TfLiteEvalTensor* tensor, int n_bytes,
-                 const char* prefix = nullptr);
-
-// Prints out the n bytes in a TfLiteTensor as hex
-void PrintNBytes(const TfLiteTensor* tensor, int n_bytes,
-                 const char* prefix = nullptr);
-
-// Returns a mutable tensor for a given input index. is_variable must be
-// checked during prepare when the full TfLiteTensor is available.
-TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context,
-                                      const TfLiteNode* node, int index);
-
-// Returns the TfLiteEvalTensor struct for a given input index in a node.
-const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context,
-                                     const TfLiteNode* node, int index);
-
-// Returns the TfLiteEvalTensor struct for a given output index in a node.
-TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context,
-                                const TfLiteNode* node, int index);
-
-// Returns data for a TfLiteEvalTensor struct that is expected to exist.
-template <typename T>
-T* GetTensorData(TfLiteEvalTensor* tensor) {
-  TFLITE_DCHECK(tensor != nullptr);
-  return reinterpret_cast<T*>(tensor->data.raw);
-}
-
-// Returns const data for a TfLiteEvalTensor struct that is expected to exist.
-template <typename T>
-const T* GetTensorData(const TfLiteEvalTensor* tensor) {
-  TFLITE_DCHECK(tensor != nullptr);
-  return reinterpret_cast<const T*>(tensor->data.raw);
-}
-
-// Returns data for a TfLiteEvalTensor struct that could be null.
-template <typename T>
-T* GetOptionalTensorData(TfLiteEvalTensor* tensor) {
-  return tensor == nullptr ? nullptr : reinterpret_cast<T*>(tensor->data.raw);
-}
-
-// Returns const data for a TfLiteEvalTensor struct that could be null.
-template <typename T>
-const T* GetOptionalTensorData(const TfLiteEvalTensor* tensor) {
-  return tensor == nullptr ? nullptr
-                           : reinterpret_cast<const T*>(tensor->data.raw);
-}
-
-// Returns the shape of a TfLiteEvalTensor struct.
-const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor);
-
-// Returns true if the given tensors have the same shape.
-bool HaveSameShapes(const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2); - -PaddingType RuntimePaddingType(TfLitePadding padding); - -// Relocate tensor dims from FlatBuffer to the persistent storage arena. -// The old dims data is copied to the new storage area. -// The tensor and eval_tensor must be the same tensor. -// Only use during Prepare phase. -TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context, - TfLiteTensor* tensor, - TfLiteEvalTensor* eval_tensor); - -// Copy all op input tensors to op output tensors. Requires all op input tensor -// shapes and types to be identical to op output tensor shapes and types. -TfLiteStatus CopyOpInputsToOpOutputs(TfLiteContext* context, TfLiteNode* node); - -// Copy all op input tensors to subgraph input tensors. Requires all op input -// tensor shapes and types to be identical to subgraph input tensor shapes and -// types. -TfLiteStatus CopyOpInputsToSubgraphInputs(TfLiteContext* context, - TfLiteNode* node, - MicroGraph* graph_info, - int subgraph_idx, - int first_tensor_idx); - -// Copy all op output tensors to subgraph input tensors. Requires all op output -// tensor shapes and types to be identical to subgraph input tensor shapes and -// types. -TfLiteStatus CopyOpOutputsToSubgraphInputs(TfLiteContext* context, - TfLiteNode* node, - MicroGraph* graph_info, - int subgraph_idx); - -// Copy all subgraph output tensors to op outputs. Requires all subgraph output -// tensor shapes and types to be identical to op output tensor shapes and types. -TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context, - TfLiteNode* node, - MicroGraph* graph_info, - int subgraph_idx); - -} // namespace micro -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/l2_pool_2d.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/l2_pool_2d.cc deleted file mode 100644 index fbba4e0b..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/l2_pool_2d.cc +++ /dev/null @@ -1,141 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/reference/pooling.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -// Input/output tensor index. 
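// [Editor's note] The GetTensorData<T>/GetOptionalTensorData<T> pair above
// encodes a convention used throughout these kernels: required tensors are
// DCHECK'd non-null, while optional ones (like a missing bias) legally yield
// nullptr. A typical call site looks like:
//
//   const float* in = tflite::micro::GetTensorData<float>(input);  // required
//   const float* bias_data =
//       tflite::micro::GetOptionalTensorData<float>(bias);         // may be null
//   if (bias_data != nullptr) { /* add bias */ }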
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-// required rank for input/output tensor shape
-constexpr int kTensorShapeRank = 4;
-
-// input/output tensor shape rank associations
-enum { kBatchRank = 0, kHeightRank, kWidthRank, kChannelRank };
-
-TfLiteStatus L2Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  auto* params = static_cast<TfLitePoolParams*>(node->builtin_data);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TF_LITE_ENSURE_EQ(context, NumDimensions(input), kTensorShapeRank);
-  TF_LITE_ENSURE_EQ(context, NumDimensions(output), kTensorShapeRank);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  int batches = SizeOfDimension(input, kBatchRank);
-  int height = SizeOfDimension(input, kHeightRank);
-  int width = SizeOfDimension(input, kWidthRank);
-  int channels_out = SizeOfDimension(input, kChannelRank);
-
-  // Matching GetWindowedOutputSize in TensorFlow.
-  auto padding = params->padding;
-  int out_width, out_height;
-
-  params->computed.padding = ComputePaddingHeightWidth(
-      params->stride_height, params->stride_width, 1, 1, height, width,
-      params->filter_height, params->filter_width, padding, &out_height,
-      &out_width);
-
-  // We currently don't have a quantized implementation of L2Pool.
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
-
-  // We must update the output tensor dimensions. The dims storage is
-  // expected to be the same area in memory for both TfLiteTensor and
-  // TfLiteEvalTensor. This is important because TfLiteTensor in the
-  // MicroInterpreter is a temporary allocation, as is TfLiteEvalTensor in
-  // the KernelRunner interpreter. We must therefore relocate the dims from
-  // the FlatBuffer to the persistent storage arena.
- TfLiteEvalTensor* output_eval = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( - context, output, output_eval)); - output->dims->data[kBatchRank] = batches; - output->dims->data[kHeightRank] = out_height; - output->dims->data[kWidthRank] = out_width; - output->dims->data[kChannelRank] = channels_out; - - micro_context->DeallocateTempTfLiteTensor(output); - micro_context->DeallocateTempTfLiteTensor(input); - - return kTfLiteOk; -} - -void L2EvalFloat(const TfLitePoolParams& params, const TfLiteEvalTensor& input, - tflite::PoolParams* op_params, TfLiteEvalTensor* output) { - float activation_min, activation_max; - CalculateActivationRange(params.activation, &activation_min, &activation_max); - - op_params->float_activation_min = activation_min; - op_params->float_activation_max = activation_max; - reference_ops::L2Pool(*op_params, tflite::micro::GetTensorShape(&input), - tflite::micro::GetTensorData(&input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = static_cast(node->builtin_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = params->computed.padding.height; - op_params.padding_values.width = params->computed.padding.width; - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: - L2EvalFloat(*params, *input, &op_params, output); - break; - default: - MicroPrintf("L2_POOL_2D only supports float32 currently, got %s.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_L2_POOL_2D() { - return tflite::micro::RegisterOp(nullptr, L2Prepare, L2Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/l2norm.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/l2norm.cc deleted file mode 100644 index e8ce4ec0..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/l2norm.cc +++ /dev/null @@ -1,147 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
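// [Editor's note] L2 pooling, as evaluated above, takes the square root of
// the mean of squares over each pooling window. With VALID padding the
// output extent per spatial dimension follows the usual windowed-output
// formula, out = (in - filter) / stride + 1. A scalar sketch of one window:
#include <cmath>

float L2PoolWindow(const float* window, int count) {
  float sum_sq = 0.0f;
  for (int i = 0; i < count; ++i) {
    sum_sq += window[i] * window[i];
  }
  return std::sqrt(sum_sq / count);  // e.g. {3, 4} -> sqrt(25/2) ~= 3.54
}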
-==============================================================================*/
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/portable_tensor.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h"
-#include "tensorflow/lite/kernels/internal/reference/l2normalization.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace l2norm {
-
-namespace {
-
-// This file has two implementations of L2Norm.
-enum KernelType {
-  kReference,
-  kGenericOptimized,
-};
-
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-}  // namespace
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-
-  auto* params = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
-  L2NormalizationParams* data =
-      static_cast<L2NormalizationParams*>(node->user_data);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TF_LITE_ENSURE(context, NumDimensions(input) <= 4);
-
-  TF_LITE_ENSURE(context,
-                 output->type == kTfLiteFloat32 || output->type == kTfLiteInt8);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  if (output->type == kTfLiteInt8) {
-    data->input_zero_point = input->params.zero_point;
-  } else if (output->type == kTfLiteFloat32) {
-    data->input_zero_point = 0;
-  }
-
-  // Our implementations don't currently support activations.
-  TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone);
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context,
-                                           sizeof(L2NormalizationParams));
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  const L2NormalizationParams& data =
-      *(static_cast<const L2NormalizationParams*>(node->user_data));
-
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  // TODO(b/143912164): instead of hardcoding the epsilon here, we should read
-  // it from tensorflow, i.e., by adding a param.
-  // We don't compute epsilon for the quantized kernel:
-  //
-  //   epsilon_float = (epsilon_quant - zp) * scale
-  // so
-  //   epsilon_quant = epsilon_float / scale + zp
-  // We know epsilon_float is just a very small number to avoid division-by-
-  // zero errors, and scale is > 1, so the integer value of epsilon for quant
-  // is dominated by the zero point.
-  // Also, GetInvSqrtQuantizedMultiplierExp handles the case where the sum of
-  // squared input values is zero.
-  // So we don't even need to handle the epsilon for the quantized kernel case.
-  const float epsilon = 1e-6f;
-  if (output->type == kTfLiteFloat32) {
-    reference_ops::L2Normalization(data, tflite::micro::GetTensorShape(input),
-                                   tflite::micro::GetTensorData<float>(input),
-                                   tflite::micro::GetTensorShape(output),
-                                   tflite::micro::GetTensorData<float>(output),
-                                   epsilon);
-  } else if (output->type == kTfLiteInt8) {
-    const auto input_shape = tflite::micro::GetTensorShape(input);
-    const auto output_shape = tflite::micro::GetTensorShape(output);
-    const int trailing_dim = input_shape.DimensionsCount() - 1;
-    const int depth =
-        MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
-    const int outer_size =
-        MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
-    reference_integer_ops::L2Normalization(
-        data.input_zero_point, outer_size, depth,
-        tflite::micro::GetTensorData<int8_t>(input),
-        tflite::micro::GetTensorData<int8_t>(output));
-  } else {
-    MicroPrintf("Output type is %s, requires float32 or int8.",
-                TfLiteTypeGetName(output->type));
-    return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace l2norm
-
-TfLiteRegistration Register_L2NORM_REF() {
-  return tflite::micro::RegisterOp(l2norm::Init, l2norm::Prepare, l2norm::Eval);
-}
-
-TfLiteRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); }
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/log_softmax.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/log_softmax.cc
deleted file mode 100644
index 5958319a..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/log_softmax.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/kernels/internal/reference/log_softmax.h"
-
-#include <cstddef>
-#include <cstdint>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-// used only with quantized data
-struct LogSoftmaxOpData {
-  int32_t input_multiplier;
-  int32_t input_left_shift;
-  int32_t reverse_scaling_divisor;
-  int32_t reverse_scaling_right_shift;
-  int diff_min;
-  size_t outer_size;  // number of tensor elements skipping computation axis
-  size_t depth;       // number of tensor elements on computation axis
-};
-
-// input/output tensor index
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  TF_LITE_ENSURE(context, HaveSameShapes(input, output));
-
-  if (input->type == kTfLiteInt8) {
-    node->user_data =
-        context->AllocatePersistentBuffer(context, sizeof(LogSoftmaxOpData));
-    auto data = static_cast<LogSoftmaxOpData*>(node->user_data);
-
-    // quantization datum
-    constexpr int32_t kOutputZeroPoint = 127;
-    constexpr float kOutputScale = 16.0 / 256;
-    constexpr double kBeta = 1.0;
-    constexpr int kScaledDiffIntegerBits = 5;
-
-    TF_LITE_ENSURE(context, output->params.scale == kOutputScale);
-    TF_LITE_ENSURE(context, output->params.zero_point == kOutputZeroPoint);
-
-    int input_left_shift;
-    int reverse_scaling_right_shift;
-    tflite::PreprocessLogSoftmaxScalingExp(
-        kBeta, static_cast<double>(input->params.scale), kScaledDiffIntegerBits,
-        &data->input_multiplier, &input_left_shift,
-        &data->reverse_scaling_divisor, &reverse_scaling_right_shift);
-    data->input_left_shift = static_cast<int32_t>(input_left_shift);
-    data->reverse_scaling_right_shift =
-        static_cast<int32_t>(-reverse_scaling_right_shift);
-    // diff_min has a negative value, and is used to limit the maximum magnitude
-    // of the diffs, which are <= 0.
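// Worked instance of the output quantization above (editorial illustration,
// derived only from kOutputZeroPoint and kOutputScale): an int8 output value q
// dequantizes to (q - 127) * 16 / 256 = (q - 127) / 16, so the representable
// log-probabilities span [(-128 - 127) / 16, 0] = [-15.9375, 0], consistent
// with log-softmax outputs always being <= 0.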
-    data->diff_min =
-        -tflite::CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift);
-
-    RuntimeShape input_shape = GetTensorShape(input);
-    const int trailing_dim = input_shape.DimensionsCount() - 1;
-    data->outer_size =
-        static_cast<size_t>(FlatSizeSkipDim(input_shape, trailing_dim));
-    data->depth = static_cast<size_t>(input_shape.Dims(trailing_dim));
-  }
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
-  return CalculateOpData(context, node);
-}
-
-TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
-  const LogSoftmaxOpData* data =
-      static_cast<const LogSoftmaxOpData*>(node->user_data);
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      SoftmaxParams op_params = {};
-      reference_ops::LogSoftmax(op_params, tflite::micro::GetTensorShape(input),
-                                tflite::micro::GetTensorData<float>(input),
-                                tflite::micro::GetTensorShape(output),
-                                tflite::micro::GetTensorData<float>(output));
-      return kTfLiteOk;
-    }
-    case kTfLiteInt8: {
-      SoftmaxParams op_params = {};
-      op_params.input_multiplier = data->input_multiplier;
-      op_params.input_left_shift = data->input_left_shift;
-      op_params.reverse_scaling_divisor = data->reverse_scaling_divisor;
-      op_params.reverse_scaling_right_shift = data->reverse_scaling_right_shift;
-      op_params.diff_min = data->diff_min;
-      reference_ops::LogSoftmax(op_params, data->outer_size, data->depth,
-                                tflite::micro::GetTensorShape(input),
-                                tflite::micro::GetTensorData<int8_t>(input),
-                                tflite::micro::GetTensorShape(output),
-                                tflite::micro::GetTensorData<int8_t>(output));
-      return kTfLiteOk;
-    }
-    default:
-      MicroPrintf("LOG_SOFTMAX only supports float32, int8, got %s.",
-                  TfLiteTypeGetName(input->type));
-      return kTfLiteError;
-  }
-}
-
-}  // namespace
-
-TfLiteRegistration Register_LOG_SOFTMAX() {
-  return tflite::micro::RegisterOp(nullptr, LogSoftmaxPrepare, LogSoftmaxEval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/lstm_eval.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/lstm_eval.cc
deleted file mode 100644
index 58484638..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/lstm_eval.cc
+++ /dev/null
@@ -1,2981 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/micro/kernels/lstm_eval.h"
-
-#include <cmath>
-#include <cstdint>
-#include <cstring>
-#include <memory>
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/micro_tensor_utils.h"
-namespace tflite {
-namespace {
-
-void ComputeRowSums(
-    int32_t* input_to_input_row_sums, int32_t* input_to_forget_row_sums,
-    int32_t* input_to_cell_row_sums, int32_t* input_to_output_row_sums,
-    int32_t* aux_input_to_input_row_sums, int32_t* aux_input_to_forget_row_sums,
-    int32_t* aux_input_to_cell_row_sums, int32_t* aux_input_to_output_row_sums,
-    int32_t* recurrent_to_input_row_sums, int32_t* recurrent_to_forget_row_sums,
-    int32_t* recurrent_to_cell_row_sums, int32_t* recurrent_to_output_row_sums,
-    int32_t* projection_weights_row_sums, int32_t* row_sums, int n_cell,
-    int n_input, int n_aux_input, int n_output,
-    const int8_t* input_to_input_weights_ptr,
-    const int8_t* input_to_forget_weights_ptr,
-    const int8_t* input_to_cell_weights_ptr,
-    const int8_t* input_to_output_weights_ptr,
-    const int8_t* aux_input_to_input_weights_ptr,
-    const int8_t* aux_input_to_forget_weights_ptr,
-    const int8_t* aux_input_to_cell_weights_ptr,
-    const int8_t* aux_input_to_output_weights_ptr,
-    const int8_t* recurrent_to_input_weights_ptr,
-    const int8_t* recurrent_to_forget_weights_ptr,
-    const int8_t* recurrent_to_cell_weights_ptr,
-    const int8_t* recurrent_to_output_weights_ptr,
-    const int8_t* projection_weights_ptr, bool use_cifg,
-    const float* aux_input_ptr) {
-  // Compute the row sums for dequantization
-  if (!use_cifg) {
-    micro_tensor_utils::ReductionSumVector(
-        input_to_input_weights_ptr, input_to_input_row_sums, n_cell, n_input);
-  }
-  micro_tensor_utils::ReductionSumVector(
-      input_to_forget_weights_ptr, input_to_forget_row_sums, n_cell, n_input);
-  micro_tensor_utils::ReductionSumVector(
-      input_to_cell_weights_ptr, input_to_cell_row_sums, n_cell, n_input);
-  micro_tensor_utils::ReductionSumVector(
-      input_to_output_weights_ptr, input_to_output_row_sums, n_cell, n_input);
-
-  if (aux_input_ptr) {
-    if (!use_cifg) {
-      micro_tensor_utils::ReductionSumVector(aux_input_to_input_weights_ptr,
-                                             aux_input_to_input_row_sums,
-                                             n_cell, n_aux_input);
-    }
-    micro_tensor_utils::ReductionSumVector(aux_input_to_forget_weights_ptr,
-                                           aux_input_to_forget_row_sums, n_cell,
-                                           n_aux_input);
-    micro_tensor_utils::ReductionSumVector(aux_input_to_cell_weights_ptr,
-                                           aux_input_to_cell_row_sums, n_cell,
-                                           n_aux_input);
-    micro_tensor_utils::ReductionSumVector(aux_input_to_output_weights_ptr,
-                                           aux_input_to_output_row_sums, n_cell,
-                                           n_aux_input);
-  }
-  if (!use_cifg) {
-    micro_tensor_utils::ReductionSumVector(recurrent_to_input_weights_ptr,
-                                           recurrent_to_input_row_sums, n_cell,
-                                           n_output);
-  }
-  micro_tensor_utils::ReductionSumVector(recurrent_to_forget_weights_ptr,
-                                         recurrent_to_forget_row_sums, n_cell,
-                                         n_output);
-  micro_tensor_utils::ReductionSumVector(recurrent_to_cell_weights_ptr,
-                                         recurrent_to_cell_row_sums, n_cell,
-                                         n_output);
-  micro_tensor_utils::ReductionSumVector(recurrent_to_output_weights_ptr,
- recurrent_to_output_row_sums, n_cell, - n_output); - - if (projection_weights_ptr != nullptr) { - micro_tensor_utils::ReductionSumVector( - projection_weights_ptr, projection_weights_row_sums, n_output, n_cell); - } -} - -// Calculates a single LSTM gate. -// -// Implements the following formula: (* is matrix multiply) -// gate = activate(W_input * input + W_aux * aux_input + -// W_peephole * cell + W_recurrent * prev_output + bias) -// with layer norm: -// gate = activate(W_norm * normalize(...) + bias) // not adding bias inside -// -// Activation is sigmoid except for the "cell" gate (configurable, usually tanh) -// -// Parameters: -// Input vectors (to LSTM): | Size: | Optional? -// input | n_input | -// aux_input | n_aux_input | y (bidir LSTM) -// Input vectors (persistent states): -// output_state | n_output | -// cell_state | n_cell | -// 'Constant' inputs: -// input_to_gate_weights | n_cell * n_input | -// aux_input_to_gate_weights | n_cell * n_aux_input | y (bidir LSTM) -// recurrent_to_gate_weights | n_cell * n_output | -// cell_to_gate_weights | n_cell | y (peephole) -// gate_bias | n_cell | -// layer_norm_coefficients | n_cell | y (layer norm) -// Output vector: -// gate | n_cell | -// Scalar parameters: -// n_batch - batch size / number of vectors -// n_input, n_aux_input, n_output, n_cell - size of vectors. -// activation - activation to use. -// is_input_all_zeros, is_aux_input_all_zeros - if input vectors are all zero. -// use_layer_norm - if doing layer norm LSTM. -inline void CalculateLstmGateFloat( - const float* input, const float* input_to_gate_weights, - const float* aux_input, const float* aux_input_to_gate_weights, - const float* output_state, const float* recurrent_to_gate_weights, - const float* cell_state, const float* cell_to_gate_weights, - const float* layer_norm_coefficients, const float* gate_bias, - const int n_batch, const int n_input, const int n_aux_input, - const int n_output, const int n_cell, - const TfLiteFusedActivation activation, float* gate, - const bool is_input_all_zeros, const bool is_aux_input_all_zeros) { - const bool use_peephole = (cell_to_gate_weights != nullptr); - const bool use_layer_norm = (layer_norm_coefficients != nullptr); - - // Initialize scratch buffers with bias for regular lstm or initialize with - // zero for layer norm lstm. - if (use_layer_norm) { - memset(gate, 0, n_cell * n_batch * sizeof(float)); - } else { - micro_tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, - gate); - } - // For each batch and cell: compute input_weight * input. - // Skip if input is all zeros. - if (!is_input_all_zeros) { - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - input_to_gate_weights, n_cell, n_input, input, n_batch, gate); - } - // For each batch and cell: compute aux_input_weight * aux_input. - // Skip if auxiliary input is not available or all zeros. - if (!is_aux_input_all_zeros) { - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - aux_input_to_gate_weights, n_cell, n_aux_input, aux_input, n_batch, - gate); - } - // For each batch and cell: compute recurrent_weight * output_state. 
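// Shape sketch of the accumulation below (derived from the parameter table
// above; no new code): for each batch entry b,
//   gate[b * n_cell + c] += sum_o recurrent_to_gate_weights[c * n_output + o]
//                                 * output_state[b * n_output + o],
// i.e. one [n_cell x n_output] matrix-vector product per batch.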
-  micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate(
-      recurrent_to_gate_weights, n_cell, n_output, output_state, n_batch, gate);
-  // For each batch and cell: compute cell_weight .* cell_state (peephole LSTM)
-  if (use_peephole) {
-    micro_tensor_utils::VectorBatchVectorCwiseProductAccumulate(
-        cell_to_gate_weights, n_cell, cell_state, n_batch, gate);
-  }
-  // Do layer normalization (if layer norm LSTM)
-  if (use_layer_norm) {
-    micro_tensor_utils::MeanStddevNormalization(gate, gate, n_cell, n_batch);
-    micro_tensor_utils::VectorBatchVectorCwiseProduct(
-        layer_norm_coefficients, n_cell, gate, n_batch, gate);
-    micro_tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, gate);
-  }
-  // Apply activation
-  micro_tensor_utils::ApplyActivationToVector(gate, n_batch * n_cell,
-                                              activation, gate);
-}
-
-// Updates the LSTM cell state, used by both float and hybrid LSTM versions.
-//
-// Implements the following formula:
-//   cell_state_new = clip(forget_gate * cell_state + input_gate * cell_gate)
-//
-// With CIFG LSTM, input gate is replaced by (1-forget_gate).
-//
-// Parameters:
-//  - n_batch, n_cell: sizes of vectors
-//  - cell_state: input/output vector, size n_batch*n_cell
-//  - input_gate: input vector, size n_batch*n_cell.
-//  - forget_gate: input/scratch vector, size n_batch*n_cell, modified with CIFG
-//  - cell_gate: input vector, size n_batch*n_cell.
-//  - use_cifg: use 1-forget_gate instead of input_gate.
-//  - clip: if > 0, clip the resulting cell state to [-clip, +clip].
-void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state,
-                         const float* input_gate, float* forget_gate,
-                         const float* cell_gate, bool use_cifg, float clip) {
-  micro_tensor_utils::VectorVectorCwiseProduct(forget_gate, cell_state,
-                                               n_batch * n_cell, cell_state);
-
-  if (use_cifg) {
-    // With CIFG, input_gate = 1-forget_gate. Use the forget_gate array as
-    // scratch, as input_gate array is not allocated in this case. (Be careful
-    // not to write to the scratch before reading the forget gate data.)
-    float* scratch = forget_gate;
-    micro_tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, scratch);
-    micro_tensor_utils::VectorVectorCwiseProductAccumulate(
-        cell_gate, scratch, n_batch * n_cell, cell_state);
-  } else {
-    micro_tensor_utils::VectorVectorCwiseProductAccumulate(
-        cell_gate, input_gate, n_batch * n_cell, cell_state);
-  }
-  if (clip > 0.0f) {
-    micro_tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
-  }
-}
-
-// Calculates the output state tensor of an LSTM step.
-//
-// Implements the following formula:
-//   output_no_projection = output_gate .* activate(cell_state)
-//     (elementwise vector product)
-// If no projection is used:
-//   output = output_state = output_no_projection
-// With projection:
-//   output = output_state = clip(W*output_no_projection + bias)
-//
-// Output might have a different 'stride' than n_output, so we need to copy.
-//
-// Parameters:
-//  - n_batch: batches: the number of distinct vectors in each array.
-//  - n_cell, n_output: sizes of vectors.
-//  - cell_state, output_gate: input vectors, size n_batch*n_cell.
-//  - projection_weights, projection_weights_scale, projection_bias:
-//    constant inputs, describing projection matrix and bias.
-//  - proj_clip: if > 0, clip the output of the projection.
-//  - output_state: output vector, size n_batch*n_output. Must be contiguous.
-//  - scratch: scratch area, size n_batch*n_cell.
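// Worked shape instance of the projection path (illustrative sizes): with
// n_batch = 1, n_cell = 4, n_output = 2, scratch holds the 4-element
// output_no_projection vector and output_state receives
// clip(W[2 x 4] * scratch[4] + bias[2]), a 2-element vector.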
-void CalculateLstmOutputFloat(int n_batch, int n_cell, int n_output, - const float* cell_state, const float* output_gate, - TfLiteFusedActivation activation, - const float* projection_weights, - const float* projection_bias, - const float proj_clip, float* output_state, - float* scratch) { - micro_tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell, - activation, scratch); - micro_tensor_utils::VectorVectorCwiseProduct(output_gate, scratch, - n_batch * n_cell, scratch); - - const bool use_projection = (projection_weights != nullptr); - const bool use_projection_bias = (projection_bias != nullptr); - - if (use_projection) { - if (use_projection_bias) { - micro_tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, - n_batch, output_state); - } else { - memset(output_state, 0, n_batch * n_output * sizeof(float)); - } - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - projection_weights, n_output, n_cell, scratch, n_batch, output_state); - if (proj_clip > 0.0f) { - micro_tensor_utils::CwiseClipping(output_state, n_batch * n_output, - proj_clip); - } - } else { - std::memcpy(output_state, scratch, n_batch * n_output * sizeof(float)); - } -} - -// Calculates a single LSTM gate, hybrid version. -// Implements the same functionality as CalculateLstmGateFloat. -void CalculateLstmGateHybrid( - // Input and weights - const int8_t* input, const float* input_sf, const int32_t* input_zp, - const int8_t* input_to_gate_weights, - const uint8_t* input_to_gate_weights_ledger, - const float input_to_gate_weights_scale, int32_t* input_to_gate_row_sums, - // Aux input and weights - const int8_t* aux_input, const float* aux_input_sf, - const int32_t* aux_input_zp, const int8_t* aux_input_to_gate_weights, - const float aux_input_to_gate_weights_scale, - int32_t* aux_input_to_gate_row_sums, - // Output state and weights - const int8_t* output_state, const float* output_state_sf, - const int32_t* output_state_zp, const int8_t* recurrent_to_gate_weights, - const uint8_t* recurrent_to_gate_weights_ledger, - const float recurrent_to_gate_weights_scale, - int32_t* recurrent_to_gate_row_sums, - // Cell state and weights (peephole LSTM) - const float* cell_state, const int8_t* cell_to_gate_weights, - const float cell_to_gate_weights_scale, - // Layer normalization coefficients (layer norm LSTM) + gate bias - const float* layer_norm_coefficients, const float* gate_bias, - // Array sizes - const int n_batch, const int n_input, const int n_aux_input, - const int n_output, const int n_cell, - const TfLiteFusedActivation activation, - // Output - float* gate, - // Parameters for performance optimizations - const bool is_input_all_zeros, const bool is_aux_input_all_zeros, - const bool is_output_state_all_zeros, bool* compute_row_sums, - // Scratch arrays - float* scratch0, // size: n_batch - float* scratch1, // size: n_cell, only used if peephole LSTM - float* scales, // size: n_batch - int32_t* accum_scratch // For MatrixBatchVectorMultiplyAccumulate -) { - const bool use_peephole = (cell_to_gate_weights != nullptr); - const bool use_layer_norm = (layer_norm_coefficients != nullptr); - - // Initialize scratch buffers with bias for regular lstm or initialize with - // zero for layer norm lstm. - if (use_layer_norm) { - memset(gate, 0, n_cell * n_batch * sizeof(float)); - } else { - micro_tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, - gate); - } - // For each batch and cell: compute input_weight * input. - // Skip if input is all zeros. 
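// Hybrid arithmetic sketch (an editorial reading of the scale parameters, not
// code from this file): each int8 accumulation is rescaled back to float
// roughly as
//   gate[b, c] += input_to_gate_weights_scale * input_sf[b]
//                 * sum_i W_int8[c, i] * input_int8[b, i],
// with the input_zp / row-sum arguments correcting for asymmetrically
// quantized activations.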
- if (!is_input_all_zeros) { - if (input_to_gate_weights_ledger != nullptr) { - for (int i = 0; i < n_batch; i++) { - scales[i] = input_to_gate_weights_scale * input_sf[i]; - } - micro_tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate( - input_to_gate_weights, input_to_gate_weights_ledger, n_cell, n_input, - input, scales, n_batch, gate); - - } else { - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - input_to_gate_weights, n_cell, n_input, input, - input_to_gate_weights_scale, input_sf, n_batch, gate, - /*per_channel_scale=*/nullptr, input_zp, accum_scratch, - input_to_gate_row_sums, compute_row_sums, scratch0, nullptr); - } - } - // For each batch and cell: compute aux_input_weight * aux_input. - // Skip if auxiliary input is not available or all zeros. - if (!is_aux_input_all_zeros) { - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - aux_input_to_gate_weights, n_cell, n_aux_input, aux_input, - aux_input_to_gate_weights_scale, aux_input_sf, n_batch, gate, - /*per_channel_scale=*/nullptr, aux_input_zp, accum_scratch, - aux_input_to_gate_row_sums, compute_row_sums, scratch0, nullptr); - } - // For each batch and cell: compute recurrent_weight * output_state. - // Skip if output state is all zeros. - if (!is_output_state_all_zeros) { - if (recurrent_to_gate_weights_ledger != nullptr) { - for (int i = 0; i < n_batch; i++) { - scales[i] = recurrent_to_gate_weights_scale * input_sf[i]; - } - micro_tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate( - recurrent_to_gate_weights, recurrent_to_gate_weights_ledger, n_cell, - n_output, output_state, scales, n_batch, gate); - } else { - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - recurrent_to_gate_weights, n_cell, n_output, output_state, - recurrent_to_gate_weights_scale, output_state_sf, n_batch, gate, - /*per_channel_scale=*/nullptr, output_state_zp, accum_scratch, - recurrent_to_gate_row_sums, compute_row_sums, scratch0, nullptr); - } - } - // For each batch and cell: compute cell_weight .* cell_state (peephole LSTM) - if (use_peephole) { - float* recovered_cell_weights = scratch1; - micro_tensor_utils::VectorScalarMultiply(cell_to_gate_weights, n_cell, - cell_to_gate_weights_scale, - recovered_cell_weights); - micro_tensor_utils::VectorBatchVectorCwiseProductAccumulate( - recovered_cell_weights, n_cell, cell_state, n_batch, gate); - } - // Do layer normalization (if layer norm LSTM) - if (use_layer_norm) { - micro_tensor_utils::MeanStddevNormalization(gate, gate, n_cell, n_batch); - micro_tensor_utils::VectorBatchVectorCwiseProduct( - layer_norm_coefficients, n_cell, gate, n_batch, gate); - micro_tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, gate); - } - // Apply activation - micro_tensor_utils::ApplyActivationToVector(gate, n_cell * n_batch, - activation, gate); -} - -// Calculates the output state tensor of an LSTM step. See Float version too. -// -// Parameters: -// - n_batch: batches: the number of distinct vectors in each array. -// - n_cell, n_output: sizes of vectors. -// - cell_state, output_gate: input vectors, size n_batch*n_cell. -// - projection_weights, projection_weights_scale, projection_bias: -// constant inputs, describing projection matrix and bias. -// - proj_clip: if > 0, clip the output of the projection. -// - output_state: output vector, size n_batch*n_output. Must be contigous. -// - asymmetric_quantize_inputs: parameter to control quantization. -// - projection_weights_row_sums, compute_row_sums: Data for optimized -// MatrixBatchVectorMultiplyAccumulate. 
-// - scratch0: scratch area of size n_batch*n_cell -// - scratch1: scratch area of size n_batch*n_cell -// - scratch2: scratch area of size n_batch -// - scratch3: scratch area of size n_batch -// - scratch4: scratch area used by MatrixBatchVectorMultiplyAccumulate -// - scales: scratch area of size n_batch -void CalculateLstmOutputHybrid( - int n_batch, int n_cell, int n_output, const float* cell_state, - const float* output_gate, TfLiteFusedActivation activation, - const int8_t* projection_weights, const uint8_t* projection_weights_ledger, - float projection_weights_scale, const float* projection_bias, - const float proj_clip, float* output_state, bool asymmetric_quantize_inputs, - int32_t* projection_weights_row_sums, bool* compute_row_sums, - float* scratch0, int8_t* scratch1, float* scratch2, int32_t* scratch3, - int32_t* scratch4, float* scales) { - micro_tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell, - activation, scratch0); - micro_tensor_utils::VectorVectorCwiseProduct(output_gate, scratch0, - n_batch * n_cell, scratch0); - - const bool use_projection = (projection_weights != nullptr); - const bool use_projection_bias = (projection_bias != nullptr); - - if (use_projection) { - if (use_projection_bias) { - micro_tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, - n_batch, output_state); - } else { - memset(output_state, 0, n_batch * n_output * sizeof(float)); - } - if (!micro_tensor_utils::IsZeroVector(scratch0, n_batch * n_cell)) { - // Save quantization and matmul computation for all zero output. - micro_tensor_utils::BatchQuantizeFloats(scratch0, n_batch, n_cell, - scratch1, scratch2, scratch3, - asymmetric_quantize_inputs); - if (projection_weights_ledger != nullptr) { - for (int i = 0; i < n_batch; i++) { - scales[i] = projection_weights_scale * scratch2[i]; - } - micro_tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate( - projection_weights, projection_weights_ledger, n_output, n_cell, - scratch1, scales, n_batch, output_state); - } else { - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - projection_weights, n_output, n_cell, scratch1, - projection_weights_scale, scratch2, n_batch, output_state, - /*per_channel_scale=*/nullptr, scratch3, scratch4, - projection_weights_row_sums, compute_row_sums, scratch2, nullptr); - } - } - if (proj_clip > 0.0f) { - micro_tensor_utils::CwiseClipping(output_state, n_batch * n_output, - proj_clip); - } - } else { - std::memcpy(output_state, scratch0, n_batch * n_output * sizeof(float)); - } -} - -// Calculates a single LSTM gate, int8x8_16 version. -// Implements the same functionality as CalculateLstmGateFloat. 
-void CalculateLstmGateInteger8x8_16(
-    // Input and weights
-    const int8_t* input, const int8_t* input_to_gate_weights,
-    const int32_t* input_to_gate_bias, const int32_t input_to_gate_scale_a,
-    const int32_t input_to_gate_scale_b,
-    // Output state and weights
-    const int8_t* output_state, const int8_t* recurrent_to_gate_weights,
-    const int32_t* recurrent_to_gate_bias,
-    const int32_t recurrent_to_gate_scale_a,
-    const int32_t recurrent_to_gate_scale_b,
-    // Cell state and weights
-    const int16_t* cell_state, const int16_t* cell_to_gate_weights,
-    const int32_t cell_to_gate_scale_a, const int32_t cell_to_gate_scale_b,
-    // Layer normalization parameters (layer norm LSTM)
-    const int16_t* layer_norm_coefficients, const int32_t* layer_norm_bias,
-    const int32_t layer_norm_input_scale_a,
-    const int32_t layer_norm_input_scale_b,
-    const int32_t layer_norm_variance_guard,
-    // Array sizes
-    const int n_batch, const int n_input, const int n_output, const int n_cell,
-    const TfLiteFusedActivation activation,
-    // Output
-    int16_t* gate,
-    // Parameters for performance optimizations
-    // Scratch arrays
-    int32_t* scratch5) {
-  const bool use_peephole = (cell_to_gate_weights != nullptr);
-  const bool use_layer_norm = (layer_norm_coefficients != nullptr);
-
-  // Initialize scratch buffers with zeros. Note that unlike float and hybrid
-  // versions, bias is only used in layer normalization.
-  memset(gate, 0, n_batch * n_cell * sizeof(int16_t));
-  // For each batch and cell: compute input_weight * input.
-  micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate(
-      input, input_to_gate_bias, input_to_gate_weights, input_to_gate_scale_a,
-      input_to_gate_scale_b, n_batch, n_input, n_cell, 0, scratch5, gate,
-      nullptr);
-  // Note: no aux_input.
-
-  // For each batch and cell: compute recurrent_weight * output_state.
-  micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate(
-      output_state, recurrent_to_gate_bias, recurrent_to_gate_weights,
-      recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
-      n_cell, 0, scratch5, gate, nullptr);
-  // For each batch and cell: compute cell_weight * cell_state (peephole LSTM)
-  if (use_peephole) {
-    micro_tensor_utils::VectorBatchVectorCwiseProductAccumulate(
-        cell_to_gate_weights, n_output, cell_state, n_batch,
-        cell_to_gate_scale_a, cell_to_gate_scale_b, gate);
-  }
-  // Do layer normalization (if layer norm LSTM)
-  if (use_layer_norm) {
-    micro_tensor_utils::ApplyLayerNorm(
-        gate, layer_norm_coefficients, layer_norm_bias,
-        layer_norm_input_scale_a, layer_norm_input_scale_b,
-        layer_norm_variance_guard, n_batch, n_cell, gate);
-  }
-  // Apply activation
-  switch (activation) {
-    case kTfLiteActSigmoid:
-
-      reference_integer_ops::Logistic(
-          0 /*data->input_multiplier*/, 0 /*data->input_left_shift */,
-          n_batch * n_cell /*NumElements(input->dims)*/,
-          gate /* tflite::micro::GetTensorData<int16_t>(input) */,
-          gate /*tflite::micro::GetTensorData<int16_t>(output) */);
-
-      break;
-    case kTfLiteActTanh: {
-      int32_t dims_data = n_batch * n_cell;
-      RuntimeShape tanh_inp_shape = RuntimeShape(1, &dims_data);
-      reference_integer_ops::Tanh(0, 0, tanh_inp_shape, gate, tanh_inp_shape,
-                                  gate);
-    } break;
-    default:
-      // Only Sigmoid or Tanh is used.
-      TFLITE_ASSERT_FALSE;
-  }
-}
-
-// Updates the LSTM cell state, used by both integer LSTM versions.
-// Also see UpdateLstmCellFloat.
-//
-// Parameters:
-//  - n_batch, n_cell: sizes of vectors
-//  - cell_state: input/output vector, size n_batch*n_cell
-//  - cell_state_scale: scaling factor of cell state.
-//  - input_gate: input vector, size n_batch*n_cell.
-//  - forget_gate: input/scratch vector, size n_batch*n_cell, always modified.
-//  - cell_gate: input vector, size n_batch*n_cell.
-//  - use_cifg: use 1-forget_gate instead of input_gate.
-//  - clip: if > 0, clip the resulting cell state to [-clip, +clip].
-void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state,
-                           int32_t cell_state_scale, const int16_t* input_gate,
-                           int16_t* forget_gate, const int16_t* cell_gate,
-                           bool use_cifg, int16_t clip) {
-  // Use the forget_gate array as scratch, as input_gate array is not allocated
-  // in CIFG case. (Be careful not to write to the scratch before reading the
-  // forget gate data.)
-  int16_t* scratch = forget_gate;
-
-  micro_tensor_utils::CwiseMul(forget_gate, cell_state, n_batch, n_cell, 15,
-                               cell_state);
-  if (use_cifg) {
-    micro_tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, scratch);
-    micro_tensor_utils::CwiseMul(scratch, cell_gate, n_batch, n_cell,
-                                 30 + cell_state_scale, scratch);
-  } else {
-    micro_tensor_utils::CwiseMul(input_gate, cell_gate, n_batch, n_cell,
-                                 30 + cell_state_scale, scratch);
-  }
-  micro_tensor_utils::CwiseAdd(cell_state, scratch, n_batch, n_cell,
-                               cell_state);
-
-  if (clip > 0) {
-    micro_tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
-  }
-}
-
-// Calculates the output state tensor of an LSTM step. See Float and hybrid
-// versions as well.
-//
-// Parameters:
-//  - n_batch: batches: the number of distinct vectors in each array.
-//  - n_cell, n_output: sizes of vectors.
-//  - cell_state, output_gate: input vectors, size n_batch*n_cell.
-//  - cell_state_scale: scaling of cell_state.
-//  - hidden_scale_[a|b]: effective scale of cell_state.*output_gate
-//  - hidden_zp: zero_point for cell_state.*output_gate
-//  - projection_weights, proj_scale_[a|b], projection_bias:
-//    constant inputs, describing projection matrix and bias.
-//  - output_state_zp: zero point of output_state. (Input, calibrated value.)
-//  - quantized_proj_clip: if > 0, clip the output of the projection.
-//  - output_state: output vector, size n_batch*n_output. Must be contiguous.
-//  - scratch0: scratch area of size n_batch*n_cell
-//  - scratch1: scratch area of size n_batch*n_cell
-//  - scratch2: scratch area used by MatrixBatchVectorMultiplyAccumulate
-void CalculateLstmOutputInteger8x8_16(
-    int n_batch, int n_cell, int n_output, int16_t* cell_state,
-    int32_t cell_state_scale, const int16_t* output_gate,
-    int32_t hidden_scale_a, int32_t hidden_scale_b, int32_t hidden_zp,
-    const int8_t* projection_weights, int32_t proj_scale_a,
-    int32_t proj_scale_b, const int32_t* projection_bias,
-    int32_t output_state_zp, int8_t quantized_proj_clip, int8_t* output_state,
-    int16_t* scratch0, int8_t* scratch1, int32_t* scratch2) {
-  // Note: unlike float/hybrid, the activation is always Tanh.
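// Fixed-point reading of the block below (a hedged interpretation, not a
// statement from the original authors): the integer Tanh expects a Q3.12
// input, while cell_state carries a power-of-two scale of cell_state_scale,
// so (15 + cell_state_scale) - 3 is the left shift converting between the two
// formats; when it comes out negative it is applied as a right shift instead.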
- - { - int32_t tanh_input_left_shift = (15 + cell_state_scale) - 3; - int32_t dims_data = n_batch * n_cell; - if (tanh_input_left_shift < 0) /* handling negative shift value */ - { - int32_t i; - tanh_input_left_shift = -tanh_input_left_shift; - for (i = 0; i < dims_data; i++) { - cell_state[i] = cell_state[i] >> tanh_input_left_shift; - } - tanh_input_left_shift = 0; - } - RuntimeShape tanh_inp_shape = RuntimeShape(1, &dims_data); - reference_integer_ops::Tanh(0, tanh_input_left_shift, tanh_inp_shape, - cell_state, tanh_inp_shape, scratch0); - } - micro_tensor_utils::CwiseMul(output_gate, scratch0, hidden_scale_a, - hidden_scale_b, n_batch, n_cell, hidden_zp, - scratch1); - - const bool use_projection = (projection_weights != nullptr); - - if (use_projection) { - // Note: no bias like in float/hybrid - memset(output_state, 0, n_batch * n_output * sizeof(int8_t)); - micro_tensor_utils::MatrixBatchVectorMultiplyAccumulate( - scratch1, projection_bias, projection_weights, proj_scale_a, - proj_scale_b, n_batch, n_cell, n_output, output_state_zp, scratch2, - output_state, nullptr); - if (quantized_proj_clip > 0) { - micro_tensor_utils::CwiseClipping(output_state, n_batch * n_output, - quantized_proj_clip); - } - } else { - std::memcpy(output_state, scratch1, n_batch * n_output * sizeof(int8_t)); - } -} - -// Calculates a single LSTM gate, int8x8_8 version. -// Implements the same functionality as CalculateLstmGateFloat. -void CalculateLstmGateInteger8x8_8( - // Inputs and weights - const int8_t* input, int32_t input_zp, const int8_t* input_to_gate_weight, - const int32_t input_to_gate_scale_a, const int32_t input_to_gate_scale_b, - const int32_t input_times_weights_scale_a, - const int32_t input_times_weights_scale_b, - const int32_t input_times_weights_zp, - // Output state and weights - const int8_t* output_state, const int32_t output_state_zp, - const int8_t* recurrent_to_gate_weight, - const int32_t recurrent_to_gate_scale_a, - const int32_t recurrent_to_gate_scale_b, - const int32_t output_state_times_weights_scale_a, - const int32_t output_state_times_weights_scale_b, - const int32_t output_state_times_weights_zp, - // Layer normalization parameters (layer norm LSTM) - const int16_t* layer_norm_gate_weight, - const int32_t layer_norm_gate_scale_a, - const int32_t layer_norm_gate_scale_b, const int32_t* gate_bias, - // Array sizes - const int n_batch, const int n_input, const int n_output, const int n_cell, - const TfLiteFusedActivation activation, - // Output - int16_t* gate, - // Scratch arrays, both sized n_batch*n_cell - int8_t* scratch0, int8_t* scratch1) { - // Multiply input * input_weights => scratch0 - micro_tensor_utils::MatrixBatchVectorMultiply( - input, input_zp, input_to_gate_weight, input_to_gate_scale_a, - input_to_gate_scale_b, n_batch, n_input, n_cell, scratch0, - input_times_weights_zp); - // Multiply output_state * recurrent_weights => scratch1 - micro_tensor_utils::MatrixBatchVectorMultiply( - output_state, output_state_zp, recurrent_to_gate_weight, - recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output, - n_cell, scratch1, output_state_times_weights_zp); - // Add scratch0 + scratch1 => gate - micro_tensor_utils::TwoGateSaturatingAdd( - scratch0, input_times_weights_zp, scratch1, output_state_times_weights_zp, - input_times_weights_scale_a, input_times_weights_scale_b, - output_state_times_weights_scale_a, output_state_times_weights_scale_b, - n_batch, n_cell, gate); - // Apply layer normalization. 
-  micro_tensor_utils::ApplyLayerNormFloat(
-      gate, layer_norm_gate_weight, layer_norm_gate_scale_a,
-      layer_norm_gate_scale_b, gate_bias, n_batch, n_cell, gate);
-  // Apply activation.
-  switch (activation) {
-    case kTfLiteActSigmoid:
-      micro_tensor_utils::ApplySigmoidFloat(gate, n_batch, n_cell, gate);
-      break;
-    case kTfLiteActTanh:
-      micro_tensor_utils::ApplyTanhFloat(gate, n_batch, n_cell, -12, gate);
-      break;
-    default:
-      // Only Sigmoid or Tanh is used.
-      TFLITE_ASSERT_FALSE;
-  }
-}
-
-// Calculates the output state tensor of an LSTM step. See Float and hybrid
-// versions as well.
-//
-// Parameters:
-//  - n_batch: batches: the number of distinct vectors in each array.
-//  - n_cell, n_output: sizes of vectors.
-//  - cell_state, output_gate: input vectors, size n_batch*n_cell.
-//  - projection_weights, proj_scale_[a|b], projection_bias:
-//    constant inputs, describing projection matrix and bias.
-//  - output_state_zp: zero point of the output state.
-//  - quantized_proj_clip: if > 0, clip the output of the projection.
-//  - output_state: output vector, size n_batch*n_output. Must be contiguous.
-//  - scratch: scratch area of size n_batch*n_cell
-void CalculateLstmOutputInteger8x8_8(
-    int n_batch, int n_cell, int n_output, const int16_t* cell_state,
-    const int16_t* output_gate, const int8_t* projection_weights,
-    int32_t proj_scale_a, int32_t proj_scale_b, const int32_t* projection_bias,
-    int32_t output_state_zp, int32_t quantized_proj_clip, int8_t* output_state,
-    int16_t* scratch) {
-  // Note: unlike float/hybrid, the activation is always Tanh.
-  micro_tensor_utils::ApplyTanhFloat(cell_state, n_batch, n_cell, -15, scratch);
-  micro_tensor_utils::CwiseMul(output_gate, scratch, n_batch, n_cell,
-                               15 + 15 - 15, scratch);
-  // Note: no bias like in float/hybrid
-  micro_tensor_utils::MatrixBatchVectorMultiply(
-      scratch, projection_weights, proj_scale_a, proj_scale_b, projection_bias,
-      n_batch, n_cell, n_output, output_state_zp, output_state);
-  if (quantized_proj_clip > 0) {
-    micro_tensor_utils::CwiseClipping(output_state, n_batch * n_output,
-                                      quantized_proj_clip);
-  }
-}
-
-// Performs an LSTM batch inference step for input specified by input_ptr.
-// The LSTM cell is specified by the pointers to its weights (*_weights_ptr) and
-// biases (*_bias_ptr), and buffers (*_scratch), along with additional
-// parameters:
-//  - params: various LSTM params including activation, clipping, etc.,
-//  - n_batch: size of batch,
-//  - n_cell: number of cells (or units),
-//  - n_input: the input size,
-//  - n_aux_input: the auxiliary input size.
-//  - n_output: the output size.
-//  - output_batch_leading_dim: the leading dimension of the output buffer.
-//
-// Input of size 'n_batch * n_input':
-//   input_ptr
-// Input of size 'n_batch * n_aux_input':
-//   aux_input_ptr                     - optional (can be nullptr)
-//
-// LSTM weights:
-// Input weights of size 'n_cell * n_input':
-//   input_to_input_weights            - optional
-//   input_to_forget_weights
-//   input_to_cell_weights
-//   input_to_output_weights
-// Auxiliary input weights of size 'n_cell * n_aux_input':
-//   aux_input_to_input_weights        - optional
-//   aux_input_to_forget_weights       - optional
-//   aux_input_to_cell_weights         - optional
-//   aux_input_to_output_weights       - optional
-// Recurrent weights of size 'n_cell * n_output':
-//   recurrent_to_input_weights        - optional
-//   recurrent_to_forget_weights
-//   recurrent_to_cell_weights
-//   recurrent_to_output_weights
-// Peephole weights of size 'n_cell', representing diagonal matrices.
-//   cell_to_input_weights             - optional
-//   cell_to_forget_weights            - optional
-//   cell_to_output_weights            - optional
-// Projection weights of size 'n_output * n_cell'
-//   projection_weights_ptr            - optional
-// Gate biases of size 'n_cell':
-//   input_gate_bias_ptr               - optional
-//   forget_gate_bias_ptr
-//   cell_gate_bias_ptr
-//   output_gate_bias_ptr
-//
-// Layer norm coefficients of size 'n_cell', representing diagonal matrices.
-//   input_layer_norm_coefficients_ptr  - optional
-//   forget_layer_norm_coefficients_ptr - optional
-//   cell_layer_norm_coefficients_ptr   - optional
-//   output_layer_norm_coefficients_ptr - optional
-//
-// The pointers to the cell and output state and the output are updated.
-//
-// The pointers input_ptr, aux_input_ptr, and output_ptr point to data aligned
-// in batch_major order, and each step processes batch_size many inputs from
-// input_ptr, and updates batch_size many cell and output states.
-//
-// The output_batch_leading_dim is output.shape[-1], i.e. the innermost
-// dimension of the output tensor, and in most cases will be equal to n_output.
-// It is usually not when we want to store the LSTM output into a slice of the
-// output tensor, e.g. for bidirectional LSTMs with merge_outputs. In this case,
-// the batched operations cannot be used since they assume that the batched
-// outputs are contiguous, and we manually loop over the batched outputs.
-inline void LstmStepFloat(
-    const float* input_ptr, const float* input_to_input_weights_ptr,
-    const float* input_to_forget_weights_ptr,
-    const float* input_to_cell_weights_ptr,
-    const float* input_to_output_weights_ptr, const float* aux_input_ptr,
-    const float* aux_input_to_input_weights_ptr,
-    const float* aux_input_to_forget_weights_ptr,
-    const float* aux_input_to_cell_weights_ptr,
-    const float* aux_input_to_output_weights_ptr,
-    const float* recurrent_to_input_weights_ptr,
-    const float* recurrent_to_forget_weights_ptr,
-    const float* recurrent_to_cell_weights_ptr,
-    const float* recurrent_to_output_weights_ptr,
-    const float* cell_to_input_weights_ptr,
-    const float* cell_to_forget_weights_ptr,
-    const float* cell_to_output_weights_ptr,
-    const float* input_layer_norm_coefficients_ptr,
-    const float* forget_layer_norm_coefficients_ptr,
-    const float* cell_layer_norm_coefficients_ptr,
-    const float* output_layer_norm_coefficients_ptr,
-    const float* input_gate_bias_ptr, const float* forget_gate_bias_ptr,
-    const float* cell_gate_bias_ptr, const float* output_gate_bias_ptr,
-    const float* projection_weights_ptr, const float* projection_bias_ptr,
-    const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
-    int n_aux_input, int n_output, int output_batch_leading_dim,
-    float* output_state_ptr, float* cell_state_ptr, float* scratch0,
-    float* scratch1, float* scratch2, float* scratch3, float* output_ptr) {
-  // Since we have already checked that weights are all there or none, we can
-  // check the existence of only one to get the condition.
-  const bool use_cifg = (input_to_input_weights_ptr == nullptr);
-
-  // Make named scratch buffers.
-  float* input_gate_scratch = scratch0;
-  float* forget_gate_scratch = scratch1;
-  float* cell_gate_scratch = scratch2;
-  float* output_gate_scratch = scratch3;
-
-  // Check if inputs are all zeros so we can skip some computations.
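// (The IsZeroVector checks below make the "skip" explicit: an all-zero input
// or aux input contributes nothing to the accumulated gate pre-activations,
// so the corresponding matrix multiplications can be elided entirely.)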
- const bool is_input_all_zeros = - micro_tensor_utils::IsZeroVector(input_ptr, n_batch * n_input); - const bool is_aux_input_all_zeros = - (aux_input_ptr == nullptr || - micro_tensor_utils::IsZeroVector(aux_input_ptr, n_batch * n_aux_input)); - if (!use_cifg) { - // Calculate the input gate. (If not CIFG.) - CalculateLstmGateFloat( - input_ptr, input_to_input_weights_ptr, aux_input_ptr, - aux_input_to_input_weights_ptr, output_state_ptr, - recurrent_to_input_weights_ptr, cell_state_ptr, - cell_to_input_weights_ptr, input_layer_norm_coefficients_ptr, - input_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, - /*activation=*/kTfLiteActSigmoid, input_gate_scratch, - is_input_all_zeros, is_aux_input_all_zeros); - } - // Calculate the forget gate. - CalculateLstmGateFloat( - input_ptr, input_to_forget_weights_ptr, aux_input_ptr, - aux_input_to_forget_weights_ptr, output_state_ptr, - recurrent_to_forget_weights_ptr, cell_state_ptr, - cell_to_forget_weights_ptr, forget_layer_norm_coefficients_ptr, - forget_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, - /*activation=*/kTfLiteActSigmoid, forget_gate_scratch, is_input_all_zeros, - is_aux_input_all_zeros); - // Calculate the cell update gate. - CalculateLstmGateFloat(input_ptr, input_to_cell_weights_ptr, aux_input_ptr, - aux_input_to_cell_weights_ptr, output_state_ptr, - recurrent_to_cell_weights_ptr, /*cell_state=*/nullptr, - /*cell_to_gate_weights=*/nullptr, - cell_layer_norm_coefficients_ptr, cell_gate_bias_ptr, - n_batch, n_input, n_aux_input, n_output, n_cell, - params->activation, cell_gate_scratch, - is_input_all_zeros, is_aux_input_all_zeros); - // Update the cell state. - UpdateLstmCellFloat(n_batch, n_cell, cell_state_ptr, input_gate_scratch, - forget_gate_scratch, cell_gate_scratch, use_cifg, - params->cell_clip); - // Calculate output gate. - CalculateLstmGateFloat( - input_ptr, input_to_output_weights_ptr, aux_input_ptr, - aux_input_to_output_weights_ptr, output_state_ptr, - recurrent_to_output_weights_ptr, cell_state_ptr, - cell_to_output_weights_ptr, output_layer_norm_coefficients_ptr, - output_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, - /*activation=*/kTfLiteActSigmoid, output_gate_scratch, is_input_all_zeros, - is_aux_input_all_zeros); - // Update the output state. - CalculateLstmOutputFloat(n_batch, n_cell, n_output, cell_state_ptr, - output_gate_scratch, params->activation, - projection_weights_ptr, projection_bias_ptr, - params->proj_clip, output_state_ptr, scratch2); - // Copy output state to the output. Note that the output's rows may not be - // contiguous (output_batch_leading_dim != n_output). - for (int b = 0; b < n_batch; b++) { - std::memcpy(output_ptr + b * output_batch_leading_dim, - output_state_ptr + b * n_output, n_output * sizeof(float)); - } -} - -// Same as above but with quantized weight matrices. 
In detail:
-// Input of size 'n_batch * n_input':
-//   input_ptr
-// Input of size 'n_batch * n_aux_input':
-//   aux_input_ptr                     - optional (can be nullptr)
-//
-// LSTM weights:
-// Quantized input weights of size 'n_cell * n_input':
-//   input_to_input_weights            - optional
-//   input_to_forget_weights
-//   input_to_cell_weights
-//   input_to_output_weights
-// Quantized auxiliary input weights of size 'n_cell * n_aux_input':
-//   aux_input_to_input_weights        - optional
-//   aux_input_to_forget_weights       - optional
-//   aux_input_to_cell_weights         - optional
-//   aux_input_to_output_weights       - optional
-// Quantized recurrent weights of size 'n_cell * n_output':
-//   recurrent_to_input_weights        - optional
-//   recurrent_to_forget_weights
-//   recurrent_to_cell_weights
-//   recurrent_to_output_weights
-// Quantized peephole weights of size 'n_cell', representing diagonal matrices.
-//   cell_to_input_weights             - optional
-//   cell_to_forget_weights            - optional
-//   cell_to_output_weights            - optional
-// Quantized projection weights of size 'n_output * n_cell'
-//   projection_weights_ptr            - optional
-// Weight scales (scalars) for each of the weights above.
-//   input_to_input_weights_scale      - optional
-//   input_to_forget_weights_scale
-//   input_to_cell_weights_scale
-//   input_to_output_weights_scale
-//   aux_input_to_input_weights_scale  - optional
-//   aux_input_to_forget_weights_scale - optional
-//   aux_input_to_cell_weights_scale   - optional
-//   aux_input_to_output_weights_scale - optional
-//   recurrent_to_input_weights_scale  - optional
-//   recurrent_to_forget_weights_scale
-//   recurrent_to_cell_weights_scale
-//   recurrent_to_output_weights_scale
-//   cell_to_input_weights_scale,
-//   cell_to_forget_weights_scale,
-//   cell_to_output_weights_scale,
-//   projection_weights_scale          - optional
-// Gate biases of size 'n_cell':
-//   input_gate_bias_ptr               - optional
-//   forget_gate_bias_ptr
-//   cell_gate_bias_ptr
-//   output_gate_bias_ptr
-//
-// Layer norm coefficients of size 'n_cell', representing diagonal matrices.
-// input_layer_norm_coefficients_ptr - optional -// forget_layer_norm_coefficients_ptr - optional -// cell_layer_norm_coefficients_ptr - optional -// output_layer_norm_coefficients_ptr - optional -// -// Temporary pre-allocated storage for quantized values: -// quantized_input_ptr (same size as input_ptr) -// quantized_output_state_ptr (same size as output_state_ptr) -// quantized_output_scratch (same size as cell_state_ptr) -// Temporary pre-allocated storage for recovered values: -// recovered_cell_weights (same size as cell_to_*_weights) -// -// Outputs: -// output_state_ptr - size 'n_batch * n_output' -// cell_state_ptr - size 'n_batch * n_cell' -// output_ptr - size 'n_batch * output_batch_leading_dim' -inline void LstmStepHybrid( - const float* input_ptr, const int8_t* input_to_input_weights_ptr, - const uint8_t* input_to_input_weights_ledger_ptr, - float input_to_input_weights_scale, - const int8_t* input_to_forget_weights_ptr, - const uint8_t* input_to_forget_weights_ledger_ptr, - float input_to_forget_weights_scale, - const int8_t* input_to_cell_weights_ptr, - const uint8_t* input_to_cell_weights_ledger_ptr, - float input_to_cell_weights_scale, - const int8_t* input_to_output_weights_ptr, - const uint8_t* input_to_output_weights_ledger_ptr, - float input_to_output_weights_scale, const float* aux_input_ptr, - const int8_t* aux_input_to_input_weights_ptr, - float aux_input_to_input_weights_scale, - const int8_t* aux_input_to_forget_weights_ptr, - float aux_input_to_forget_weights_scale, - const int8_t* aux_input_to_cell_weights_ptr, - float aux_input_to_cell_weights_scale, - const int8_t* aux_input_to_output_weights_ptr, - float aux_input_to_output_weights_scale, - const int8_t* recurrent_to_input_weights_ptr, - const uint8_t* recurrent_to_input_weights_ledger_ptr, - float recurrent_to_input_weights_scale, - const int8_t* recurrent_to_forget_weights_ptr, - const uint8_t* recurrent_to_forget_weights_ledger_ptr, - float recurrent_to_forget_weights_scale, - const int8_t* recurrent_to_cell_weights_ptr, - const uint8_t* recurrent_to_cell_weights_ledger_ptr, - float recurrent_to_cell_weights_scale, - const int8_t* recurrent_to_output_weights_ptr, - const uint8_t* recurrent_to_output_weights_ledger_ptr, - float recurrent_to_output_weights_scale, - const int8_t* cell_to_input_weights_ptr, float cell_to_input_weights_scale, - const int8_t* cell_to_forget_weights_ptr, - float cell_to_forget_weights_scale, - const int8_t* cell_to_output_weights_ptr, - float cell_to_output_weights_scale, - const float* input_layer_norm_coefficients_ptr, - const float* forget_layer_norm_coefficients_ptr, - const float* cell_layer_norm_coefficients_ptr, - const float* output_layer_norm_coefficients_ptr, - const float* input_gate_bias_ptr, const float* forget_gate_bias_ptr, - const float* cell_gate_bias_ptr, const float* output_gate_bias_ptr, - const int8_t* projection_weights_ptr, - const uint8_t* projection_weights_ledger_ptr, - float projection_weights_scale, const float* projection_bias_ptr, - const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input, - int n_aux_input, int n_output, int output_batch_leading_dim, - float* scratch0, float* scratch1, float* scratch2, float* scratch3, - float* scales, float* input_sf, float* aux_input_sf, float* output_state_sf, - float* scaling_factors_scratch, float* recovered_cell_weights, - int8_t* quantized_input_ptr, int8_t* quantized_aux_input_ptr, - int8_t* quantized_output_state_ptr, int8_t* quantized_output_scratch, - float* output_state_ptr, float* 
cell_state_ptr, int32_t* accum_scratch_ptr,
-    float* output_ptr, int32_t* input_zp, int32_t* aux_input_zp,
-    int32_t* output_state_zp, int32_t* row_sums, int row_sums_size,
-    bool* compute_row_sums, bool asymmetric_quantize_inputs) {
-  // Since we have already checked that weights are all there or none, we
-  // can check the existence of only one to get the condition.
-  const bool use_cifg = (input_to_input_weights_ptr == nullptr);
-  // Make named scratch buffers for the different gates.
-  float* input_gate_scratch = scratch0;
-  float* forget_gate_scratch = scratch1;
-  float* cell_gate_scratch = scratch2;
-  float* output_gate_scratch = scratch3;
-
-  int32_t* input_to_input_row_sums = nullptr;
-  int32_t* input_to_forget_row_sums = nullptr;
-  int32_t* input_to_cell_row_sums = nullptr;
-  int32_t* input_to_output_row_sums = nullptr;
-  int32_t* aux_input_to_input_row_sums = nullptr;
-  int32_t* aux_input_to_forget_row_sums = nullptr;
-  int32_t* aux_input_to_cell_row_sums = nullptr;
-  int32_t* aux_input_to_output_row_sums = nullptr;
-  int32_t* recurrent_to_input_row_sums = nullptr;
-  int32_t* recurrent_to_forget_row_sums = nullptr;
-  int32_t* recurrent_to_cell_row_sums = nullptr;
-  int32_t* recurrent_to_output_row_sums = nullptr;
-  int32_t* projection_weights_row_sums = nullptr;
-
-  if (asymmetric_quantize_inputs) {
-    int num_row_sums = use_cifg ? 6 : 8;
-    if (aux_input_ptr != nullptr) {
-      num_row_sums += use_cifg ? 3 : 4;
-    }
-    if (projection_weights_ptr != nullptr) {
-      num_row_sums += ceil(static_cast<float>(n_output) / n_cell);
-    }
-    TFLITE_DCHECK(row_sums_size == num_row_sums);
-    input_to_input_row_sums = row_sums;
-    input_to_forget_row_sums =
-        use_cifg ? input_to_input_row_sums : input_to_input_row_sums + n_cell;
-    input_to_cell_row_sums = input_to_forget_row_sums + n_cell;
-    input_to_output_row_sums = input_to_cell_row_sums + n_cell;
-    if (aux_input_ptr != nullptr) {
-      aux_input_to_input_row_sums = input_to_output_row_sums + n_cell;
-      aux_input_to_forget_row_sums = use_cifg
-                                         ? aux_input_to_input_row_sums
-                                         : aux_input_to_input_row_sums + n_cell;
-      aux_input_to_cell_row_sums = aux_input_to_forget_row_sums + n_cell;
-      aux_input_to_output_row_sums = aux_input_to_cell_row_sums + n_cell;
-    }
-    recurrent_to_input_row_sums = aux_input_ptr
-                                      ? aux_input_to_output_row_sums + n_cell
-                                      : input_to_output_row_sums + n_cell;
-    recurrent_to_forget_row_sums = use_cifg
-                                       ?
recurrent_to_input_row_sums - : recurrent_to_input_row_sums + n_cell; - recurrent_to_cell_row_sums = recurrent_to_forget_row_sums + n_cell; - recurrent_to_output_row_sums = recurrent_to_cell_row_sums + n_cell; - if (projection_weights_ptr != nullptr) { - projection_weights_row_sums = recurrent_to_output_row_sums + n_cell; - } - if (*compute_row_sums) { - ComputeRowSums( - input_to_input_row_sums, input_to_forget_row_sums, - input_to_cell_row_sums, input_to_output_row_sums, - aux_input_to_input_row_sums, aux_input_to_forget_row_sums, - aux_input_to_cell_row_sums, aux_input_to_output_row_sums, - recurrent_to_input_row_sums, recurrent_to_forget_row_sums, - recurrent_to_cell_row_sums, recurrent_to_output_row_sums, - projection_weights_row_sums, row_sums, n_cell, n_input, n_aux_input, - n_output, input_to_input_weights_ptr, input_to_forget_weights_ptr, - input_to_cell_weights_ptr, input_to_output_weights_ptr, - aux_input_to_input_weights_ptr, aux_input_to_forget_weights_ptr, - aux_input_to_cell_weights_ptr, aux_input_to_output_weights_ptr, - recurrent_to_input_weights_ptr, recurrent_to_forget_weights_ptr, - recurrent_to_cell_weights_ptr, recurrent_to_output_weights_ptr, - projection_weights_ptr, use_cifg, aux_input_ptr); - *compute_row_sums = false; - } - } - - // Check if inputs are all zeros so we can skip some computations. - const bool is_input_all_zeros = - micro_tensor_utils::IsZeroVector(input_ptr, n_batch * n_input); - const bool is_aux_input_all_zeros = - (aux_input_ptr == nullptr || - micro_tensor_utils::IsZeroVector(aux_input_ptr, n_batch * n_aux_input)); - const bool is_output_state_all_zeros = - micro_tensor_utils::IsZeroVector(output_state_ptr, n_batch * n_output); - // Quantize inputs. - if (!is_input_all_zeros) { - micro_tensor_utils::BatchQuantizeFloats( - input_ptr, n_batch, n_input, quantized_input_ptr, input_sf, input_zp, - asymmetric_quantize_inputs); - } - if (!is_aux_input_all_zeros) { - micro_tensor_utils::BatchQuantizeFloats( - aux_input_ptr, n_batch, n_aux_input, quantized_aux_input_ptr, - aux_input_sf, aux_input_zp, asymmetric_quantize_inputs); - } - if (!is_output_state_all_zeros) { - micro_tensor_utils::BatchQuantizeFloats( - output_state_ptr, n_batch, n_output, quantized_output_state_ptr, - output_state_sf, output_state_zp, asymmetric_quantize_inputs); - } - if (!use_cifg) { - // Calculate the input gate. (If not CIFG.) - CalculateLstmGateHybrid( - quantized_input_ptr, input_sf, input_zp, input_to_input_weights_ptr, - input_to_input_weights_ledger_ptr, input_to_input_weights_scale, - input_to_input_row_sums, quantized_aux_input_ptr, aux_input_sf, - aux_input_zp, aux_input_to_input_weights_ptr, - aux_input_to_input_weights_scale, aux_input_to_input_row_sums, - quantized_output_state_ptr, output_state_sf, output_state_zp, - recurrent_to_input_weights_ptr, recurrent_to_input_weights_ledger_ptr, - recurrent_to_input_weights_scale, recurrent_to_input_row_sums, - cell_state_ptr, cell_to_input_weights_ptr, cell_to_input_weights_scale, - input_layer_norm_coefficients_ptr, input_gate_bias_ptr, n_batch, - n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid, - input_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros, - is_output_state_all_zeros, compute_row_sums, scaling_factors_scratch, - recovered_cell_weights, scales, accum_scratch_ptr); - } - // Calculate the forget gate. 
- CalculateLstmGateHybrid( - quantized_input_ptr, input_sf, input_zp, input_to_forget_weights_ptr, - input_to_forget_weights_ledger_ptr, input_to_forget_weights_scale, - input_to_forget_row_sums, quantized_aux_input_ptr, aux_input_sf, - aux_input_zp, aux_input_to_forget_weights_ptr, - aux_input_to_forget_weights_scale, aux_input_to_forget_row_sums, - quantized_output_state_ptr, output_state_sf, output_state_zp, - recurrent_to_forget_weights_ptr, recurrent_to_forget_weights_ledger_ptr, - recurrent_to_forget_weights_scale, recurrent_to_forget_row_sums, - cell_state_ptr, cell_to_forget_weights_ptr, cell_to_forget_weights_scale, - forget_layer_norm_coefficients_ptr, forget_gate_bias_ptr, n_batch, - n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid, - forget_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros, - is_output_state_all_zeros, compute_row_sums, scaling_factors_scratch, - recovered_cell_weights, scales, accum_scratch_ptr); - // Calculate the cell update gate. - CalculateLstmGateHybrid( - quantized_input_ptr, input_sf, input_zp, input_to_cell_weights_ptr, - input_to_cell_weights_ledger_ptr, input_to_cell_weights_scale, - input_to_cell_row_sums, quantized_aux_input_ptr, aux_input_sf, - aux_input_zp, aux_input_to_cell_weights_ptr, - aux_input_to_cell_weights_scale, aux_input_to_cell_row_sums, - quantized_output_state_ptr, output_state_sf, output_state_zp, - recurrent_to_cell_weights_ptr, recurrent_to_cell_weights_ledger_ptr, - recurrent_to_cell_weights_scale, recurrent_to_cell_row_sums, - /*cell_state=*/nullptr, /*cell_to_gate_weights=*/nullptr, - /*cell_to_gate_weights_scale=*/0.0f, cell_layer_norm_coefficients_ptr, - cell_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell, - params->activation, cell_gate_scratch, is_input_all_zeros, - is_aux_input_all_zeros, is_output_state_all_zeros, compute_row_sums, - scaling_factors_scratch, recovered_cell_weights, scales, - accum_scratch_ptr); - // Update the cell state. - UpdateLstmCellFloat(n_batch, n_cell, cell_state_ptr, input_gate_scratch, - forget_gate_scratch, cell_gate_scratch, use_cifg, - params->cell_clip); - // Calculate the output gate. - CalculateLstmGateHybrid( - quantized_input_ptr, input_sf, input_zp, input_to_output_weights_ptr, - input_to_output_weights_ledger_ptr, input_to_output_weights_scale, - input_to_output_row_sums, quantized_aux_input_ptr, aux_input_sf, - aux_input_zp, aux_input_to_output_weights_ptr, - aux_input_to_output_weights_scale, aux_input_to_output_row_sums, - quantized_output_state_ptr, output_state_sf, output_state_zp, - recurrent_to_output_weights_ptr, recurrent_to_output_weights_ledger_ptr, - recurrent_to_output_weights_scale, recurrent_to_output_row_sums, - cell_state_ptr, cell_to_output_weights_ptr, cell_to_output_weights_scale, - output_layer_norm_coefficients_ptr, output_gate_bias_ptr, n_batch, - n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid, - output_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros, - is_output_state_all_zeros, compute_row_sums, scaling_factors_scratch, - recovered_cell_weights, scales, accum_scratch_ptr); - // Update the output state. 
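For reference, the UpdateLstmCellFloat call above implements the standard LSTM recurrence c_t = f_t * c_{t-1} + i_t * g_t, with i_t replaced by (1 - f_t) under CIFG, followed by optional clipping. A float sketch of that step under the same gate layout (not the library implementation):

#include <algorithm>

// Cell-state recurrence: c = f * c + i * g, then clip to [-clip, clip].
void CellUpdateSketch(int n, float* cell, const float* input_gate,
                      const float* forget_gate, const float* cell_gate,
                      bool use_cifg, float clip) {
  for (int i = 0; i < n; ++i) {
    const float in = use_cifg ? (1.0f - forget_gate[i]) : input_gate[i];
    float c = forget_gate[i] * cell[i] + in * cell_gate[i];
    if (clip > 0.0f) {
      c = std::min(std::max(c, -clip), clip);
    }
    cell[i] = c;
  }
}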
-  CalculateLstmOutputHybrid(
-      n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
-      params->activation, projection_weights_ptr, projection_weights_ledger_ptr,
-      projection_weights_scale, projection_bias_ptr, params->proj_clip,
-      output_state_ptr, asymmetric_quantize_inputs, projection_weights_row_sums,
-      compute_row_sums, scratch2, quantized_output_scratch, input_sf, input_zp,
-      accum_scratch_ptr, scales);
-  // Copy output state to the output. Note that the output's rows may not be
-  // contiguous (output_batch_leading_dim != n_output).
-  for (int b = 0; b < n_batch; b++) {
-    std::memcpy(output_ptr + b * output_batch_leading_dim,
-                output_state_ptr + b * n_output, n_output * sizeof(float));
-  }
-}
-
-// Fully quantized lstm kernel for 16 bit gate matmul output.
-//
-// Input tensor of size n_batch * n_input:
-// input_ptr
-//
-// LSTM weights:
-// Quantized input weights of size 'n_cell * n_input':
-// input_to_input_weight_ptr - optional
-// input_to_forget_weight_ptr - optional
-// input_to_cell_weight_ptr - optional
-// input_to_output_weight_ptr - optional
-//
-// Quantized recurrent weights of size 'n_cell * n_output':
-// recurrent_to_input_weight_ptr - optional
-// recurrent_to_forget_weights_ptr
-// recurrent_to_cell_weights_ptr
-// recurrent_to_output_weights_ptr
-//
-// Quantized peephole weights of size 'n_cell', representing diagonal matrices.
-// cell_to_input_weights - optional
-// cell_to_forget_weights - optional
-// cell_to_output_weights - optional
-//
-// Quantized projection weights of size 'n_output * n_cell'
-// projection_weight_ptr - optional
-//
-// Weight scales (scalars) for each of the weights above.
-// effective_input_to_input_scale_a - optional
-// effective_input_to_input_scale_b - optional
-// effective_input_to_forget_scale_a
-// effective_input_to_forget_scale_b
-// effective_input_to_cell_scale_a
-// effective_input_to_cell_scale_b
-// effective_input_to_output_scale_a
-// effective_input_to_output_scale_b
-// effective_recurrent_to_input_scale_a - optional
-// effective_recurrent_to_input_scale_b - optional
-// effective_recurrent_to_forget_scale_a
-// effective_recurrent_to_forget_scale_b
-// effective_recurrent_to_cell_scale_a
-// effective_recurrent_to_cell_scale_b
-// effective_recurrent_to_output_scale_a
-// effective_recurrent_to_output_scale_b
-// effective_proj_scale_a - optional
-// effective_proj_scale_b - optional
-//
-// Gate biases of size 'n_cell':
-// input_gate_bias_ptr - optional
-// forget_gate_bias_ptr
-// cell_gate_bias_ptr
-// output_gate_bias_ptr
-//
-// Layer norm coefficients of size 'n_cell', representing diagonal matrices.
-// layer_norm_input_weight_ptr - optional
-// layer_norm_forget_weight_ptr - optional
-// layer_norm_cell_weight_ptr - optional
-// layer_norm_output_weight_ptr - optional
-//
-// Layer norm scales of size 'n_cell'.
-// layer_norm_input_scale_a - optional
-// layer_norm_input_scale_b - optional
-// layer_norm_forget_scale_a - optional
-// layer_norm_forget_scale_b - optional
-// layer_norm_cell_scale_a - optional
-// layer_norm_cell_scale_b - optional
-// layer_norm_output_scale_a - optional
-// layer_norm_output_scale_b - optional
-//
-// Scalar values:
-// quantized_cell_clip: quantized clip value for cell.
-// quantized_proj_clip: quantized clip value for projection.
-// cell_state_scale: the power of two scale for cell state.
-//
-// Zero points:
-// output_state_zp: zero point of output state
-// hidden_zp: zero point for hidden state.
-// -// Temporary pre-allocated storage for the calculation. Each is of size n_cell * -// n_batch. -// scratch0 -// scratch1 -// scratch2 -// scratch3 -// scratch4 -// scratch5: this scratch buffer is created purely for optimizing the -// MatrixBatchVectorMultiplyAccumulate. -// -// Outputs: -// output_state_ptr - size 'n_batch * n_output' -// cell_state_ptr - size 'n_batch * n_cell' -// output_ptr - size 'n_batch * n_output' -// TODO(b/159947023): scratch0 is not used if (!cifg). Don't allocate then. -inline void LstmStepInteger8x8_16( - const int8_t* input_ptr, const int8_t* input_to_input_weight_ptr, - int32_t effective_input_to_input_scale_a, - int32_t effective_input_to_input_scale_b, - const int8_t* input_to_forget_weight_ptr, - int32_t effective_input_to_forget_scale_a, - int32_t effective_input_to_forget_scale_b, - const int8_t* input_to_cell_weight_ptr, - int32_t effective_input_to_cell_scale_a, - int32_t effective_input_to_cell_scale_b, - const int8_t* input_to_output_weight_ptr, - int32_t effective_input_to_output_scale_a, - int32_t effective_input_to_output_scale_b, - const int8_t* recurrent_to_input_weight_ptr, - int32_t effective_recurrent_to_input_scale_a, - int32_t effective_recurrent_to_input_scale_b, - const int8_t* recurrent_to_forget_weight_ptr, - int32_t effective_recurrent_to_forget_scale_a, - int32_t effective_recurrent_to_forget_scale_b, - const int8_t* recurrent_to_cell_weight_ptr, - int32_t effective_recurrent_to_cell_scale_a, - int32_t effective_recurrent_to_cell_scale_b, - const int8_t* recurrent_to_output_weight_ptr, - int32_t effective_recurrent_to_output_scale_a, - int32_t effective_recurrent_to_output_scale_b, - const int16_t* cell_to_input_weight_ptr, - int32_t effective_cell_to_input_scale_a, - int32_t effective_cell_to_input_scale_b, - const int16_t* cell_to_forget_weight_ptr, - int32_t effective_cell_to_forget_scale_a, - int32_t effective_cell_to_forget_scale_b, - const int16_t* cell_to_output_weight_ptr, - int32_t effective_cell_to_output_scale_a, - int32_t effective_cell_to_output_scale_b, - const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a, - int32_t effective_proj_scale_b, int32_t hidden_zp, - int32_t effective_hidden_scale_a, int32_t effective_hidden_scale_b, - const int16_t* layer_norm_input_weight_ptr, - int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b, - const int16_t* layer_norm_forget_weight_ptr, - int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b, - const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a, - int32_t layer_norm_cell_scale_b, - const int16_t* layer_norm_output_weight_ptr, - int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b, - const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr, - const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr, - int16_t quantized_cell_clip, int8_t quantized_proj_clip, - int32_t cell_state_scale, int32_t input_variance_guard, - int32_t forget_variance_guard, int32_t cell_variance_guard, - int32_t output_variance_guard, - const int32_t* input_to_forget_effective_bias, - const int32_t* recurrent_to_forget_effective_bias, - const int32_t* input_to_cell_effective_bias, - const int32_t* recurrent_to_cell_effective_bias, - const int32_t* input_to_output_effective_bias, - const int32_t* recurrent_to_output_effective_bias, - const int32_t* input_to_input_effective_bias, - const int32_t* recurrent_to_input_effective_bias, - const int32_t* projection_effective_bias, int n_batch, int 
n_cell,
-    int n_input, int n_output, int8_t* output_state_ptr,
-    int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
-    int16_t* scratch0, int16_t* scratch1, int16_t* scratch2, int16_t* scratch3,
-    int8_t* scratch4, int32_t* scratch5) {
-  // Make named scratch buffers for the different gates.
-  int16_t* input_gate_scratch = scratch0;
-  int16_t* forget_gate_scratch = scratch1;
-  int16_t* cell_gate_scratch = scratch2;
-  int16_t* output_gate_scratch = scratch3;
-
-  // Since we have already checked that weights are all there or none, we
-  // can check the existence of only one to get the condition.
-  const bool use_cifg = (input_to_input_weight_ptr == nullptr);
-
-  // Check for nullptrs.
-  TFLITE_DCHECK(input_to_forget_effective_bias);
-  TFLITE_DCHECK(recurrent_to_forget_effective_bias);
-  TFLITE_DCHECK(input_to_cell_effective_bias);
-  TFLITE_DCHECK(recurrent_to_cell_effective_bias);
-  TFLITE_DCHECK(input_to_output_effective_bias);
-  TFLITE_DCHECK(recurrent_to_output_effective_bias);
-  if (!use_cifg) {
-    TFLITE_DCHECK(input_to_input_effective_bias);
-    TFLITE_DCHECK(recurrent_to_input_effective_bias);
-  }
-  const bool use_projection = (projection_weight_ptr != nullptr);
-  if (use_projection) {
-    TFLITE_DCHECK(projection_effective_bias);
-  }
-  if (!use_cifg) {
-    // Calculate the input gate. (If not CIFG.)
-    CalculateLstmGateInteger8x8_16(
-        input_ptr, input_to_input_weight_ptr, input_to_input_effective_bias,
-        effective_input_to_input_scale_a, effective_input_to_input_scale_b,
-        output_state_ptr, recurrent_to_input_weight_ptr,
-        recurrent_to_input_effective_bias, effective_recurrent_to_input_scale_a,
-        effective_recurrent_to_input_scale_b, cell_state_ptr,
-        cell_to_input_weight_ptr, effective_cell_to_input_scale_a,
-        effective_cell_to_input_scale_b, layer_norm_input_weight_ptr,
-        input_gate_bias_ptr, layer_norm_input_scale_a, layer_norm_input_scale_b,
-        input_variance_guard, n_batch, n_input, n_output, n_cell,
-        kTfLiteActSigmoid, input_gate_scratch, scratch5);
-  }
-  // Calculate the forget gate.
-  CalculateLstmGateInteger8x8_16(
-      input_ptr, input_to_forget_weight_ptr, input_to_forget_effective_bias,
-      effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
-      output_state_ptr, recurrent_to_forget_weight_ptr,
-      recurrent_to_forget_effective_bias, effective_recurrent_to_forget_scale_a,
-      effective_recurrent_to_forget_scale_b, cell_state_ptr,
-      cell_to_forget_weight_ptr, effective_cell_to_forget_scale_a,
-      effective_cell_to_forget_scale_b, layer_norm_forget_weight_ptr,
-      forget_gate_bias_ptr, layer_norm_forget_scale_a,
-      layer_norm_forget_scale_b, forget_variance_guard, n_batch, n_input,
-      n_output, n_cell, kTfLiteActSigmoid, forget_gate_scratch, scratch5);
-  // Calculate the cell update gate.
-  CalculateLstmGateInteger8x8_16(
-      input_ptr, input_to_cell_weight_ptr, input_to_cell_effective_bias,
-      effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
-      output_state_ptr, recurrent_to_cell_weight_ptr,
-      recurrent_to_cell_effective_bias, effective_recurrent_to_cell_scale_a,
-      effective_recurrent_to_cell_scale_b, cell_state_ptr,
-      /*cell_to_gate_weights=*/nullptr, /*cell_to_gate_scale_a=*/0,
-      /*cell_to_gate_scale_b=*/0, layer_norm_cell_weight_ptr,
-      cell_gate_bias_ptr, layer_norm_cell_scale_a, layer_norm_cell_scale_b,
-      cell_variance_guard, n_batch, n_input, n_output, n_cell, kTfLiteActTanh,
-      cell_gate_scratch, scratch5);
-  // Update the cell state.
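Every effective_*_scale_a / _scale_b pair threaded through this integer kernel encodes one real-valued rescale factor as a Q31 fixed-point multiplier a plus a shift b, i.e. scale ≈ a * 2^(b - 31). A simplified 64-bit sketch of applying such a pair (assumes b < 31; the production kernels use saturating rounding-doubling-high-mul primitives rather than plain int64 arithmetic):

#include <cstdint>

// Rescale an int32 accumulator by a * 2^(b - 31) with round-to-nearest.
int32_t ApplyEffectiveScale(int32_t acc, int32_t a, int32_t b) {
  const int64_t prod = static_cast<int64_t>(acc) * static_cast<int64_t>(a);
  const int shift = 31 - b;  // assumed positive, i.e. a net right shift
  const int64_t rounding = int64_t{1} << (shift - 1);
  return static_cast<int32_t>((prod + rounding) >> shift);
}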
-  UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr, cell_state_scale,
-                        input_gate_scratch, forget_gate_scratch,
-                        cell_gate_scratch, use_cifg, quantized_cell_clip);
-  // Calculate the output gate.
-  CalculateLstmGateInteger8x8_16(
-      input_ptr, input_to_output_weight_ptr, input_to_output_effective_bias,
-      effective_input_to_output_scale_a, effective_input_to_output_scale_b,
-      output_state_ptr, recurrent_to_output_weight_ptr,
-      recurrent_to_output_effective_bias, effective_recurrent_to_output_scale_a,
-      effective_recurrent_to_output_scale_b, cell_state_ptr,
-      cell_to_output_weight_ptr, effective_cell_to_output_scale_a,
-      effective_cell_to_output_scale_b, layer_norm_output_weight_ptr,
-      output_gate_bias_ptr, layer_norm_output_scale_a,
-      layer_norm_output_scale_b, output_variance_guard, n_batch, n_input,
-      n_output, n_cell, kTfLiteActSigmoid, output_gate_scratch, scratch5);
-  // Update the output state.
-  CalculateLstmOutputInteger8x8_16(
-      n_batch, n_cell, n_output, cell_state_ptr, cell_state_scale,
-      output_gate_scratch, effective_hidden_scale_a, effective_hidden_scale_b,
-      hidden_zp, projection_weight_ptr, effective_proj_scale_a,
-      effective_proj_scale_b, projection_effective_bias, output_state_zp,
-      quantized_proj_clip, output_state_ptr, scratch0, scratch4, scratch5);
-  // Copy output state to the output. Note that unlike float or hybrid, output
-  // is always contiguous.
-  std::memcpy(output_ptr, output_state_ptr,
-              n_batch * n_output * sizeof(int8_t));
-}
-
-// Fully quantized lstm kernel for 8 bit gate matmul output.
-//
-// Input tensor of size n_batch * n_input:
-// input_ptr
-//
-// LSTM weights:
-// Quantized input weights of size 'n_cell * n_input':
-// input_to_input_weight_ptr - optional
-// input_to_forget_weight_ptr - optional
-// input_to_cell_weight_ptr - optional
-// input_to_output_weight_ptr - optional
-//
-// Quantized recurrent weights of size 'n_cell * n_output':
-// recurrent_to_input_weight_ptr - optional
-// recurrent_to_forget_weights_ptr
-// recurrent_to_cell_weights_ptr
-// recurrent_to_output_weights_ptr
-//
-// Quantized peephole weights of size 'n_cell', representing diagonal matrices.
-// cell_to_input_weights - optional
-// cell_to_forget_weights - optional
-// cell_to_output_weights - optional
-//
-// Quantized projection weights of size 'n_output * n_cell'
-// projection_weight_ptr - optional
-//
-// Weight scales (scalars) for each of the weights above.
-// effective_input_to_input_scale_a - optional
-// effective_input_to_input_scale_b - optional
-// effective_input_to_forget_scale_a
-// effective_input_to_forget_scale_b
-// effective_input_to_cell_scale_a
-// effective_input_to_cell_scale_b
-// effective_input_to_output_scale_a
-// effective_input_to_output_scale_b
-// effective_recurrent_to_input_scale_a - optional
-// effective_recurrent_to_input_scale_b - optional
-// effective_recurrent_to_forget_scale_a
-// effective_recurrent_to_forget_scale_b
-// effective_recurrent_to_cell_scale_a
-// effective_recurrent_to_cell_scale_b
-// effective_recurrent_to_output_scale_a
-// effective_recurrent_to_output_scale_b
-// effective_proj_scale_a - optional
-// effective_proj_scale_b - optional
-//
-// Gate biases of size 'n_cell':
-// input_gate_bias_ptr - optional
-// forget_gate_bias_ptr
-// cell_gate_bias_ptr
-// output_gate_bias_ptr
-//
-// Layer norm coefficients of size 'n_cell', representing diagonal matrices.
-// layer_norm_input_weight_ptr - optional
-// layer_norm_forget_weight_ptr - optional
-// layer_norm_cell_weight_ptr - optional
-// layer_norm_output_weight_ptr - optional
-//
-// Layer norm scales of size 'n_cell'.
-// layer_norm_input_scale_a - optional
-// layer_norm_input_scale_b - optional
-// layer_norm_forget_scale_a - optional
-// layer_norm_forget_scale_b - optional
-// layer_norm_cell_scale_a - optional
-// layer_norm_cell_scale_b - optional
-// layer_norm_output_scale_a - optional
-// layer_norm_output_scale_b - optional
-//
-// Scalar values:
-// quantized_cell_clip: quantized clip value for cell.
-// quantized_proj_clip: quantized clip value for projection.
-// cell_state_scale: the power of two scale for cell state.
-//
-// Zero points:
-// input_zp: zero point for input tensor.
-// output_state_zp: zero point of output state.
-// hidden_zp: zero point for hidden state.
-//
-// Temporary pre-allocated storage for the calculation. Each is of size n_cell *
-// n_batch.
-// scratch0
-// scratch1
-// scratch2
-// scratch3
-// scratch4
-// scratch5
-// scratch6
-// scratch7
-//
-// Outputs:
-// output_state_ptr - size 'n_batch * n_output'
-// cell_state_ptr - size 'n_batch * n_cell'
-// output_ptr - size 'n_batch * n_output'
-//
-// Can move zero point calculation into Prepare() for better performance.
-// TODO(b/159947023): scratch5 is unused, remove.
-inline void LstmStepInteger8x8_8(
-    const int8_t* input_ptr, int32_t input_zp,
-    const int8_t* input_to_input_weight_ptr,
-    int32_t effective_input_to_input_scale_a,
-    int32_t effective_input_to_input_scale_b,
-    const int8_t* input_to_forget_weight_ptr,
-    int32_t effective_input_to_forget_scale_a,
-    int32_t effective_input_to_forget_scale_b,
-    const int8_t* input_to_cell_weight_ptr,
-    int32_t effective_input_to_cell_scale_a,
-    int32_t effective_input_to_cell_scale_b,
-    const int8_t* input_to_output_weight_ptr,
-    int32_t effective_input_to_output_scale_a,
-    int32_t effective_input_to_output_scale_b,
-    const int8_t* recurrent_to_input_weight_ptr,
-    int32_t effective_recurrent_to_input_scale_a,
-    int32_t effective_recurrent_to_input_scale_b,
-    const int8_t* recurrent_to_forget_weight_ptr,
-    int32_t effective_recurrent_to_forget_scale_a,
-    int32_t effective_recurrent_to_forget_scale_b,
-    const int8_t* recurrent_to_cell_weight_ptr,
-    int32_t effective_recurrent_to_cell_scale_a,
-    int32_t effective_recurrent_to_cell_scale_b,
-    const int8_t* recurrent_to_output_weight_ptr,
-    int32_t effective_recurrent_to_output_scale_a,
-    int32_t effective_recurrent_to_output_scale_b,
-    const int8_t* cell_to_input_weight_ptr,
-    int32_t effective_cell_to_input_scale_a,
-    int32_t effective_cell_to_input_scale_b,
-    const int8_t* cell_to_forget_weight_ptr,
-    int32_t effective_cell_to_forget_scale_a,
-    int32_t effective_cell_to_forget_scale_b,
-    const int8_t* cell_to_output_weight_ptr,
-    int32_t effective_cell_to_output_scale_a,
-    int32_t effective_cell_to_output_scale_b,
-    const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
-    int32_t effective_proj_scale_b, const int16_t* layer_norm_input_weight_ptr,
-    int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
-    const int16_t* layer_norm_forget_weight_ptr,
-    int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
-    const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
-    int32_t layer_norm_cell_scale_b,
-    const int16_t* layer_norm_output_weight_ptr,
-    int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
-    const int32_t*
input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
-    const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
-    const int32_t* projection_bias_ptr, const TfLiteLSTMParams* params,
-    const int32_t* intermediate_scale_a, const int32_t* intermediate_scale_b,
-    const int32_t* intermediate_zp, int16_t quantized_cell_clip,
-    int8_t quantized_proj_clip, int n_batch, int n_cell, int n_input,
-    int n_output, int output_batch_leading_dim, int8_t* output_state_ptr,
-    int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
-    int8_t* scratch0, int8_t* scratch1, int16_t* scratch2, int16_t* scratch3,
-    int16_t* scratch4, int16_t* scratch5, int16_t* scratch6,
-    int16_t* scratch7) {
-  // TODO(b/159066113): scratch5 is unused, remove.
-
-  // Make named scratch buffers for the different gates.
-  int16_t* forget_gate_scratch = scratch2;
-  int16_t* cell_gate_scratch = scratch3;
-  int16_t* output_gate_scratch = scratch4;
-  // Non-CIFG is not supported here: only the CIFG variant (no input gate)
-  // is implemented.
-
-  // Calculate the forget gate.
-  CalculateLstmGateInteger8x8_8(
-      input_ptr, input_zp, input_to_forget_weight_ptr,
-      effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
-      intermediate_scale_a[2], intermediate_scale_b[2], intermediate_zp[4],
-      output_state_ptr, output_state_zp, recurrent_to_forget_weight_ptr,
-      effective_recurrent_to_forget_scale_a,
-      effective_recurrent_to_forget_scale_b, intermediate_scale_a[3],
-      intermediate_scale_b[3], intermediate_zp[5], layer_norm_forget_weight_ptr,
-      layer_norm_forget_scale_a, layer_norm_forget_scale_b,
-      forget_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
-      kTfLiteActSigmoid, forget_gate_scratch, scratch0, scratch1);
-  // Calculate the cell update gate.
-  CalculateLstmGateInteger8x8_8(
-      input_ptr, input_zp, input_to_cell_weight_ptr,
-      effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
-      intermediate_scale_a[4], intermediate_scale_b[4], intermediate_zp[7],
-      output_state_ptr, output_state_zp, recurrent_to_cell_weight_ptr,
-      effective_recurrent_to_cell_scale_a, effective_recurrent_to_cell_scale_b,
-      intermediate_scale_a[5], intermediate_scale_b[5], intermediate_zp[8],
-      layer_norm_cell_weight_ptr, layer_norm_cell_scale_a,
-      layer_norm_cell_scale_b, cell_gate_bias_ptr, n_batch, n_input, n_output,
-      n_cell, kTfLiteActTanh, cell_gate_scratch, scratch0, scratch1);
-  // Update the cell state.
-  UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr,
-                        /*cell_state_scale=*/-15, /*input_gate=*/nullptr,
-                        forget_gate_scratch, cell_gate_scratch,
-                        /*use_cifg=*/true, quantized_cell_clip);
-  // Calculate the output gate.
-  CalculateLstmGateInteger8x8_8(
-      input_ptr, input_zp, input_to_output_weight_ptr,
-      effective_input_to_output_scale_a, effective_input_to_output_scale_b,
-      intermediate_scale_a[6], intermediate_scale_b[6], intermediate_zp[10],
-      output_state_ptr, output_state_zp, recurrent_to_output_weight_ptr,
-      effective_recurrent_to_output_scale_a,
-      effective_recurrent_to_output_scale_b, intermediate_scale_a[11],
-      intermediate_scale_b[7], intermediate_zp[7], layer_norm_output_weight_ptr,
-      layer_norm_output_scale_a, layer_norm_output_scale_b,
-      output_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
-      kTfLiteActSigmoid, output_gate_scratch, scratch0, scratch1);
-  // Update the output state.
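Note the /*use_cifg=*/true in the cell update above: this 8x8_8 step never materializes an input gate, and UpdateLstmCellInteger derives it from the forget gate as i = 1 - f. In the Q0.15 format the gates use, that coupling is roughly (the real kernel does this vectorized via Sub1Vector and CwiseMul, not per lane):

#include <cstdint>

// CIFG coupling in Q0.15, where 32767 stands in for 1.0.
inline int16_t CifgInputGateQ15(int16_t forget_gate) {
  return static_cast<int16_t>(32767 - forget_gate);
}

A strongly retained cell (f near 1.0) therefore admits little new input; the trade is one fewer gate matmul per step.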
-  CalculateLstmOutputInteger8x8_8(
-      n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
-      projection_weight_ptr, effective_proj_scale_a, effective_proj_scale_b,
-      projection_bias_ptr, output_state_zp, quantized_proj_clip,
-      output_state_ptr, scratch2);
-  // Copy output state to the output. Note that unlike float or hybrid, output
-  // is always contiguous.
-  std::memcpy(output_ptr, output_state_ptr,
-              n_batch * n_output * sizeof(int8_t));
-}
-
-}  // namespace
-
-TfLiteStatus EvalFloatLstm(
-    const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* input_to_input_weights,
-    const TfLiteEvalTensor* input_to_forget_weights,
-    const TfLiteEvalTensor* input_to_cell_weights,
-    const TfLiteEvalTensor* input_to_output_weights,
-    const TfLiteEvalTensor* recurrent_to_input_weights,
-    const TfLiteEvalTensor* recurrent_to_forget_weights,
-    const TfLiteEvalTensor* recurrent_to_cell_weights,
-    const TfLiteEvalTensor* recurrent_to_output_weights,
-    const TfLiteEvalTensor* cell_to_input_weights,
-    const TfLiteEvalTensor* cell_to_forget_weights,
-    const TfLiteEvalTensor* cell_to_output_weights,
-    const TfLiteEvalTensor* input_layer_norm_coefficients,
-    const TfLiteEvalTensor* forget_layer_norm_coefficients,
-    const TfLiteEvalTensor* cell_layer_norm_coefficients,
-    const TfLiteEvalTensor* output_layer_norm_coefficients,
-    const TfLiteEvalTensor* aux_input,
-    const TfLiteEvalTensor* aux_input_to_input_weights,
-    const TfLiteEvalTensor* aux_input_to_forget_weights,
-    const TfLiteEvalTensor* aux_input_to_cell_weights,
-    const TfLiteEvalTensor* aux_input_to_output_weights,
-    const TfLiteEvalTensor* input_gate_bias,
-    const TfLiteEvalTensor* forget_gate_bias,
-    const TfLiteEvalTensor* cell_gate_bias,
-    const TfLiteEvalTensor* output_gate_bias,
-    const TfLiteEvalTensor* projection_weights,
-    const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params,
-    bool forward_sequence, bool time_major, int output_offset,
-    float* scratch_buffer, TfLiteEvalTensor* output_state,
-    TfLiteEvalTensor* cell_state, TfLiteEvalTensor* output) {
-  TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3);
-  int max_time, n_batch;
-  if (input->dims->size == 3) {
-    max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
-    n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
-  } else {
-    max_time = 1;
-    n_batch = input->dims->data[0];
-  }
-  const int n_input = input->dims->data[input->dims->size - 1];
-  const int aux_input_size =
-      (aux_input) ? aux_input->dims->data[aux_input->dims->size - 1] : 0;
-
-  // n_cell and n_output will be the same size when there is no projection.
-  const int n_cell = input_to_output_weights->dims->data[0];
-  const int n_output = recurrent_to_output_weights->dims->data[1];
-
-  // Since we have already checked that weights are all there or none, we can
-  // check the existence of only one to get the condition.
-  const bool use_cifg = (input_to_input_weights == nullptr);
-
-  // Index the scratch buffer pointers into the global scratch buffer.
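Concretely, the single scratch_buffer is carved into three (CIFG) or four contiguous planes of n_batch * n_cell floats, one per gate, exactly as the indexing code below does. With hypothetical sizes n_batch = 2 and n_cell = 8, the non-CIFG layout is:

// Illustrative only; sizes are made up for the example.
// scratch_buffer = [ input | cell | forget | output ], 16 floats per plane.
float scratch_buffer[4 * 2 * 8];
float* input_gate_scratch = scratch_buffer;        // offset 0
float* cell_gate_scratch = scratch_buffer + 16;    // + 1 * n_cell * n_batch
float* forget_gate_scratch = scratch_buffer + 32;  // + 2 * n_cell * n_batch
float* output_gate_scratch = scratch_buffer + 48;  // + 3 * n_cell * n_batch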
- float* input_gate_scratch = nullptr; - float* cell_gate_scratch = nullptr; - float* forget_gate_scratch = nullptr; - float* output_gate_scratch = nullptr; - if (use_cifg) { - cell_gate_scratch = scratch_buffer; - forget_gate_scratch = scratch_buffer + n_cell * n_batch; - output_gate_scratch = scratch_buffer + 2 * n_cell * n_batch; - } else { - input_gate_scratch = scratch_buffer; - cell_gate_scratch = scratch_buffer + n_cell * n_batch; - forget_gate_scratch = scratch_buffer + 2 * n_cell * n_batch; - output_gate_scratch = scratch_buffer + 3 * n_cell * n_batch; - } - - const int output_batch_leading_dim = - output->dims->data[output->dims->size - 1]; - if (time_major) { - // Loop through the sequence. - const int input_step = n_batch * n_input; - const int output_step = n_batch * output_batch_leading_dim; - for (int t = 0; t < max_time; t++) { - // If this is the forward_sequence, step forward, otherwise step - // backwards. - const int t_rel = forward_sequence ? t : max_time - t - 1; - const float* input_ptr = - tflite::micro::GetTensorData(input) + t_rel * input_step; - const float* aux_input_ptr = nullptr; - if (aux_input) { - aux_input_ptr = - tflite::micro::GetTensorData(aux_input) + t_rel * input_step; - } - float* output_ptr = tflite::micro::GetTensorData(output) + - t_rel * output_step + output_offset; - - LstmStepFloat( - input_ptr, - input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_input_weights), - input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_forget_weights), - input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_cell_weights), - input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_output_weights), - aux_input_ptr, - aux_input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(aux_input_to_input_weights), - aux_input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_forget_weights), - aux_input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(aux_input_to_cell_weights), - aux_input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_output_weights), - recurrent_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(recurrent_to_input_weights), - recurrent_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights), - recurrent_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(recurrent_to_cell_weights), - recurrent_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights), - cell_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_input_weights), - cell_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_forget_weights), - cell_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_output_weights), - input_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_layer_norm_coefficients), - forget_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - forget_layer_norm_coefficients), - cell_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - cell_layer_norm_coefficients), - output_layer_norm_coefficients == nullptr - ? 
nullptr - : tflite::micro::GetTensorData( - output_layer_norm_coefficients), - input_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_gate_bias), - forget_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(forget_gate_bias), - cell_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_gate_bias), - output_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(output_gate_bias), - projection_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_weights), - projection_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_bias), - params, n_batch, n_cell, n_input, aux_input_size, n_output, - output_batch_leading_dim, - tflite::micro::GetTensorData(output_state), - tflite::micro::GetTensorData(cell_state), input_gate_scratch, - forget_gate_scratch, cell_gate_scratch, output_gate_scratch, - output_ptr); - } - } else { - for (int b = 0; b < n_batch; b++) { - const int input_step = n_input; - const int output_step = output_batch_leading_dim; - for (int t = 0; t < max_time; t++) { - // If this is the forward_sequence, step forward, otherwise step - // backwards. - const int t_rel = forward_sequence ? t : max_time - t - 1; - const int time_offset = b * max_time + t_rel; - const float* input_ptr = tflite::micro::GetTensorData(input) + - time_offset * input_step; - const float* aux_input_ptr = nullptr; - if (aux_input) { - aux_input_ptr = tflite::micro::GetTensorData(aux_input) + - time_offset * input_step; - } - float* output_ptr = tflite::micro::GetTensorData(output) + - time_offset * output_step + output_offset; - - // Offset the {output,cell}_state pointers to the right batch. - float* output_state_ptr = - tflite::micro::GetTensorData(output_state) + - b * output_batch_leading_dim; - float* cell_state_ptr = - tflite::micro::GetTensorData(cell_state) + b * n_cell; - // Offset the scratch pointers to the right batch. - float* input_gate_scratch_ptr = - input_gate_scratch ? input_gate_scratch + b * n_cell : nullptr; - float* forget_gate_scratch_ptr = forget_gate_scratch + b * n_cell; - float* cell_gate_scratch_ptr = cell_gate_scratch + b * n_cell; - float* output_gate_scratch_ptr = output_gate_scratch + b * n_cell; - - LstmStepFloat( - input_ptr, - input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_input_weights), - input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_forget_weights), - input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_cell_weights), - input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_output_weights), - aux_input_ptr, - aux_input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_input_weights), - aux_input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_forget_weights), - aux_input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_cell_weights), - aux_input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_output_weights), - recurrent_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_input_weights), - recurrent_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights), - recurrent_to_cell_weights == nullptr - ? 
nullptr - : tflite::micro::GetTensorData( - recurrent_to_cell_weights), - recurrent_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights), - cell_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_input_weights), - cell_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_forget_weights), - cell_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_output_weights), - input_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_layer_norm_coefficients), - forget_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - forget_layer_norm_coefficients), - cell_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - cell_layer_norm_coefficients), - output_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - output_layer_norm_coefficients), - input_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_gate_bias), - forget_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(forget_gate_bias), - cell_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_gate_bias), - output_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(output_gate_bias), - projection_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_weights), - projection_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_bias), - params, - /*n_batch=*/1, n_cell, n_input, aux_input_size, n_output, - output_batch_leading_dim, output_state_ptr, cell_state_ptr, - input_gate_scratch_ptr, forget_gate_scratch_ptr, - cell_gate_scratch_ptr, output_gate_scratch_ptr, output_ptr); - } - } - } - return kTfLiteOk; -} - -TfLiteStatus EvalHybridLstm( - const HybridLstmScales* hybrid_lstm_scales, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* input_to_input_weights, - const TfLiteEvalTensor* input_to_input_weights_ledger, - const TfLiteEvalTensor* input_to_forget_weights, - const TfLiteEvalTensor* input_to_forget_weights_ledger, - const TfLiteEvalTensor* input_to_cell_weights, - const TfLiteEvalTensor* input_to_cell_weights_ledger, - const TfLiteEvalTensor* input_to_output_weights, - const TfLiteEvalTensor* input_to_output_weights_ledger, - const TfLiteEvalTensor* recurrent_to_input_weights, - const TfLiteEvalTensor* recurrent_to_input_weights_ledger, - const TfLiteEvalTensor* recurrent_to_forget_weights, - const TfLiteEvalTensor* recurrent_to_forget_weights_ledger, - const TfLiteEvalTensor* recurrent_to_cell_weights, - const TfLiteEvalTensor* recurrent_to_cell_weights_ledger, - const TfLiteEvalTensor* recurrent_to_output_weights, - const TfLiteEvalTensor* recurrent_to_output_weights_ledger, - const TfLiteEvalTensor* cell_to_input_weights, - const TfLiteEvalTensor* cell_to_forget_weights, - const TfLiteEvalTensor* cell_to_output_weights, - const TfLiteEvalTensor* input_layer_norm_coefficients, - const TfLiteEvalTensor* forget_layer_norm_coefficients, - const TfLiteEvalTensor* cell_layer_norm_coefficients, - const TfLiteEvalTensor* output_layer_norm_coefficients, - const TfLiteEvalTensor* aux_input, - const TfLiteEvalTensor* aux_input_to_input_weights, - const TfLiteEvalTensor* aux_input_to_forget_weights, - const TfLiteEvalTensor* aux_input_to_cell_weights, - const TfLiteEvalTensor* aux_input_to_output_weights, - const TfLiteEvalTensor* input_gate_bias, - const TfLiteEvalTensor* 
forget_gate_bias, - const TfLiteEvalTensor* cell_gate_bias, - const TfLiteEvalTensor* output_gate_bias, - const TfLiteEvalTensor* projection_weights, - const TfLiteEvalTensor* projection_weights_ledger, - const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params, - bool forward_sequence, bool time_major, int output_offset, - float* scratch_buffer, float* input_sf, float* aux_input_sf, - float* output_state_sf, float* prod_scaling_factors, - float* recovered_cell_weights, int8_t* input_quantized, - int8_t* aux_input_quantized, int8_t* output_state_quantized, - int8_t* cell_state_quantized, float* scales, TfLiteEvalTensor* output_state, - TfLiteEvalTensor* cell_state, int32_t* output_scratch_buffer, - TfLiteEvalTensor* output, int32_t* input_zp, int32_t* aux_input_zp, - int32_t* output_state_zp, int32_t* row_sums, int row_sums_size, - bool* compute_row_sums) { - TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3); - const int n_input = input->dims->data[input->dims->size - 1]; - int max_time, n_batch; - if (input->dims->size == 2) { - max_time = 1; - n_batch = input->dims->data[0]; - } else { - max_time = (time_major) ? input->dims->data[0] : input->dims->data[1]; - n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0]; - } - const int aux_input_size = - (aux_input) ? aux_input->dims->data[aux_input->dims->size - 1] : 0; - // n_cell and n_output will be the same size when there is no projection. - const int n_cell = input_to_output_weights->dims->data[0]; - const int n_output = recurrent_to_output_weights->dims->data[1]; - - // Since we have already checked that weights are all there or none, we can - // check the existence of only one to get the condition. - const bool use_cifg = (input_to_input_weights == nullptr); - - float* input_gate_scratch = nullptr; - float* cell_gate_scratch = nullptr; - float* forget_gate_scratch = nullptr; - float* output_gate_scratch = nullptr; - if (use_cifg) { - cell_gate_scratch = scratch_buffer; - forget_gate_scratch = scratch_buffer + n_cell * n_batch; - output_gate_scratch = scratch_buffer + 2 * n_cell * n_batch; - } else { - input_gate_scratch = scratch_buffer; - cell_gate_scratch = scratch_buffer + n_cell * n_batch; - forget_gate_scratch = scratch_buffer + 2 * n_cell * n_batch; - output_gate_scratch = scratch_buffer + 3 * n_cell * n_batch; - } - - const int output_batch_leading_dim = - output->dims->data[output->dims->size - 1]; - - int32_t* input_zp_ptr = nullptr; - int32_t* aux_input_zp_ptr = nullptr; - int32_t* output_state_zp_ptr = nullptr; - int32_t* row_sums_ptr = nullptr; - if (params->asymmetric_quantize_inputs) { - input_zp_ptr = input_zp; - aux_input_zp_ptr = aux_input_zp; - output_state_zp_ptr = output_state_zp; - row_sums_ptr = row_sums; - } - - if (time_major) { - // Feed the sequence into the LSTM step-by-step. - const int input_step = n_batch * n_input; - const int output_step = n_batch * output_batch_leading_dim; - for (int t = 0; t < max_time; t++) { - // If this is the forward_sequence, step forward, otherwise step - // backwards. - const int t_rel = forward_sequence ? t : max_time - t - 1; - const float* input_ptr = - tflite::micro::GetTensorData(input) + t_rel * input_step; - const float* aux_input_ptr = nullptr; - if (aux_input) { - aux_input_ptr = - tflite::micro::GetTensorData(aux_input) + t_rel * input_step; - } - float* output_ptr = tflite::micro::GetTensorData(output) + - t_rel * output_step + output_offset; - LstmStepHybrid( - input_ptr, - input_to_input_weights == nullptr - ? 
nullptr - : tflite::micro::GetTensorData(input_to_input_weights), - input_to_input_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_input_weights_ledger), - hybrid_lstm_scales->input_to_input_weights_scale, - input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_forget_weights), - input_to_forget_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_forget_weights_ledger), - hybrid_lstm_scales->input_to_forget_weights_scale, - input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_cell_weights), - input_to_cell_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_cell_weights_ledger), - hybrid_lstm_scales->input_to_cell_weights_scale, - input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_output_weights), - input_to_output_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_output_weights_ledger), - hybrid_lstm_scales->input_to_output_weights_scale, aux_input_ptr, - aux_input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_input_weights), - hybrid_lstm_scales->aux_input_to_input_weights_scale, - aux_input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_forget_weights), - hybrid_lstm_scales->aux_input_to_forget_weights_scale, - aux_input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(aux_input_to_cell_weights), - hybrid_lstm_scales->aux_input_to_cell_weights_scale, - aux_input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_output_weights), - hybrid_lstm_scales->aux_input_to_output_weights_scale, - recurrent_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_input_weights), - recurrent_to_input_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_input_weights_ledger), - hybrid_lstm_scales->recurrent_to_input_weights_scale, - recurrent_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights), - recurrent_to_forget_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights_ledger), - hybrid_lstm_scales->recurrent_to_forget_weights_scale, - recurrent_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(recurrent_to_cell_weights), - recurrent_to_cell_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_cell_weights_ledger), - hybrid_lstm_scales->recurrent_to_cell_weights_scale, - recurrent_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights), - recurrent_to_output_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights_ledger), - hybrid_lstm_scales->recurrent_to_output_weights_scale, - cell_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_input_weights), - hybrid_lstm_scales->cell_to_input_weights_scale, - cell_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_forget_weights), - hybrid_lstm_scales->cell_to_forget_weights_scale, - cell_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_output_weights), - hybrid_lstm_scales->cell_to_output_weights_scale, - input_layer_norm_coefficients == nullptr - ? 
nullptr - : tflite::micro::GetTensorData( - input_layer_norm_coefficients), - forget_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - forget_layer_norm_coefficients), - cell_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - cell_layer_norm_coefficients), - output_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - output_layer_norm_coefficients), - input_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_gate_bias), - forget_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(forget_gate_bias), - cell_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_gate_bias), - output_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(output_gate_bias), - projection_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_weights), - projection_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - projection_weights_ledger), - hybrid_lstm_scales->projection_weights_scale, - projection_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_bias), - params, n_batch, n_cell, n_input, aux_input_size, n_output, - output_batch_leading_dim, input_gate_scratch, forget_gate_scratch, - cell_gate_scratch, output_gate_scratch, scales, input_sf, - aux_input_sf, output_state_sf, prod_scaling_factors, - recovered_cell_weights, input_quantized, aux_input_quantized, - output_state_quantized, cell_state_quantized, - tflite::micro::GetTensorData(output_state), - tflite::micro::GetTensorData(cell_state), - output_scratch_buffer, output_ptr, input_zp_ptr, aux_input_zp_ptr, - output_state_zp_ptr, row_sums_ptr, row_sums_size, compute_row_sums, - params->asymmetric_quantize_inputs); - } - } else { - for (int b = 0; b < n_batch; b++) { - const int input_step = n_input; - const int output_step = output_batch_leading_dim; - for (int t = 0; t < max_time; t++) { - // If this is the forward_sequence, step forward, otherwise step - // backwards. - const int t_rel = forward_sequence ? t : max_time - t - 1; - const int time_offset = b * max_time + t_rel; - const float* input_ptr = tflite::micro::GetTensorData(input) + - time_offset * input_step; - const float* aux_input_ptr = nullptr; - if (aux_input) { - aux_input_ptr = tflite::micro::GetTensorData(aux_input) + - time_offset * input_step; - } - float* output_ptr = tflite::micro::GetTensorData(output) + - time_offset * output_step + output_offset; - - // Offset the {output,cell}_state pointers to the right batch. - float* output_state_ptr = - tflite::micro::GetTensorData(output_state) + - b * output_batch_leading_dim; - float* cell_state_ptr = - tflite::micro::GetTensorData(cell_state) + b * n_cell; - // Offset the scratch pointers to the right batch. - float* input_gate_scratch_ptr = - input_gate_scratch ? input_gate_scratch + b * n_cell : nullptr; - float* forget_gate_scratch_ptr = forget_gate_scratch + b * n_cell; - float* cell_gate_scratch_ptr = cell_gate_scratch + b * n_cell; - float* output_gate_scratch_ptr = output_gate_scratch + b * n_cell; - - LstmStepHybrid( - input_ptr, - input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_input_weights), - input_to_input_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_input_weights_ledger), - hybrid_lstm_scales->input_to_input_weights_scale, - input_to_forget_weights == nullptr - ? 
nullptr - : tflite::micro::GetTensorData(input_to_forget_weights), - input_to_forget_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_forget_weights_ledger), - hybrid_lstm_scales->input_to_forget_weights_scale, - input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_cell_weights), - input_to_cell_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_cell_weights_ledger), - hybrid_lstm_scales->input_to_cell_weights_scale, - input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_output_weights), - input_to_output_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_to_output_weights_ledger), - hybrid_lstm_scales->input_to_output_weights_scale, aux_input_ptr, - aux_input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_input_weights), - hybrid_lstm_scales->aux_input_to_input_weights_scale, - aux_input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_forget_weights), - hybrid_lstm_scales->aux_input_to_forget_weights_scale, - aux_input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_cell_weights), - hybrid_lstm_scales->aux_input_to_cell_weights_scale, - aux_input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - aux_input_to_output_weights), - hybrid_lstm_scales->aux_input_to_output_weights_scale, - recurrent_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_input_weights), - recurrent_to_input_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_input_weights_ledger), - hybrid_lstm_scales->recurrent_to_input_weights_scale, - recurrent_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights), - recurrent_to_forget_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights_ledger), - hybrid_lstm_scales->recurrent_to_forget_weights_scale, - recurrent_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_cell_weights), - recurrent_to_cell_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_cell_weights_ledger), - hybrid_lstm_scales->recurrent_to_cell_weights_scale, - recurrent_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights), - recurrent_to_output_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights_ledger), - hybrid_lstm_scales->recurrent_to_output_weights_scale, - cell_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_input_weights), - hybrid_lstm_scales->cell_to_input_weights_scale, - cell_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_forget_weights), - hybrid_lstm_scales->cell_to_forget_weights_scale, - cell_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_output_weights), - hybrid_lstm_scales->cell_to_output_weights_scale, - input_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - input_layer_norm_coefficients), - forget_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - forget_layer_norm_coefficients), - cell_layer_norm_coefficients == nullptr - ? 
nullptr - : tflite::micro::GetTensorData( - cell_layer_norm_coefficients), - output_layer_norm_coefficients == nullptr - ? nullptr - : tflite::micro::GetTensorData( - output_layer_norm_coefficients), - input_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_gate_bias), - forget_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(forget_gate_bias), - cell_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_gate_bias), - output_gate_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(output_gate_bias), - projection_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_weights), - projection_weights_ledger == nullptr - ? nullptr - : tflite::micro::GetTensorData( - projection_weights_ledger), - hybrid_lstm_scales->projection_weights_scale, - projection_bias == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_bias), - params, - /*n_batch=*/1, n_cell, n_input, aux_input_size, n_output, - output_batch_leading_dim, input_gate_scratch_ptr, - forget_gate_scratch_ptr, cell_gate_scratch_ptr, - output_gate_scratch_ptr, scales, input_sf, aux_input_sf, - output_state_sf, prod_scaling_factors, recovered_cell_weights, - input_quantized, aux_input_quantized, output_state_quantized, - cell_state_quantized, output_state_ptr, cell_state_ptr, - output_scratch_buffer, output_ptr, input_zp_ptr, aux_input_zp_ptr, - output_state_zp_ptr, row_sums_ptr, row_sums_size, compute_row_sums, - params->asymmetric_quantize_inputs); - } - } - } - - return kTfLiteOk; -} - -TfLiteStatus EvalInteger8x8_16Lstm( - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* input_to_input_weights, - const TfLiteEvalTensor* input_to_forget_weights, - const TfLiteEvalTensor* input_to_cell_weights, - const TfLiteEvalTensor* input_to_output_weights, - const TfLiteEvalTensor* recurrent_to_input_weights, - const TfLiteEvalTensor* recurrent_to_forget_weights, - const TfLiteEvalTensor* recurrent_to_cell_weights, - const TfLiteEvalTensor* recurrent_to_output_weights, - const TfLiteEvalTensor* cell_to_input_weights, - const TfLiteEvalTensor* cell_to_forget_weights, - const TfLiteEvalTensor* cell_to_output_weights, - const TfLiteEvalTensor* input_layer_norm_coefficients, - const TfLiteEvalTensor* forget_layer_norm_coefficients, - const TfLiteEvalTensor* cell_layer_norm_coefficients, - const TfLiteEvalTensor* output_layer_norm_coefficients, - const TfLiteEvalTensor* input_gate_bias, - const TfLiteEvalTensor* forget_gate_bias, - const TfLiteEvalTensor* cell_gate_bias, - const TfLiteEvalTensor* output_gate_bias, - const TfLiteEvalTensor* projection_weights, - const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params, - bool forward_sequence, bool time_major, - const IntegerLstmParameter* integer_lstm_param, int32_t output_state_zp, - TfLiteEvalTensor* output_state, TfLiteEvalTensor* cell_state, - TfLiteEvalTensor* output, int16_t* scratch0, int16_t* scratch1, - int16_t* scratch2, int16_t* scratch3, int8_t* scratch4, int32_t* scratch5) { - TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3); - const int n_input = input->dims->data[input->dims->size - 1]; - int max_time, n_batch; - if (input->dims->size == 2) { - max_time = 1; - n_batch = input->dims->data[0]; - } else { - max_time = (time_major) ? input->dims->data[0] : input->dims->data[1]; - n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0]; - } - - // n_cell and n_output will be the same size when there is no projection. 
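To put hypothetical numbers on that remark: n_cell comes from input_to_output_weights (shape [n_cell, n_input]) and n_output from recurrent_to_output_weights (shape [n_cell, n_output]); only a projection matrix of shape [n_output, n_cell] lets the two differ.

// Hypothetical dimensions illustrating the comment above.
constexpr int n_input = 64;
constexpr int n_cell = 192;  // input_to_output_weights->dims->data[0]
constexpr int n_output_with_projection = 48;         // projection shape [48, 192]
constexpr int n_output_without_projection = n_cell;  // state feeds back as-is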
- const int n_cell = input_to_output_weights->dims->data[0]; - const int n_output = recurrent_to_output_weights->dims->data[1]; - - // Get params for time/batch/sequence. - const int output_batch_leading_dim = - output->dims->data[output->dims->size - 1]; - - if (time_major) { - const int input_step = n_batch * n_input; - const int output_step = n_batch * output_batch_leading_dim; - for (int t = 0; t < max_time; t++) { - const int t_rel = t; - int8_t* output_ptr = - tflite::micro::GetTensorData(output) + t_rel * output_step; - const int8_t* input_ptr = - tflite::micro::GetTensorData(input) + t_rel * input_step; - LstmStepInteger8x8_16( - input_ptr, - input_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_input_weights), - integer_lstm_param->effective_input_to_input_scale_a, - integer_lstm_param->effective_input_to_input_scale_b, - input_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_forget_weights), - integer_lstm_param->effective_input_to_forget_scale_a, - integer_lstm_param->effective_input_to_forget_scale_b, - input_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_cell_weights), - integer_lstm_param->effective_input_to_cell_scale_a, - integer_lstm_param->effective_input_to_cell_scale_b, - input_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(input_to_output_weights), - integer_lstm_param->effective_input_to_output_scale_a, - integer_lstm_param->effective_input_to_output_scale_b, - recurrent_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_input_weights), - integer_lstm_param->effective_recurrent_to_input_scale_a, - integer_lstm_param->effective_recurrent_to_input_scale_b, - recurrent_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_forget_weights), - integer_lstm_param->effective_recurrent_to_forget_scale_a, - integer_lstm_param->effective_recurrent_to_forget_scale_b, - recurrent_to_cell_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(recurrent_to_cell_weights), - integer_lstm_param->effective_recurrent_to_cell_scale_a, - integer_lstm_param->effective_recurrent_to_cell_scale_b, - recurrent_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData( - recurrent_to_output_weights), - integer_lstm_param->effective_recurrent_to_output_scale_a, - integer_lstm_param->effective_recurrent_to_output_scale_b, - cell_to_input_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_input_weights), - integer_lstm_param->effective_cell_to_input_scale_a, - integer_lstm_param->effective_cell_to_input_scale_b, - cell_to_forget_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_forget_weights), - integer_lstm_param->effective_cell_to_forget_scale_a, - integer_lstm_param->effective_cell_to_forget_scale_b, - cell_to_output_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(cell_to_output_weights), - integer_lstm_param->effective_cell_to_output_scale_a, - integer_lstm_param->effective_cell_to_output_scale_b, - projection_weights == nullptr - ? nullptr - : tflite::micro::GetTensorData(projection_weights), - integer_lstm_param->effective_proj_scale_a, - integer_lstm_param->effective_proj_scale_b, - integer_lstm_param->hidden_zp, - integer_lstm_param->effective_hidden_scale_a, - integer_lstm_param->effective_hidden_scale_b, - input_layer_norm_coefficients == nullptr - ? 
-              ? nullptr
-              : tflite::micro::GetTensorData<int16_t>(
-                    input_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_input_scale_a,
-          integer_lstm_param->layer_norm_input_scale_b,
-          forget_layer_norm_coefficients == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int16_t>(
-                    forget_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_forget_scale_a,
-          integer_lstm_param->layer_norm_forget_scale_b,
-          cell_layer_norm_coefficients == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int16_t>(
-                    cell_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_cell_scale_a,
-          integer_lstm_param->layer_norm_cell_scale_b,
-          output_layer_norm_coefficients == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int16_t>(
-                    output_layer_norm_coefficients),
-          integer_lstm_param->layer_norm_output_scale_a,
-          integer_lstm_param->layer_norm_output_scale_b,
-          input_gate_bias == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int32_t>(input_gate_bias),
-          forget_gate_bias == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int32_t>(forget_gate_bias),
-          cell_gate_bias == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int32_t>(cell_gate_bias),
-          output_gate_bias == nullptr
-              ? nullptr
-              : tflite::micro::GetTensorData<int32_t>(output_gate_bias),
-          integer_lstm_param->quantized_cell_clip,
-          integer_lstm_param->quantized_proj_clip,
-          integer_lstm_param->cell_scale,
-          integer_lstm_param->input_variance_guard,
-          integer_lstm_param->forget_variance_guard,
-          integer_lstm_param->cell_variance_guard,
-          integer_lstm_param->output_variance_guard,
-          integer_lstm_param->input_to_forget_effective_bias,
-          integer_lstm_param->recurrent_to_forget_effective_bias,
-          integer_lstm_param->input_to_cell_effective_bias,
-          integer_lstm_param->recurrent_to_cell_effective_bias,
-          integer_lstm_param->input_to_output_effective_bias,
-          integer_lstm_param->recurrent_to_output_effective_bias,
-          integer_lstm_param->input_to_input_effective_bias,
-          integer_lstm_param->recurrent_to_input_effective_bias,
-          integer_lstm_param->projection_effective_bias, n_batch, n_cell,
-          n_input, n_output, tflite::micro::GetTensorData<int8_t>(output_state),
-          output_state_zp, tflite::micro::GetTensorData<int16_t>(cell_state),
-          output_ptr, scratch0, scratch1, scratch2, scratch3, scratch4,
-          scratch5);
-    }
-  } else {
-    for (int b = 0; b < n_batch; b++) {
-      const int input_step = n_input;
-      const int output_step = output_batch_leading_dim;
-      for (int t = 0; t < max_time; t++) {
-        // If this is the forward_sequence, step forward, otherwise step
-        // backwards.
-        const int t_rel = forward_sequence ? t : max_time - t - 1;
-        const int time_offset = b * max_time + t_rel;
-        const int8_t* input_ptr = tflite::micro::GetTensorData<int8_t>(input) +
-                                  time_offset * input_step;
-        int8_t* output_ptr = tflite::micro::GetTensorData<int8_t>(output) +
-                             time_offset * output_step;
-
-        // Offset the {output,cell}_state pointers to the right batch.
-        int8_t* output_state_ptr =
-            tflite::micro::GetTensorData<int8_t>(output_state) +
-            b * output_batch_leading_dim;
-        int16_t* cell_state_ptr =
-            tflite::micro::GetTensorData<int16_t>(cell_state) + b * n_cell;
-
-        LstmStepInteger8x8_16(
-            input_ptr,
-            input_to_input_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(input_to_input_weights),
-            integer_lstm_param->effective_input_to_input_scale_a,
-            integer_lstm_param->effective_input_to_input_scale_b,
-            input_to_forget_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(input_to_forget_weights),
-            integer_lstm_param->effective_input_to_forget_scale_a,
-            integer_lstm_param->effective_input_to_forget_scale_b,
-            input_to_cell_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(input_to_cell_weights),
-            integer_lstm_param->effective_input_to_cell_scale_a,
-            integer_lstm_param->effective_input_to_cell_scale_b,
-            input_to_output_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(input_to_output_weights),
-            integer_lstm_param->effective_input_to_output_scale_a,
-            integer_lstm_param->effective_input_to_output_scale_b,
-            recurrent_to_input_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(
-                      recurrent_to_input_weights),
-            integer_lstm_param->effective_recurrent_to_input_scale_a,
-            integer_lstm_param->effective_recurrent_to_input_scale_b,
-            recurrent_to_forget_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(
-                      recurrent_to_forget_weights),
-            integer_lstm_param->effective_recurrent_to_forget_scale_a,
-            integer_lstm_param->effective_recurrent_to_forget_scale_b,
-            recurrent_to_cell_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(
-                      recurrent_to_cell_weights),
-            integer_lstm_param->effective_recurrent_to_cell_scale_a,
-            integer_lstm_param->effective_recurrent_to_cell_scale_b,
-            recurrent_to_output_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(
-                      recurrent_to_output_weights),
-            integer_lstm_param->effective_recurrent_to_output_scale_a,
-            integer_lstm_param->effective_recurrent_to_output_scale_b,
-            cell_to_input_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(cell_to_input_weights),
-            integer_lstm_param->effective_cell_to_input_scale_a,
-            integer_lstm_param->effective_cell_to_input_scale_b,
-            cell_to_forget_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(cell_to_forget_weights),
-            integer_lstm_param->effective_cell_to_forget_scale_a,
-            integer_lstm_param->effective_cell_to_forget_scale_b,
-            cell_to_output_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(cell_to_output_weights),
-            integer_lstm_param->effective_cell_to_output_scale_a,
-            integer_lstm_param->effective_cell_to_output_scale_b,
-            projection_weights == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int8_t>(projection_weights),
-            integer_lstm_param->effective_proj_scale_a,
-            integer_lstm_param->effective_proj_scale_b,
-            integer_lstm_param->hidden_zp,
-            integer_lstm_param->effective_hidden_scale_a,
-            integer_lstm_param->effective_hidden_scale_b,
-            input_layer_norm_coefficients == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(
-                      input_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_input_scale_a,
-            integer_lstm_param->layer_norm_input_scale_b,
-            forget_layer_norm_coefficients == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(
-                      forget_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_forget_scale_a,
-            integer_lstm_param->layer_norm_forget_scale_b,
-            cell_layer_norm_coefficients == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(
-                      cell_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_cell_scale_a,
-            integer_lstm_param->layer_norm_cell_scale_b,
-            output_layer_norm_coefficients == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int16_t>(
-                      output_layer_norm_coefficients),
-            integer_lstm_param->layer_norm_output_scale_a,
-            integer_lstm_param->layer_norm_output_scale_b,
-            input_gate_bias == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int32_t>(input_gate_bias),
-            forget_gate_bias == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int32_t>(forget_gate_bias),
-            cell_gate_bias == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int32_t>(cell_gate_bias),
-            output_gate_bias == nullptr
-                ? nullptr
-                : tflite::micro::GetTensorData<int32_t>(output_gate_bias),
-            integer_lstm_param->quantized_cell_clip,
-            integer_lstm_param->quantized_proj_clip,
-            integer_lstm_param->cell_scale,
-            integer_lstm_param->input_variance_guard,
-            integer_lstm_param->forget_variance_guard,
-            integer_lstm_param->cell_variance_guard,
-            integer_lstm_param->output_variance_guard,
-            integer_lstm_param->input_to_forget_effective_bias,
-            integer_lstm_param->recurrent_to_forget_effective_bias,
-            integer_lstm_param->input_to_cell_effective_bias,
-            integer_lstm_param->recurrent_to_cell_effective_bias,
-            integer_lstm_param->input_to_output_effective_bias,
-            integer_lstm_param->recurrent_to_output_effective_bias,
-            integer_lstm_param->input_to_input_effective_bias,
-            integer_lstm_param->recurrent_to_input_effective_bias,
-            integer_lstm_param->projection_effective_bias, /*n_batch=*/1,
-            n_cell, n_input, n_output, output_state_ptr, output_state_zp,
-            cell_state_ptr, output_ptr, scratch0, scratch1, scratch2, scratch3,
-            scratch4, scratch5);
-      }
-    }
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus EvalInteger8x8_8Lstm(
-    const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* input_to_input_weights,
-    const TfLiteEvalTensor* input_to_forget_weights,
-    const TfLiteEvalTensor* input_to_cell_weights,
-    const TfLiteEvalTensor* input_to_output_weights,
-    const TfLiteEvalTensor* recurrent_to_input_weights,
-    const TfLiteEvalTensor* recurrent_to_forget_weights,
-    const TfLiteEvalTensor* recurrent_to_cell_weights,
-    const TfLiteEvalTensor* recurrent_to_output_weights,
-    const TfLiteEvalTensor* cell_to_input_weights,
-    const TfLiteEvalTensor* cell_to_forget_weights,
-    const TfLiteEvalTensor* cell_to_output_weights,
-    const TfLiteEvalTensor* input_layer_norm_coefficients,
-    const TfLiteEvalTensor* forget_layer_norm_coefficients,
-    const TfLiteEvalTensor* cell_layer_norm_coefficients,
-    const TfLiteEvalTensor* output_layer_norm_coefficients,
-    const TfLiteEvalTensor* input_gate_bias,
-    const TfLiteEvalTensor* forget_gate_bias,
-    const TfLiteEvalTensor* cell_gate_bias,
-    const TfLiteEvalTensor* output_gate_bias,
-    const TfLiteEvalTensor* projection_weights,
-    const TfLiteEvalTensor* projection_bias, const TfLiteLSTMParams* params,
-    TfLiteEvalTensor* output_state, TfLiteEvalTensor* cell_state,
-    TfLiteEvalTensor* output, const IntegerLstmParameter* integer_lstm_param,
-    int32_t input_zp, int32_t output_state_zp, int8_t* scratch0,
-    int8_t* scratch1, int16_t* scratch2, int16_t* scratch3, int16_t* scratch4,
-    int16_t* scratch5, int16_t* scratch6, int16_t* scratch7) {
-  TFLITE_DCHECK(input->dims->size >= 2 && input->dims->size <= 3);
-  const int n_input = input->dims->data[input->dims->size - 1];
-  int max_time, n_batch;
-  if (input->dims->size == 2) {
-    max_time = 1;
-    n_batch = input->dims->data[0];
-  } else {
-    max_time = input->dims->data[0];
-    n_batch = input->dims->data[1];
-  }
-
-  // n_cell and n_output will be the same size when there is no projection.
-  const int n_cell = input_to_output_weights->dims->data[0];
-  const int n_output = recurrent_to_output_weights->dims->data[1];
-
-  // Get params for time/batch/sequence.
-  const int output_batch_leading_dim =
-      output->dims->data[output->dims->size - 1];
-  const int input_step = n_batch * n_input;
-  const int output_step = n_batch * output_batch_leading_dim;
-
-  for (int t = 0; t < max_time; t++) {
-    const int t_rel = t;
-    int8_t* output_ptr =
-        tflite::micro::GetTensorData<int8_t>(output) + t_rel * output_step;
-    // Input can be int8 asymmetric or int16 symmetric.
-    const int8_t* input_ptr =
-        tflite::micro::GetTensorData<int8_t>(input) + t_rel * input_step;
-    LstmStepInteger8x8_8(
-        input_ptr, input_zp,
-
-        input_to_input_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(input_to_input_weights),
-        integer_lstm_param->effective_input_to_input_scale_a,
-        integer_lstm_param->effective_input_to_input_scale_b,
-
-        input_to_forget_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(input_to_forget_weights),
-        integer_lstm_param->effective_input_to_forget_scale_a,
-        integer_lstm_param->effective_input_to_forget_scale_b,
-
-        input_to_cell_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(input_to_cell_weights),
-        integer_lstm_param->effective_input_to_cell_scale_a,
-        integer_lstm_param->effective_input_to_cell_scale_b,
-
-        input_to_output_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(input_to_output_weights),
-        integer_lstm_param->effective_input_to_output_scale_a,
-        integer_lstm_param->effective_input_to_output_scale_b,
-
-        recurrent_to_input_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(recurrent_to_input_weights),
-        integer_lstm_param->effective_recurrent_to_input_scale_a,
-        integer_lstm_param->effective_recurrent_to_input_scale_b,
-
-        recurrent_to_forget_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(recurrent_to_forget_weights),
-        integer_lstm_param->effective_recurrent_to_forget_scale_a,
-        integer_lstm_param->effective_recurrent_to_forget_scale_b,
-
-        recurrent_to_cell_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(recurrent_to_cell_weights),
-        integer_lstm_param->effective_recurrent_to_cell_scale_a,
-        integer_lstm_param->effective_recurrent_to_cell_scale_b,
-
-        recurrent_to_output_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(recurrent_to_output_weights),
-        integer_lstm_param->effective_recurrent_to_output_scale_a,
-        integer_lstm_param->effective_recurrent_to_output_scale_b,
-
-        cell_to_input_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(cell_to_input_weights),
-        integer_lstm_param->effective_cell_to_input_scale_a,
-        integer_lstm_param->effective_cell_to_input_scale_b,
-
-        cell_to_forget_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(cell_to_forget_weights),
-        integer_lstm_param->effective_cell_to_forget_scale_a,
-        integer_lstm_param->effective_cell_to_forget_scale_b,
-
-        cell_to_output_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(cell_to_output_weights),
-        integer_lstm_param->effective_cell_to_output_scale_a,
-        integer_lstm_param->effective_cell_to_output_scale_b,
-
-        projection_weights == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int8_t>(projection_weights),
-        integer_lstm_param->effective_proj_scale_a,
-        integer_lstm_param->effective_proj_scale_b,
-
-        input_layer_norm_coefficients == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int16_t>(
-                  input_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_input_scale_a,
-        integer_lstm_param->layer_norm_input_scale_b,
-
-        forget_layer_norm_coefficients == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int16_t>(
-                  forget_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_forget_scale_a,
-        integer_lstm_param->layer_norm_forget_scale_b,
-
-        cell_layer_norm_coefficients == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int16_t>(
-                  cell_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_cell_scale_a,
-        integer_lstm_param->layer_norm_cell_scale_b,
-
-        output_layer_norm_coefficients == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int16_t>(
-                  output_layer_norm_coefficients),
-        integer_lstm_param->layer_norm_output_scale_a,
-        integer_lstm_param->layer_norm_output_scale_b,
-
-        input_gate_bias == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int32_t>(input_gate_bias),
-        forget_gate_bias == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int32_t>(forget_gate_bias),
-        cell_gate_bias == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int32_t>(cell_gate_bias),
-        output_gate_bias == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int32_t>(output_gate_bias),
-        projection_bias == nullptr
-            ? nullptr
-            : tflite::micro::GetTensorData<int32_t>(projection_bias),
-
-        params, integer_lstm_param->intermediate_scale_a,
-        integer_lstm_param->intermediate_scale_b,
-        integer_lstm_param->intermediate_zp,
-        integer_lstm_param->quantized_cell_clip,
-        integer_lstm_param->quantized_proj_clip, n_batch, n_cell, n_input,
-        n_output, output_batch_leading_dim,
-        tflite::micro::GetTensorData<int8_t>(output_state), output_state_zp,
-        tflite::micro::GetTensorData<int16_t>(cell_state), output_ptr, scratch0,
-        scratch1, scratch2, scratch3, scratch4, scratch5, scratch6, scratch7);
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/maximum_minimum.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/maximum_minimum.cc
deleted file mode 100644
index cb3cae24..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/maximum_minimum.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/maximum_minimum.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace maximum_minimum {
-namespace {
-
-// This file has a reference implementation of TFMaximum/TFMinimum.
-enum KernelType {
-  kReference,
-};
-
-constexpr int kInputTensor1 = 0;
-constexpr int kInputTensor2 = 1;
-constexpr int kOutputTensor = 0;
-
-struct OpContext {
-  OpContext(TfLiteContext* context, TfLiteNode* node) {
-    input1 = tflite::micro::GetEvalInput(context, node, kInputTensor1);
-    input2 = tflite::micro::GetEvalInput(context, node, kInputTensor2);
-    output = tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  }
-  const TfLiteEvalTensor* input1;
-  const TfLiteEvalTensor* input2;
-  TfLiteEvalTensor* output;
-};
-
-struct MaximumOp {
-  template <typename data_type>
-  static data_type op(data_type el1, data_type el2) {
-    return el1 > el2 ? el1 : el2;
-  }
-};
-
-struct MinimumOp {
-  template <typename data_type>
-  static data_type op(data_type el1, data_type el2) {
-    return el1 < el2 ? el1 : el2;
-  }
-};
-
-}  // namespace
-
-template <typename data_type, typename op_type>
-void TFLiteOperation(TfLiteContext* context, TfLiteNode* node,
-                     const OpContext& op_context) {
-  reference_ops::MaximumMinimumBroadcastSlow(
-      tflite::micro::GetTensorShape(op_context.input1),
-      tflite::micro::GetTensorData<data_type>(op_context.input1),
-      tflite::micro::GetTensorShape(op_context.input2),
-      tflite::micro::GetTensorData<data_type>(op_context.input2),
-      tflite::micro::GetTensorShape(op_context.output),
-      tflite::micro::GetTensorData<data_type>(op_context.output),
-      op_type::template op<data_type>);
-}
-
-template <KernelType kernel_type, typename OpType>
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  OpContext op_context(context, node);
-
-  if (kernel_type == kReference) {
-    switch (op_context.output->type) {
-      case kTfLiteFloat32:
-        TFLiteOperation<float, OpType>(context, node, op_context);
-        break;
-      case kTfLiteInt8:
-        TFLiteOperation<int8_t, OpType>(context, node, op_context);
-        break;
-      case kTfLiteInt32:
-        TFLiteOperation<int32_t, OpType>(context, node, op_context);
-        break;
-      case kTfLiteInt64:
-        TFLiteOperation<int64_t, OpType>(context, node, op_context);
-        break;
-      default:
-        MicroPrintf("Type %s (%d) is not supported by Maximum/Minimum.",
-                    TfLiteTypeGetName(op_context.output->type),
-                    op_context.output->type);
-        return kTfLiteError;
-    }
-  } else {
-    MicroPrintf("Kernel type not supported by Maximum/Minimum.");
-    return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace maximum_minimum
-
-TfLiteRegistration Register_MAXIMUM() {
-  return tflite::micro::RegisterOp(
-      nullptr, nullptr,
-      maximum_minimum::Eval<maximum_minimum::kReference,
-                            maximum_minimum::MaximumOp>);
-}
-
-TfLiteRegistration Register_MINIMUM() {
-  return tflite::micro::RegisterOp(
-      nullptr, nullptr,
-      maximum_minimum::Eval<maximum_minimum::kReference,
-                            maximum_minimum::MinimumOp>);
-}
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/micro_ops.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/micro_ops.h
deleted file mode 100644
index 68583f75..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/micro_ops.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
-#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
-
-#include "tensorflow/lite/c/common.h"
-
-// Forward declaration of all micro op kernel registration methods. These
-// registrations are included with the standard `BuiltinOpResolver`.
-//
-// This header is particularly useful in cases where only a subset of ops are
-// needed. In such cases, the client can selectively add only the registrations
-// their model requires, using a custom `(Micro)MutableOpResolver`. Selective
-// registration in turn allows the linker to strip unused kernels.
-
-namespace tflite {
-
-// TFLM is incrementally moving towards a flat tflite namespace
-// (https://abseil.io/tips/130).
-// Any new ops (or cleanup of existing ops) should have their Register
-// function declarations in the tflite namespace.
-
-TfLiteRegistration Register_ADD();
-TfLiteRegistration Register_ADD_N();
-TfLiteRegistration Register_ASSIGN_VARIABLE();
-TfLiteRegistration Register_AVERAGE_POOL_2D();
-TfLiteRegistration Register_BATCH_TO_SPACE_ND();
-TfLiteRegistration Register_BROADCAST_ARGS();
-TfLiteRegistration Register_BROADCAST_TO();
-TfLiteRegistration Register_CALL_ONCE();
-TfLiteRegistration Register_CAST();
-// TODO(b/160234179): Change custom OPs to also return by value.
-TfLiteRegistration* Register_CIRCULAR_BUFFER();
-TfLiteRegistration Register_CUMSUM();
-TfLiteRegistration Register_DEPTH_TO_SPACE();
-TfLiteRegistration Register_DEPTHWISE_CONV_2D();
-TfLiteRegistration Register_DEQUANTIZE();
-TfLiteRegistration Register_DIV();
-TfLiteRegistration Register_ELU();
-TfLiteRegistration Register_EXP();
-TfLiteRegistration Register_EXPAND_DIMS();
-TfLiteRegistration Register_FILL();
-TfLiteRegistration Register_FLOOR_DIV();
-TfLiteRegistration Register_FLOOR_MOD();
-TfLiteRegistration Register_GATHER();
-TfLiteRegistration Register_GATHER_ND();
-TfLiteRegistration Register_HARD_SWISH();
-TfLiteRegistration Register_IF();
-TfLiteRegistration Register_L2_POOL_2D();
-TfLiteRegistration Register_LEAKY_RELU();
-TfLiteRegistration Register_LOG_SOFTMAX();
-TfLiteRegistration Register_LOGICAL_AND();
-TfLiteRegistration Register_LOGICAL_OR();
-TfLiteRegistration Register_LOGISTIC();
-TfLiteRegistration Register_MAX_POOL_2D();
-TfLiteRegistration Register_MIRROR_PAD();
-TfLiteRegistration Register_PRELU();
-TfLiteRegistration Register_MUL();
-TfLiteRegistration Register_QUANTIZE();
-TfLiteRegistration Register_READ_VARIABLE();
-TfLiteRegistration Register_RELU();
-TfLiteRegistration Register_RELU6();
-TfLiteRegistration Register_RESIZE_BILINEAR();
-TfLiteRegistration Register_SELECT_V2();
-TfLiteRegistration Register_SHAPE();
-TfLiteRegistration Register_SLICE();
-TfLiteRegistration Register_SPACE_TO_BATCH_ND();
-TfLiteRegistration Register_SPACE_TO_DEPTH();
-TfLiteRegistration Register_SQUARED_DIFFERENCE();
-TfLiteRegistration Register_SQUEEZE();
-TfLiteRegistration Register_SUB();
-TfLiteRegistration Register_SUM();
-TfLiteRegistration Register_SVDF();
-TfLiteRegistration Register_TRANSPOSE();
-TfLiteRegistration Register_TRANSPOSE_CONV();
-// TODO(b/230666079): resolve conflict with xtensa implementation
-TfLiteRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
-TfLiteRegistration Register_VAR_HANDLE();
-TfLiteRegistration Register_WHILE();
-TfLiteRegistration Register_ZEROS_LIKE();
-
-namespace ops {
-namespace micro {
-
-TfLiteRegistration Register_ABS();
-TfLiteRegistration Register_ARG_MAX();
-TfLiteRegistration Register_ARG_MIN();
-TfLiteRegistration Register_CEIL();
-TfLiteRegistration Register_CONCATENATION();
-TfLiteRegistration Register_COS();
-TfLiteRegistration Register_EQUAL();
-TfLiteRegistration Register_FLOOR();
-TfLiteRegistration Register_GREATER();
-TfLiteRegistration Register_GREATER_EQUAL();
-TfLiteRegistration Register_LESS();
-TfLiteRegistration Register_LESS_EQUAL();
-TfLiteRegistration Register_LOG();
-TfLiteRegistration Register_LOGICAL_NOT();
-TfLiteRegistration Register_MAXIMUM();
-TfLiteRegistration Register_MINIMUM();
-TfLiteRegistration Register_NEG();
-TfLiteRegistration Register_NOT_EQUAL();
-TfLiteRegistration Register_PACK();
-TfLiteRegistration Register_PAD();
-TfLiteRegistration Register_PADV2();
-TfLiteRegistration Register_RESHAPE();
-TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR();
-TfLiteRegistration Register_ROUND();
-TfLiteRegistration Register_RSQRT();
-TfLiteRegistration Register_SIN();
-TfLiteRegistration Register_SPLIT();
-TfLiteRegistration Register_SPLIT_V();
-TfLiteRegistration Register_SQRT();
-TfLiteRegistration Register_SQUARE();
-TfLiteRegistration Register_STRIDED_SLICE();
-TfLiteRegistration Register_UNPACK();
-TfLiteRegistration Register_L2_NORMALIZATION();
-TfLiteRegistration Register_TANH();
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/micro_tensor_utils.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/micro_tensor_utils.cc
deleted file mode 100644
index 32c68121..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/micro_tensor_utils.cc
+++ /dev/null
@@ -1,809 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/micro/kernels/micro_tensor_utils.h"
-
-#include <algorithm>
-#include <cmath>
-#include <cstdint>
-#include <cstring>
-#include <limits>
-#include <utility>
-
-#include "fixedpoint/fixedpoint.h"  // from @gemmlowp
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/kernels/internal/cppmath.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-
-namespace tflite {
-namespace micro_tensor_utils {
-
-namespace {
-const int32_t kInt16Max = std::numeric_limits<int16_t>::max();
-const int32_t kInt16Min = std::numeric_limits<int16_t>::min();
-}  // namespace
-
-void PortableSymmetricQuantizeFloats(const float* values, const int size,
-                                     int8_t* quantized_values, float* min_value,
-                                     float* max_value, float* scaling_factor) {
-  auto minmax = std::minmax_element(values, values + size);
-  *min_value = *minmax.first;
-  *max_value = *minmax.second;
-
-  PortableSymmetricQuantizeFloats(values, size, quantized_values, *min_value,
-                                  *max_value, scaling_factor);
-}
-
-void PortableSymmetricQuantizeFloats(const float* values, const int size,
-                                     int8_t* quantized_values, float min_value,
-                                     float max_value, float* scaling_factor) {
-  const int32_t kScale = 127;
-  const float range = std::max(std::abs(min_value), std::abs(max_value));
-  if (range == 0) {
-    memset(quantized_values, 0, size * sizeof(int8_t));
-    *scaling_factor = 1;
-    return;
-  }
-  *scaling_factor = range / kScale;
-  const float scaling_factor_inv = kScale / range;
-  for (int i = 0; i < size; ++i) {
-    const int32_t quantized_value =
-        static_cast<int32_t>(TfLiteRound(values[i] * scaling_factor_inv));
-    // Clamp: just in case some odd numeric offset.
-    quantized_values[i] = static_cast<int8_t>(
-        std::min(kScale, std::max(-kScale, quantized_value)));
-  }
-}
-
-void PortableAsymmetricQuantizeFloats(const float* values, const int size,
-                                      int8_t* quantized_values,
-                                      float* scaling_factor, int32_t* offset) {
-  const int32_t kMinScale = -128;
-  const int32_t kMaxScale = 127;
-  const double qmin_double = kMinScale;
-  const double qmax_double = kMaxScale;
-  const auto minmax = std::minmax_element(values, values + size);
-  const double rmin = static_cast<double>(std::min(0.0f, *minmax.first));
-  const double rmax = static_cast<double>(std::max(0.0f, *minmax.second));
-  if (rmin == rmax) {
-    memset(quantized_values, 0, size * sizeof(int8_t));
-    *scaling_factor = 1;
-    *offset = 0;
-    return;
-  } else {
-    double scale = (rmax - rmin) / (qmax_double - qmin_double);
-    const double zero_point_from_min = qmin_double - rmin / scale;
-    const double zero_point_from_max = qmax_double - rmax / scale;
-    const double zero_point_from_min_error =
-        std::abs(qmin_double) + std::abs(rmin / scale);
-    const double zero_point_from_max_error =
-        std::abs(qmax_double) + std::abs(rmax / scale);
-    const double zero_point_double =
-        zero_point_from_min_error < zero_point_from_max_error
-            ? zero_point_from_min
-            : zero_point_from_max;
-    int8_t nudged_zero_point = 0;
-    if (zero_point_double <= qmin_double) {
-      nudged_zero_point = kMinScale;
-    } else if (zero_point_double >= qmax_double) {
-      nudged_zero_point = kMaxScale;
-    } else {
-      nudged_zero_point = static_cast<int8_t>(round(zero_point_double));
-    }
-    *scaling_factor = scale;
-    *offset = nudged_zero_point;
-  }
-  const float scaling_factor_inv = 1.0f / *scaling_factor;
-  for (int i = 0; i < size; ++i) {
-    const int32_t quantized_value = static_cast<int32_t>(
-        TfLiteRound(*offset + values[i] * scaling_factor_inv));
-    quantized_values[i] =
-        std::min(kMaxScale, std::max(kMinScale, quantized_value));
-  }
-}
-
-void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
-                                                 int m_rows, int m_cols,
-                                                 const float* vector,
-                                                 int n_batch, float* result) {
-  float* result_in_batch = result;
-  for (int b = 0; b < n_batch; b++) {
-    const float* matrix_ptr = matrix;
-    for (int r = 0; r < m_rows; r++) {
-      float dot_prod = 0.0f;
-      const float* vector_in_batch = vector + b * m_cols;
-      for (int c = 0; c < m_cols; c++) {
-        dot_prod += *matrix_ptr++ * *vector_in_batch++;
-      }
-      *result_in_batch += dot_prod;
-      ++result_in_batch;
-    }
-  }
-}
-
-void PortableMatrixBatchVectorMultiplyAccumulate(
-    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
-    const int8_t* __restrict__ vectors, const float* scaling_factors,
-    int n_batch, float* __restrict__ result) {
-  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
-    const float batch_scaling_factor = scaling_factors[batch];
-    // Get the address of the first row.
-    const int8_t* row_ptr = matrix;
-    for (int row = 0; row < m_rows; ++row) {
-      // Initialize the dot product sum for the row to 0.
-      int32_t dotprod = 0;
-      // TODO(b/230666277): remove this
-#if defined(__GNUC__)
-      // Prefetch the row to cache.
-      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
-                         3 /* temporal locality */);
-#endif
-      for (int col = 0; col < m_cols; ++col, ++row_ptr) {
-        dotprod += (*row_ptr) * (vectors[col]);
-      }  // for col
-      *result += dotprod * batch_scaling_factor;
-      ++result;
-    }  // for row
-  }  // for batch
-}
-
-void PortableMatrixBatchVectorMultiplyAccumulate(
-    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
-    const int8_t* __restrict__ vectors, const float* scaling_factors,
-    int n_batch, float* __restrict__ result, const float* per_channel_scale,
-    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
-    bool* compute_row_sums, CpuBackendContext* context) {
-  if (input_offset == nullptr) {
-    PortableMatrixBatchVectorMultiplyAccumulate(
-        matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);
-    return;
-  }
-  if (!compute_row_sums || *compute_row_sums) {
-    PortableReductionSumVector(matrix, row_sums, m_rows, m_cols);
-    if (compute_row_sums) {
-      *compute_row_sums = false;
-    }
-  }
-
-  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
-    const float batch_scaling_factor = scaling_factors[batch];
-    const int32_t batch_offset = input_offset[batch];
-    const int8_t* row_ptr = matrix;
-    for (int row = 0; row < m_rows; ++row) {
-      int32_t dotprod = 0;
-      float scale = batch_scaling_factor;
-      if (per_channel_scale) {
-        scale *= per_channel_scale[row];
-      }
-#if defined(__GNUC__)
-      // Prefetch the row to cache.
-      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
-                         3 /* temporal locality */);
-#endif
-      for (int col = 0; col < m_cols; ++col, ++row_ptr) {
-        dotprod += (*row_ptr) * vectors[col];
-      }  // for col
-      dotprod -= row_sums[row] * batch_offset;
-      *result += dotprod * scale;
-      ++result;
-    }  // for row
-  }  // for batch
-}
-
-void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4(
-    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
-    const int32_t* __restrict__ indices, int m_rows, int m_cols,
-    const float* __restrict__ vector, int n_batch, float* __restrict__ result) {
-  const int kBlockSize = 4;
-  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
-  for (int batch = 0; batch < n_batch; batch++) {
-    const float* matrix_ptr = matrix;
-    for (int row = 0; row < m_rows; row++) {
-      float dot_prod = 0.0f;
-      const float* vector_in_batch = vector + batch * m_cols;
-      for (int i = segments[row]; i < segments[row + 1]; i++) {
-        const int block_start_index = indices[i] * kBlockSize;
-        const float* vector_block_in_batch_ptr =
-            vector_in_batch + block_start_index;
-        for (int c = 0; c < kBlockSize; c++) {
-          dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++;
-        }
-      }
-      result[batch * m_rows + row] += dot_prod;
-    }
-  }
-}
-
-void PortableSparseMatrixBatchVectorMultiplyAccumulate1x16(
-    const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments,
-    const int32_t* __restrict__ indices, int m_rows, int m_cols,
-    const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector,
-    int n_batch, const int32_t input_offset, const int32_t output_multiplier,
-    const int32_t output_shift, const int32_t output_offset,
-    const int32_t output_activation_min, const int32_t output_activation_max,
-    int8_t* __restrict__ result) {
-  const int kBlockSize = 16;
-  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
-  for (int batch = 0; batch < n_batch; ++batch) {
-    const int8_t* matrix_ptr = matrix;
-    for (int row = 0; row < m_rows; ++row) {
-      int32_t dot_prod = 0;
-      const int8_t* vector_in_batch = vector + batch * m_cols;
-      for (int i = segments[row]; i < segments[row + 1]; ++i) {
-        const int block_start_index = indices[i] * kBlockSize;
-        const int8_t* vector_block_in_batch_ptr =
-            vector_in_batch + block_start_index;
-        for (int c = 0; c < kBlockSize; c++) {
-          dot_prod += *matrix_ptr * *vector_block_in_batch_ptr++;
-          dot_prod += *matrix_ptr++ * input_offset;
-        }
-      }
-      const int32_t bias_value = bias_vector != nullptr ? bias_vector[row] : 0;
-      dot_prod = MultiplyByQuantizedMultiplier(dot_prod + bias_value,
-                                               output_multiplier, output_shift);
-      dot_prod += output_offset;
-      result[batch * m_rows + row] =
-          static_cast<int8_t>(ActivationFunctionWithMinMax(
-              dot_prod, output_activation_min, output_activation_max));
-    }
-  }
-}
-
-void PortableSparseMatrixBatchVectorMultiplyAccumulate(
-    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
-    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
-    float* __restrict__ result) {
-  const int kBlockSize = 16;
-  TFLITE_DCHECK_EQ(  // NOLINT
-      m_cols % kBlockSize, 0);
-  for (int batch = 0; batch < n_batch; batch++) {
-    const float* matrix_ptr = matrix;
-    const uint8_t* ledger_ptr = ledger;
-    for (int row = 0; row < m_rows; row++) {
-      float dot_prod = 0.0f;
-      int num_nonzero_blocks = *ledger_ptr++;
-      if (num_nonzero_blocks > 0) {
-        const float* vector_in_batch = vector + batch * m_cols;
-        for (int i = 0; i < num_nonzero_blocks; i++) {
-          const int block_start_index = *ledger_ptr++ * kBlockSize;
-          const float* vector_block_in_batch_ptr =
-              vector_in_batch + block_start_index;
-          for (int c = 0; c < kBlockSize; c++) {
-            dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++;
-          }
-        }
-      }
-      result[batch * m_rows + row] += dot_prod;
-    }
-  }
-}
-
-void PortableSparseMatrixBatchVectorMultiplyAccumulate(
-    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
-    const int m_cols, const int8_t* __restrict__ vectors,
-    const float* scaling_factors, int n_batch, float* __restrict__ result) {
-  static const int kBlockSize = 16;
-  TFLITE_DCHECK_EQ(  // NOLINT
-      m_cols % kBlockSize, 0);
-  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
-    const float batch_scaling_factor = scaling_factors[batch];
-    const uint8_t* ledger_ptr = ledger;
-    // Get the address of the first row.
-    const int8_t* row_ptr = matrix;
-    for (int row = 0; row < m_rows; ++row) {
-      // Initialize the dot product sum for the row to 0.
-      int32_t dotprod = 0;
-#if defined(__GNUC__)
-      // Prefetch the row to cache.
-      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
-                         3 /* temporal locality */);
-#endif
-      int num_nonzero_blocks = *ledger_ptr++;
-      for (int i = 0; i < num_nonzero_blocks; i++) {
-        const int block_start_index = *ledger_ptr++ * kBlockSize;
-        const int8_t* vector_block_ptr = vectors + block_start_index;
-        for (int c = 0; c < kBlockSize; c++) {
-          dotprod += (*row_ptr++) * (*vector_block_ptr++);
-        }  // for block
-      }  // for num_nonzero_blocks
-      result[batch * m_rows + row] += dotprod * batch_scaling_factor;
-    }  // for row
-  }  // for batch
-}
-
-template <typename T>
-void PortableMatrixBatchVectorMultiplyAccumulateImpl(
-    const int8_t* input, const int32_t* bias,
-    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
-    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
-    T* output) {
-  const int16_t output_max = std::numeric_limits<T>::max();
-  const int16_t output_min = std::numeric_limits<T>::min();
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int row = 0; row < n_output; ++row) {
-      int32_t acc = bias[row];
-      for (int col = 0; col < n_input; ++col) {
-        int8_t input_val = input[batch * n_input + col];
-        int8_t weights_val = input_to_gate_weights[row * n_input + col];
-        acc += input_val * weights_val;
-      }
-      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
-      acc += output_zp;
-      acc += output[batch * n_output + row];
-      if (acc > output_max) {
-        acc = output_max;
-      }
-      if (acc < output_min) {
-        acc = output_min;
-      }
-      output[batch * n_output + row] = static_cast<T>(acc);
-    }
-  }
-}
-
-void PortableMatrixBatchVectorMultiplyAccumulate(
-    const int8_t* input, const int32_t* bias,
-    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
-    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
-    int32_t* scratch, int16_t* output, CpuBackendContext* context) {
-  PortableMatrixBatchVectorMultiplyAccumulateImpl(
-      input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input,
-      n_output, output_zp, output);
-}
-
-void PortableMatrixBatchVectorMultiplyAccumulate(
-    const int8_t* input, const int32_t* bias,
-    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
-    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
-    int32_t* scratch, int8_t* output, CpuBackendContext* context) {
-  PortableMatrixBatchVectorMultiplyAccumulateImpl(
-      input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input,
-      n_output, output_zp, output);
-}
-
-void PortableMatrixBatchVectorMultiply(const int8_t* input,
-                                       int32_t input_zeropoint,
-                                       const int8_t* input_to_gate_weights,
-                                       int32_t input_to_gate_effective_scale_a,
-                                       int32_t input_to_gate_effective_scale_b,
-                                       int32_t n_batch, int32_t n_input,
-                                       int32_t n_cell, int8_t* gate_output,
-                                       int8_t gate_output_zp) {
-  const int32_t int8_max = std::numeric_limits<int8_t>::max();
-  const int32_t int8_min = std::numeric_limits<int8_t>::min();
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int row = 0; row < n_cell; ++row) {
-      int32_t acc = 0;
-      for (int col = 0; col < n_input; ++col) {
-        int32_t input_val = input[batch * n_input + col];
-        int8_t weights_val = input_to_gate_weights[row * n_input + col];
-        acc += (input_val - input_zeropoint) * weights_val;
-      }
-      acc = MultiplyByQuantizedMultiplier(acc, input_to_gate_effective_scale_a,
-                                          input_to_gate_effective_scale_b);
-      acc += gate_output_zp;
-      if (acc > int8_max) {
-        acc = int8_max;
-      }
-      if (acc < int8_min) {
-        acc = int8_min;
-      }
-      gate_output[batch * n_cell + row] = static_cast<int8_t>(acc);
-    }
-  }
-}
-
-void PortableMatrixBatchVectorMultiply(
-    const int16_t* hidden, const int8_t* hidden_to_output_weights,
-    int32_t proj_effective_scale_a, int32_t proj_effective_scale_b,
-    const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden,
-    int32_t n_output, int32_t output_zp, int8_t* proj_output) {
-  const int16_t int8_max = std::numeric_limits<int8_t>::max();
-  const int16_t int8_min = std::numeric_limits<int8_t>::min();
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int row = 0; row < n_output; ++row) {
-      int64_t acc = gate_bias[row];
-      for (int col = 0; col < n_hidden; ++col) {
-        int16_t input_val = hidden[batch * n_hidden + col];
-        int8_t weights_val = hidden_to_output_weights[row * n_hidden + col];
-        int64_t curr = acc;
-        acc += input_val * weights_val;
-        if (input_val * weights_val > 0 && acc < curr) {
-          acc = std::numeric_limits<int32_t>::max();
-        }
-        if (input_val * weights_val < 0 && acc > curr) {
-          acc = std::numeric_limits<int32_t>::min();
-        }
-      }
-      acc = MultiplyByQuantizedMultiplier(acc, proj_effective_scale_a,
-                                          proj_effective_scale_b);
-      acc += output_zp;
-      if (acc > int8_max) {
-        acc = int8_max;
-      }
-      if (acc < int8_min) {
-        acc = int8_min;
-      }
-      proj_output[batch * n_output + row] = acc;
-    }
-  }
-}
-
-void PortableApplyLayerNorm(const int16_t* input,
-                            const int16_t* layer_norm_weights,
-                            const int32_t* bias, int32_t layer_norm_scale_a,
-                            int32_t layer_norm_scale_b, int32_t variance_limit,
-                            int n_batch, int n_input, int16_t* output) {
-  // The square of std::pow(2, 10), which is the extra factor that makes sure
-  // normalized values have enough resolution.
-  static const int kTwoToPower20 = 1 << 20;
-  for (int i = 0; i < n_batch; ++i) {
-    int64_t sum = 0;
-    int64_t sum_sq = 0;
-    for (int j = 0; j < n_input; ++j) {
-      const int32_t index = i * n_input + j;
-      int32_t val = static_cast<int32_t>(input[index]);
-      sum += val;
-      sum_sq += val * val;
-    }
-    int32_t mean =
-        static_cast<int32_t>(static_cast<int64_t>(sum) * 1024 / n_input);
-    // TODO(b/173994730): Avoids overflow but only works for POT n_input.
-    int32_t temp = kTwoToPower20 / n_input;
-    int64_t variance =
-        sum_sq * temp - static_cast<int64_t>(mean) * static_cast<int64_t>(mean);
-    int32_t variance2 = static_cast<int32_t>(variance / kTwoToPower20);
-    if (variance2 < 1) {
-      variance2 = variance_limit;
-    }
-    int32_t stddev_inverse_a;
-    int stddev_inverse_b;
-    GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1,
-                                     &stddev_inverse_a, &stddev_inverse_b);
-
-    for (int j = 0; j < n_input; ++j) {
-      const int32_t index = i * n_input + j;
-      int32_t val = static_cast<int32_t>(input[index]);
-      int32_t shifted = 1024 * val - mean;
-      int32_t rescaled = MultiplyByQuantizedMultiplier(
-          shifted, stddev_inverse_a, stddev_inverse_b);
-      int64_t val3 = rescaled * layer_norm_weights[j] + bias[j];
-      int32_t val4 =
-          static_cast<int32_t>((val3 > 0 ? val3 + 512 : val3 - 512) / 1024);
-      int32_t val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a,
-                                                   layer_norm_scale_b + 12);
-      val5 = std::min(std::max(kInt16Min, val5), kInt16Max);
-      output[index] = static_cast<int16_t>(val5);
-    }
-  }
-}
-
-void PortableApplyLayerNormFloat(const int16_t* input,
-                                 const int16_t* layer_norm_weights,
-                                 int32_t layer_norm_scale_a,
-                                 int32_t layer_norm_scale_b,
-                                 const int32_t* bias, int n_batch, int n_input,
-                                 int16_t* output) {
-  const int32_t int16_max = std::numeric_limits<int16_t>::max();
-  const int32_t int16_min = std::numeric_limits<int16_t>::min();
-  const float layer_norm_scale =
-      layer_norm_scale_a *
-      std::pow(2.0, static_cast<double>(layer_norm_scale_b - 31));
-  const float bias_scale =
-      static_cast<float>(std::pow(2.0, -10)) * layer_norm_scale;
-
-  for (int batch = 0; batch < n_batch; ++batch) {
-    float sum = 0.0f;
-    float sum_sq = 0.0f;
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      const float value = static_cast<float>(input[index]);
-      sum += value;
-      sum_sq += value * value;
-    }
-    const float mean = sum / n_input;
-    float stddev_inv = 0.0f;
-    const float variance = sum_sq / n_input - mean * mean;
-    if (variance == 0) {
-      stddev_inv = 1.0f / std::sqrt(1e-8f);
-    } else {
-      stddev_inv = 1.0f / std::sqrt(variance);
-    }
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      const float normalized_value =
-          (static_cast<float>(input[index]) - mean) * stddev_inv;
-      const float weighted_normalized_value =
-          normalized_value * layer_norm_weights[i] * layer_norm_scale +
-          bias[i] * bias_scale;
-      const int32_t quant_output = static_cast<int32_t>(round(
-          weighted_normalized_value * static_cast<float>(std::pow(2, 12))));
-      output[index] = std::min(int16_max, std::max(int16_min, quant_output));
-    }
-  }
-}
-
-void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix,
-                                            int32_t scalar, int32_t n_row,
-                                            int32_t n_col, int32_t* output) {
-  for (int i = 0; i < n_row; ++i) {
-    int32_t row_sum = 0;
-    for (int j = 0; j < n_col; ++j) {
-      row_sum += *matrix++;
-    }
-    output[i] += row_sum * scalar;
-  }
-}
-
-void PortableApplySigmoid(const int16_t* input, int32_t n_batch,
-                          int32_t n_input, int16_t* output) {
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int c = 0; c < n_input; c++) {
-      using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
-      using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
-      const int index = batch * n_input + c;
-      F3 sigmoid_input = F3::FromRaw(input[index]);
-      F0 sigmoid_output = gemmlowp::logistic(sigmoid_input);
-      output[index] = sigmoid_output.raw();
-    }
-  }
-}
-
-void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch,
-                               int32_t n_input, int16_t* output) {
-  const int32_t int16_max = std::numeric_limits<int16_t>::max();
-  const int32_t int16_min = std::numeric_limits<int16_t>::min();
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      const float float_input =
-          input[index] * static_cast<float>(std::pow(2, -12));
-      const float float_output = 1.0f / (1.0f + std::exp(-float_input));
-      const int32_t quant_output = static_cast<int32_t>(
-          float_output * static_cast<float>(std::pow(2, 15)));
-      const int32_t quant_output_clamped =
-          std::min(int16_max, std::max(int16_min, quant_output));
-      output[index] = static_cast<int16_t>(quant_output_clamped);
-    }
-  }
-}
-
-template <int IntegerBits>
-void PortableApplyTanhImpl(const int16_t* input, int32_t n_batch,
-                           int32_t n_input, int16_t* output) {
-  using FX = gemmlowp::FixedPoint<std::int16_t, IntegerBits>;
-  using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      FX tanh_input = FX::FromRaw(input[index]);
-      F0 tanh_output = gemmlowp::tanh(tanh_input);
-      output[index] = tanh_output.raw();
-    }
-  }
-}
-
-void PortableApplyTanh(int32_t integer_bits, const int16_t* input,
-                       int32_t n_batch, int32_t n_input, int16_t* output) {
-  if (integer_bits > 6) {
-    TFLITE_ASSERT_FALSE;
-  }
-#define DISPATCH_TANH(i)                                       \
-  case i:                                                      \
-    PortableApplyTanhImpl<i>(input, n_batch, n_input, output); \
-    break;
-  switch (integer_bits) {
-    DISPATCH_TANH(0);
-    DISPATCH_TANH(1);
-    DISPATCH_TANH(2);
-    DISPATCH_TANH(3);
-    DISPATCH_TANH(4);
-    DISPATCH_TANH(5);
-    DISPATCH_TANH(6);
-    default:
-      return;
-  }
-#undef DISPATCH_TANH
-}
-
-void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch,
-                            int32_t n_input, int32_t integer_bits,
-                            int16_t* output) {
-  const int32_t int16_max = std::numeric_limits<int16_t>::max();
-  const int32_t int16_min = std::numeric_limits<int16_t>::min();
-  const double two = 2.0;
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      const float float_input =
-          input[index] * std::pow(two, static_cast<double>(integer_bits));
-      const float float_output = std::tanh(float_input);
-      const int32_t quant_output = static_cast<int32_t>(
-          float_output * static_cast<float>(std::pow(2, 15)));
-      const int32_t quant_output_clamped =
-          std::min(int16_max, std::max(int16_min, quant_output));
-      output[index] = static_cast<int16_t>(quant_output_clamped);
-    }
-  }
-}
-
-void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
-                      int n_batch, int n_input, int shift, int16_t* output) {
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      const int16_t a = input_1[index];
-      const int16_t b = input_2[index];
-      const int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
-      output[index] =
-          static_cast<int16_t>(gemmlowp::RoundingDivideByPOT(value, shift));
-    }
-  }
-}
-
-void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
-                      int32_t multiplier, int32_t shift, int32_t n_batch,
-                      int32_t n_input, int32_t output_zp, int8_t* output) {
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      const int16_t a = input_1[index];
-      const int16_t b = input_2[index];
-      int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
-      value = MultiplyByQuantizedMultiplier(value, multiplier, shift);
-      value += output_zp;
-      value = std::min(std::max(static_cast<int32_t>(-128), value),
-                       static_cast<int32_t>(127));
-
-      output[index] = static_cast<int8_t>(value);
-    }
-  }
-}
-
-void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2,
-                      int n_batch, int n_input, int16_t* output) {
-  for (int batch = 0; batch < n_batch; ++batch) {
-    for (int i = 0; i < n_input; ++i) {
-      const int index = batch * n_input + i;
-      int32_t sum = input_1[index] + input_2[index];
-      const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum));
-      output[index] = static_cast<int16_t>(sum_clamped);
-    }
-  }
-}
-
-float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
-                                     int v_size) {
-  float result = 0.0;
-  for (int v = 0; v < v_size; v++) {
-    result += *vector1++ * *vector2++;
-  }
-  return result;
-}
-
-namespace {
-inline int32_t VectorVectorDotProduct(const int16_t* vector1,
-                                      const int16_t* vector2, int v_size) {
-  int32_t result = 0;
-  for (int v = 0; v < v_size; v++) {
-    result += *vector1++ * *vector2++;
-  }
-  return result;
-}
-}  // namespace
-
-void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1,
-                                              const int16_t* vector2,
-                                              int v_size, int n_batch,
-                                              int32_t* result) {
-  for (int b = 0; b < n_batch; b++) {
-    result[b] = VectorVectorDotProduct(vector1, vector2, v_size);
-    vector1 += v_size;
-    vector2 += v_size;
-  }
-}
-
-void PortableVectorBatchVectorCwiseProductAccumulate(
-    const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
-    int32_t multiplier, int shift, int16_t* result) {
-  for (int b = 0; b < n_batch; b++) {
-    for (int v = 0; v < v_size; v++) {
-      int32_t prod = vector[v] * *batch_vector++;
-      prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
-      int32_t output = prod + *result;
-      output = std::max(std::min(static_cast<int32_t>(32767), output),
-                        static_cast<int32_t>(-32768));
-      *result++ = output;
-    }
-  }
-}
-
-void PortableSub1Vector(const float* vector, int v_size, float* result) {
-  for (int v = 0; v < v_size; v++) {
-    *result++ = 1.0f - *vector++;
-  }
-}
-
-void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result) {
-  static const int16_t kOne = 32767;
-  for (int v = 0; v < v_size; v++) {
-    *result++ = kOne - *vector++;
-  }
-}
-
-void PortableVectorScalarMultiply(const int8_t* vector, const int v_size,
-                                  const float scale, float* result) {
-  for (int v = 0; v < v_size; ++v) {
-    *result++ = scale * *vector++;
-  }
-}
-
-void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
-                                     float* __restrict__ output_vector,
-                                     int v_size, int n_batch) {
-  for (int batch = 0; batch < n_batch; ++batch) {
-    float sum = 0.0f;
-    for (int i = 0; i < v_size; ++i) {
-      sum += input_vector[i];
-    }
-    const float mean = sum / v_size;
-    float sum_diff_sq = 0.0f;
-    for (int i = 0; i < v_size; ++i) {
-      const float diff = input_vector[i] - mean;
-      sum_diff_sq += diff * diff;
-    }
-    const float variance = sum_diff_sq / v_size;
-    constexpr float kNormalizationConstant = 1e-8f;
-    const float stddev_inv =
-        1.0f / std::sqrt(variance + kNormalizationConstant);
-    for (int i = 0; i < v_size; ++i) {
-      output_vector[i] = (input_vector[i] - mean) * stddev_inv;
-    }
-    input_vector += v_size;
-    output_vector += v_size;
-  }
-}
-
-void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
-                                  const int8_t* recurrent, int8_t recurrent_zp,
-                                  int32_t input_effective_scale_a,
-                                  int32_t input_effective_scale_b,
-                                  int32_t recurrent_effective_scale_a,
-                                  int32_t recurrent_effective_scale_b,
-                                  int32_t n_batch, int32_t n_cell,
-                                  int16_t* output) {
-  const int32_t int16_max = std::numeric_limits<int16_t>::max();
-  const int32_t int16_min = std::numeric_limits<int16_t>::min();
-  for (int i = 0; i < n_batch * n_cell; ++i) {
-    int32_t x = static_cast<int32_t>(input[i]) - static_cast<int32_t>(input_zp);
-    int32_t h =
-        static_cast<int32_t>(recurrent[i]) - static_cast<int32_t>(recurrent_zp);
-    int32_t x_scaled = MultiplyByQuantizedMultiplier(x, input_effective_scale_a,
-                                                     input_effective_scale_b);
-    int32_t h_scaled = MultiplyByQuantizedMultiplier(
-        h, recurrent_effective_scale_a, recurrent_effective_scale_b);
-    int32_t y = h_scaled + x_scaled;
-    if (y > int16_max) {
-      y = int16_max;
-    }
-    if (y < int16_min) {
-      y = int16_min;
-    }
-    output[i] = static_cast<int16_t>(y);
-  }
-}
-
-}  // namespace micro_tensor_utils
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/mul.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/mul.h
deleted file mode 100644
index 0c6379e1..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/mul.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_
-#define TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_
-
-#include <cstdint>
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-
-namespace tflite {
-
-extern const int kMulInput1Tensor;
-extern const int kMulInput2Tensor;
-extern const int kMulOutputTensor;
-
-struct OpDataMul {
-  int32_t input1_zero_point;
-  int32_t input2_zero_point;
-
-  int32_t output_activation_min;
-  int32_t output_activation_max;
-  int32_t output_zero_point;
-  int32_t output_multiplier;
-  int output_shift;
-
-  float output_activation_min_f32;
-  float output_activation_max_f32;
-};
-
-void* MulInit(TfLiteContext* context, const char* buffer, size_t length);
-
-TfLiteStatus CalculateOpDataMul(TfLiteContext* context, TfLiteNode* node,
-                                TfLiteMulParams* params, OpDataMul* data);
-
-TfLiteStatus MulPrepare(TfLiteContext* context, TfLiteNode* node);
-
-void EvalMulQuantizedReference(TfLiteContext* context, TfLiteNode* node,
-                               const OpDataMul* data,
-                               const TfLiteEvalTensor* input1,
-                               const TfLiteEvalTensor* input2,
-                               TfLiteEvalTensor* output);
-
-void EvalMulFloatReference(TfLiteContext* context, TfLiteNode* node,
-                           TfLiteMulParams* params, const OpDataMul* data,
-                           const TfLiteEvalTensor* input1,
-                           const TfLiteEvalTensor* input2,
-                           TfLiteEvalTensor* output);
-
-// Generic must define registration function.
-TfLiteRegistration Register_MUL();
-
-#if defined(CMSIS_NN)
-TfLiteRegistration Register_MUL_INT8();
-#else
-// Fallback registration
-inline TfLiteRegistration Register_MUL_INT8() { return Register_MUL(); }
-#endif
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/neg.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/neg.cc
deleted file mode 100644
index 03cbbb09..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/neg.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/neg.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace neg { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - switch (input->type) { - // TODO(wangtz): handle for kTfLiteInt8 - case kTfLiteFloat32: - reference_ops::Negate(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData<float>(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData<float>(output)); - break; - default: - MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), - input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace neg - -TfLiteRegistration Register_NEG() { - return tflite::micro::RegisterOp(nullptr, nullptr, neg::Eval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pack.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/pack.cc deleted file mode 100644 index 01d62f77..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pack.cc +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pack { -namespace { - -constexpr int kOutputTensor = 0; - -template <typename T> -TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node, - TfLiteEvalTensor* output, int values_count, int axis) { - const TfLiteEvalTensor* input0 = - tflite::micro::GetEvalInput(context, node, 0); - - const int dimensions = output->dims->size; - const TfLiteIntArray* input_dims = input0->dims; - const TfLiteIntArray* output_dims = output->dims; - - if (axis < 0) { - axis += dimensions; - } - - int outer_size = 1; - for (int i = 0; i < axis; ++i) { - outer_size *= output_dims->data[i]; - } - int copy_size = 1; - for (int i = axis + 1; i < dimensions; ++i) { - copy_size *= output_dims->data[i]; - } - int input_size = 1; - for (int i = 0; i < input_dims->size; ++i) { - input_size *= input_dims->data[i]; - } - TFLITE_DCHECK_EQ(input_size, copy_size * outer_size); - - T* output_data = tflite::micro::GetTensorData<T>(output); - - for (int i = 0; i < values_count; ++i) { - const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); - const T* input_data = tflite::micro::GetTensorData<T>(t); - for (int k = 0; k < outer_size; ++k) { - const T* input_ptr = input_data + copy_size * k; - int loc = k * values_count * copy_size + i * copy_size; - T* output_ptr = output_data + loc; - for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; - } - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLitePackParams* data = - reinterpret_cast<const TfLitePackParams*>(node->builtin_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (output->type) { - case kTfLiteFloat32: { - return PackImpl<float>(context, node, output, data->values_count, - data->axis); - } - case kTfLiteInt8: { - return PackImpl<int8_t>(context, node, output, data->values_count, - data->axis); - } - case kTfLiteInt32: { - return PackImpl<int32_t>(context, node, output, data->values_count, - data->axis); - } - case kTfLiteInt64: { - return PackImpl<int64_t>(context, node, output, data->values_count, - data->axis); - } - default: { - MicroPrintf("Type '%s' is not supported by pack.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } - - return kTfLiteOk; -} - -} // namespace -} // namespace pack - -TfLiteRegistration Register_PACK() { - return tflite::micro::RegisterOp(nullptr, nullptr, pack::Eval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pad.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/pad.cc deleted file mode 100644 index 61c304bf..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pad.cc +++ /dev/null @@ -1,236 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/pad.h" - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pad { -namespace { - -struct OpData { - PadParams params; - int32_t output_zero_point; -}; - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, /*index=*/0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* paddings = - micro_context->AllocateTempInputTensor(node, /*index=*/1); - TF_LITE_ENSURE(context, paddings != nullptr); - TfLiteTensor* constant_values = - NumInputs(node) == 3 - ? micro_context->AllocateTempInputTensor(node, /*index=*/2) - : nullptr; - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, /*index=*/0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - - // Current implementations rely on the inputs being <= 4D. - TF_LITE_ENSURE(context, NumDimensions(input) <= - reference_ops::PadKernelMaxDimensionCount()); - - if (constant_values != nullptr) { - TF_LITE_ENSURE_EQ(context, input->type, constant_values->type); - // Ensure that constant_values is a scalar. - TF_LITE_ENSURE_EQ(context, NumElements(constant_values), 1); - } - - // There must be a pair of paddings for each output dimension. - TF_LITE_ENSURE_EQ(context, GetTensorShape(paddings).FlatSize(), - output->dims->size * 2); - - // On Micro, outputs must be properly sized by the converter. 
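For intuition, the per-dimension shape check that follows enforces output_dim = input_dim + pre_padding + post_padding. A minimal standalone sketch of that relation, using hypothetical shapes rather than anything from this kernel:

#include <array>
#include <cstdio>

// Hypothetical NHWC input padded by one pixel on each spatial edge.
// paddings is flattened as {pre0, post0, pre1, post1, ...}, matching the
// layout read from the constant paddings tensor.
int main() {
  const std::array<int, 4> input_dims = {1, 32, 32, 3};
  const std::array<int, 8> paddings = {0, 0, 1, 1, 1, 1, 0, 0};
  for (int i = 0; i < 4; ++i) {
    const int expected = input_dims[i] + paddings[2 * i] + paddings[2 * i + 1];
    std::printf("dim %d: expected output size %d\n", i, expected);
  }
  return 0;  // Prints 1, 34, 34, 3: only H and W grow.
}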
- // NOTE: This data is only available because the paddings buffer is stored in - // the flatbuffer: - TF_LITE_ENSURE(context, IsConstantTensor(paddings)); - const int32_t* paddings_data = GetTensorData(paddings); - for (int i = 0; i < output->dims->size; i++) { - int output_dim = output->dims->data[i]; - int expected_dim = - input->dims->data[i] + paddings_data[i * 2] + paddings_data[i * 2 + 1]; - TF_LITE_ENSURE_EQ(context, output_dim, expected_dim); - } - - // Calculate OpData: - data->params.resizing_category = ResizingCategory::kGenericResize; - const int paddings_total = GetTensorShape(paddings).FlatSize(); - if (paddings_total == 8 && (paddings_data[0] == 0 && paddings_data[1] == 0) && - (paddings_data[6] == 0 && paddings_data[7] == 0)) { - data->params.resizing_category = ResizingCategory::kImageStyle; - } - - const int num_input_dimensions = NumDimensions(input); - data->params.left_padding_count = num_input_dimensions; - data->params.right_padding_count = num_input_dimensions; - - for (int idx = num_input_dimensions - 1; idx >= 0; --idx) { - data->params.left_padding[idx] = paddings_data[idx * 2]; - data->params.right_padding[idx] = paddings_data[idx * 2 + 1]; - } - - if (input->type == kTfLiteInt8) { - if (constant_values == nullptr) { - // Quantized Pad requires that 0 is represented in the quantized - // range. - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } else { - // Quantized Pad requires that 'constant_values' is represented in the - // same quantized range as the input and output tensors. - TF_LITE_ENSURE_EQ(context, output->params.zero_point, - constant_values->params.zero_point); - TF_LITE_ENSURE_EQ(context, static_cast(output->params.scale), - static_cast(constant_values->params.scale)); - } - data->output_zero_point = output->params.zero_point; - } - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(paddings); - if (constant_values != nullptr) { - micro_context->DeallocateTempTfLiteTensor(constant_values); - } - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, /*index=*/0); - const TfLiteEvalTensor* constant_values = - NumInputs(node) == 3 - ? tflite::micro::GetEvalInput(context, node, /*index=*/2) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, /*index=*/0); - - switch (input->type) { - case kTfLiteFloat32: { - float pad_value = - constant_values == nullptr - ? 
0.f - : *tflite::micro::GetTensorData(constant_values); - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt8: { - int8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt16: { - int16_t pad_value = - constant_values == nullptr - ? 0 - : *tflite::micro::GetTensorData(constant_values); - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - case kTfLiteInt32: { - int32_t pad_value = - constant_values == nullptr - ? 0 - : *tflite::micro::GetTensorData(constant_values); - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - default: - - MicroPrintf("Type %s not currently supported by Pad.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace pad - -TfLiteRegistration Register_PAD() { - return tflite::micro::RegisterOp(pad::Init, pad::Prepare, pad::Eval); -} - -// Also register Pad as PadV2. -TfLiteRegistration Register_PADV2() { - return tflite::micro::RegisterOp(pad::Init, pad::Prepare, pad::Eval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pooling.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/pooling.cc deleted file mode 100644 index f32014e8..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pooling.cc +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/pooling.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/pooling.h" - -namespace tflite { - -namespace { - -TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpDataPooling* data = - static_cast(node->user_data); - - const TfLiteEvalTensor* input = - micro::GetEvalInput(context, node, kPoolingInputTensor); - TfLiteEvalTensor* output = - micro::GetEvalOutput(context, node, kPoolingOutputTensor); - - // Inputs and outputs share the same type, guaranteed by the converter. - switch (input->type) { - case kTfLiteFloat32: - AveragePoolingEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteInt8: - AveragePoolingEvalQuantized(context, node, params, data, input, output); - break; - default: - MicroPrintf("Input type %s is not currently supported", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpDataPooling* data = - static_cast(node->user_data); - - const TfLiteEvalTensor* input = - micro::GetEvalInput(context, node, kPoolingInputTensor); - TfLiteEvalTensor* output = - micro::GetEvalOutput(context, node, kPoolingOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: - MaxPoolingEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteInt8: - MaxPoolingEvalQuantized(context, node, params, data, input, output); - break; - default: - MicroPrintf("Type %s not currently supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpDataPooling)); -} - -} // namespace - -TfLiteRegistration Register_AVERAGE_POOL_2D() { - return tflite::micro::RegisterOp(Init, PoolingPrepare, AverageEval); -} - -TfLiteRegistration Register_MAX_POOL_2D() { - return tflite::micro::RegisterOp(Init, PoolingPrepare, MaxEval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pooling.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/pooling.h deleted file mode 100644 index 493250ee..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/pooling.h +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_ - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/kernels/micro_ops.h" - -namespace tflite { - -extern const int kPoolingInputTensor; -extern const int kPoolingOutputTensor; - -struct OpDataPooling { - TfLitePaddingValues padding; - int32_t activation_min; - int32_t activation_max; - float activation_min_f32; - float activation_max_f32; -}; - -TfLiteStatus CalculateOpDataPooling(const TfLiteContext* context, - const TfLitePoolParams* params, - const TfLiteTensor* input, - const TfLiteTensor* output, - OpDataPooling* data); - -TfLiteStatus PoolingPrepare(TfLiteContext* context, TfLiteNode* node); - -void AveragePoolingEvalFloat(const TfLiteContext* context, - const TfLiteNode* node, - const TfLitePoolParams* params, - const OpDataPooling* data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output); - -void AveragePoolingEvalQuantized(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, - const OpDataPooling* data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output); - -void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpDataPooling* data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output); - -void MaxPoolingEvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, - const OpDataPooling* data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output); - -#if defined(CMSIS_NN) -TfLiteRegistration Register_AVERAGE_POOL_2D_INT8(); - -TfLiteRegistration Register_MAX_POOL_2D_INT8(); -#else -inline TfLiteRegistration Register_AVERAGE_POOL_2D_INT8() { - return tflite::Register_AVERAGE_POOL_2D(); -} - -inline TfLiteRegistration Register_MAX_POOL_2D_INT8() { - return tflite::Register_MAX_POOL_2D(); -} -#endif -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/prelu.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/prelu.cc deleted file mode 100644 index 6c5a8a4f..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/prelu.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/prelu.h" - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/prelu.h" - -namespace tflite { - -void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(PreluParams)); -} - -TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const PreluParams& params = - *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* alpha = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - switch (input->type) { - case kTfLiteFloat32: { - BroadcastPrelu4DSlowFloat(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteInt8: { - reference_ops::BroadcastPrelu4DSlow( - params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - default: - MicroPrintf("Only float32 and uint8_t are supported currently, got %d.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } -} - -TfLiteRegistration Register_PRELU() { - return tflite::micro::RegisterOp(PreluInit, PreluPrepare, PreluEval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/reduce_common.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/reduce_common.cc deleted file mode 100644 index b191ba21..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/reduce_common.cc +++ /dev/null @@ -1,374 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" -#include "tensorflow/lite/kernels/internal/reference/reduce.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/reduce.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { - -const int kMaxNumberOfAxis = 5; -const int kMaxNumberOfReducedAxis = 2; - -TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node, - int32_t* multiplier, int* shift) { - MicroContext* micro_context = GetMicroContext(context); - - // Inputs Tensor (dtype depends on quantization): - // [0] = Input - // [1] = Axis - TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); - - // Outputs Tensor (dtype depends on quantization): - // [0] = Output - - // Validate number of inputs and outputs - TF_LITE_ENSURE_EQ(context, node->inputs->size, 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Validate axis type - TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); - TF_LITE_ENSURE(context, axis != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32); - - if (input->type == kTfLiteInt8) { - TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, multiplier, shift); - micro_context->DeallocateTempTfLiteTensor(output); - } - micro_context->DeallocateTempTfLiteTensor(axis); - micro_context->DeallocateTempTfLiteTensor(input); - return kTfLiteOk; -} - -TfLiteStatus PrepareMaxHelper(TfLiteContext* context, TfLiteNode* node, - OpDataReduce* op_data) { - TF_LITE_ENSURE_OK(context, PrepareSimple(context, node, &op_data->multiplier, - &op_data->shift)); - - MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); - TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); - TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); - - op_data->input_scale = input->params.scale; - op_data->output_scale = output->params.scale; - op_data->num_output_elements = NumElements(output); - - context->RequestScratchBufferInArena(context, sizeof(int) * input->dims->size, - &op_data->temp_buffer_idx); - context->RequestScratchBufferInArena( - context, sizeof(int) * static_cast(ElementCount(*axis->dims)), - &op_data->resolved_axis_idx); - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(output); - micro_context->DeallocateTempTfLiteTensor(axis); - return kTfLiteOk; -} - -TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node, - OpDataReduce* op_data) { - MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); - TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); - if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { - const double real_multiplier = 
static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &op_data->multiplier, &op_data->shift); - } - - int output_size = NumElements(output); - if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { - context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t), - &op_data->temp_buffer_idx); - op_data->input_zp = input->params.zero_point; - op_data->input_scale = input->params.scale; - op_data->output_zp = output->params.zero_point; - op_data->output_scale = output->params.scale; - } - - TF_LITE_ENSURE_OK( - context, - PrepareSimple(context, node, &(op_data->multiplier), &(op_data->shift))); - // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018) - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -void ResolveAxis(const int* axis_data, int axis_count, - tflite::MeanParams* op_params) { - int i = 0; - for (; i < axis_count; ++i) { - op_params->axis[i] = static_cast(axis_data[i]); - } - for (; i < 4; ++i) { - op_params->axis[i] = 1; - } - op_params->axis_count = axis_count; -} - -TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node, - OpDataReduce* op_data) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TfLiteReducerParams* params = - reinterpret_cast(node->builtin_data); - - int num_axis = static_cast(ElementCount(*axis->dims)); - int temp_index[kMaxNumberOfAxis]; - int resolved_axis[kMaxNumberOfReducedAxis]; - - tflite::MeanParams op_params; - ResolveAxis(tflite::micro::GetTensorData(axis), num_axis, &op_params); - - // Special case mean implementation exists for 4D mean across axes 1 and 2. - bool special_case_4d_axes_1_and_2 = - input->dims->size == 4 && op_params.axis_count == 2 && - ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - - switch (input->type) { - case kTfLiteFloat32: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, - tflite::micro::GetTensorData(output))); - } - } break; - case kTfLiteInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
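A note on the quantized branches here: when input and output quantization differ, the integer mean is rescaled by the multiplier/shift pair that PrepareSimple() derives via QuantizeMultiplier(). A rough standalone sketch of that decomposition, kept in plain floating point rather than the kernel's fixed-point form:

#include <cmath>
#include <cstdio>

// Illustrative decomposition in the spirit of QuantizeMultiplier():
// real_multiplier == m * 2^shift with m in [0.5, 1). TFLite stores m as a
// Q31 integer; this sketch keeps it as a double for readability.
int main() {
  const double input_scale = 0.05;  // hypothetical quantization scales
  const double output_scale = 0.4;
  const double real_multiplier = input_scale / output_scale;  // 0.125
  int shift = 0;
  const double m = std::frexp(real_multiplier, &shift);  // 0.5, shift == -2
  std::printf("m=%f shift=%d reconstructed=%f\n", m, shift,
              std::ldexp(m, shift));
  return 0;
}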
- if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_integer_ops::Mean( - op_params, op_data->multiplier, op_data->shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), op_data->input_zp, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), op_data->output_zp); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, temp_buffer)); - } else { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - case kTfLiteInt16: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_integer_ops::Mean( - op_params, op_data->multiplier, op_data->shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), op_data->input_zp, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), op_data->output_zp); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean(tflite::micro::GetTensorData(input), - input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, - resolved_axis, temp_buffer)); - } else { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - default: - TF_LITE_ENSURE_MSG(context, false, - "Currently, only float32, int8 or int16 input type " - "is supported."); - } - return kTfLiteOk; -} - -TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node, - OpDataReduce* op_data) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TfLiteReducerParams* params = - 
static_cast(node->builtin_data); - - // Interpret an axis tensor with null dimensions as a scalar - int num_axis = static_cast(ElementCount(*axis->dims)); - int* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - int* resolved_axis = static_cast( - context->GetScratchBuffer(context, op_data->resolved_axis_idx)); - switch (input->type) { - case kTfLiteFloat32: - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const float current, const float in) -> float { - return (in > current) ? in : current; - })); - break; - case kTfLiteInt8: - TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), - static_cast(op_data->output_scale)); - TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const int8_t current, const int8_t in) -> int8_t { - return (in > current) ? in : current; - })); - break; - default: - MicroPrintf("Only float32 and int8 types are supported."); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node, - OpDataReduce* op_data) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TfLiteReducerParams* params = - static_cast(node->builtin_data); - - // Interpret an axis tensor with null dimensions as a scalar. 
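As context for the ReduceGeneric() calls in these helpers: each reduction is a fold over the elements with an init value and a small combiner (lowest() with max for REDUCE_MAX above, 0 with addition for the float SUM case below). A minimal sketch of that fold over a plain array:

#include <array>
#include <cstdio>
#include <limits>

int main() {
  const std::array<float, 6> values = {1.5f, -2.0f, 7.25f, 0.0f, 3.0f, -9.0f};
  // Same shape as the lambdas handed to reference_ops::ReduceGeneric():
  auto max_fold = [](float current, float in) {
    return in > current ? in : current;
  };
  float max_v = std::numeric_limits<float>::lowest();  // init for REDUCE_MAX
  float sum_v = 0.0f;                                  // init for SUM
  for (const float v : values) {
    max_v = max_fold(max_v, v);
    sum_v += v;
  }
  std::printf("max=%f sum=%f\n", max_v, sum_v);  // max=7.25 sum=0.75
  return 0;
}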
- int num_axis = static_cast(ElementCount(*axis->dims)); - int temp_index[kMaxNumberOfAxis]; - int resolved_axis[kMaxNumberOfReducedAxis]; - - switch (input->type) { - case kTfLiteFloat32: { - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, /*init_value=*/0.f, - [](const float current, const float in) -> float { - return in + current; - })); - } break; - case kTfLiteInt8: { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), op_data->output_zp, - op_data->output_scale, output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, temp_buffer, - /*compute_sum=*/true)); - } break; - case kTfLiteInt16: { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), op_data->output_zp, - op_data->output_scale, output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, temp_buffer, - /*compute_sum=*/true)); - } break; - default: - MicroPrintf("Only float32, int8, and int16 types are supported."); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/resize_bilinear.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/resize_bilinear.cc deleted file mode 100644 index f2acd9f4..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/resize_bilinear.cc +++ /dev/null @@ -1,115 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/resize_bilinear.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kSizeTensor = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kInputTensor); - TfLiteTensor* size = - micro_context->AllocateTempInputTensor(node, kSizeTensor); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); - TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); - - TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); - output->type = input->type; - - TF_LITE_ENSURE_MSG(context, IsConstantTensor(size), - "Non constant size tensor not supported"); - - // Ensure params are valid. - auto* params = - reinterpret_cast(node->builtin_data); - if (params->half_pixel_centers && params->align_corners) { - MicroPrintf("If half_pixel_centers is True, align_corners must be False."); - return kTfLiteError; - } - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(size); - micro_context->DeallocateTempTfLiteTensor(output); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = - reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* size = - tflite::micro::GetEvalInput(context, node, kSizeTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - if (output->type == kTfLiteFloat32) { - tflite::ResizeBilinearParams op_params; - op_params.align_corners = params->align_corners; - op_params.half_pixel_centers = params->half_pixel_centers; - reference_ops::ResizeBilinear(op_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt8) { - tflite::ResizeBilinearParams op_params; - op_params.align_corners = params->align_corners; - op_params.half_pixel_centers = params->half_pixel_centers; - reference_ops::ResizeBilinearInteger( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - MicroPrintf("Output type is %d, requires float or int8.", output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_RESIZE_BILINEAR() { - return tflite::micro::RegisterOp(nullptr, Prepare, Eval); -} - -} // namespace tflite diff --git 
a/code/components/tflite-lib/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc deleted file mode 100644 index aa3b8fa1..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace resize_nearest_neighbor { - -constexpr int kInputTensor = 0; -constexpr int kSizeTensor = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - MicroContext* micro_context = GetMicroContext(context); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TfLiteTensor* input = - micro_context->AllocateTempInputTensor(node, kInputTensor); - TfLiteTensor* size = - micro_context->AllocateTempInputTensor(node, kSizeTensor); - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - - // Our current implementations rely on the input being 4D, - // and the size being 1D tensor with exactly 2 elements. 
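For intuition about what Eval() computes below: with the defaults it sets (align_corners == false, half_pixel_centers == false), nearest-neighbor resize maps each output row or column back to an input index by the size ratio, roughly floor(y * in_size / out_size) clamped to the input range. A small sketch under those assumptions, with hypothetical sizes:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const int in_h = 4;   // hypothetical input height
  const int out_h = 8;  // hypothetical output height
  const float scale = static_cast<float>(in_h) / out_h;
  for (int y = 0; y < out_h; ++y) {
    const int in_y =
        std::min(static_cast<int>(std::floor(y * scale)), in_h - 1);
    std::printf("output row %d <- input row %d\n", y, in_y);
  }
  return 0;  // Each input row is used twice when upscaling 4 -> 8.
}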
- TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); - TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); - TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); - TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2); - - output->type = input->type; - - if (!IsConstantTensor(size)) { - MicroPrintf("Dynamic tensors are unsupported in tfmicro."); - return kTfLiteError; - } - - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(size); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = - reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* size = - tflite::micro::GetEvalInput(context, node, kSizeTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - tflite::ResizeNearestNeighborParams op_params; - op_params.align_corners = params->align_corners; - op_params.half_pixel_centers = false; - - if (output->type == kTfLiteFloat32) { - reference_ops::ResizeNearestNeighbor( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt8) { - reference_ops::ResizeNearestNeighbor( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt16) { - reference_ops::ResizeNearestNeighbor( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - MicroPrintf("Output tensor type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - - return kTfLiteError; - } - - return kTfLiteOk; -} -} // namespace resize_nearest_neighbor - -TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR() { - return tflite::micro::RegisterOp(nullptr, resize_nearest_neighbor::Prepare, - resize_nearest_neighbor::Eval); -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/select.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/select.cc deleted file mode 100644 index 0bcbfbea..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/select.cc +++ /dev/null @@ -1,196 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/select.h" - -#include <stddef.h> -#include <stdint.h> - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { - -constexpr int kInputTensorCondition = 0; -constexpr int kInputTensorX = 1; -constexpr int kInputTensorY = 2; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - // True if input condition is scalar or input condition has rank one and - // matches the first dimension of other inputs. - bool has_low_rank_input_condition; -}; - -void* SelectInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - auto* data = static_cast<OpData*>( - context->AllocatePersistentBuffer(context, sizeof(OpData))); - data->requires_broadcast = false; - data->has_low_rank_input_condition = false; - return data; -} - -TfLiteStatus CheckBroadcastShape(TfLiteContext* context, - const TfLiteTensor* input1, - const TfLiteTensor* input2, - const TfLiteTensor* input3, - const TfLiteIntArray* output_shape) { - const int dims1 = NumDimensions(input1); - const int dims2 = NumDimensions(input2); - const int dims3 = NumDimensions(input3); - const int out_dims = std::max(std::max(dims1, dims2), dims3); - TF_LITE_ENSURE_EQ(context, out_dims, output_shape->size); - - for (int i = 0; i < out_dims; ++i) { - const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); - const int min_value = std::min(std::min(d1, d2), d3); - int max_value = std::max(std::max(d1, d2), d3); - // If one dimension is 0, others must be 0 or 1. - if (min_value == 0) max_value = 0; - if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) || - !(d3 == 1 || d3 == max_value)) { - MicroPrintf("Given shapes are not broadcastable."); - return kTfLiteError; - } - TF_LITE_ENSURE_EQ(context, output_shape->data[out_dims - i - 1], max_value); - } - return kTfLiteOk; -} - -TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) { - OpData* data = reinterpret_cast<OpData*>(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* input_condition = - micro_context->AllocateTempInputTensor(node, kInputTensorCondition); - - TfLiteTensor* input_x = - micro_context->AllocateTempInputTensor(node, kInputTensorX); - - TfLiteTensor* input_y = - micro_context->AllocateTempInputTensor(node, kInputTensorY); - - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - - // Input must be bool. - TF_LITE_ENSURE_TYPES_EQ(context, input_condition->type, kTfLiteBool); - TF_LITE_ENSURE_TYPES_EQ(context, input_x->type, input_y->type); - output->type = input_x->type; - - // Respect the original output shape when there are mixed shapes to represent - // a scalar data.
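To make the CheckBroadcastShape() helper above concrete: dimensions are compared right-aligned, a size of 1 broadcasts, and each output dimension takes the maximum. A standalone sketch with hypothetical shapes:

#include <algorithm>
#include <cstdio>
#include <vector>

// Right-aligned broadcast of three shapes, as in CheckBroadcastShape().
int main() {
  const std::vector<int> cond = {1, 4};
  const std::vector<int> x = {3, 1, 4};
  const std::vector<int> y = {3, 2, 1};
  const int out_dims = 3;
  for (int i = 0; i < out_dims; ++i) {
    auto dim = [&](const std::vector<int>& s) {
      const int d = static_cast<int>(s.size());
      return i >= d ? 1 : s[d - i - 1];
    };
    const int d1 = dim(cond), d2 = dim(x), d3 = dim(y);
    const int max_value = std::max({d1, d2, d3});
    const bool ok = (d1 == 1 || d1 == max_value) &&
                    (d2 == 1 || d2 == max_value) &&
                    (d3 == 1 || d3 == max_value);
    std::printf("axis %d (from the right): out=%d %s\n", i, max_value,
                ok ? "ok" : "not broadcastable");
  }
  return 0;  // Broadcast output shape here is {3, 2, 4}.
}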
- if (GetTensorShape(input_condition).FlatSize() == 1 && - GetTensorShape(input_x).FlatSize() == 1 && - GetTensorShape(input_y).FlatSize() == 1 && - GetTensorShape(output).FlatSize() == 1) { - return kTfLiteOk; - } - - bool same_shape = HaveSameShapes(input_condition, input_x) && - HaveSameShapes(input_x, input_y); - if (!same_shape) { - TF_LITE_ENSURE_OK( - context, CheckBroadcastShape(context, input_condition, input_x, input_y, - output->dims)); - data->requires_broadcast = true; - } - - micro_context->DeallocateTempTfLiteTensor(input_condition); - micro_context->DeallocateTempTfLiteTensor(input_x); - micro_context->DeallocateTempTfLiteTensor(input_y); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) { - OpData* data = static_cast(node->user_data); - MicroContext* micro_context = GetMicroContext(context); - - TfLiteTensor* input_condition = - micro_context->AllocateTempInputTensor(node, kInputTensorCondition); - - TfLiteTensor* input_x = - micro_context->AllocateTempInputTensor(node, kInputTensorX); - - TfLiteTensor* input_y = - micro_context->AllocateTempInputTensor(node, kInputTensorY); - - TfLiteTensor* output = - micro_context->AllocateTempOutputTensor(node, kOutputTensor); - -#define TF_LITE_SELECT(type, op) \ - reference_ops::op(GetTensorShape(input_condition), \ - GetTensorData(input_condition), \ - GetTensorShape(input_x), GetTensorData(input_x), \ - GetTensorShape(input_y), GetTensorData(input_y), \ - GetTensorShape(output), GetTensorData(output)); - -#define TF_LITE_SWITCH(type, op) \ - switch (type) { \ - case kTfLiteFloat32: \ - TF_LITE_SELECT(float, op); \ - break; \ - case kTfLiteInt8: \ - TF_LITE_SELECT(int8_t, op); \ - break; \ - case kTfLiteInt16: \ - TF_LITE_SELECT(int16_t, op); \ - break; \ - default: \ - MicroPrintf("Does not support type other than %s, but got %s", \ - "int8|int16|float32", TfLiteTypeGetName(type)); \ - return kTfLiteError; \ - } - - if (data->has_low_rank_input_condition) { - MicroPrintf("Not yet implemented."); - return kTfLiteError; - } else if (data->requires_broadcast) { - TF_LITE_SWITCH(input_x->type, BroadcastSelect5DSlow); - } else { - TF_LITE_SWITCH(input_x->type, Select); - } - -#undef TF_LITE_SELECT -#undef TF_LITE_SWITCH - micro_context->DeallocateTempTfLiteTensor(input_condition); - micro_context->DeallocateTempTfLiteTensor(input_x); - micro_context->DeallocateTempTfLiteTensor(input_y); - micro_context->DeallocateTempTfLiteTensor(output); - - return kTfLiteOk; -} - -// SelectV2 op selects values of 'x' if the corresponding value of 'condition' -// is true or the value of 'y' if false. There are valid condition input sizes: -// -// 1. Either the same shape (in which case the select is elementwise), or -// 2. Broadcastable shapes between 'condition', 'x' and 'y'. -TfLiteRegistration Register_SELECT_V2() { - return tflite::micro::RegisterOp(tflite::SelectInit, tflite::SelectPrepare, - tflite::SelectEval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/shape.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/shape.cc deleted file mode 100644 index b8bd5544..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/shape.cc +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { - -namespace { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -void ExtractShape(const TfLiteEvalTensor* input, int32_t* output_data) { - for (int i = 0; i < input->dims->size; ++i) { - output_data[i] = input->dims->data[i]; - } -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - if (output->type != kTfLiteInt32) { - MicroPrintf("Output type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } else { - ExtractShape(input, tflite::micro::GetTensorData(output)); - } - - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_SHAPE() { - return tflite::micro::RegisterOp(nullptr, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/slice.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/slice.cc deleted file mode 100644 index a6ecd935..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/slice.cc +++ /dev/null @@ -1,157 +0,0 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/slice.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-
-namespace tflite {
-
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kBeginTensor = 1;
-constexpr int kSizeTensor = 2;
-constexpr int kOutputTensor = 0;
-
-const int kMaxDim = 5;
-
-template <typename T>
-void GetBeginAndSizeVectors(int dimensions, const TfLiteEvalTensor* begin,
-                            const TfLiteEvalTensor* size, int32_t* begins,
-                            int32_t* sizes) {
-  int offset = kMaxDim - dimensions;
-  for (int idx = 0; idx < dimensions; ++idx) {
-    begins[offset + idx] = tflite::micro::GetTensorData<T>(begin)[idx];
-    sizes[offset + idx] = tflite::micro::GetTensorData<T>(size)[idx];
-  }
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TFLITE_DCHECK(input != nullptr);
-  TfLiteTensor* begin =
-      micro_context->AllocateTempInputTensor(node, kBeginTensor);
-  TFLITE_DCHECK(begin != nullptr);
-  TfLiteTensor* size =
-      micro_context->AllocateTempInputTensor(node, kSizeTensor);
-  TFLITE_DCHECK(size != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TFLITE_DCHECK(output != nullptr);
-
-  // Ensure validity of input tensor and its dimension.
-  TFLITE_DCHECK(input->type == output->type);
-  TFLITE_DCHECK(begin->type == size->type);
-  TFLITE_DCHECK(begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64);
-  TFLITE_DCHECK(size->type == kTfLiteInt32 || size->type == kTfLiteInt64);
-  TFLITE_DCHECK(NumDimensions(begin) == 1);
-  TFLITE_DCHECK(NumDimensions(size) == 1);
-  TFLITE_DCHECK(NumElements(begin) == NumElements(size));
-  TFLITE_DCHECK(NumDimensions(input) <= kMaxDim);
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(begin);
-  micro_context->DeallocateTempTfLiteTensor(size);
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  const TfLiteEvalTensor* begin =
-      tflite::micro::GetEvalInput(context, node, kBeginTensor);
-  const TfLiteEvalTensor* size =
-      tflite::micro::GetEvalInput(context, node, kSizeTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  tflite::SliceParams op_params;
-  op_params.begin_count = kMaxDim;
-  op_params.size_count = kMaxDim;
-  for (int i = 0; i < kMaxDim; ++i) {
-    op_params.begin[i] = 0;
-    op_params.size[i] = 1;
-  }
-
-  if (begin->type == kTfLiteInt32) {
-    GetBeginAndSizeVectors<int32_t>(input->dims->size, begin, size,
-                                    op_params.begin, op_params.size);
-  } else if (begin->type == kTfLiteInt64) {
-    GetBeginAndSizeVectors<int64_t>(input->dims->size, begin, size,
-                                    op_params.begin, op_params.size);
-  } else {
-    MicroPrintf("Begin tensor type %s (%d) not supported.",
-                TfLiteTypeGetName(begin->type), begin->type);
-    return kTfLiteError;
-  }
-
-  switch (input->type) {
-    case kTfLiteFloat32:
-      reference_ops::Slice(op_params,
-                           tflite::micro::GetTensorShape(input),
-                           tflite::micro::GetTensorData<float>(input),
-                           tflite::micro::GetTensorShape(output),
-                           tflite::micro::GetTensorData<float>(output));
-      break;
-    case kTfLiteInt32:
-      reference_ops::Slice(
-          op_params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int32_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int32_t>(output));
-      break;
-    case kTfLiteInt8:
-      reference_ops::Slice(
-          op_params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-      break;
-    case kTfLiteInt16:
-      reference_ops::Slice(
-          op_params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int16_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int16_t>(output));
-      break;
-    default:
-      MicroPrintf("Input tensor type %s (%d) not supported.",
-                  TfLiteTypeGetName(input->type), input->type);
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_SLICE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
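The SLICE kernel copies a [begin, begin+size) window per axis, left-padding begin and size to kMaxDim entries so one 5-D reference loop covers all ranks. A standalone 1-D sketch of the same idea (hypothetical example values):

#include <cstdio>

int main() {
  const int input[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  const int begin = 2, size = 3;
  int output[3];
  // Copy the requested window; higher ranks just nest this loop per axis.
  for (int i = 0; i < size; ++i) output[i] = input[begin + i];
  for (int v : output) printf("%d ", v);  // 2 3 4
  return 0;
}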
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/softmax.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/softmax.cc
deleted file mode 100644
index 1ad5be0c..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/softmax.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/kernels/softmax.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/softmax.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output,
-                      const SoftmaxParams& op_data) {
-  if (input->type == kTfLiteInt8) {
-    if (output->type == kTfLiteInt16) {
-      tflite::reference_ops::Softmax(
-          op_data, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int16_t>(output));
-    } else {
-      tflite::reference_ops::Softmax(
-          op_data, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-    }
-  } else {
-    tflite::reference_ops::SoftmaxInt16(
-        op_data, tflite::micro::GetTensorShape(input),
-        tflite::micro::GetTensorData<int16_t>(input),
-        tflite::micro::GetTensorShape(output),
-        tflite::micro::GetTensorData<int16_t>(output));
-  }
-}
-
-TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
-  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
-
-  TFLITE_DCHECK(node->user_data != nullptr);
-  SoftmaxParams op_data = *static_cast<SoftmaxParams*>(node->user_data);
-
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      tflite::reference_ops::Softmax(
-          op_data, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<float>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<float>(output));
-      return kTfLiteOk;
-    }
-    case kTfLiteInt8:
-    case kTfLiteInt16: {
-      SoftmaxQuantized(input, output, op_data);
-      return kTfLiteOk;
-    }
-    default:
-      MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
-                  input->type);
-      return kTfLiteError;
-  }
-}
-}  // namespace
-
-TfLiteRegistration Register_SOFTMAX() {
-  return tflite::micro::RegisterOp(SoftmaxInit, SoftmaxPrepare, SoftmaxEval);
-}
-
-}  // namespace tflite
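The kTfLiteFloat32 branch above defers to the reference softmax. A self-contained, numerically stable sketch of that computation (assumed 1-D shape, not the kernel's actual code):

#include <cmath>
#include <cstdio>

// Max-subtracted softmax: exponentiate relative to the largest logit so the
// exponentials cannot overflow, then normalize.
void Softmax(const float* in, float* out, int n) {
  float max_val = in[0];
  for (int i = 1; i < n; ++i) max_val = in[i] > max_val ? in[i] : max_val;
  float sum = 0.f;
  for (int i = 0; i < n; ++i) {
    out[i] = std::exp(in[i] - max_val);
    sum += out[i];
  }
  for (int i = 0; i < n; ++i) out[i] /= sum;
}

int main() {
  const float logits[3] = {1.f, 2.f, 3.f};
  float probs[3];
  Softmax(logits, probs, 3);
  printf("%f %f %f\n", probs[0], probs[1], probs[2]);  // ~0.09 0.24 0.67
  return 0;
}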
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/space_to_batch_nd.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/space_to_batch_nd.cc
deleted file mode 100644
index 7a7f61e9..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/space_to_batch_nd.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/space_to_batch_nd.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_utils.h"
-
-namespace tflite {
-
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kBlockShapeTensor = 1;
-constexpr int kCropsTensor = 2;
-constexpr int kOutputTensor = 0;
-
-// Currently, only 3D NHC and 4D NHWC input/output op_context are supported.
-// In case of 3D input, it will be extended to 3D NHWC by adding W=1.
-// The 4D array need to have exactly 2 spatial dimensions.
-// TODO(b/149952582): Support arbitrary dimension in SpaceToBatchND.
-const int kInputOutputMinDimensionNum = 3;
-const int kInputOutputMaxDimensionNum = 4;
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(SpaceToBatchParams));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, input != nullptr && output != nullptr);
-
-  TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum);
-  TF_LITE_ENSURE(context, NumDimensions(output) >= kInputOutputMinDimensionNum);
-  TF_LITE_ENSURE(context, NumDimensions(input) <= kInputOutputMaxDimensionNum);
-  TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  const SpaceToBatchParams& params =
-      *(static_cast<const SpaceToBatchParams*>(node->user_data));
-
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  const TfLiteEvalTensor* block_shape =
-      tflite::micro::GetEvalInput(context, node, kBlockShapeTensor);
-  const TfLiteEvalTensor* crops =
-      tflite::micro::GetEvalInput(context, node, kCropsTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  switch (input->type) {  // Already know in/out types are same.
-    case kTfLiteFloat32:
-      reference_ops::SpaceToBatchND(
-          params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<float>(input),
-          tflite::micro::GetTensorShape(block_shape),
-          tflite::micro::GetTensorData<int32_t>(block_shape),
-          tflite::micro::GetTensorShape(crops),
-          tflite::micro::GetTensorData<int32_t>(crops),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<float>(output));
-      break;
-    case kTfLiteInt8:
-      reference_ops::SpaceToBatchND(
-          params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(block_shape),
-          tflite::micro::GetTensorData<int32_t>(block_shape),
-          tflite::micro::GetTensorShape(crops),
-          tflite::micro::GetTensorData<int32_t>(crops),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-      break;
-    default:
-      MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
-                  input->type);
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace.
-
-TfLiteRegistration Register_SPACE_TO_BATCH_ND() {
-  return tflite::micro::RegisterOp(Init, Prepare, Eval);
-}
-
-}  // namespace tflite
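For reference, SPACE_TO_BATCH_ND zero-pads the spatial dimensions and folds each block_h x block_w tile into the batch dimension. A small sketch of the output geometry (hypothetical sizes, not from the kernel):

#include <cstdio>

int main() {
  const int batch = 1, height = 4, width = 6, channels = 3;
  const int block_h = 2, block_w = 2;
  const int pad_top = 0, pad_bottom = 0, pad_left = 1, pad_right = 1;
  // Batch grows by the block area; spatial dims shrink by the block size.
  const int out_batch = batch * block_h * block_w;
  const int out_h = (height + pad_top + pad_bottom) / block_h;
  const int out_w = (width + pad_left + pad_right) / block_w;
  printf("out: [%d, %d, %d, %d]\n", out_batch, out_h, out_w, channels);
  // out: [4, 2, 4, 3]
  return 0;
}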
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/space_to_depth.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/space_to_depth.cc
deleted file mode 100644
index b8635de8..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/space_to_depth.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/kernels/internal/reference/space_to_depth.h"
-
-#include <stdint.h>
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-constexpr int kBatchRank = 0;
-constexpr int kHeightRank = 1;
-constexpr int kWidthRank = 2;
-constexpr int kDepthRank = 3;
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  auto* params =
-      reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
-
-  auto data_type = output->type;
-  TF_LITE_ENSURE(context,
-                 data_type == kTfLiteFloat32 || data_type == kTfLiteInt8);
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  const int block_size = params->block_size;
-  const int input_height = input->dims->data[kHeightRank];
-  const int input_width = input->dims->data[kWidthRank];
-  int output_height = input_height / block_size;
-  int output_width = input_width / block_size;
-
-  TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size);
-  TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size);
-
-  // Relocate dims to the persistent storage arena before changing them,
-  // otherwise we'd be modifying temporary copies made by the interpreters each
-  // time they process the layer.
-  TfLiteEvalTensor* output_eval =
-      micro::GetEvalOutput(context, node, kOutputTensor);
-  TF_LITE_ENSURE_OK(context, micro::CreateWritableTensorDimsWithCopy(
-                                 context, output, output_eval));
-
-  output->dims->data[kBatchRank] = input->dims->data[kBatchRank];
-  output->dims->data[kHeightRank] = output_height;
-  output->dims->data[kWidthRank] = output_width;
-  output->dims->data[kDepthRank] =
-      input->dims->data[kDepthRank] * block_size * block_size;
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  auto* params =
-      reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
-
-  const TfLiteEvalTensor* input =
-      micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor);
-
-  SpaceToDepthParams op_params;
-  op_params.block_size = params->block_size;
-
-  switch (input->type) {  // Already know in/out types are same.
-    case kTfLiteFloat32:
-      reference_ops::SpaceToDepth(op_params, micro::GetTensorShape(input),
-                                  micro::GetTensorData<float>(input),
-                                  micro::GetTensorShape(output),
-                                  micro::GetTensorData<float>(output));
-      break;
-    case kTfLiteInt8:
-      reference_ops::SpaceToDepth(op_params, micro::GetTensorShape(input),
-                                  micro::GetTensorData<int8_t>(input),
-                                  micro::GetTensorShape(output),
-                                  micro::GetTensorData<int8_t>(output));
-      break;
-    default:
-      MicroPrintf("SPACE_TO_DEPTH only supports FLOAT32 and INT8, got %s.",
                  TfLiteTypeGetName(input->type));
-      return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_SPACE_TO_DEPTH() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/split_v.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/split_v.cc
deleted file mode 100644
index 959796c3..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/split_v.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace split_v {
-
-template <typename T>
-TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node,
-                       const TfLiteEvalTensor* input, int axis_value) {
-  const TfLiteIntArray* input_dims = input->dims;
-  const TfLiteEvalTensor* output0 =
-      tflite::micro::GetEvalOutput(context, node, 0);
-
-  const int split_dimensions = input_dims->size;
-
-  TFLITE_DCHECK_LT(axis_value, split_dimensions);
-  TFLITE_DCHECK_EQ(output0->dims->size, split_dimensions);
-
-  int64_t split_size = 0;
-  const int output_count = NumOutputs(node);
-  for (int i = 0; i < output_count; i++) {
-    split_size +=
-        tflite::micro::GetEvalOutput(context, node, i)->dims->data[axis_value];
-  }
-  TFLITE_DCHECK_EQ(split_size, input_dims->data[axis_value]);
-  int64_t outer_size = 1;
-  for (int i = 0; i < axis_value; ++i) {
-    outer_size *= input_dims->data[i];
-  }
-
-  int64_t base_inner_size = 1;
-  for (int i = axis_value + 1; i < split_dimensions; ++i) {
-    base_inner_size *= input_dims->data[i];
-  }
-
-  const T* input_ptr = tflite::micro::GetTensorData<T>(input);
-  for (int k = 0; k < outer_size; ++k) {
-    for (int i = 0; i < output_count; ++i) {
-      TfLiteEvalTensor* output_tensor =
-          tflite::micro::GetEvalOutput(context, node, i);
-      T* output_data = tflite::micro::GetTensorData<T>(output_tensor);
-      const int copy_size =
-          output_tensor->dims->data[axis_value] * base_inner_size;
-      T* output_ptr = output_data + k * copy_size;
-      for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j];
-      input_ptr += copy_size;
-    }
-  }
-
-  return kTfLiteOk;
-}
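SplitImpl above walks the input once, handing each output a copy_size chunk per outer index. Reduced to the 1-D case with hypothetical sizes:

#include <cstdio>

int main() {
  const int input[6] = {1, 2, 3, 4, 5, 6};
  const int split_sizes[3] = {2, 3, 1};
  int out0[2], out1[3], out2[1];
  int* outputs[3] = {out0, out1, out2};
  // Consecutive output slices of possibly different sizes along the axis.
  const int* in = input;
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < split_sizes[i]; ++j) outputs[i][j] = in[j];
    in += split_sizes[i];
  }
  printf("%d %d | %d %d %d | %d\n", out0[0], out0[1], out1[0], out1[1],
         out1[2], out2[0]);  // 1 2 | 3 4 5 | 6
  return 0;
}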
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
-
-  MicroContext* micro_context = GetMicroContext(context);
-  // Dynamic output tensors are needed if axis tensor is not constant.
-  // But Micro doesn't support dynamic memory allocation, so we only support
-  // constant axis tensor for now.
-  TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 2);
-  TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis),
-                     "Non constant axis tensor not supported");
-  micro_context->DeallocateTempTfLiteTensor(axis);
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
-  const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 2);
-
-  int axis_value = tflite::micro::GetTensorData<int32_t>(axis)[0];
-  if (axis_value < 0) {
-    axis_value += input->dims->size;
-  }
-
-  TF_LITE_ENSURE(context, axis_value >= 0);
-  TF_LITE_ENSURE(context, axis_value < input->dims->size);
-
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      return SplitImpl<float>(context, node, input, axis_value);
-    }
-    case kTfLiteInt8: {
-      return SplitImpl<int8_t>(context, node, input, axis_value);
-    }
-    case kTfLiteInt16: {
-      return SplitImpl<int16_t>(context, node, input, axis_value);
-    }
-    case kTfLiteInt32: {
-      return SplitImpl<int32_t>(context, node, input, axis_value);
-    }
-    default:
-      MicroPrintf("Type %s currently not supported.",
-                  TfLiteTypeGetName(input->type));
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace split_v
-
-TfLiteRegistration Register_SPLIT_V() {
-  return tflite::micro::RegisterOp(nullptr, split_v::Prepare, split_v::Eval);
-}
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/squeeze.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/squeeze.cc
deleted file mode 100644
index 86841753..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/squeeze.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/memory_helpers.h"
-
-namespace tflite {
-namespace {
-
-struct SqueezeContext {
-  SqueezeContext(TfLiteContext* context, TfLiteNode* node) {
-    params = reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data);
-    micro_context = GetMicroContext(context);
-    input = micro_context->AllocateTempInputTensor(node, 0);
-    output = micro_context->AllocateTempOutputTensor(node, 0);
-  }
-  ~SqueezeContext() {
-    micro_context->DeallocateTempTfLiteTensor(input);
-    micro_context->DeallocateTempTfLiteTensor(output);
-  }
-  MicroContext* micro_context;
-  TfLiteSqueezeParams* params;
-  TfLiteTensor* input;
-  TfLiteTensor* output;
-};
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  SqueezeContext op_context(context, node);
-  const int input_num_dims = NumDimensions(op_context.input);
-  const int num_squeeze_dims = op_context.params->num_squeeze_dims;
-
-  // Determines number of dimensions of output tensor after squeeze.
-  const TfLiteIntArray* input_dims = op_context.input->dims;
-  const TfLiteIntArray* output_dims = op_context.output->dims;
-  const int* squeeze_dims = op_context.params->squeeze_dims;
-
-  constexpr int max_squeeze_dims = 8;
-  TF_LITE_ENSURE(context, input_num_dims <= max_squeeze_dims);
-  bool should_squeeze[max_squeeze_dims] = {};
-
-  if (num_squeeze_dims == 0) {
-    for (int idx = 0; idx < input_num_dims; ++idx) {
-      if (input_dims->data[idx] == 1) {
-        should_squeeze[idx] = true;
-      }
-    }
-  } else {
-    for (int idx = 0; idx < num_squeeze_dims; ++idx) {
-      int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + input_num_dims
                                          : squeeze_dims[idx];
-      TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims &&
                                  input_dims->data[current] == 1);
-      should_squeeze[current] = true;
-    }
-  }
-
-  // Ensure output dimensions are big enough.
-  for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx) {
-    if (!should_squeeze[in_idx]) {
-      TFLITE_CHECK_GE(output_dims->data[out_idx++], input_dims->data[in_idx]);
-    }
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
-
-  if (input->type == kTfLiteString) {
-    MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
-                input->type);
-    return kTfLiteError;
-  }
-
-  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
-  size_t input_byte_size;
-  size_t output_byte_size;
-  TF_LITE_ENSURE_OK(context,
-                    TfLiteEvalTensorByteLength(input, &input_byte_size));
-  TF_LITE_ENSURE_OK(context,
-                    TfLiteEvalTensorByteLength(output, &output_byte_size));
-
-  TF_LITE_ENSURE_EQ(context, input_byte_size, output_byte_size);
-  memcpy(output->data.raw, input->data.raw, input_byte_size);
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_SQUEEZE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
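SQUEEZE is shape-only: the Eval above is a single memcpy and only the dims change. A sketch of the shape computation with hypothetical dims:

#include <cstdio>

int main() {
  const int dims[4] = {1, 3, 1, 5};
  int squeezed[4], out_rank = 0;
  // Drop every size-1 dimension (the num_squeeze_dims == 0 case above).
  for (int i = 0; i < 4; ++i) {
    if (dims[i] != 1) squeezed[out_rank++] = dims[i];
  }
  printf("rank %d:", out_rank);
  for (int i = 0; i < out_rank; ++i) printf(" %d", squeezed[i]);  // 3 5
  printf("\n");
  return 0;
}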
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/strided_slice.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/strided_slice.cc
deleted file mode 100644
index 344698d6..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/strided_slice.cc
+++ /dev/null
@@ -1,209 +0,0 @@
-/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/kernels/internal/reference/strided_slice.h"
-
-#include <cmath>
-#include <cstring>
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace strided_slice {
-
-constexpr int kInputTensor = 0;
-constexpr int kBeginTensor = 1;
-constexpr int kEndTensor = 2;
-constexpr int kStridesTensor = 3;
-constexpr int kOutputTensor = 0;
-
-struct StridedSliceContext {
-  StridedSliceContext(TfLiteContext* context, TfLiteNode* node) {
-    params = reinterpret_cast<TfLiteStridedSliceParams*>(node->builtin_data);
-    micro_context = GetMicroContext(context);
-    input = micro_context->AllocateTempInputTensor(node, kInputTensor);
-    begin = micro_context->AllocateTempInputTensor(node, kBeginTensor);
-    end = micro_context->AllocateTempInputTensor(node, kEndTensor);
-    strides = micro_context->AllocateTempInputTensor(node, kStridesTensor);
-    output = micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-    dims = NumDimensions(input);
-  }
-  ~StridedSliceContext() {
-    micro_context->DeallocateTempTfLiteTensor(input);
-    micro_context->DeallocateTempTfLiteTensor(begin);
-    micro_context->DeallocateTempTfLiteTensor(end);
-    micro_context->DeallocateTempTfLiteTensor(strides);
-    micro_context->DeallocateTempTfLiteTensor(output);
-  }
-  const TfLiteStridedSliceParams* params;
-  MicroContext* micro_context;
-  TfLiteTensor* input;
-  TfLiteTensor* begin;
-  TfLiteTensor* end;
-  TfLiteTensor* strides;
-  TfLiteTensor* output;
-  int dims;
-};
-
-// This Op only supports 1-4D cases and since we use the reference 4D
-// implementation, the 1-3D tensors are mapped to 4D.
-const int kMaxDim = 4;
-
-tflite::StridedSliceParams BuildStridedSliceParams(
-    StridedSliceContext* op_context) {
-  tflite::StridedSliceParams op_params;
-  op_params.start_indices_count = op_context->dims;
-  op_params.stop_indices_count = op_context->dims;
-  op_params.strides_count = op_context->dims;
-
-  for (int i = 0; i < op_context->dims; ++i) {
-    op_params.start_indices[i] = GetTensorData<int32_t>(op_context->begin)[i];
-    op_params.stop_indices[i] = GetTensorData<int32_t>(op_context->end)[i];
-    op_params.strides[i] = GetTensorData<int32_t>(op_context->strides)[i];
-  }
-
-  op_params.begin_mask = op_context->params->begin_mask;
-  op_params.ellipsis_mask = 0;
-  op_params.end_mask = op_context->params->end_mask;
-  op_params.new_axis_mask = 0;
-  op_params.shrink_axis_mask = op_context->params->shrink_axis_mask;
-  return op_params;
-}
-
-// Processes the indexing tensors (begin, end and strides) to resize the
-// output tensor. This function is callable from both Prepare() and Eval() as
-// long as the caller ensures the indexing tensors are present.
-TfLiteStatus CheckOutputSize(TfLiteContext* context,
-                             StridedSliceContext* op_context) {
-  using ::tflite::strided_slice::StartForAxis;
-  using ::tflite::strided_slice::StopForAxis;
-  TfLiteIntArray* output_shape = op_context->output->dims;
-  int shape_size = 0;
-  auto op_params = BuildStridedSliceParams(op_context);
-  auto input_shape = GetTensorShape(op_context->input);
-  for (int idx = 0; idx < op_context->dims; ++idx) {
-    int32_t stride = GetTensorData<int32_t>(op_context->strides)[idx];
-    TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero");
-    int32_t begin = StartForAxis(op_params, input_shape, idx);
-    int32_t end = StopForAxis(op_params, input_shape, idx, begin);
-
-    // When shrinking an axis, the end position does not matter (and can be
-    // incorrect when negative indexing is used, see Issue #19260). Always use
-    // begin + 1 to generate a length 1 slice, since begin has
-    // already been adjusted for negative indices by StartForAxis.
-    const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx);
-    if (shrink_axis) {
-      end = begin + 1;
-    }
-
-    // This is valid for both positive and negative strides
-    int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
-    dim_shape = dim_shape < 0 ? 0 : dim_shape;
-    if (!shrink_axis) {
-      TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape);
-      shape_size++;
-    }
-  }
-  TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size);
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  StridedSliceParams* op_params =
-      static_cast<StridedSliceParams*>(node->user_data);
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  StridedSliceContext op_context(context, node);
-  TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim,
-                     "input dim should not exceed 4");
-  auto params = BuildStridedSliceParams(&op_context);
-  memcpy(op_params, &params, sizeof(StridedSliceParams));
-  return CheckOutputSize(context, &op_context);
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  const StridedSliceParams& op_params =
-      *(static_cast<const StridedSliceParams*>(node->user_data));
-
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  switch (output->type) {
-    case kTfLiteFloat32:
-      reference_ops::StridedSlice(op_params,
-                                  tflite::micro::GetTensorShape(input),
-                                  tflite::micro::GetTensorData<float>(input),
-                                  tflite::micro::GetTensorShape(output),
-                                  tflite::micro::GetTensorData<float>(output));
-      break;
-    case kTfLiteInt8:
-      reference_ops::StridedSlice(op_params,
-                                  tflite::micro::GetTensorShape(input),
-                                  tflite::micro::GetTensorData<int8_t>(input),
-                                  tflite::micro::GetTensorShape(output),
-                                  tflite::micro::GetTensorData<int8_t>(output));
-      break;
-    case kTfLiteInt16:
-      reference_ops::StridedSlice(
-          op_params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int16_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int16_t>(output));
-      break;
-    case kTfLiteInt32:
-      reference_ops::StridedSlice(
-          op_params, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int32_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int32_t>(output));
-      break;
-    case kTfLiteBool:
-      reference_ops::StridedSlice(op_params,
-                                  tflite::micro::GetTensorShape(input),
-                                  tflite::micro::GetTensorData<bool>(input),
-                                  tflite::micro::GetTensorShape(output),
-                                  tflite::micro::GetTensorData<bool>(output));
-      break;
-    default:
-      MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
-                  input->type);
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-}  // namespace strided_slice
-
-TfLiteRegistration Register_STRIDED_SLICE() {
-  return tflite::micro::RegisterOp(strided_slice::Init, strided_slice::Prepare,
-                                   strided_slice::Eval);
-}
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
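CheckOutputSize above derives each output dimension as ceil((end - begin) / stride), clamped at zero, which holds for negative strides as well. A standalone sketch with hypothetical indices:

#include <cmath>
#include <cstdio>

// Per-axis output length of a strided slice.
int SliceLength(int begin, int end, int stride) {
  int len =
      static_cast<int>(std::ceil((end - begin) / static_cast<double>(stride)));
  return len < 0 ? 0 : len;
}

int main() {
  printf("%d\n", SliceLength(1, 7, 2));   // 3 -> indices 1, 3, 5
  printf("%d\n", SliceLength(6, 0, -2));  // 3 -> indices 6, 4, 2
  return 0;
}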
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/svdf.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/svdf.h
deleted file mode 100644
index 0915c9fd..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/svdf.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_
-#define TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-
-namespace tflite {
-
-struct OpDataSvdf {
-  int32_t effective_scale_1_a;
-  int32_t effective_scale_2_a;
-  // b versions of each scale are kept at int since the numbers are just the
-  // shift value - typically between [-32, 32].
-  int effective_scale_1_b;
-  int effective_scale_2_b;
-  int scratch_tensor_index;
-  int scratch_output_tensor_index;
-
-  // Cached tensor zero point values for quantized operations.
-  int input_zero_point;
-  int output_zero_point;
-  int activation_state_zero_point;
-};
-
-// Input tensors.
-extern const int kSvdfInputTensor;
-extern const int kSvdfWeightsFeatureTensor;
-extern const int kSvdfWeightsTimeTensor;
-extern const int kSvdfBiasTensor;
-// This is a variable tensor, and will be modified by this op.
-extern const int kSvdfInputActivationStateTensor;
-
-// Output tensor.
-extern const int kSvdfOutputTensor;
-
-void EvalInt8SvdfReference(TfLiteContext* context, TfLiteNode* node,
-                           const TfLiteEvalTensor* input_tensor,
-                           const TfLiteEvalTensor* weights_feature_tensor,
-                           const TfLiteEvalTensor* weights_time_tensor,
-                           const TfLiteEvalTensor* bias_tensor,
-                           const TfLiteSVDFParams* params,
-                           TfLiteEvalTensor* activation_state_tensor,
-                           TfLiteEvalTensor* output_tensor,
-                           const OpDataSvdf& data);
-
-// TODO(#523): remove 16-bit code when no longer needed.
-void EvalInt16SvdfReference(TfLiteContext* context, TfLiteNode* node,
-                            const TfLiteEvalTensor* input_tensor,
-                            const TfLiteEvalTensor* weights_feature_tensor,
-                            const TfLiteEvalTensor* weights_time_tensor,
-                            const TfLiteEvalTensor* bias_tensor,
-                            const TfLiteSVDFParams* params,
-                            TfLiteEvalTensor* activation_state_tensor,
-                            TfLiteEvalTensor* output_tensor,
-                            const OpDataSvdf& data);
-
-void EvalFloatSvdfReference(
-    TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input,
-    const TfLiteEvalTensor* weights_feature,
-    const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias,
-    const TfLiteSVDFParams* params, int scratch_tensor_index,
-    TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output);
-
-TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node);
-
-// This is the most generic TfLiteRegistration. The actual supported types may
-// still be target dependent. The only requirement is that every implementation
-// (reference or optimized) must define this function.
-TfLiteRegistration Register_SVDF();
-
-#if defined(HEXAGON) || defined(CMSIS_NN)
-TfLiteRegistration Register_SVDF_INT8();
-
-#else
-// Note that while this block gets used for both reference and optimized
-// kernels that do not have any specialized implementations, the only goal
-// here is to define a fallback implementation that allows reference kernels
-// to still be used from applications that call a more specific kernel variant.
-
-inline TfLiteRegistration Register_SVDF_INT8() { return Register_SVDF(); }
-
-#endif
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/tanh.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/tanh.cc
deleted file mode 100644
index bb4133bf..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/tanh.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/tanh.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/micro_utils.h"
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace activations {
-namespace {
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-struct OpData {
-  int32_t input_zero_point;
-  int32_t input_range_radius;
-  int32_t input_multiplier;
-  int input_left_shift;
-};
-
-void* TanhInit(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpData));
-}
-
-TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node,
-                                       OpData* data) {
-  MicroContext* micro_context = GetMicroContext(context);
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
-
-  if (input->type == kTfLiteInt8) {
-    static constexpr int kInputIntegerBits = 4;
-    const double input_real_multiplier =
-        static_cast<double>(input->params.scale) *
-        static_cast<double>(1 << (31 - kInputIntegerBits));
-
-    const double q = std::frexp(input_real_multiplier, &data->input_left_shift);
-    data->input_multiplier = static_cast<int32_t>(TfLiteRound(q * (1ll << 31)));
-
-    data->input_range_radius =
-        CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31);
-  }
-
-  if (input->type == kTfLiteInt16) {
-    static constexpr int kInputIntegerBits = 3;
-    static constexpr int kOutputFractionalBits = 15;
-
-    // These operators are implemented in fixed-point arithmetic,
-    // which intrinsically wants symmetric ranges (zero_point==0)
-    // and power-of-two scales (power-of-two is abbreviated below as POT).
-    // While more general support would be possible by means of rescaling,
-    // that would add some overhead and some loss of accuracy and wouldn't
-    // be used at the moment as current quantized LSTM applications are
-    // happy with symmetric, power-of-two-scales quantization. So we just
-    // implement that narrow case only for now.
-
-    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
-    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
-
-    int input_scale_log2_rounded;
-    bool param_scale_pot =
-        CheckedLog2(input->params.scale, &input_scale_log2_rounded);
-
-    data->input_left_shift =
-        (15 - kInputIntegerBits) + input_scale_log2_rounded;
-    param_scale_pot &=
-        (data->input_left_shift == 0 || data->input_left_shift == 1);
-
-    if (param_scale_pot) {
-      data->input_multiplier = 0;
-    } else {
-      // Calculate multiplier to change input scale to 1/(3*4096)
-      // as required by the table lookup.
-      // The number 3.0 in the multiplier comes from here,
-      // because the interval is [-10.7, 10.7] instead of [-8, 8].
-      // So, in this scaling +/-2^17 represents +/-10.7.
-
-      double multiplier =
-          static_cast<double>(input->params.scale) * 4096.0 * 3.0;
-      data->input_left_shift = 0;
-
-      while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) {
-        data->input_left_shift++;
-        multiplier = multiplier * 2.0;
-      }
-
-      data->input_multiplier = static_cast<int32_t>(multiplier);
-    }
-
-    int output_scale_log2_rounded;
-    TF_LITE_ENSURE(
-        context, CheckedLog2(output->params.scale, &output_scale_log2_rounded));
-    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
-                      -kOutputFractionalBits);
-  }
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-
-  OpData* data = static_cast<OpData*>(node->user_data);
-
-  MicroContext* micro_context = GetMicroContext(context);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  data->input_zero_point = input->params.zero_point;
-  TF_LITE_ENSURE_OK(context, CalculateArithmeticOpData(context, node, data));
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-
-  TFLITE_DCHECK(node->user_data != nullptr);
-  const OpData& data = *(static_cast<const OpData*>(node->user_data));
-
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      reference_ops::Tanh(tflite::micro::GetTensorShape(input),
-                          tflite::micro::GetTensorData<float>(input),
-                          tflite::micro::GetTensorShape(output),
-                          tflite::micro::GetTensorData<float>(output));
-      return kTfLiteOk;
-    } break;
-    case kTfLiteInt16: {
-      reference_integer_ops::Tanh(
-          data.input_multiplier, data.input_left_shift,
-          tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int16_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int16_t>(output));
-      return kTfLiteOk;
-    } break;
-    case kTfLiteInt8: {
-      reference_integer_ops::Tanh(
-          data.input_zero_point, data.input_range_radius, data.input_multiplier,
-          data.input_left_shift, tflite::micro::GetTensorShape(input),
-          tflite::micro::GetTensorData<int8_t>(input),
-          tflite::micro::GetTensorShape(output),
-          tflite::micro::GetTensorData<int8_t>(output));
-      return kTfLiteOk;
-    } break;
-    default:
-      MicroPrintf("Input %s, output %s not supported.",
-                  TfLiteTypeGetName(input->type),
-                  TfLiteTypeGetName(output->type));
-      return kTfLiteError;
-  }
-}
-
-}  // namespace activations
-
-TfLiteRegistration Register_TANH() {
-  return tflite::micro::RegisterOp(
-      activations::TanhInit, activations::TanhPrepare, activations::TanhEval);
-}
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
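The int16 path above rescales the input so the fixed-point table covers roughly [-10.7, 10.7]; the multiplier/shift search can be reproduced in isolation (hypothetical input scale):

#include <cstdint>
#include <cstdio>

int main() {
  const double input_scale = 0.001;  // assumed example scale
  // Map the input scale toward 1/(3*4096), doubling the multiplier into
  // [16384, 32767] while counting the left shifts, as in the kernel.
  double multiplier = input_scale * 4096.0 * 3.0;
  int input_left_shift = 0;
  while (multiplier <= 32767.0 / 2.0 && input_left_shift <= 30) {
    input_left_shift++;
    multiplier *= 2.0;
  }
  printf("multiplier=%d shift=%d\n", static_cast<int32_t>(multiplier),
         input_left_shift);  // multiplier=25165 shift=11
  return 0;
}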
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/transpose.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/transpose.cc
deleted file mode 100644
index 740ef5f9..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/transpose.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/kernels/internal/reference/transpose.h"
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/internal/types.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kPermTensor = 1;
-constexpr int kOutputTensor = 0;
-
-struct TransposeContext {
-  TransposeContext(TfLiteContext* context, TfLiteNode* node) {
-    micro_context = GetMicroContext(context);
-    input = micro_context->AllocateTempInputTensor(node, kInputTensor);
-    perm = micro_context->AllocateTempInputTensor(node, kPermTensor);
-    output = micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  }
-  ~TransposeContext() {
-    micro_context->DeallocateTempTfLiteTensor(input);
-    micro_context->DeallocateTempTfLiteTensor(perm);
-    micro_context->DeallocateTempTfLiteTensor(output);
-  }
-  MicroContext* micro_context;
-  TfLiteTensor* input;
-  TfLiteTensor* perm;
-  TfLiteTensor* output;
-};
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
-  TransposeContext op_context(context, node);
-
-  // Ensure validity of input tensor.
-  TF_LITE_ENSURE_MSG(context, NumDimensions(op_context.input) <= 5,
-                     "Transpose op only supports 1D-5D input arrays.");
-  TF_LITE_ENSURE_TYPES_EQ(context, op_context.input->type,
-                          op_context.output->type);
-
-  int dims = NumDimensions(op_context.input);
-  const int32_t* perm_data = GetTensorData<int32_t>(op_context.perm);
-
-  // Ensure validity of the permutations tensor as a 1D tensor.
-  TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.perm), 1);
-  TF_LITE_ENSURE_EQ(context, op_context.perm->dims->data[0], dims);
-  for (int idx = 0; idx < dims; ++idx) {
-    TF_LITE_ENSURE_MSG(context, (perm_data[idx] >= 0 && perm_data[idx] < dims),
-                       "Transpose op permutations array is out of bounds.");
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* perm_tensor =
-      tflite::micro::GetEvalInput(context, node, kPermTensor);
-  const int32_t* perm_data = perm_tensor->data.i32;
-  const int size = perm_tensor->dims->data[0];
-  TransposeParams params;
-  params.perm_count = size;
-  for (int i = 0; i < size; ++i) {
-    params.perm[i] = perm_data[i];
-  }
-
-  // Transpose kernel only does rearranging values not numeric evaluations
-  // on each cell. It's safe to implement per size of scalar type and this
-  // trick keeps the total code size in a reasonable range.
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  switch (input->type) {
-    case kTfLiteFloat32:
-      reference_ops::Transpose(params, tflite::micro::GetTensorShape(input),
-                               tflite::micro::GetTensorData<float>(input),
-                               tflite::micro::GetTensorShape(output),
-                               tflite::micro::GetTensorData<float>(output));
-      break;
-    case kTfLiteInt8:
-      reference_ops::Transpose(params, tflite::micro::GetTensorShape(input),
-                               tflite::micro::GetTensorData<int8_t>(input),
-                               tflite::micro::GetTensorShape(output),
-                               tflite::micro::GetTensorData<int8_t>(output));
-      break;
-    default:
-      MicroPrintf(
-          "Type %s is currently not supported by Transpose. "
-          "Only float32 and int8 are supported",
-          TfLiteTypeGetName(input->type));
-      return kTfLiteError;
-  }
-
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-TfLiteRegistration Register_TRANSPOSE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-}  // namespace tflite
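As the comment above notes, TRANSPOSE permutes indices without touching values. A 2-D sketch with perm = {1, 0} (hypothetical buffer, not the kernel's generic N-D loop):

#include <cstdio>

int main() {
  const int rows = 2, cols = 3;
  const int input[rows * cols] = {1, 2, 3, 4, 5, 6};
  int output[cols * rows];
  // output[c][r] = input[r][c] in row-major storage.
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c) output[c * rows + r] = input[r * cols + c];
  for (int i = 0; i < cols * rows; ++i) printf("%d ", output[i]);
  // 1 4 2 5 3 6
  printf("\n");
  return 0;
}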
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/transpose_conv.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/transpose_conv.cc
deleted file mode 100644
index 1b1e8b3b..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/transpose_conv.cc
+++ /dev/null
@@ -1,343 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h"
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/common.h"
-#include "tensorflow/lite/kernels/internal/quantization_util.h"
-#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/kernels/padding.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-// For the TfLite transpose_conv implementation, input tensor 0 corresponds to
-// the OutputShapeTensor. However, since TFLM does not support dynamic tensors,
-// the TFLM implementation ignores input tensor 0 and the only inputs we care
-// about are kFilterTensor, kInputTensor and kBiasTensor.
-constexpr int kFilterTensor = 1;
-constexpr int kInputTensor = 2;
-constexpr int kBiasTensor = 3;
-constexpr int kOutputTensor = 0;
-
-// Conv is quantized along dimension 0:
-// https://www.tensorflow.org/lite/performance/quantization_spec
-constexpr int kConvQuantizedDimension = 0;
-
-struct OpData {
-  ConvParams params;
-
-  // A scratch buffer is required for quantized implementations.
-  int scratch_buffer_index;
-
-  // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
-  // 64-bit biases.
-  int bias_converted_buffer_index;
-
-  // Multiplier and shift arrays are required for the int8 implementation.
-  int32_t* per_channel_output_multiplier;
-  int32_t* per_channel_output_shift;
-};
-
-inline PaddingType RuntimePaddingType(TfLitePadding padding) {
-  switch (padding) {
-    case TfLitePadding::kTfLitePaddingSame:
-      return PaddingType::kSame;
-    case TfLitePadding::kTfLitePaddingValid:
-      return PaddingType::kValid;
-    case TfLitePadding::kTfLitePaddingUnknown:
-    default:
-      return PaddingType::kNone;
-  }
-}
-
-TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
-                             const TfLiteTransposeConvParams* params, int width,
-                             int height, int filter_width, int filter_height,
-                             const TfLiteType data_type, OpData* data) {
-  bool has_bias = node->inputs->size == 4;
-  // Check number of inputs/outputs
-  TF_LITE_ENSURE(context, has_bias || node->inputs->size == 3);
-  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
-
-  // Matching GetWindowedOutputSize in TensorFlow.
-  auto padding = params->padding;
-  int unused_output_width;
-  int unused_output_height;
-  TfLitePaddingValues padding_values = ComputePaddingHeightWidth(
-      params->stride_height, params->stride_width, 1,
-      1,  // Dilation height and width are always 1 for transpose_conv.
-      height, width, filter_height, filter_width, padding,
-      &unused_output_height, &unused_output_width);
-
-  data->params.padding_type = RuntimePaddingType(padding);
-  data->params.padding_values.width = padding_values.width;
-  data->params.padding_values.height = padding_values.height;
-
-  // Note that quantized inference requires that all tensors have their
-  // parameters set. This is usually done during quantized training.
-  if (data_type != kTfLiteFloat32) {
-    MicroContext* micro_context = GetMicroContext(context);
-
-    TfLiteTensor* input =
-        micro_context->AllocateTempInputTensor(node, kInputTensor);
-    TF_LITE_ENSURE(context, input != nullptr);
-    TfLiteTensor* filter =
-        micro_context->AllocateTempInputTensor(node, kFilterTensor);
-    TF_LITE_ENSURE(context, filter != nullptr);
-    TfLiteTensor* bias =
-        micro_context->AllocateTempInputTensor(node, kBiasTensor);
-    TfLiteTensor* output =
-        micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-    TF_LITE_ENSURE(context, output != nullptr);
-    int output_channels = filter->dims->data[kConvQuantizedDimension];
-
-    TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams(
-        context, input, filter, bias, output, kTfLiteActNone,
-        &data->params.output_multiplier, &data->params.output_shift,
-        &data->params.quantized_activation_min,
-        &data->params.quantized_activation_max,
-        data->per_channel_output_multiplier, data->per_channel_output_shift,
-        output_channels));
-
-    // TODO(b/192090531): Remove this once all 8x16 transpose conv models use
-    // 64-bit biases.
-    if (input->type == kTfLiteInt16) {
-      TFLITE_DCHECK(filter->type == kTfLiteInt8);
-      TFLITE_DCHECK(output->type == kTfLiteInt16);
-      if (bias->type == kTfLiteInt16) {
-        TFLITE_DCHECK(
-            context->RequestScratchBufferInArena(
-                context, GetTensorShape(bias).FlatSize() * sizeof(std::int64_t),
-                &(data->bias_converted_buffer_index)) == kTfLiteOk);
-      }
-    }
-
-    micro_context->DeallocateTempTfLiteTensor(input);
-    micro_context->DeallocateTempTfLiteTensor(filter);
-    micro_context->DeallocateTempTfLiteTensor(output);
-    if (bias != nullptr) {
-      micro_context->DeallocateTempTfLiteTensor(bias);
-    }
-  }
-  return kTfLiteOk;
-}
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
-  return context->AllocatePersistentBuffer(context, sizeof(OpData));
-}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  TFLITE_DCHECK(node->builtin_data != nullptr);
-
-  OpData* data = static_cast<OpData*>(node->user_data);
-  const auto params =
-      static_cast<const TfLiteTransposeConvParams*>(node->builtin_data);
-
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* filter =
-      micro_context->AllocateTempInputTensor(node, kFilterTensor);
-  TF_LITE_ENSURE(context, filter != nullptr);
-
-  // Get height and width of the output.
-  const int width = SizeOfDimension(output, 2);
-  const int height = SizeOfDimension(output, 1);
-  const int filter_width = SizeOfDimension(filter, 2);
-  const int filter_height = SizeOfDimension(filter, 1);
-
-  // Dynamically allocate per-channel quantization parameters.
-  const int num_channels = filter->dims->data[kConvQuantizedDimension];
-  data->per_channel_output_multiplier =
-      static_cast<int32_t*>(context->AllocatePersistentBuffer(
-          context, num_channels * sizeof(int32_t)));
-  data->per_channel_output_shift =
-      static_cast<int32_t*>(context->AllocatePersistentBuffer(
-          context, num_channels * sizeof(int32_t)));
-
-  // Quantized kernels use an int32 scratch buffer.
-  if (input->type == kTfLiteInt8) {
-    TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
-    TFLITE_DCHECK(context->RequestScratchBufferInArena(
-                      context,
-                      GetTensorShape(output).FlatSize() * sizeof(int32_t),
-                      &(data->scratch_buffer_index)) == kTfLiteOk);
-  }
-
-  // Quantized 16x8 kernels use an int64 scratch buffer.
-  if (input->type == kTfLiteInt16) {
-    TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr);
-    TFLITE_DCHECK(context->RequestScratchBufferInArena(
-                      context,
-                      GetTensorShape(output).FlatSize() * sizeof(std::int64_t),
-                      &(data->scratch_buffer_index)) == kTfLiteOk);
-  }
-
-  // All per-channel quantized tensors need valid zero point and scale arrays.
- if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - static_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - - TF_LITE_ENSURE(context, - affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, - filter_width, filter_height, - input->type, data)); - - // Offsets (zero points) - data->params.input_offset = -input->params.zero_point; - data->params.weights_offset = -filter->params.zero_point; - data->params.output_offset = output->params.zero_point; - - // Stride - data->params.stride_width = params->stride_width; - data->params.stride_height = params->stride_height; - - micro_context->DeallocateTempTfLiteTensor(output); - micro_context->DeallocateTempTfLiteTensor(input); - micro_context->DeallocateTempTfLiteTensor(filter); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 4) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG( - context, - input->type == filter->type || - (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8), - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: { - reference_ops::TransposeConv( - data.params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetOptionalTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr); - break; - } - case kTfLiteInt8: { - int32_t* scratch_buffer = static_cast( - context->GetScratchBuffer(context, data.scratch_buffer_index)); - reference_integer_ops::TransposeConv( - data.params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetOptionalTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); - break; - } - case kTfLiteInt16: { - std::int64_t* scratch_buffer = static_cast( - context->GetScratchBuffer(context, data.scratch_buffer_index)); - // TODO(b/192090531): Remove this once all 8x16 transpose conv models use - // 64-bit biases. 
- if (bias != nullptr && bias->type == kTfLiteInt16) { - std::int64_t* bias_converted_buffer = - static_cast(context->GetScratchBuffer( - context, data.bias_converted_buffer_index)); - for (int i = 0; i < tflite::micro::GetTensorShape(bias).FlatSize(); - i++) { - bias_converted_buffer[i] = bias->data.i16[i]; - } - reference_integer_ops::TransposeConv( - data.params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), bias_converted_buffer, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); - } else { - reference_integer_ops::TransposeConv( - data.params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetOptionalTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); - } - break; - } - default: - MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), - input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_TRANSPOSE_CONV() { - return tflite::micro::RegisterOp(Init, Prepare, Eval); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test_config.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test_config.h deleted file mode 100644 index 24c838fa..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test_config.h +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
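The kernel above illustrates TFLM's two-phase scratch-buffer protocol: arena space may only be requested in Prepare (yielding an index), and the index is resolved to a pointer in Eval, after the memory plan is committed. A minimal sketch of that pattern, with hypothetical names (MyOpData, MyPrepare, MyEval) and the standard TfLiteContext hooks:

#include <cstdint>

#include "tensorflow/lite/c/common.h"

// Sketch only: MyOpData is invented; node->user_data is assumed to have been
// allocated in the kernel's Init via context->AllocatePersistentBuffer.
struct MyOpData {
  int scratch_index;  // Filled in Prepare, consumed in Eval.
};

TfLiteStatus MyPrepare(TfLiteContext* context, TfLiteNode* node) {
  MyOpData* data = static_cast<MyOpData*>(node->user_data);
  // Ask the allocator for 128 int32 slots; only an index is returned because
  // the arena is not laid out until planning finishes.
  return context->RequestScratchBufferInArena(
      context, 128 * sizeof(int32_t), &data->scratch_index);
}

TfLiteStatus MyEval(TfLiteContext* context, TfLiteNode* node) {
  MyOpData* data = static_cast<MyOpData*>(node->user_data);
  // Now the plan is committed, so the index resolves to a real pointer.
  int32_t* scratch = static_cast<int32_t*>(
      context->GetScratchBuffer(context, data->scratch_index));
  scratch[0] = 0;  // ... kernel work using the scratch area ...
  return kTfLiteOk;
}

The same mechanism is what lets the int16 path widen a 16-bit bias into a 64-bit scratch area at Eval time without any heap allocation.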
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test_config.h b/code/components/tflite-lib/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test_config.h
deleted file mode 100644
index 24c838fa..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm_test_config.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_TEST_CONFIG_H_
-#define TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_TEST_CONFIG_H_
-
-#include "tensorflow/lite/c/common.h"
-
-namespace tflite {
-namespace testing {
-
-// TODO(b/230666079) enable below tests for xtensa when the xtensa
-// kernel is reconciled with reference kernel
-#if !defined(XTENSA)
-
-struct LstmIntegerTestConfig {
-  const int n_batch;
-  const int n_input;
-  const int n_cell;
-  const int n_output;
-  const int sequence_length;
-  const bool time_major;
-  const bool use_cifg;
-  const bool use_peephole;
-  const bool use_projection_weights;
-  const bool use_projection_bias;
-  const bool use_layer_norm;
-  const bool use_8x8_8_implementation;
-  float intermediate_scale[5][2];
-  int intermediate_zp[5][2];
-  TfLiteAffineQuantization* intermediate_qparam;
-
-  const float* input;
-  int8_t* input_quant;
-
-  const float* input_to_input_weights;
-  int8_t* lstm_i2i_quant;
-  const float* input_to_forget_weights;
-  int8_t* lstm_i2f_quant;
-  const float* input_to_cell_weights;
-  int8_t* lstm_i2c_quant;
-  const float* input_to_output_weights;
-  int8_t* lstm_i2o_quant;
-
-  const float* recurrent_to_input_weights;
-  int8_t* lstm_r2i_quant;
-  const float* recurrent_to_forget_weights;
-  int8_t* lstm_r2f_quant;
-  const float* recurrent_to_cell_weights;
-  int8_t* lstm_r2c_quant;
-  const float* recurrent_to_output_weights;
-  int8_t* lstm_r2o_quant;
-
-  const float* cell_to_input_weights;
-  int16_t* lstm_c2i_quant;
-  const float* cell_to_forget_weights;
-  int16_t* lstm_c2f_quant;
-  const float* cell_to_output_weights;
-  int16_t* lstm_c2o_quant;
-
-  const float* input_gate_bias;
-  int32_t* lstm_igate_bias_quant;
-  const float* forget_gate_bias;
-  int32_t* lstm_fgate_bias_quant;
-  const float* cell_gate_bias;
-  int32_t* lstm_cgate_bias_quant;
-  const float* output_gate_bias;
-  int32_t* lstm_ogate_bias_quant;
-
-  const float* projection_weights;
-  int8_t* lstm_proj_w_quant;
-  const float* projection_bias;
-  int32_t* projection_bias_quant;
-
-  int16_t* output_state;
-  int16_t* cell_state;
-
-  const float* input_layer_norm_coefficients;
-  int16_t* lstm_input_layer_norm_coeff_quant;
-  const float* forget_layer_norm_coefficients;
-  int16_t* lstm_forget_layer_norm_coeff_quant;
-  const float* cell_layer_norm_coefficients;
-  int16_t* lstm_cell_layer_norm_coeff_quant;
-  const float* output_layer_norm_coefficients;
-  int16_t* lstm_output_layer_norm_coeff_quant;
-
-  int8_t* output;
-  const int8_t* expected_output;
-
-  bool asymmetric_quantize_inputs;
-  const float ranges[25][2];
-};
-
-struct LstmFloatTestConfig {
-  const int n_batch;
-  const int n_input;
-  const int n_cell;
-  const int n_output;
-  const int sequence_length;
-  const bool time_major;
-  const bool use_cifg;
-  const bool use_peephole;
-  const bool use_projection_weights;
-  const bool use_projection_bias;
-  const bool use_layer_norm;
-  const float cell_clip;
-  const float proj_clip;
-
-  const float* input_original;
-  float* input;
-
-  const float* input_to_input_weights;
-  const float* input_to_forget_weights;
-  const float* input_to_cell_weights;
-  const float* input_to_output_weights;
-
-  const float* recurrent_to_input_weights;
-  const float* recurrent_to_forget_weights;
-  const float* recurrent_to_cell_weights;
-  const float* recurrent_to_output_weights;
-
-  const float* cell_to_input_weights;
-  const float* cell_to_forget_weights;
-  const float* cell_to_output_weights;
-
const float* input_gate_bias; - const float* forget_gate_bias; - const float* cell_gate_bias; - const float* output_gate_bias; - - const float* projection_weights; - const float* projection_bias; - - float* output_state; - float* cell_state; - - const float* input_layer_norm_coefficients; - const float* forget_layer_norm_coefficients; - const float* cell_layer_norm_coefficients; - const float* output_layer_norm_coefficients; - - float* output; - const float* expected_output_original; - float* expected_output; -}; - -struct LstmWeightQuantizationBuffers { - int8_t* lstm_i2i_quant; - float* lstm_i2i_scale; - int* lstm_i2i_zp; - TfLiteAffineQuantization* lstm_i2i_qparam; - - int8_t* lstm_i2f_quant; - float* lstm_i2f_scale; - int* lstm_i2f_zp; - TfLiteAffineQuantization* lstm_i2f_qparam; - - int8_t* lstm_i2c_quant; - float* lstm_i2c_scale; - int* lstm_i2c_zp; - TfLiteAffineQuantization* lstm_i2c_qparam; - - int8_t* lstm_i2o_quant; - float* lstm_i2o_scale; - int* lstm_i2o_zp; - TfLiteAffineQuantization* lstm_i2o_qparam; - - int8_t* lstm_r2i_quant; - float* lstm_r2i_scale; - int* lstm_r2i_zp; - TfLiteAffineQuantization* lstm_r2i_qparam; - - int8_t* lstm_r2f_quant; - float* lstm_r2f_scale; - int* lstm_r2f_zp; - TfLiteAffineQuantization* lstm_r2f_qparam; - - int8_t* lstm_r2c_quant; - float* lstm_r2c_scale; - int* lstm_r2c_zp; - TfLiteAffineQuantization* lstm_r2c_qparam; - - int8_t* lstm_r2o_quant; - float* lstm_r2o_scale; - int* lstm_r2o_zp; - TfLiteAffineQuantization* lstm_r2o_qparam; - - int8_t* lstm_c2i_quant; - float* lstm_c2i_scale; - int* lstm_c2i_zp; - TfLiteAffineQuantization* lstm_c2i_qparam; - - int8_t* lstm_c2f_quant; - float* lstm_c2f_scale; - int* lstm_c2f_zp; - TfLiteAffineQuantization* lstm_c2f_qparam; - - int8_t* lstm_c2o_quant; - float* lstm_c2o_scale; - int* lstm_c2o_zp; - TfLiteAffineQuantization* lstm_c2o_qparam; - - int8_t* lstm_proj_w_quant; - float* lstm_proj_w_scale; - int* lstm_proj_w_zp; - TfLiteAffineQuantization* lstm_proj_w_qparam; -}; - -extern LstmIntegerTestConfig lstm_integer_no_peephole_config; - -extern LstmIntegerTestConfig lstm_integer_peephole_config; - -extern LstmFloatTestConfig lstm_no_cifg_no_peephole_no_proj_config; - -extern LstmFloatTestConfig lstm_cifg_peephole_no_proj_config; - -extern LstmFloatTestConfig lstm_no_cifg_peephole_proj_config; - -extern LstmFloatTestConfig lstm_no_cifg_peephole_proj_bias_config; - -extern LstmWeightQuantizationBuffers lstm_no_cifg_no_peephole_no_proj_buffers; - -extern LstmWeightQuantizationBuffers lstm_cifg_peephole_no_proj_buffers; - -extern LstmWeightQuantizationBuffers lstm_no_cifg_peephole_proj_buffers; - -extern LstmFloatTestConfig cifg_peephole_no_proj_config_layer_norm; - -#endif // !defined(XTENSA) -} // namespace testing -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_UNIDIRECTIONAL_SEQUENCE_LSTM_TEST_CONFIG_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/unpack.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/unpack.cc deleted file mode 100644 index 9a0ef177..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/unpack.cc +++ /dev/null @@ -1,111 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace ops {
-namespace micro {
-namespace unpack {
-namespace {
-
-constexpr int kInputTensor = 0;
-
-template <typename T>
-TfLiteStatus UnpackImpl(TfLiteContext* context, TfLiteNode* node,
-                        const TfLiteEvalTensor* input, int output_count,
-                        int axis) {
-  const TfLiteEvalTensor* output0 =
-      tflite::micro::GetEvalOutput(context, node, 0);
-  const TfLiteIntArray* input_dims = input->dims;
-  const TfLiteIntArray* output_dims = output0->dims;
-  const int dimensions = input_dims->size;
-
-  if (axis < 0) {
-    axis += input->dims->size;
-  }
-
-  TFLITE_DCHECK_LT(axis, dimensions);
-
-  int outer_size = 1;
-  for (int i = 0; i < axis; ++i) {
-    outer_size *= input_dims->data[i];
-  }
-  int copy_size = 1;
-  for (int i = axis + 1; i < dimensions; ++i) {
-    copy_size *= input_dims->data[i];
-  }
-  int output_size = 1;
-  for (int i = 0; i < output_dims->size; ++i) {
-    output_size *= output_dims->data[i];
-  }
-  TFLITE_DCHECK_EQ(output_size, copy_size * outer_size);
-
-  const T* input_data = tflite::micro::GetTensorData<T>(input);
-
-  for (int i = 0; i < output_count; ++i) {
-    TfLiteEvalTensor* t = tflite::micro::GetEvalOutput(context, node, i);
-    T* output_data = tflite::micro::GetTensorData<T>(t);
-    for (int k = 0; k < outer_size; ++k) {
-      T* output_ptr = output_data + copy_size * k;
-      int loc = k * output_count * copy_size + i * copy_size;
-      const T* input_ptr = input_data + loc;
-      for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j];
-    }
-  }
-
-  return kTfLiteOk;
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteUnpackParams* data =
-      reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
-
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-
-  switch (input->type) {
-    case kTfLiteFloat32: {
-      return UnpackImpl<float>(context, node, input, data->num, data->axis);
-    }
-    case kTfLiteInt32: {
-      return UnpackImpl<int32_t>(context, node, input, data->num, data->axis);
-    }
-    case kTfLiteInt8: {
-      return UnpackImpl<int8_t>(context, node, input, data->num, data->axis);
-    }
-    default: {
-      MicroPrintf("Type '%s' is not supported by unpack.",
-                  TfLiteTypeGetName(input->type));
-      return kTfLiteError;
-    }
-  }
-
-  return kTfLiteOk;
-}
-}  // namespace
-}  // namespace unpack
-
-TfLiteRegistration Register_UNPACK() {
-  return tflite::micro::RegisterOp(nullptr, nullptr, unpack::Eval);
-}
-
-}  // namespace micro
-}  // namespace ops
-}  // namespace tflite
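The copy loops in UnpackImpl treat the flat input as outer_size blocks of output_count slices of copy_size elements each. A self-contained demo of the same index arithmetic, using an invented 2x3 input unpacked along axis 0:

#include <cstdio>

// Standalone illustration of the indexing used by UnpackImpl above, for a
// 2x3 input unpacked along axis 0 into two length-3 outputs.
int main() {
  const int input[2][3] = {{1, 2, 3}, {4, 5, 6}};
  const int* flat = &input[0][0];
  const int output_count = 2;  // dims[axis]
  const int outer_size = 1;    // product of dims before the axis
  const int copy_size = 3;     // product of dims after the axis
  for (int i = 0; i < output_count; ++i) {
    for (int k = 0; k < outer_size; ++k) {
      const int loc = k * output_count * copy_size + i * copy_size;
      std::printf("output %d, chunk %d: %d %d %d\n", i, k, flat[loc],
                  flat[loc + 1], flat[loc + 2]);
    }
  }
}

Output 0 receives {1, 2, 3} and output 1 receives {4, 5, 6}, matching the kernel's slicing.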
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/kernels/zeros_like.cc b/code/components/tflite-lib/tensorflow/lite/micro/kernels/zeros_like.cc
deleted file mode 100644
index 9c77e7ad..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/kernels/zeros_like.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "tensorflow/lite/kernels/kernel_util.h"
-#include "tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  MicroContext* micro_context = GetMicroContext(context);
-
-  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
-  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input =
-      micro_context->AllocateTempInputTensor(node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output =
-      micro_context->AllocateTempOutputTensor(node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-  output->type = input->type;
-
-  micro_context->DeallocateTempTfLiteTensor(input);
-  micro_context->DeallocateTempTfLiteTensor(output);
-  return kTfLiteOk;
-}
-
-template <typename T>
-void resetZeros(T* out, const int num_elements) {
-  for (int i = 0; i < num_elements; ++i) {
-    out[i] = static_cast<T>(0);
-  }
-}
-
-TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
-  TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
-  int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input),
-                                   tflite::micro::GetTensorShape(output));
-  switch (input->type) {
-    case kTfLiteInt64:
-      resetZeros(tflite::micro::GetTensorData<int64_t>(output), flat_size);
-      break;
-    case kTfLiteInt32:
-      resetZeros(tflite::micro::GetTensorData<int32_t>(output), flat_size);
-      break;
-    case kTfLiteInt8:
-      resetZeros(tflite::micro::GetTensorData<int8_t>(output), flat_size);
-      break;
-    case kTfLiteFloat32:
-      resetZeros(tflite::micro::GetTensorData<float>(output), flat_size);
-      break;
-    default:
-      MicroPrintf(
-          "ZerosLike only currently supports int64, int32, "
-          "and float32, got %d.",
-          input->type);
-      return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-}  // namespace
-
-TfLiteRegistration Register_ZEROS_LIKE() {
-  return tflite::micro::RegisterOp(nullptr, Prepare, Eval);
-}
-
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_allocation_info.cc b/code/components/tflite-lib/tensorflow/lite/micro/micro_allocation_info.cc
deleted file mode 100644
index c1ebac3f..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/micro_allocation_info.cc
+++ /dev/null
@@ -1,355 +0,0 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/micro/micro_allocation_info.h" - -#include "tensorflow/lite/c/c_api_types.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" - -namespace tflite { - -namespace { -constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation"; -constexpr int kUninitializedLifetime = -1; -} // namespace - -// Mark the given Allocation info as first created at the specified allocation -// scope count. Only the first creation must be recorded since the allocation -// scope count monotonically increases throughout the lifetime marking process. -void AllocationInfoBuilder::UpdateFirstCreated(AllocationInfo* current, - int allocation_scope_count) { - TFLITE_DCHECK(current->first_created <= allocation_scope_count); - if (current->first_created == kUninitializedLifetime) { - current->first_created = allocation_scope_count; - } -} - -// Mark the given AllocationInfo as last used at the specified allocation scope -// count. Update the last used marker every time, since the allocation scope -// count monotonically increases through the lifetime marking process. -void AllocationInfoBuilder::UpdateLastUsed(AllocationInfo* current, - int allocation_scope_count) { - TFLITE_DCHECK(current->last_used <= allocation_scope_count); - current->last_used = allocation_scope_count; -} - -TfLiteStatus AllocationInfoBuilder::MarkSubgraphLifetimesIfNecessary( - const Operator* op, internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles, - SubgraphAllocations* allocations) { - int first_subgraph_index = -1; - int second_subgraph_index = -1; - const OperatorCode* opcode = - model_->operator_codes()->Get(op->opcode_index()); - switch (opcode->builtin_code()) { - case BuiltinOperator_IF: { - first_subgraph_index = - op->builtin_options_as_IfOptions()->then_subgraph_index(); - second_subgraph_index = - op->builtin_options_as_IfOptions()->else_subgraph_index(); - break; - } - case BuiltinOperator_CALL_ONCE: { - first_subgraph_index = - op->builtin_options_as_CallOnceOptions()->init_subgraph_index(); - break; - } - case BuiltinOperator_WHILE: { - first_subgraph_index = - op->builtin_options_as_WhileOptions()->cond_subgraph_index(); - second_subgraph_index = - op->builtin_options_as_WhileOptions()->body_subgraph_index(); - break; - } - default: { - break; - } - } - if (first_subgraph_index != -1) { - // Enter a new allocation scope for each subgraph. - allocation_scope_count_++; - TF_LITE_ENSURE_STATUS( - MarkAllocationLifetimes(first_subgraph_index, scratch_buffer_requests, - scratch_buffer_handles, allocations)); - } - if (second_subgraph_index != -1) { - // Enter a new allocation scope for each subgraph. 
-    allocation_scope_count_++;
-    TF_LITE_ENSURE_STATUS(
-        MarkAllocationLifetimes(second_subgraph_index, scratch_buffer_requests,
-                                scratch_buffer_handles, allocations));
-  }
-  return kTfLiteOk;
-}
-
-TfLiteStatus AllocationInfoBuilder::CreateAllocationInfo(
-    int scratch_buffer_request_count) {
-  size_t subgraph_offsets_length = model_->subgraphs()->size() * sizeof(size_t);
-  info_.subgraph_offsets =
-      reinterpret_cast<size_t*>(non_persistent_allocator_->AllocateTemp(
-          subgraph_offsets_length, alignof(size_t)));
-  if (info_.subgraph_offsets == nullptr) {
-    TF_LITE_REPORT_ERROR(
-        reporter_,
-        "Failed to allocate memory for memory planning, %d bytes required",
-        subgraph_offsets_length);
-    return kTfLiteError;
-  }
-  size_t tensor_count = 0;
-  for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
-       subgraph_idx++) {
-    // Add all tensors in each subgraph to the AllocationInfo array. Even
-    // weight tensors are added but marked with needs_allocating = false.
-    // Including all tensors in the graph here simplifies logic.
-    info_.subgraph_offsets[subgraph_idx] = tensor_count;
-    tensor_count += model_->subgraphs()->Get(subgraph_idx)->tensors()->size();
-  }
-  info_.tensor_count = tensor_count;
-
-  // Scratch buffer allocations follow tensor allocations, so the scratch
-  // offset is equal to the number of tensor allocations.
-  info_.scratch_offset = tensor_count;
-  info_.allocation_info_count = tensor_count + scratch_buffer_request_count;
-  info_.scratch_buffer_count = scratch_buffer_request_count;
-  size_t bytes = sizeof(AllocationInfo) * info_.allocation_info_count;
-
-  // Allocate an array of AllocationInfo structs from the temp section. This
-  // struct will be used by AllocationInfoBuilder to find buffer usage.
-  info_.allocation_info = reinterpret_cast<AllocationInfo*>(
-      non_persistent_allocator_->AllocateTemp(bytes, alignof(AllocationInfo)));
-  if (info_.allocation_info == nullptr) {
-    TF_LITE_REPORT_ERROR(
-        reporter_,
-        "Failed to allocate memory for memory planning, %d bytes required",
-        bytes);
-    return kTfLiteError;
-  }
-  return kTfLiteOk;
-}
-
-TfLiteStatus AllocationInfoBuilder::FreeAllocationInfo() {
-  non_persistent_allocator_->DeallocateTemp(
-      reinterpret_cast<uint8_t*>(info_.allocation_info));
-  non_persistent_allocator_->DeallocateTemp(
-      reinterpret_cast<uint8_t*>(info_.subgraph_offsets));
-  return kTfLiteOk;
-}
-
-TfLiteStatus AllocationInfoBuilder::ValidateSubgraph(
-    const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) {
-  uint32_t operators_size = NumSubgraphOperators(subgraph);
-
-  for (uint32_t i = 0; i < operators_size; i++) {
-    const auto op = subgraph->operators()->Get(i);
-    for (size_t n = 0;
-         op->intermediates() != nullptr && n < op->intermediates()->size();
-         n++) {
-      const int tensor_index = op->intermediates()->Get(n);
-      size_t tensor_size = -1;
-      TF_LITE_ENSURE_STATUS(TfLiteEvalTensorByteLength(
-          &eval_tensors[tensor_index], &tensor_size));
-      if (tensor_size != 0) {
-        MicroPrintf(
-            "Does not support intermediate tensor with non-zero size: %d",
-            tensor_size);
-        return kTfLiteError;
-      }
-    }
-  }
-  return kTfLiteOk;
-}
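CreateAllocationInfo lays out a single flat array: one AllocationInfo per tensor of every subgraph first, then one per scratch-buffer request, with subgraph_offsets mapping a subgraph index to its first entry. A toy recomputation of those offsets (the counts are invented for the demo):

#include <cstdio>
#include <vector>

// Toy illustration of the array layout built by CreateAllocationInfo above.
int main() {
  const std::vector<int> tensors_per_subgraph = {10, 4};  // two subgraphs
  const int scratch_request_count = 3;

  std::vector<int> subgraph_offsets;
  int tensor_count = 0;
  for (int n : tensors_per_subgraph) {
    subgraph_offsets.push_back(tensor_count);  // first entry of this subgraph
    tensor_count += n;
  }
  const int scratch_offset = tensor_count;  // scratch entries start here
  const int allocation_info_count = tensor_count + scratch_request_count;

  std::printf("subgraph 1 tensors start at %d\n", subgraph_offsets[1]);  // 10
  std::printf("scratch entries start at %d of %d\n", scratch_offset,
              allocation_info_count);  // 14 of 17
}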
-TfLiteStatus AllocationInfoBuilder::InitializeAllocationInfo(
-    const int32_t* offline_offsets, SubgraphAllocations* allocations) {
-  AllocationInfo* allocation_info = info_.allocation_info;
-  // Initialize allocation info for every tensor in every subgraph.
-  for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
-       subgraph_idx++) {
-    const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx);
-    TfLiteEvalTensor* eval_tensors = allocations[subgraph_idx].tensors;
-    AllocationInfo* subgraph_allocation_info =
-        &allocation_info[info_.subgraph_offsets[subgraph_idx]];
-
-    // Ensure constraints are met.
-    TF_LITE_ENSURE_STATUS(ValidateSubgraph(subgraph, eval_tensors));
-
-    for (size_t i = 0; i < subgraph->tensors()->size(); ++i) {
-      AllocationInfo* current = &subgraph_allocation_info[i];
-      current->output_ptr = &(eval_tensors[i].data.data);
-
-      TF_LITE_ENSURE_STATUS(
-          TfLiteEvalTensorByteLength(&eval_tensors[i], &current->bytes));
-
-      current->first_created = kUninitializedLifetime;
-      current->last_used = kUninitializedLifetime;
-      current->needs_allocating =
-          (eval_tensors[i].data.data == nullptr) &&
-          (!subgraph->tensors()->Get(i)->is_variable()) &&
-          (current->bytes != 0);
-      if (offline_offsets) {
-        current->offline_offset = offline_offsets[i];
-      } else {
-        current->offline_offset = kOnlinePlannedBuffer;
-      }
-    }
-  }
-  // Initialize allocation info for every scratch buffer.
-  AllocationInfo* scratch_allocation_info =
-      &allocation_info[info_.scratch_offset];
-  for (size_t i = 0; i < info_.scratch_buffer_count; i++) {
-    AllocationInfo* current = &scratch_allocation_info[i];
-    current->first_created = kUninitializedLifetime;
-    current->last_used = kUninitializedLifetime;
-    current->needs_allocating = true;
-    current->offline_offset = kOnlinePlannedBuffer;
-  }
-  return kTfLiteOk;
-}
-
-TfLiteStatus AllocationInfoBuilder::MarkAllocationLifetimes(
-    int subgraph_idx, internal::ScratchBufferRequest* scratch_buffer_requests,
-    ScratchBufferHandle* scratch_buffer_handles,
-    SubgraphAllocations* allocations) {
-  const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx);
-
-  AllocationInfo* allocation_info = info_.allocation_info;
-  // Each subgraph's tensor allocations are in a contiguous block starting at
-  // subgraph_offsets_[subgraph index] with one entry per tensor.
-  AllocationInfo* subgraph_allocation_info =
-      &allocation_info[info_.subgraph_offsets[subgraph_idx]];
-
-  uint32_t operators_size = NumSubgraphOperators(subgraph);
-  // Mark all inputs as created at the start of the subgraph invocation.
-  for (size_t i = 0;
-       subgraph->inputs() != nullptr && i < subgraph->inputs()->size(); ++i) {
-    const int tensor_index = subgraph->inputs()->Get(i);
-    AllocationInfo* current = &subgraph_allocation_info[tensor_index];
-    UpdateFirstCreated(current, allocation_scope_count_);
-  }
-
-  for (uint32_t i = 0; i < operators_size; i++) {
-    // Each operator has a new allocation scope.
-    allocation_scope_count_++;
-    const auto* op = subgraph->operators()->Get(i);
-    // Figure out when the first creation and use of each tensor is.
-    for (size_t n = 0; op->outputs() != nullptr && n < op->outputs()->size();
-         ++n) {
-      const int tensor_index = op->outputs()->Get(n);
-      AllocationInfo* current = &subgraph_allocation_info[tensor_index];
-      UpdateFirstCreated(current, allocation_scope_count_);
-    }
-
-    // Keep track of scope count before any subgraphs, so that scratch buffers'
-    // lifetime within a control flow op properly overlaps with all subgraphs.
-    int start_allocation_scope_count = allocation_scope_count_;
-
-    // Control flow operators can invoke subgraphs. Plan these subgraphs
-    // before continuing on to the rest of the graph.
-    MarkSubgraphLifetimesIfNecessary(op, scratch_buffer_requests,
-                                     scratch_buffer_handles, allocations);
-
-    // Figure out when the last use of each tensor is.
-    for (size_t n = 0; op->inputs() != nullptr && n < op->inputs()->size();
-         ++n) {
-      const int tensor_index = op->inputs()->Get(n);
-      // Optional bias tensors can have an index of -1 when they are omitted.
-      if (tensor_index >= 0) {
-        AllocationInfo* current = &subgraph_allocation_info[tensor_index];
-        // No need to update creation since it is either marked by the subgraph
-        // or producer op, or it is not part of the memory plan (weight, bias
-        // tensor).
-        UpdateLastUsed(current, allocation_scope_count_);
-      }
-    }
-    for (size_t n = 0; op->outputs() != nullptr && n < op->outputs()->size();
-         ++n) {
-      const int tensor_index = op->outputs()->Get(n);
-      AllocationInfo* current = &subgraph_allocation_info[tensor_index];
-      UpdateLastUsed(current, allocation_scope_count_);
-    }
-
-    // Mark the lifetime of scratch buffers belonging to the current node. This
-    // operation is O(N * M) where N is the total number of visited nodes and M
-    // is the total number of scratch buffers.
-    // TODO(b/217794030): Optimize this memory planning code.
-    AllocationInfo* scratch_allocation_info =
-        &allocation_info[info_.scratch_offset];
-    for (size_t scratch_idx = 0; scratch_idx < info_.scratch_buffer_count;
-         scratch_idx++) {
-      internal::ScratchBufferRequest request =
-          scratch_buffer_requests[scratch_idx];
-      AllocationInfo* current = &scratch_allocation_info[scratch_idx];
-      if (request.node_idx == static_cast<int>(i) &&
-          request.subgraph_idx == static_cast<int>(subgraph_idx)) {
-        ScratchBufferHandle* current_handle =
-            &(scratch_buffer_handles[scratch_idx]);
-        current->output_ptr = reinterpret_cast<void**>(&current_handle->data);
-        current->bytes = request.bytes;
-        UpdateFirstCreated(current, start_allocation_scope_count);
-        UpdateLastUsed(current, allocation_scope_count_);
-      }
-    }
-  }
-
-  // Mark all outputs as persistent to the end of the subgraph invocation.
-  for (size_t i = 0;
-       subgraph->outputs() != nullptr && i < subgraph->outputs()->size(); ++i) {
-    const int tensor_index = subgraph->outputs()->Get(i);
-    AllocationInfo* current = &subgraph_allocation_info[tensor_index];
-    UpdateLastUsed(current, allocation_scope_count_);
-  }
-  return kTfLiteOk;
-}
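MarkAllocationLifetimes reduces every tensor and scratch buffer to a [first_created, last_used] interval in allocation-scope time; the memory planner may then place buffers whose intervals are disjoint at the same arena offset. A toy check of that rule, with invented intervals:

#include <cstdio>

// Toy model of the lifetime intervals recorded above: two buffers can share
// arena space iff their [first_created, last_used] scopes do not overlap.
struct Lifetime {
  int first_created;
  int last_used;
};

bool CanShareArenaSpace(const Lifetime& a, const Lifetime& b) {
  return a.last_used < b.first_created || b.last_used < a.first_created;
}

int main() {
  Lifetime act0{1, 2};  // output of op 1, consumed by op 2
  Lifetime act1{2, 3};  // output of op 2, consumed by op 3
  Lifetime act2{3, 4};  // output of op 3, consumed by op 4
  std::printf("act0/act1 share: %d\n", CanShareArenaSpace(act0, act1));  // 0
  std::printf("act0/act2 share: %d\n", CanShareArenaSpace(act0, act2));  // 1
}

This is also why the code snapshots start_allocation_scope_count before descending into control-flow subgraphs: a scratch buffer used across an IF or WHILE must span every scope the nested subgraphs consume.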
-
-// Get offline tensors allocation plan. See
-// micro/docs/memory_management.md for more info.
-TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets(
-    const int32_t** offline_planner_offsets) {
-  if (model_->metadata()) {
-    for (size_t i = 0; i < model_->metadata()->size(); ++i) {
-      auto metadata = model_->metadata()->Get(i);
-      const size_t metadata_name_size = (size_t)metadata->name()->size();
-
-      if ((strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata,
-                   std::min(metadata_name_size,
-                            strlen(kOfflineMemAllocMetadata))) == 0) &&
-          metadata_name_size == strlen(kOfflineMemAllocMetadata)) {
-        const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers =
-            model_->buffers();
-        auto* buffer = (*buffers)[metadata->buffer()];
-        auto* array = buffer->data();
-        const uint32_t* metadata_buffer =
-            reinterpret_cast<const uint32_t*>(array->data());
-        const size_t nbr_tensors = static_cast<size_t>(metadata_buffer[2]);
-        *offline_planner_offsets =
-            reinterpret_cast<const int32_t*>(&metadata_buffer[3]);
-
-        if (info_.tensor_count != nbr_tensors) {
-          TF_LITE_REPORT_ERROR(reporter_,
-                               "Nbr of offline buffer offsets (%d) in metadata "
-                               "not equal nbr tensors (%d)\n",
-                               nbr_tensors, info_.tensor_count);
-          return kTfLiteError;
-        }
-      }
-    }
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace tflite
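GetOfflinePlannedOffsets reads the offset count from metadata_buffer[2] and the offsets starting at metadata_buffer[3], so the "OfflineMemoryAllocation" blob appears to carry three uint32 header words followed by one int32 arena offset per tensor. The sketch below reconstructs that layout; the meanings of the first two header words (format version, subgraph index) are taken from the micro/docs/memory_management.md description rather than from this code, and all values are invented:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical reconstruction of the "OfflineMemoryAllocation" metadata
// consumed by GetOfflinePlannedOffsets above.
int main() {
  std::vector<uint32_t> metadata = {
      1,  // format version (per the memory_management doc)
      0,  // subgraph index the plan applies to (per the doc)
      4,  // number of offsets that follow; read as metadata_buffer[2]
      // One entry per tensor: arena byte offset, or -1 for online planning.
      static_cast<uint32_t>(0), static_cast<uint32_t>(1024),
      static_cast<uint32_t>(-1), static_cast<uint32_t>(2048)};
  const uint32_t nbr_tensors = metadata[2];
  const int32_t* offsets = reinterpret_cast<const int32_t*>(&metadata[3]);
  for (uint32_t i = 0; i < nbr_tensors; ++i) {
    std::printf("tensor %u -> offset %d\n", i, offsets[i]);
  }
}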
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_allocator.cc b/code/components/tflite-lib/tensorflow/lite/micro/micro_allocator.cc
deleted file mode 100644
index 707f2e83..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/micro_allocator.cc
+++ /dev/null
@@ -1,965 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/micro_allocator.h"
-
-#include <cstddef>
-#include <cstdint>
-
-#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
-#include "tensorflow/lite/c/c_api_types.h"
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
-#include "tensorflow/lite/core/api/op_resolver.h"
-#include "tensorflow/lite/core/api/tensor_utils.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/micro/arena_allocator/non_persistent_arena_buffer_allocator.h"
-#include "tensorflow/lite/micro/arena_allocator/persistent_arena_buffer_allocator.h"
-#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
-#include "tensorflow/lite/micro/compatibility.h"
-#include "tensorflow/lite/micro/flatbuffer_utils.h"
-#include "tensorflow/lite/micro/memory_helpers.h"
-#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h"
-#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h"
-#include "tensorflow/lite/micro/micro_allocation_info.h"
-#include "tensorflow/lite/micro/micro_arena_constants.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-#include "tensorflow/lite/schema/schema_utils.h"
-
-namespace tflite {
-
-namespace {
-
-// Maximum number of scratch buffer requests per operator. Operator kernels
-// that request more than this value will receive an exception.
-constexpr size_t kMaxScratchBuffersPerOp = 12;
-
-// Sentinel value used as a placeholder to mark that a ScratchBufferRequest
-// still needs a node id assignment.
-constexpr int kUnassignedScratchBufferRequestIndex = -1;
-
-const TfLiteIntArray kZeroLengthIntArray = {};
-
-class MicroBuiltinDataAllocator : public BuiltinDataAllocator {
- public:
-  explicit MicroBuiltinDataAllocator(
-      IPersistentBufferAllocator* persistent_allocator)
-      : persistent_allocator_(persistent_allocator) {}
-
-  void* Allocate(size_t size, size_t alignment_hint) override {
-    return persistent_allocator_->AllocatePersistentBuffer(size,
-                                                           alignment_hint);
-  }
-  void Deallocate(void* data) override {
-    // Do not deallocate, builtin data needs to be available for the lifetime
-    // of the model.
-  }
-
-  TF_LITE_REMOVE_VIRTUAL_DELETE
-
- private:
-  IPersistentBufferAllocator* persistent_allocator_;
-};
-
-TfLiteStatus CreatePlan(ErrorReporter* error_reporter,
-                        MicroMemoryPlanner* planner,
-                        const AllocationInfo* allocation_info,
-                        size_t allocation_info_size) {
-  // Add the tensors to our allocation plan.
-  for (size_t i = 0; i < allocation_info_size; ++i) {
-    const AllocationInfo* current = &allocation_info[i];
-    if (current->needs_allocating) {
-      size_t aligned_bytes_required =
-          AlignSizeUp(current->bytes, MicroArenaBufferAlignment());
-      if (current->offline_offset == kOnlinePlannedBuffer) {
-        TF_LITE_ENSURE_STATUS(
-            planner->AddBuffer(error_reporter, aligned_bytes_required,
-                               current->first_created, current->last_used));
-      } else {
-        TF_LITE_ENSURE_STATUS(planner->AddBuffer(
-            error_reporter, aligned_bytes_required, current->first_created,
-            current->last_used, current->offline_offset));
-      }
-    }
-  }
-  return kTfLiteOk;
-}
-
-TfLiteStatus CommitPlan(ErrorReporter* error_reporter,
-                        MicroMemoryPlanner* planner, uint8_t* starting_point,
-                        const AllocationInfo* allocation_info,
-                        size_t allocation_info_size) {
-  // Figure out the actual memory addresses for each buffer, based on the plan.
-  int planner_index = 0;
-  for (size_t i = 0; i < allocation_info_size; ++i) {
-    const AllocationInfo* current = &allocation_info[i];
-    if (current->needs_allocating) {
-      int offset = -1;
-      TF_LITE_ENSURE_STATUS(
-          planner->GetOffsetForBuffer(error_reporter, planner_index, &offset));
-      *current->output_ptr = reinterpret_cast<void*>(starting_point + offset);
-      ++planner_index;
-    }
-  }
-  return kTfLiteOk;
-}
-
-IPersistentBufferAllocator* CreatePersistentArenaAllocator(uint8_t* buffer_head,
-                                                           size_t buffer_size) {
-  // Align the actually used area by the tail because the persistent buffer
-  // grows from the bottom to top.
-  uint8_t* aligned_buffer_tail =
-      AlignPointerDown(buffer_head + buffer_size, MicroArenaBufferAlignment());
-  size_t aligned_buffer_size = aligned_buffer_tail - buffer_head;
-  PersistentArenaBufferAllocator tmp =
-      PersistentArenaBufferAllocator(buffer_head, aligned_buffer_size);
-
-  // Allocate enough bytes from the buffer to create a
-  // PersistentArenaBufferAllocator. The new instance will use the current
-  // adjusted tail buffer from the tmp allocator instance.
-  uint8_t* allocator_buffer =
-      tmp.AllocatePersistentBuffer(sizeof(PersistentArenaBufferAllocator),
-                                   alignof(PersistentArenaBufferAllocator));
-  // Use the default copy constructor to populate internal states.
-  return new (allocator_buffer) PersistentArenaBufferAllocator(tmp);
-}
-
-// The NonPersistentBufferAllocator instance is created in the persistent
-// buffer because it has to be persistent to keep track of the non-persistent
-// buffer information.
-INonPersistentBufferAllocator* CreateNonPersistentArenaAllocator(
-    uint8_t* buffer_head, size_t buffer_size,
-    IPersistentBufferAllocator* persistent_buffer_allocator) {
-  uint8_t* allocator_buffer =
-      persistent_buffer_allocator->AllocatePersistentBuffer(
-          sizeof(NonPersistentArenaBufferAllocator),
-          alignof(NonPersistentArenaBufferAllocator));
-  // Align the actually used area by the head because the non-persistent
-  // buffer grows from the head to bottom.
-  uint8_t* aligned_buffer_head =
-      AlignPointerUp(buffer_head, MicroArenaBufferAlignment());
-  size_t aligned_buffer_size = buffer_head + buffer_size - aligned_buffer_head;
-
-  INonPersistentBufferAllocator* non_persistent_buffer_allocator =
-      new (allocator_buffer) NonPersistentArenaBufferAllocator(
-          aligned_buffer_head, aligned_buffer_size);
-  return non_persistent_buffer_allocator;
-}
-
-}  // namespace
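CreatePersistentArenaAllocator uses a self-hosting trick: a temporary allocator on the stack carves out space for the real instance inside the arena it will manage, and placement new copies the temporary's state into that space. A toy version with an invented BumpAllocator, just to show the mechanics:

#include <cstddef>
#include <cstdio>
#include <new>

// Toy sketch of the bootstrap above; BumpAllocator is invented for the demo.
class BumpAllocator {
 public:
  BumpAllocator(unsigned char* buffer, size_t size)
      : next_(buffer), remaining_(size) {}
  void* Allocate(size_t bytes) {
    if (bytes > remaining_) return nullptr;
    void* result = next_;
    next_ += bytes;
    remaining_ -= bytes;
    return result;
  }

 private:
  unsigned char* next_;
  size_t remaining_;
};

BumpAllocator* CreateArenaAllocator(unsigned char* arena, size_t size) {
  BumpAllocator tmp(arena, size);        // temporary, lives on the stack
  void* home = tmp.Allocate(sizeof(BumpAllocator));
  return new (home) BumpAllocator(tmp);  // copy state into the arena itself
}

int main() {
  static unsigned char arena[256];
  BumpAllocator* alloc = CreateArenaAllocator(arena, sizeof(arena));
  std::printf("allocator lives at %p, arena at %p\n",
              static_cast<void*>(alloc), static_cast<void*>(arena));
}

The payoff is that no memory outside the caller-supplied arena is ever needed, which is the core constraint TFLM operates under.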
-
-namespace internal {
-
-// Returns a pointer to any buffer associated with the flatbuffer tensor. Can
-// return nullptr if no buffer is found.
-void* GetFlatbufferTensorBuffer(
-    const tflite::Tensor& flatbuffer_tensor,
-    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers) {
-  // We need to figure out where the actual contents of this tensor are stored
-  // in memory. We'll check to see if there's a serialized buffer (pretty much
-  // the same as a constant op in TensorFlow) associated with this tensor
-  // first, and if there is, update the runtime structure to point to its
-  // location in memory.
-  // First see if there's any buffer information in the serialized tensor.
-  // TODO(b/170379532): Add better unit tests to validate flatbuffer values.
-  void* out_buffer = nullptr;
-  if (auto* buffer = (*buffers)[flatbuffer_tensor.buffer()]) {
-    // If we've found a buffer, does it have any data?
-    if (auto* array = buffer->data()) {
-      // If it has any data, is the data size larger than zero?
-      if (array->size()) {
-        // We've found a buffer with valid data, so update the runtime tensor
-        // data structure to point to it.
-        out_buffer = const_cast<void*>(static_cast<const void*>(array->data()));
-      }
-    }
-    // TODO(petewarden): It's not clear in what circumstances we could have a
-    // buffer in the serialized tensor, but it doesn't have any data in it. Is
-    // that a validly-generated file, and if so what does it mean, or is it an
-    // error condition? It would be good to tighten up the specification to
-    // make it less ambiguous.
-  }
-  return out_buffer;
-}
-
-TfLiteStatus InitializeTfLiteTensorFromFlatbuffer(
-    IPersistentBufferAllocator* persistent_buffer_allocator,
-    INonPersistentBufferAllocator* non_persistent_buffer_allocator,
-    bool allocate_temp, const tflite::Tensor& flatbuffer_tensor,
-    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
-    ErrorReporter* error_reporter, TfLiteTensor* result) {
-  TFLITE_DCHECK(result != nullptr);
-
-  *result = {};
-  // Make sure the serialized type is one we know how to deal with, and convert
-  // it from a flatbuffer enum into a constant used by the kernel C API.
-  TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(),
-                                          &result->type, error_reporter));
-  // Make sure we remember if the serialized tensor is designated as a
-  // variable.
-  result->is_variable = flatbuffer_tensor.is_variable();
-
-  result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers);
-
-  // TODO(petewarden): Some of these paths aren't getting enough testing
-  // coverage, so we should figure out some tests that exercise them.
-  if (result->data.data == nullptr) {
-    // The tensor contents haven't been set from a serialized buffer, so
-    // make a note that they will be allocated from memory. The actual
-    // allocation won't happen until later.
-    result->allocation_type = kTfLiteArenaRw;
-  } else {
-    // We set the data from a serialized buffer, so record that.
-    result->allocation_type = kTfLiteMmapRo;
-  }
-
-  // Figure out what the size in bytes of the buffer is and store it.
-  size_t type_size;
-  TF_LITE_ENSURE_STATUS(BytesRequiredForTensor(
-      flatbuffer_tensor, &result->bytes, &type_size, error_reporter));
-
-  if (flatbuffer_tensor.shape() == nullptr) {
-    // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar
-    // tensor.
-    // TODO(b/188459715): figure out why const_cast is required here.
-    result->dims = const_cast<TfLiteIntArray*>(&kZeroLengthIntArray);
-  } else {
-    // TFLM doesn't allow reshaping the tensor which requires dynamic memory
-    // allocation so it is safe to drop the const qualifier. In the future, if
-    // we really want to update the tensor shape, we can always pass in a new
-    // TfLiteIntArray - especially we have to do so if the dimension is
-    result->dims = FlatBufferVectorToTfLiteTypeArray(flatbuffer_tensor.shape());
-  }
-
-  // Copy the quantization information from the serialized data.
-  const auto* src_quantization = flatbuffer_tensor.quantization();
-  if (src_quantization && src_quantization->scale() &&
-      (src_quantization->scale()->size() > 0) &&
-      src_quantization->zero_point() &&
-      (src_quantization->zero_point()->size() > 0)) {
-    // Always populate the TfLiteTensor.params field, even if there are
-    // per-channel quantization parameters.
-    result->params.scale = src_quantization->scale()->Get(0);
-    // Note that the zero_point field in the FlatBuffers schema is a 64-bit
-    // integer, but the zero_point field in the TfLiteQuantizationParams struct
-    // is a 32-bit integer.
-    result->params.zero_point =
-        static_cast<int32_t>(src_quantization->zero_point()->Get(0));
-
-    // Populate per-channel quantization params.
-    int channels = src_quantization->scale()->size();
-    TfLiteAffineQuantization* quantization =
-        allocate_temp
-            ? reinterpret_cast<TfLiteAffineQuantization*>(
-                  non_persistent_buffer_allocator->AllocateTemp(
-                      sizeof(TfLiteAffineQuantization),
-                      alignof(TfLiteAffineQuantization)))
-            : reinterpret_cast<TfLiteAffineQuantization*>(
-                  persistent_buffer_allocator->AllocatePersistentBuffer(
-                      sizeof(TfLiteAffineQuantization),
-                      alignof(TfLiteAffineQuantization)));
-    if (quantization == nullptr) {
-      TF_LITE_REPORT_ERROR(error_reporter,
-                           "Unable to allocate TfLiteAffineQuantization.\n");
-      return kTfLiteError;
-    }
-
-    // TODO(b/153688719): Reduce tail allocation by using a global zero-point
-    // buffer. This value can not be reused from the flatbuffer since the
-    // zero_point is stored as a int64_t.
-    quantization->zero_point =
-        allocate_temp
-            ? reinterpret_cast<TfLiteIntArray*>(
-                  non_persistent_buffer_allocator->AllocateTemp(
-                      TfLiteIntArrayGetSizeInBytes(channels),
-                      alignof(TfLiteIntArray)))
-            : reinterpret_cast<TfLiteIntArray*>(
-                  persistent_buffer_allocator->AllocatePersistentBuffer(
-                      TfLiteIntArrayGetSizeInBytes(channels),
-                      alignof(TfLiteIntArray)));
-    if (quantization->zero_point == nullptr) {
-      TF_LITE_REPORT_ERROR(error_reporter,
-                           "Unable to allocate quantization->zero_point.\n");
-      return kTfLiteError;
-    }
-
-    quantization->scale =
-        FlatBufferVectorToTfLiteTypeArray(src_quantization->scale());
-
-    quantization->zero_point->size = channels;
-    int* zero_point_data = quantization->zero_point->data;
-    for (int i = 0; i < channels; i++) {
-      // As a space-saving optimization, zero point arrays for weights can be
-      // reduced to a single value, since all zero points for weights are 0.
-      zero_point_data[i] = src_quantization->zero_point()->size() ==
-                                   src_quantization->scale()->size()
-                               ? src_quantization->zero_point()->Get(i)
-                               : src_quantization->zero_point()->Get(0);
-    }
-    // TODO(rocky): Need to add a micro_allocator test case that fails when
-    // this is not copied:
-    quantization->quantized_dimension = src_quantization->quantized_dimension();
-
-    result->quantization = {kTfLiteAffineQuantization, quantization};
-  }
-  return kTfLiteOk;
-}
-
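The TfLiteAffineQuantization struct populated above carries a scale array, a zero-point array, and a quantized_dimension; per-channel dequantization is then real_value = scale[channel] * (q - zero_point[channel]). A minimal numeric illustration (the values are invented; weight zero points are typically all 0, which is why the code can collapse them to a single entry):

#include <cstdio>

// Per-channel affine dequantization, channel-major for readability.
int main() {
  const float scale[2] = {0.5f, 0.25f};
  const int zero_point[2] = {0, 0};
  const signed char q[2][2] = {{-2, 4}, {8, -8}};  // 2 channels x 2 weights
  for (int c = 0; c < 2; ++c) {
    for (int i = 0; i < 2; ++i) {
      std::printf("channel %d: q=%d -> %.2f\n", c, q[c][i],
                  scale[c] * (q[c][i] - zero_point[c]));
    }
  }
}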
-TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer(
-    const tflite::Tensor& flatbuffer_tensor,
-    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
-    ErrorReporter* error_reporter, TfLiteEvalTensor* result) {
-  *result = {};
-  // Make sure the serialized type is one we know how to deal with, and convert
-  // it from a flatbuffer enum into a constant used by the kernel C API.
-  TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(),
-                                          &result->type, error_reporter));
-
-  result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers);
-
-  if (flatbuffer_tensor.shape() == nullptr) {
-    // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar
-    // tensor.
-    result->dims = const_cast<TfLiteIntArray*>(&kZeroLengthIntArray);
-  } else {
-    result->dims = FlatBufferVectorToTfLiteTypeArray(flatbuffer_tensor.shape());
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace internal
-
-size_t MicroAllocator::GetDefaultTailUsage(bool is_memory_planner_given) {
-  // TODO(b/208703041): a template version of AlignSizeUp to make expression
-  // shorter.
-  size_t total_size =
-      AlignSizeUp(sizeof(SingleArenaBufferAllocator),
-                  alignof(SingleArenaBufferAllocator)) +
-      AlignSizeUp(sizeof(MicroAllocator), alignof(MicroAllocator)) +
-      AlignSizeUp(sizeof(MicroBuiltinDataAllocator),
-                  alignof(MicroBuiltinDataAllocator)) +
-      AlignSizeUp(sizeof(SubgraphAllocations), alignof(SubgraphAllocations));
-  if (!is_memory_planner_given) {
-    total_size +=
-        AlignSizeUp(sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner));
-  }
-  return total_size;
-}
-
-MicroAllocator::MicroAllocator(SingleArenaBufferAllocator* memory_allocator,
-                               MicroMemoryPlanner* memory_planner,
-                               ErrorReporter* error_reporter)
-    : non_persistent_buffer_allocator_(memory_allocator),
-      persistent_buffer_allocator_(memory_allocator),
-      memory_planner_(memory_planner),
-      error_reporter_(error_reporter),
-      model_is_allocating_(false) {}
-
-MicroAllocator::MicroAllocator(
-    IPersistentBufferAllocator* persistent_buffer_allocator,
-    INonPersistentBufferAllocator* non_persistent_buffer_allocator,
-    MicroMemoryPlanner* memory_planner, ErrorReporter* error_reporter)
-    : non_persistent_buffer_allocator_(non_persistent_buffer_allocator),
-      persistent_buffer_allocator_(persistent_buffer_allocator),
-      memory_planner_(memory_planner),
-      error_reporter_(error_reporter),
-      model_is_allocating_(false) {}
-
-MicroAllocator::~MicroAllocator() {}
-
-MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size,
-                                       MicroMemoryPlanner* memory_planner,
-                                       ErrorReporter* error_reporter) {
-  uint8_t* aligned_arena =
-      AlignPointerUp(tensor_arena, MicroArenaBufferAlignment());
-  size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena;
-  SingleArenaBufferAllocator* memory_allocator =
-      SingleArenaBufferAllocator::Create(error_reporter, aligned_arena,
-                                         aligned_arena_size);
-
-  return Create(memory_allocator, memory_planner, error_reporter);
-}
-
-MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size,
-                                       ErrorReporter* error_reporter) {
-  uint8_t* aligned_arena =
-      AlignPointerUp(tensor_arena, MicroArenaBufferAlignment());
-  size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena;
-  SingleArenaBufferAllocator* memory_allocator =
-      SingleArenaBufferAllocator::Create(error_reporter, aligned_arena,
-                                         aligned_arena_size);
-
-  // By default create a GreedyMemoryPlanner.
-  // If a different MemoryPlanner is needed, use the other API.
- uint8_t* memory_planner_buffer = memory_allocator->AllocatePersistentBuffer( - sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); - GreedyMemoryPlanner* memory_planner = - new (memory_planner_buffer) GreedyMemoryPlanner(); - - return Create(memory_allocator, memory_planner, error_reporter); -} - -MicroAllocator* MicroAllocator::Create( - SingleArenaBufferAllocator* memory_allocator, - MicroMemoryPlanner* memory_planner, ErrorReporter* error_reporter) { - TFLITE_DCHECK(memory_allocator != nullptr); - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(memory_planner != nullptr); - - uint8_t* allocator_buffer = memory_allocator->AllocatePersistentBuffer( - sizeof(MicroAllocator), alignof(MicroAllocator)); - MicroAllocator* allocator = new (allocator_buffer) MicroAllocator( - memory_allocator, memory_allocator, memory_planner, error_reporter); - return allocator; -} - -MicroAllocator* MicroAllocator::Create(uint8_t* persistent_tensor_arena, - size_t persistent_arena_size, - uint8_t* non_persistent_tensor_arena, - size_t non_persistent_arena_size, - ErrorReporter* error_reporter) { - TFLITE_DCHECK(persistent_tensor_arena != nullptr); - TFLITE_DCHECK(non_persistent_tensor_arena != nullptr); - TFLITE_DCHECK(persistent_tensor_arena != non_persistent_tensor_arena); - TFLITE_DCHECK(error_reporter != nullptr); - - IPersistentBufferAllocator* persistent_buffer_allocator = - CreatePersistentArenaAllocator(persistent_tensor_arena, - persistent_arena_size); - INonPersistentBufferAllocator* non_persistent_buffer_allocator = - CreateNonPersistentArenaAllocator(non_persistent_tensor_arena, - non_persistent_arena_size, - persistent_buffer_allocator); - - uint8_t* memory_planner_buffer = - persistent_buffer_allocator->AllocatePersistentBuffer( - sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); - GreedyMemoryPlanner* memory_planner = - new (memory_planner_buffer) GreedyMemoryPlanner(); - - uint8_t* micro_allocator_buffer = - persistent_buffer_allocator->AllocatePersistentBuffer( - sizeof(MicroAllocator), alignof(MicroAllocator)); - MicroAllocator* allocator = new (micro_allocator_buffer) MicroAllocator( - persistent_buffer_allocator, non_persistent_buffer_allocator, - memory_planner, error_reporter); - return allocator; -} - -SubgraphAllocations* MicroAllocator::StartModelAllocation(const Model* model) { - TFLITE_DCHECK(model != nullptr); - - if (model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation started before " - "finishing previously allocated model"); - return nullptr; - } - - model_is_allocating_ = true; - - uint8_t* data_allocator_buffer = - persistent_buffer_allocator_->AllocatePersistentBuffer( - sizeof(MicroBuiltinDataAllocator), - alignof(MicroBuiltinDataAllocator)); - builtin_data_allocator_ = new (data_allocator_buffer) - MicroBuiltinDataAllocator(persistent_buffer_allocator_); - - if (InitScratchBufferData() != kTfLiteOk) { - return nullptr; - } - - // Allocate struct to store eval tensors, nodes and registrations. 
-  SubgraphAllocations* output = reinterpret_cast<SubgraphAllocations*>(
-      persistent_buffer_allocator_->AllocatePersistentBuffer(
-          sizeof(SubgraphAllocations) * model->subgraphs()->size(),
-          alignof(SubgraphAllocations)));
-  if (output == nullptr) {
-    MicroPrintf("Failed to allocate memory for model metadata.");
-    return nullptr;
-  }
-
-  if (AllocateTfLiteEvalTensors(model, output) != kTfLiteOk ||
-      AllocateNodeAndRegistrations(model, output) != kTfLiteOk) {
-    return nullptr;
-  }
-  return output;
-}
-
-TfLiteStatus MicroAllocator::FinishModelAllocation(
-    const Model* model, SubgraphAllocations* subgraph_allocations,
-    ScratchBufferHandle** scratch_buffer_handles) {
-  if (!model_is_allocating_) {
-    TF_LITE_REPORT_ERROR(error_reporter_,
-                         "MicroAllocator: Model allocation finished before "
-                         "starting allocating model");
-    return kTfLiteError;
-  }
-
-  // Allocate scratch buffer metadata.
-  TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles(
-      scratch_buffer_handles, scratch_buffer_request_count_));
-
-  // Allocate buffers for variable tensors.
-  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
-       subgraph_idx++) {
-    const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
-    TFLITE_DCHECK(subgraph != nullptr);
-    TF_LITE_ENSURE_STATUS(AllocateVariables(
-        subgraph, subgraph_allocations[subgraph_idx].tensors));
-  }
-
-  // Plan all subgraphs and scratch buffers together.
-  TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph_allocations,
-                                               *scratch_buffer_handles));
-  model_is_allocating_ = false;
-  return kTfLiteOk;
-}
-
-void* MicroAllocator::AllocatePersistentBuffer(size_t bytes) {
-  return persistent_buffer_allocator_->AllocatePersistentBuffer(
-      bytes, MicroArenaBufferAlignment());
-}
-
-TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes,
-                                                         int subgraph_idx,
-                                                         int* buffer_idx) {
-  // All scratch buffer requests are stored in the head section of the arena
-  // when a model is in the prepare phase.
-  // First align a scratch buffer request pointer to the start of the head:
-  internal::ScratchBufferRequest* requests = GetScratchBufferRequests();
-
-  // Count the number of requested scratch buffers for the current node:
-  size_t current_node_request_count = 0;
-  for (size_t i = 0; i < scratch_buffer_request_count_; ++i) {
-    if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) {
-      ++current_node_request_count;
-    }
-  }
-
-  // First, ensure that the per-kernel request has not exceeded the limit:
-  if (current_node_request_count >= kMaxScratchBuffersPerOp) {
-    TF_LITE_REPORT_ERROR(
-        error_reporter_,
-        "Scratch buffer request exceeds limit per operator (%d)",
-        kMaxScratchBuffersPerOp);
-    return kTfLiteError;
-  }
-
-  // Initialize and assign values for the request at the current index:
-  internal::ScratchBufferRequest* current_request =
-      &requests[scratch_buffer_request_count_];
-  *current_request = {};
-  // Assign -1 as a sentinel value that will be updated when the node finishes
-  // allocating:
-  current_request->bytes = bytes;
-  current_request->node_idx = kUnassignedScratchBufferRequestIndex;
-  current_request->subgraph_idx = subgraph_idx;
-
-  // Assign the current request index to the out-param:
-  *buffer_idx = scratch_buffer_request_count_;
-
-  // Bump the request count to prepare for the next request:
-  ++scratch_buffer_request_count_;
-  return kTfLiteOk;
-}
-
-TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) {
-  // When a node has finished preparing, all temp allocations performed by the
-  // kernel should be cleaned up:
-  TF_LITE_ENSURE_STATUS(ResetTempAllocations());
-
-  // Find and update any new scratch buffer requests for the current node:
-  internal::ScratchBufferRequest* requests = GetScratchBufferRequests();
-
-  for (size_t i = 0; i < scratch_buffer_request_count_; ++i) {
-    // A request with a node_idx of -1 is a sentinel value used to indicate
-    // this was a new request for the current node. The allocator finally knows
-    // the node index at this point. Assign the value and update the list of
-    // new requests so the head section can be adjusted to allow for the next
-    // kernel to allocate at most kMaxScratchBuffersPerOp requests:
-    if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) {
-      requests[i].node_idx = node_id;
-    }
-  }
-
-  // Ensure that the head is re-adjusted to allow for another at-most
-  // kMaxScratchBuffersPerOp scratch buffer requests in the next operator:
-  TF_LITE_ENSURE_STATUS(non_persistent_buffer_allocator_->ResizeBuffer(
-      scratch_buffer_head_,
-      sizeof(internal::ScratchBufferRequest) *
-          (scratch_buffer_request_count_ + kMaxScratchBuffersPerOp),
-      alignof(internal::ScratchBufferRequest)));
-
-  return kTfLiteOk;
-}
-
-size_t MicroAllocator::used_bytes() const {
-  return non_persistent_buffer_allocator_->GetNonPersistentUsedBytes() +
-         persistent_buffer_allocator_->GetPersistentUsedBytes();
-}
-
-TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations(
-    const Model* model, SubgraphAllocations* subgraph_allocations) {
-  TFLITE_DCHECK(subgraph_allocations != nullptr);
-
-  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
-       subgraph_idx++) {
-    const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
-    TFLITE_DCHECK(subgraph != nullptr);
-
-    uint32_t operators_size = NumSubgraphOperators(subgraph);
-
-    // Initialize NodeAndRegistrations for the subgraph.
TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations( - const Model* model, SubgraphAllocations* subgraph_allocations) { - TFLITE_DCHECK(subgraph_allocations != nullptr); - - for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); - subgraph_idx++) { - const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); - TFLITE_DCHECK(subgraph != nullptr); - - uint32_t operators_size = NumSubgraphOperators(subgraph); - - // Initialize NodeAndRegistrations for the subgraph. - NodeAndRegistration* output = reinterpret_cast<NodeAndRegistration*>( - persistent_buffer_allocator_->AllocatePersistentBuffer( - sizeof(NodeAndRegistration) * operators_size, - alignof(NodeAndRegistration))); - if (output == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for node_and_registrations."); - return kTfLiteError; - } - subgraph_allocations[subgraph_idx].node_and_registrations = output; - } - return kTfLiteOk; -} - -TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensor( - const Model* model, const SubgraphAllocations* subgraph_allocations, - int tensor_index, int subgraph_index) { - const SubGraph* subgraph = model->subgraphs()->Get(subgraph_index); - TFLITE_DCHECK(subgraph != nullptr); - - // This value is allocated from persistent arena space. It is guaranteed to be - // around for the lifetime of the application. - TfLiteTensor* tensor = AllocatePersistentTfLiteTensorInternal(); - - // Populate any fields from the flatbuffer. Since this TfLiteTensor struct is - // allocated in the persistent section of the arena, ensure that additional - // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer( - model, tensor, tensor_index, subgraph_index, - /*allocate_temp=*/false) != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to populate a persistent TfLiteTensor struct " - "from flatbuffer data!"); - return nullptr; - } - - if (subgraph_allocations != nullptr) { - // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) - // and not located in the flatbuffer are stored on the pre-allocated list of - // TfLiteEvalTensors structs. These structs are the source of truth; simply - // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = - subgraph_allocations[subgraph_index].tensors[tensor_index].data.data; - // TfLiteEvalTensor structs must also be the source of truth for the - // TfLiteTensor dims. - tensor->dims = - subgraph_allocations[subgraph_index].tensors[tensor_index].dims; - } - return tensor; -} - -void MicroAllocator::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { - TFLITE_DCHECK(tensor != nullptr); - - if (tensor->quantization.type == kTfLiteAffineQuantization) { - TFLITE_DCHECK(tensor->quantization.params != nullptr); - TfLiteAffineQuantization* quantization = - reinterpret_cast<TfLiteAffineQuantization*>( - tensor->quantization.params); - - non_persistent_buffer_allocator_->DeallocateTemp( - reinterpret_cast<uint8_t*>(quantization->zero_point)); - non_persistent_buffer_allocator_->DeallocateTemp( - reinterpret_cast<uint8_t*>(quantization)); - } - - // Clear the data in case someone still accesses the tensor arena by mistake. - tensor->quantization.type = kTfLiteNoQuantization; - tensor->quantization.params = nullptr; - tensor->data.data = nullptr; - tensor->dims = nullptr; - non_persistent_buffer_allocator_->DeallocateTemp( - reinterpret_cast<uint8_t*>(tensor)); -}
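Callers of the temp-tensor API defined next are expected to pair every allocation with a deallocation before the chain is reset; a sketch with placeholder indices:

```cpp
// Hedged sketch of the temp TfLiteTensor lifecycle.
TfLiteTensor* t = allocator->AllocateTempTfLiteTensor(
    model, allocations, /*tensor_index=*/0, /*subgraph_index=*/0);
// ... inspect t->type, t->dims, quantization params ...
allocator->DeallocateTempTfLiteTensor(t);
// Only once every temp is released can the chain be reset:
TFLITE_DCHECK(allocator->IsAllTempDeallocated());
```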
TfLiteTensor* MicroAllocator::AllocateTempTfLiteTensor( - const Model* model, const SubgraphAllocations* subgraph_allocations, - int tensor_index, int subgraph_index) { - const SubGraph* subgraph = model->subgraphs()->Get(subgraph_index); - TFLITE_DCHECK(subgraph != nullptr); - - // This value is allocated from temporary arena space. It is guaranteed to be - // around for at least the scope of the calling function. Since this struct - // allocation takes place in temp space, there is no need to own or clean it up. - TfLiteTensor* tensor = reinterpret_cast<TfLiteTensor*>( - non_persistent_buffer_allocator_->AllocateTemp(sizeof(TfLiteTensor), - alignof(TfLiteTensor))); - - // Populate any fields from the flatbuffer. Since this TfLiteTensor struct is - // allocated in the temp section of the arena, ensure that additional - // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, tensor, tensor_index, - subgraph_index, - /*allocate_temp=*/true) != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to populate a temp TfLiteTensor struct from flatbuffer data!"); - return nullptr; - } - - if (subgraph_allocations != nullptr) { - // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) - // and not located in the flatbuffer are stored on the pre-allocated list of - // TfLiteEvalTensors structs. These structs are the source of truth; simply - // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = - subgraph_allocations[subgraph_index].tensors[tensor_index].data.data; - // TfLiteEvalTensor structs must also be the source of truth for the - // TfLiteTensor dims. - tensor->dims = - subgraph_allocations[subgraph_index].tensors[tensor_index].dims; - } - return tensor; -} - -TfLiteStatus MicroAllocator::ResetTempAllocations() { - return non_persistent_buffer_allocator_->ResetTempAllocations(); -} - -bool MicroAllocator::IsAllTempDeallocated() { - return non_persistent_buffer_allocator_->IsAllTempDeallocated(); -} - -TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, SubgraphAllocations* subgraph_allocations) { - TFLITE_DCHECK(subgraph_allocations != nullptr); - - for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); - subgraph_idx++) { - const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); - TFLITE_DCHECK(subgraph != nullptr); - - size_t alloc_count = subgraph->tensors()->size(); - TfLiteEvalTensor* tensors = reinterpret_cast<TfLiteEvalTensor*>( - persistent_buffer_allocator_->AllocatePersistentBuffer( - sizeof(TfLiteEvalTensor) * alloc_count, alignof(TfLiteEvalTensor))); - if (tensors == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for context->eval_tensors, " - "%d bytes required", - sizeof(TfLiteEvalTensor) * alloc_count); - return kTfLiteError; - } - - for (size_t i = 0; i < alloc_count; ++i) { - TfLiteStatus status = internal::InitializeTfLiteEvalTensorFromFlatbuffer( - *subgraph->tensors()->Get(i), model->buffers(), error_reporter_, - &tensors[i]); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, "Failed to initialize tensor %d", - i); - return kTfLiteError; - } - } - subgraph_allocations[subgraph_idx].tensors = tensors; - } - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) { - for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { - auto* tensor = subgraph->tensors()->Get(i); - if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size)); - - eval_tensors[i].data.data = - persistent_buffer_allocator_->AllocatePersistentBuffer( - buffer_size, MicroArenaBufferAlignment()); - - if (eval_tensors[i].data.data == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate variable tensor of size %d", - buffer_size); - return kTfLiteError; - } - } - } - return kTfLiteOk; -}
TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensorInternal() { - return reinterpret_cast<TfLiteTensor*>( - persistent_buffer_allocator_->AllocatePersistentBuffer( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); -} - -TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, TfLiteTensor* tensor, int tensor_index, - int subgraph_idx, bool allocate_temp) { - // TODO(b/162311891): This method serves as a stub to ensure quantized - // allocations in the tail can be recorded. Once the interpreter has APIs for - // accessing buffers on TfLiteEvalTensor this method can be dropped. - return internal::InitializeTfLiteTensorFromFlatbuffer( - persistent_buffer_allocator_, non_persistent_buffer_allocator_, - allocate_temp, - *model->subgraphs()->Get(subgraph_idx)->tensors()->Get(tensor_index), - model->buffers(), error_reporter_, tensor); -} - -ErrorReporter* MicroAllocator::error_reporter() const { - return error_reporter_; -} - -TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( - const Model* model, SubgraphAllocations* allocations, - ScratchBufferHandle* scratch_buffer_handles) { - size_t head_usage = 0; - // Create the static memory plan: - // 1. Calculate AllocationInfo to know the lifetime of each tensor/buffer. - // 2. Add them into the planner (such as the GreedyMemoryPlanner). - // 3. Static memory planning using the planner. - // 4. Set tensor/buffer pointers based on the offsets from the previous step. - // - // Note that AllocationInfo is only needed for creating the plan. It will be - // allocated from the temp section and cleaned up at the bottom of this - // function. - - // Use the AllocationInfoBuilder class to help determine where buffers are - // used in the subgraph. - AllocationInfoBuilder builder(model, non_persistent_buffer_allocator_, - error_reporter_); - TF_LITE_ENSURE_STATUS( - builder.CreateAllocationInfo(scratch_buffer_request_count_)); - - const int32_t* offline_planner_offsets = nullptr; - TF_LITE_ENSURE_STATUS( - builder.GetOfflinePlannedOffsets(&offline_planner_offsets)); - TF_LITE_ENSURE_STATUS( - builder.InitializeAllocationInfo(offline_planner_offsets, allocations)); - - internal::ScratchBufferRequest* scratch_buffer_requests = - GetScratchBufferRequests(); - TF_LITE_ENSURE_STATUS(builder.MarkAllocationLifetimes( - 0, scratch_buffer_requests, scratch_buffer_handles, allocations)); - int allocation_info_count = builder.AllocationCount(); - AllocationInfo* allocation_info = builder.Finish(); - - // Remaining arena size that the memory planner can use for calculating offsets. - size_t remaining_arena_size = - non_persistent_buffer_allocator_->GetAvailableMemory( - MicroArenaBufferAlignment()); - uint8_t* planner_arena = non_persistent_buffer_allocator_->AllocateTemp( - remaining_arena_size, MicroArenaBufferAlignment()); - TF_LITE_ENSURE(error_reporter_, planner_arena != nullptr); - memory_planner_->Init(planner_arena, remaining_arena_size); - TF_LITE_ENSURE_STATUS(CreatePlan(error_reporter_, memory_planner_, - allocation_info, allocation_info_count)); - - // Commit the plan.
- TF_LITE_ENSURE_STATUS( - CommitPlan(error_reporter_, memory_planner_, - non_persistent_buffer_allocator_->GetOverlayMemoryAddress(), - allocation_info, allocation_info_count)); - - // Reset all temp allocations used above: - builder.FreeAllocationInfo(); - non_persistent_buffer_allocator_->DeallocateTemp(planner_arena); - TF_LITE_ENSURE_STATUS( - non_persistent_buffer_allocator_->ResetTempAllocations()); - TF_LITE_ENSURE_STATUS( - non_persistent_buffer_allocator_->DeallocateResizableBuffer( - scratch_buffer_head_)); - -#ifdef TF_LITE_SHOW_MEMORY_USE - memory_planner_->PrintMemoryPlan(); -#endif - head_usage = memory_planner_->GetMaximumMemorySize(); - - // The head is used to store memory plans for one model at a time during the - // model preparation stage, and is re-purposed to store scratch buffer handles - // during model invocation. The head must be as large as the greater of the - // largest model memory plan's size and the total space required for all - // scratch buffer handles. - if (max_head_buffer_usage_ < head_usage) { - max_head_buffer_usage_ = head_usage; - } - - // The head is used for storing scratch buffer allocations before finalizing a - // memory plan in this function. Ensure that the head is set to the largest - // memory plan sent through the allocator: - TF_LITE_ENSURE_STATUS( - non_persistent_buffer_allocator_->ReserveNonPersistentOverlayMemory( - max_head_buffer_usage_, MicroArenaBufferAlignment())); - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::AllocateScratchBufferHandles( - ScratchBufferHandle** scratch_buffer_handles, size_t handle_count) { - TFLITE_DCHECK(scratch_buffer_handles != nullptr); - - if (scratch_buffer_request_count_ == 0) { - // No scratch buffers were requested during model allocation. - return kTfLiteOk; - } - - // Allocate a consecutive block of memory to store the scratch buffer handles. - // This alignment ensures quick lookup during inference time for the model: - *scratch_buffer_handles = reinterpret_cast<ScratchBufferHandle*>( - persistent_buffer_allocator_->AllocatePersistentBuffer( - sizeof(ScratchBufferHandle) * handle_count, - alignof(ScratchBufferHandle))); - - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::InitScratchBufferData() { - // A model is preparing to allocate resources; ensure that the scratch buffer - // request counter is cleared: - scratch_buffer_request_count_ = 0; - - // All requests will be stored in the head section. Each kernel is allowed at - // most kMaxScratchBuffersPerOp requests. Adjust the head to reserve at most - // that many requests to begin: - scratch_buffer_head_ = - non_persistent_buffer_allocator_->AllocateResizableBuffer( - sizeof(internal::ScratchBufferRequest) * kMaxScratchBuffersPerOp, - alignof(internal::ScratchBufferRequest)); - if (scratch_buffer_head_ == nullptr) { - return kTfLiteError; - } - - return kTfLiteOk; -} - -internal::ScratchBufferRequest* MicroAllocator::GetScratchBufferRequests() { - return reinterpret_cast<internal::ScratchBufferRequest*>(AlignPointerUp( - scratch_buffer_head_, alignof(internal::ScratchBufferRequest))); -} - -BuiltinDataAllocator* MicroAllocator::GetBuiltinDataAllocator() { - return builtin_data_allocator_; -} - -} // namespace tflite
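Before the header that follows, a sketch of how this allocator is typically stood up over a caller-owned arena (the size is a placeholder; alignas(16) matches the alignment advice in the header comments):

```cpp
// Illustrative only; not part of the deleted sources.
alignas(16) static uint8_t tensor_arena[16 * 1024];  // placeholder size
tflite::MicroErrorReporter error_reporter;
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
    tensor_arena, sizeof(tensor_arena), &error_reporter);
// After FinishModelAllocation(), used_bytes() reports the arena high-water mark.
```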
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_allocator.h b/code/components/tflite-lib/tensorflow/lite/micro/micro_allocator.h deleted file mode 100644 index f9bd8b75..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/micro_allocator.h +++ /dev/null @@ -1,331 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ - -#include <cstddef> -#include <cstdint> - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h" -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/flatbuffer_utils.h" -#include "tensorflow/lite/micro/memory_planner/micro_memory_planner.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// TODO(b/199402574): rename to tflite_internal or just remove internal -// namespace. -namespace internal { - -// Sets up all of the data structure members for a TfLiteTensor based on the -// contents of a serialized tensor in the flatbuffer. -// TODO(b/162311891): Drop this method when the interpreter has an API for -// returning buffers on TfLiteEvalTensor. -TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - IPersistentBufferAllocator* persistent_buffer_allocator, - INonPersistentBufferAllocator* non_persistent_buffer_allocator, - bool allocate_temp, const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result); - -// Holds placeholder information for a scratch buffer request from a kernel. -// This struct is only used during the model prepare stage. Each request from a -// kernel is stored in the head section. During the prepare stage, the head -// section will at least hold kMaxScratchBuffersPerOp number of requests plus -// any requests from previous kernels. -// -// When the memory plan is finalized, these structs are no longer used in favor -// of a sequential array of ScratchBufferHandle allocations in the tail -// section. These allocations are indexed by the request API defined in the -// TfLiteContext struct. -struct ScratchBufferRequest { - // Number of bytes required by the buffer. The actual allocated size might be - // greater than `bytes` due to buffer alignment. - size_t bytes; - // Node that the buffer is allocated for. This provides useful information to - // determine the lifetime of the buffer. In AllocationInfo, this buffer will - // have `before` = node_idx and `after` = node_idx. - int node_idx; - int subgraph_idx; -}; - -} // namespace internal - -struct NodeAndRegistration { - TfLiteNode node; - const TfLiteRegistration* registration; -}; - -// Holds a pointer to a buffer for a scratch buffer requested by a kernel during -// the model prepare stage. This struct is allocated in-place and allows for -// quick pointer-indexed lookup for speed during model inference. -struct ScratchBufferHandle { - // Pointer to location of the scratch buffer: - uint8_t* data; -};
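At invoke time a kernel's buffer index resolves against that handle array with a plain pointer-indexed lookup, roughly:

```cpp
// Illustrative helper only (the name is not from the deleted sources).
inline void* LookupScratchBuffer(tflite::ScratchBufferHandle* handles,
                                 int buffer_idx) {
  return handles[buffer_idx].data;
}
```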
// Stores all per-subgraph allocations. This includes the node and registration -// array, and tensor list for each subgraph. -struct SubgraphAllocations { - NodeAndRegistration* node_and_registrations; - TfLiteEvalTensor* tensors; -}; - -// Allocator responsible for allocating memory for all intermediate tensors -// necessary to invoke a model. -// -// The lifetime of the model, tensor arena and error reporter must be at -// least as long as that of the allocator object, since the allocator needs -// them to be accessible during its entire lifetime. -// -// The MicroAllocator simply plans out additional allocations that are required -// to stand up a model for inference in TF Micro. This class currently relies on -// an additional allocator - SingleArenaBufferAllocator - for all allocations -// from an arena. These allocations are divided into head (non-persistent) and -// tail (persistent) regions: -// -// Memory layout, to help understand how it works. This may change in future -// versions. -// ************** .memory_allocator->GetBuffer() -// Tensors/Scratch buffers (head) -// ************** .head_watermark -// unused memory -// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize() -// - ->GetDataSize() -// persistent area (tail) -// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize() -class MicroAllocator { - public: - // Creates a MicroAllocator instance from a given tensor arena. This arena - // will be managed by the created instance. The GreedyMemoryPlanner will - // by default be used and created on the arena. - // Note: Please use alignas(16) to make sure tensor_arena is 16 - // bytes aligned, otherwise some head room will be wasted. - // TODO(b/157615197): Cleanup constructor + factory usage. - static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter); - - // Creates a MicroAllocator instance from a given tensor arena and a given - // MemoryPlanner. This arena will be managed by the created instance. Note: - // Please use alignas(16) to make sure tensor_arena is 16 bytes - // aligned, otherwise some head room will be wasted. - static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size, - MicroMemoryPlanner* memory_planner, - ErrorReporter* error_reporter); - - // Creates a MicroAllocator instance using the provided - // SingleArenaBufferAllocator instance and the MemoryPlanner. This allocator - // instance will use the SingleArenaBufferAllocator instance to manage - // allocations internally. - static MicroAllocator* Create(SingleArenaBufferAllocator* memory_allocator, - MicroMemoryPlanner* memory_planner, - ErrorReporter* error_reporter); - - // Creates a MicroAllocator instance using separate persistent and - // non-persistent tensor arenas: persistent (tail) allocations are served from - // the persistent arena, while head and temp allocations are served from the - // non-persistent arena. - static MicroAllocator* Create(uint8_t* persistent_tensor_arena, - size_t persistent_arena_size, - uint8_t* non_persistent_tensor_arena, - size_t non_persistent_arena_size, - ErrorReporter* error_reporter); - - // Returns the fixed amount of memory overhead of MicroAllocator. - static size_t GetDefaultTailUsage(bool is_memory_planner_given);
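A sketch of the split-arena overload declared above, e.g. to keep the persistent tail in one memory region and the head/temp area in another (sizes and names are placeholders; `error_reporter` is assumed to exist):

```cpp
// Illustrative use of the persistent/non-persistent overload.
alignas(16) static uint8_t persistent_arena[8 * 1024];       // tail
alignas(16) static uint8_t non_persistent_arena[24 * 1024];  // head + temp
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
    persistent_arena, sizeof(persistent_arena), non_persistent_arena,
    sizeof(non_persistent_arena), &error_reporter);
```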
- // Allocates internal resources required for model inference for each subgraph - // from the arena. - // - // This method will run through the flatbuffer data supplied in the model to - // properly allocate tensor, node, and op registration data. This method is - // expected to be followed with a call to FinishModelAllocation(). Returns a - // pointer to an array of SubgraphAllocations (also stored in the tail of the - // arena) where each index corresponds to a different subgraph in the model. - // Return value is nullptr if the allocations failed. - SubgraphAllocations* StartModelAllocation(const Model* model); - - // Finish allocating internal resources required for model inference. - // - // -Plan the memory for activation tensors and scratch buffers. - // -Update eval tensors for each subgraph based on planned offsets. - // -Allocate scratch buffer handles array and update based on planned offsets. - // - // This method should be called after assigning model resources - // in StartModelAllocation(). The subgraph_allocations pointer should be the - // value passed into this class during StartModelAllocation(). Scratch buffer - // handles are stored in the out-param `scratch_buffer_handles` array which is - // allocated in this method. This value will be used in the `GetScratchBuffer` - // call to retrieve scratch buffers. - TfLiteStatus FinishModelAllocation( - const Model* model, SubgraphAllocations* subgraph_allocations, - ScratchBufferHandle** scratch_buffer_handles); - - // Allocates a TfLiteTensor struct and populates the returned value with - // properties from the model flatbuffer. This struct is allocated from - // persistent arena memory and is only guaranteed for the lifetime of the - // application. The eval_tensors pointer should be the value passed into this - // class during StartModelAllocation() and contains the source-of-truth for - // buffers. - virtual TfLiteTensor* AllocatePersistentTfLiteTensor( - const Model* model, const SubgraphAllocations* subgraph_allocations, - int tensor_index, int subgraph_index); - - // Allocates a TfLiteTensor struct and populates the returned value with - // properties from the model flatbuffer. This struct is allocated from - // temporary arena memory and is only guaranteed until a call is made to - // ResetTempAllocations(). subgraph_allocations contains the array of - // TfLiteEvalTensors. If the newly allocated temp at the specified subgraph - // and tensor index is already present in the TfLiteEvalTensor array, its - // data buffer will be re-used. - virtual TfLiteTensor* AllocateTempTfLiteTensor( - const Model* model, const SubgraphAllocations* subgraph_allocations, - int tensor_index, int subgraph_index); - - virtual void DeallocateTempTfLiteTensor(TfLiteTensor*); - - // Resets all temporary allocations. This method should be called after a - // chain of temp allocations (e.g. chain of TfLiteTensor objects via - // AllocateTfLiteTensor()). - virtual TfLiteStatus ResetTempAllocations(); - - // Returns true if all temporary buffers including temp TfLiteTensor are - // already deallocated. - virtual bool IsAllTempDeallocated(); - - // Allocates a persistent buffer which has the same lifetime as the allocator. - // The memory is immediately available and is allocated from the tail of the - // arena. - virtual void* AllocatePersistentBuffer(size_t bytes); - - // Register a scratch buffer of size `bytes` for the node with `node_id`. - // This method only requests a buffer with a given size to be used after a - // model has finished allocation via FinishModelAllocation(). All requested - // buffers will be accessible by the out-param in that method.
- TfLiteStatus RequestScratchBufferInArena(size_t bytes, int subgraph_idx, - int* buffer_idx); - - // Finish allocating a specific NodeAndRegistration prepare block (kernel - // entry for a model) with a given node ID. This call ensures that any scratch - // buffer requests and temporary allocations are handled and ready for the - // next node prepare block. - TfLiteStatus FinishPrepareNodeAllocations(int node_id); - - // Returns the arena usage in bytes, only available after - // `FinishModelAllocation`. Otherwise, it will return 0. - size_t used_bytes() const; - - BuiltinDataAllocator* GetBuiltinDataAllocator(); - - protected: - MicroAllocator(SingleArenaBufferAllocator* memory_allocator, - MicroMemoryPlanner* memory_planner, - ErrorReporter* error_reporter); - MicroAllocator(IPersistentBufferAllocator* persistent_buffer_allocator, - INonPersistentBufferAllocator* non_persistent_buffer_allocator, - MicroMemoryPlanner* memory_planner, - ErrorReporter* error_reporter); - virtual ~MicroAllocator(); - - // Allocates an array in the arena to hold pointers to the node and - // registration pointers required to represent the inference graph of the - // model. - virtual TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, SubgraphAllocations* subgraph_allocations); - - // Allocates the list of persistent TfLiteEvalTensors that are used for the - // "eval" phase of model inference. These structs will be the source of truth - // for all tensor buffers. - virtual TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, SubgraphAllocations* subgraph_allocations); - // Allocates persistent tensor buffers for variable tensors in the subgraph. - virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors); - - // Allocate and return a persistent TfLiteTensor. - // TODO(b/162311891): Drop this method when the interpreter has an API for - // accessing TfLiteEvalTensor structs. - virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal(); - - // Populates a TfLiteTensor struct with data from the model flatbuffer. Any - // quantization data is allocated from either the tail (persistent) or temp - // sections of the arena based on the allocation flag. - virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model, - TfLiteTensor* tensor, - int tensor_index, - int subgraph_idx, - bool allocate_temp); - - ErrorReporter* error_reporter() const; - - private: - // Commits a memory plan for all non-persistent buffer allocations in the - // 'head' section of the memory arena. The eval_tensors pointer is the list of - // pre-allocated TfLiteEvalTensor structs that will point to the buffers that - // will be allocated into the head section in this function call. The - // scratch_buffer_handles pointer is the array of pre-allocated - // ScratchBufferHandle structs that will point to allocated buffers also in - // the head section. - virtual TfLiteStatus CommitStaticMemoryPlan( - const Model* model, SubgraphAllocations* allocations, - ScratchBufferHandle* scratch_buffer_handles); - - // Allocates an array of ScratchBufferHandle structs in the tail section for a - // given number of handles. - virtual TfLiteStatus AllocateScratchBufferHandles( - ScratchBufferHandle** scratch_buffer_handles, size_t handle_count); - - // Clears all internal scratch buffer request counts and resets the head to - // prepare for kernels to request scratch buffer data when a model is - // preparing. 
- TfLiteStatus InitScratchBufferData(); - - // Returns the pointer for the array of ScratchBufferRequest allocations in - // the head section. - internal::ScratchBufferRequest* GetScratchBufferRequests(); - - // A simple memory allocator that always allocates from the arena tail or head. - INonPersistentBufferAllocator* non_persistent_buffer_allocator_; - IPersistentBufferAllocator* persistent_buffer_allocator_; - - // Allocator used to allocate persistent builtin data. - BuiltinDataAllocator* builtin_data_allocator_; - - // Activation buffer memory planner. - MicroMemoryPlanner* memory_planner_; - - ErrorReporter* error_reporter_; - bool model_is_allocating_; - - // Holds the number of ScratchBufferRequest instances stored in the head - // section when a model is allocating. - size_t scratch_buffer_request_count_ = 0; - - // Holds the ScratchBufferRequest array while a model is allocating. - uint8_t* scratch_buffer_head_ = nullptr; - - // Holds the byte length of the memory plan with the largest head usage. Used - // to ensure that multi-tenant allocations can share the head for buffers. - size_t max_head_buffer_usage_ = 0; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_
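The persistent-buffer path declared in the header above is what kernels reach through TfLiteContext during their Init stage; a hedged sketch with a hypothetical OpData:

```cpp
// Hypothetical kernel Init(); the context hook forwards to
// MicroAllocator::AllocatePersistentBuffer(), so the data lives in the tail.
struct OpData {
  int scratch_index;
};

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return context->AllocatePersistentBuffer(context, sizeof(OpData));
}
```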
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_interpreter.cc b/code/components/tflite-lib/tensorflow/lite/micro/micro_interpreter.cc deleted file mode 100644 index 50fe7911..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/micro_interpreter.cc +++ /dev/null @@ -1,340 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/micro/micro_interpreter.h" - -#include <cstdarg> -#include <cstddef> -#include <cstdint> - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/c_api_types.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/tensor_utils.h" -#include "tensorflow/lite/micro/flatbuffer_utils.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/micro_profiler.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/schema/schema_utils.h" - -namespace tflite { - -MicroInterpreter::MicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - uint8_t* tensor_arena, - size_t tensor_arena_size, - ErrorReporter* error_reporter, - MicroResourceVariables* resource_variables, - MicroProfiler* profiler) - : model_(model), - op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*MicroAllocator::Create(tensor_arena, tensor_arena_size, - error_reporter)), - - graph_(&context_, model, &allocator_, resource_variables), - tensors_allocated_(false), - initialization_status_(kTfLiteError), - input_tensors_(nullptr), - output_tensors_(nullptr), - micro_context_(&allocator_, model_, &graph_) { - Init(profiler); -} - -MicroInterpreter::MicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - MicroAllocator* allocator, - ErrorReporter* error_reporter, - MicroResourceVariables* resource_variables, - MicroProfiler* profiler) - : model_(model), - op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*allocator), - graph_(&context_, model, allocator, resource_variables), - tensors_allocated_(false), - initialization_status_(kTfLiteError), - input_tensors_(nullptr), - output_tensors_(nullptr), - micro_context_(&allocator_, model_, &graph_) { - Init(profiler); -} - -MicroInterpreter::~MicroInterpreter() { - if (graph_.GetAllocations() != nullptr) { - graph_.FreeSubgraphs(); - } -} - -void MicroInterpreter::Init(MicroProfiler* profiler) { - context_.impl_ = static_cast<void*>(&micro_context_); - context_.ReportError = MicroContextReportOpError; - context_.GetTensor = MicroContextGetTensor; - context_.GetEvalTensor = MicroContextGetEvalTensor; - context_.profiler = profiler; - - initialization_status_ = kTfLiteOk; -} - -TfLiteStatus MicroInterpreter::PrepareNodeAndRegistrationDataFromFlatbuffer() { - for (int subgraph_idx = 0; subgraph_idx < graph_.NumSubgraphs(); - subgraph_idx++) { - const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); - TFLITE_DCHECK(subgraph != nullptr); - - auto* opcodes = model_->operator_codes(); - BuiltinDataAllocator* builtin_data_allocator = - allocator_.GetBuiltinDataAllocator(); - uint32_t operators_size = NumSubgraphOperators(subgraph); - for (size_t i = 0; i < operators_size; ++i) { - const auto* op = subgraph->operators()->Get(i); - const size_t index = op->opcode_index(); - if (index >= opcodes->size()) { - MicroPrintf("Missing registration for opcode_index %d\n", index); - return kTfLiteError; - } - const auto* opcode = opcodes->Get(index); - TfLiteStatus status = - GetRegistrationFromOpCode(opcode, op_resolver_, error_reporter_, - &(graph_.GetAllocations()[subgraph_idx] - .node_and_registrations[i]
.registration)); - if (status != kTfLiteOk) { - MicroPrintf("Failed to get registration from op code %s\n ", - EnumNameBuiltinOperator(GetBuiltinCode(opcode))); - return status; - } - const auto* registration = graph_.GetAllocations()[subgraph_idx] - .node_and_registrations[i] - .registration; - if (registration == nullptr) { - MicroPrintf("Skipping op for opcode_index %d\n", index); - return kTfLiteError; - } - BuiltinOperator op_type = - static_cast<BuiltinOperator>(registration->builtin_code); - - const char* custom_data = nullptr; - size_t custom_data_size = 0; - unsigned char* builtin_data = nullptr; - - if (op_type == BuiltinOperator_CUSTOM) { - // Custom Ops may or may not have a non-null custom_options field. - if (op->custom_options() != nullptr) { - custom_data = - reinterpret_cast<const char*>(op->custom_options()->data()); - custom_data_size = op->custom_options()->size(); - } - } else { - if (op->custom_options() != nullptr) { - MicroPrintf( - "Unsupported behavior: found builtin operator %s with custom " - "options.\n", - EnumNameBuiltinOperator(op_type)); - return kTfLiteError; - } - - MicroOpResolver::BuiltinParseFunction parser = - op_resolver_.GetOpDataParser(op_type); - if (parser == nullptr) { - MicroPrintf("Did not find a parser for %s", - EnumNameBuiltinOperator(op_type)); - - return kTfLiteError; - } - TF_LITE_ENSURE_STATUS(parser(op, error_reporter_, - builtin_data_allocator, - (void**)(&builtin_data))); - } - - TfLiteIntArray* inputs_array = - FlatBufferVectorToTfLiteTypeArray(op->inputs()); - TfLiteIntArray* outputs_array = - FlatBufferVectorToTfLiteTypeArray(op->outputs()); - - TfLiteNode* node = &( - graph_.GetAllocations()[subgraph_idx].node_and_registrations[i].node); - *node = {}; - node->inputs = inputs_array; - node->outputs = outputs_array; - node->builtin_data = reinterpret_cast<void*>(builtin_data); - node->custom_initial_data = custom_data; - node->custom_initial_data_size = custom_data_size; - - if (op->intermediates() && (op->intermediates()->size() > 0)) { - node->intermediates = - FlatBufferVectorToTfLiteTypeArray(op->intermediates()); - } - } - } - return kTfLiteOk; -} - -TfLiteStatus MicroInterpreter::AllocateTensors() { - SubgraphAllocations* allocations = allocator_.StartModelAllocation(model_); - - if (allocations == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed starting model allocation.\n"); - initialization_status_ = kTfLiteError; - return kTfLiteError; - } - - graph_.SetSubgraphAllocations(allocations); - - TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer()); - - // Only allow AllocatePersistentBuffer in the Init stage. - context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = nullptr; - context_.GetExternalContext = nullptr; - TF_LITE_ENSURE_STATUS(graph_.InitSubgraphs()); - - // Both AllocatePersistentBuffer and RequestScratchBufferInArena are - // available in the Prepare stage. - context_.RequestScratchBufferInArena = - MicroContextRequestScratchBufferInArena; - // The external_context becomes available in the Prepare stage. - context_.GetExternalContext = MicroContextGetExternalContext; - - TF_LITE_ENSURE_STATUS(graph_.PrepareSubgraphs()); - - // Prepare is done, we're ready for Invoke. Memory allocation is no longer - // allowed. Kernels can only fetch scratch buffers via GetScratchBuffer.
- context_.AllocatePersistentBuffer = nullptr; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = MicroContextGetScratchBuffer; - - TF_LITE_ENSURE_OK(&context_, allocator_.FinishModelAllocation( - model_, graph_.GetAllocations(), - &scratch_buffer_handles_)); - - micro_context_.SetScratchBufferHandles(scratch_buffer_handles_); - - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. - input_tensors_ = - reinterpret_cast<TfLiteTensor**>(allocator_.AllocatePersistentBuffer( - sizeof(TfLiteTensor*) * inputs_size())); - if (input_tensors_ == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for context->input_tensors_, " - "%d bytes required", - sizeof(TfLiteTensor*) * inputs_size()); - return kTfLiteError; - } - - for (size_t i = 0; i < inputs_size(); ++i) { - input_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( - model_, graph_.GetAllocations(), inputs().Get(i), 0); - if (input_tensors_[i] == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to initialize input tensor %d", i); - return kTfLiteError; - } - } - - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. - output_tensors_ = - reinterpret_cast<TfLiteTensor**>(allocator_.AllocatePersistentBuffer( - sizeof(TfLiteTensor*) * outputs_size())); - if (output_tensors_ == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for context->output_tensors_, " - "%d bytes required", - sizeof(TfLiteTensor*) * outputs_size()); - return kTfLiteError; - } - - for (size_t i = 0; i < outputs_size(); ++i) { - output_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( - model_, graph_.GetAllocations(), outputs().Get(i), 0); - if (output_tensors_[i] == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to initialize output tensor %d", i); - return kTfLiteError; - } - } - - TF_LITE_ENSURE_STATUS(ResetVariableTensors()); - - tensors_allocated_ = true; - return kTfLiteOk; -} - -TfLiteStatus MicroInterpreter::Invoke() { - if (initialization_status_ != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invoke() called after initialization failed\n"); - return kTfLiteError; - } - - // Ensure tensors are allocated before the interpreter is invoked to avoid - // difficult-to-debug segfaults. - if (!tensors_allocated_) { - TF_LITE_ENSURE_OK(&context_, AllocateTensors()); - } - return graph_.InvokeSubgraph(0); -} - -TfLiteTensor* MicroInterpreter::input(size_t index) { - const size_t length = inputs_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Input index %d out of range (length is %d)", index, - length); - return nullptr; - } - return input_tensors_[index]; -} - -TfLiteTensor* MicroInterpreter::output(size_t index) { - const size_t length = outputs_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Output index %d out of range (length is %d)", index, - length); - return nullptr; - } - return output_tensors_[index]; -}
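Taken together, the typical application-level sequence over this interpreter is the following sketch; `model_data`, `resolver` and `tensor_arena` are assumed to exist and the tensor types are placeholders:

```cpp
// Illustrative end-to-end use; not part of the deleted file.
const tflite::Model* model = tflite::GetModel(model_data);
tflite::MicroErrorReporter error_reporter;
tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                     sizeof(tensor_arena), &error_reporter);
if (interpreter.AllocateTensors() != kTfLiteOk) {
  // Arena too small or an op is missing from the resolver.
}
float* input = interpreter.typed_input_tensor<float>(0);
// ... fill `input` ...
if (interpreter.Invoke() == kTfLiteOk) {
  float* output = interpreter.typed_output_tensor<float>(0);
  (void)output;
}
```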
-// Repurposing free subgraphs to reset state for some ops for now, until a -// proper reset API is made. See b/220940833#comment25 for more context. -TfLiteStatus MicroInterpreter::Reset() { - TfLiteStatus status = graph_.FreeSubgraphs(); - if (status != kTfLiteOk) { - return status; - } - return graph_.ResetVariableTensors(); -} - -// TODO: remove this API completely in favor of MicroInterpreter::Reset -TfLiteStatus MicroInterpreter::ResetVariableTensors() { - return graph_.ResetVariableTensors(); -} - -TfLiteStatus MicroInterpreter::SetMicroExternalContext( - void* external_context_payload) { - return micro_context_.set_external_context(external_context_payload); -} - -} // namespace tflite diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_interpreter.h b/code/components/tflite-lib/tensorflow/lite/micro/micro_interpreter.h deleted file mode 100644 index 285a890f..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/micro_interpreter.h +++ /dev/null @@ -1,177 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_ - -#include <cstddef> -#include <cstdint> - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/c_api_types.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/micro_context.h" -#include "tensorflow/lite/micro/micro_graph.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/micro_profiler.h" -#include "tensorflow/lite/portable_type_to_tflitetype.h" -#include "tensorflow/lite/schema/schema_generated.h" - -// Copied from tensorflow/lite/version.h to avoid a dependency chain into -// tensorflow/core. -#define TFLITE_SCHEMA_VERSION (3) - -namespace tflite { - -class MicroInterpreter { - public: - // The lifetime of the model, op resolver, tensor arena, error reporter, - // resource variables, and profiler must be at least as long as that of the - // interpreter object, since the interpreter may need to access them at any - // time. This means that you should usually create them with the same scope as - // each other, for example having them all allocated on the stack as local - // variables through a top-level function. The interpreter doesn't do any - // deallocation of any of the pointed-to objects, ownership remains with the - // caller. - MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, - uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter, - MicroResourceVariables* resource_variables = nullptr, - MicroProfiler* profiler = nullptr);
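The allocator-sharing constructor documented next enables multi-tenant setups along these lines (models, resolver and arena are placeholders):

```cpp
// Sketch: two models sharing one arena through an explicit allocator.
tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
    tensor_arena, sizeof(tensor_arena), &error_reporter);
tflite::MicroInterpreter interp_a(model_a, resolver, allocator,
                                  &error_reporter);
tflite::MicroInterpreter interp_b(model_b, resolver, allocator,
                                  &error_reporter);
```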
- // Create an interpreter instance using an existing MicroAllocator instance. - // This constructor should be used when creating an allocator that needs to - // have allocation handled in more than one interpreter or for recording - // allocations inside the interpreter. The lifetime of the allocator must be - // as long as that of the interpreter object. - MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, - MicroAllocator* allocator, ErrorReporter* error_reporter, - MicroResourceVariables* resource_variables = nullptr, - MicroProfiler* profiler = nullptr); - - ~MicroInterpreter(); - - // Runs through the model and allocates all necessary input, output and - // intermediate tensors. - TfLiteStatus AllocateTensors(); - - // In order to support partial graph runs for strided models, this can return - // values other than kTfLiteOk and kTfLiteError. - // TODO(b/149795762): Add this to the TfLiteStatus enum. - TfLiteStatus Invoke(); - - // This is the recommended API for an application to pass an external payload - // pointer as an external context to kernels. The lifetime of the payload - // pointer should be at least as long as this interpreter. TFLM supports only - // one external context. - TfLiteStatus SetMicroExternalContext(void* external_context_payload); - - TfLiteTensor* input(size_t index); - size_t inputs_size() const { - return model_->subgraphs()->Get(0)->inputs()->size(); - } - const flatbuffers::Vector<int32_t>& inputs() const { - return *model_->subgraphs()->Get(0)->inputs(); - } - TfLiteTensor* input_tensor(size_t index) { return input(index); } - template <typename T> - T* typed_input_tensor(int tensor_index) { - if (TfLiteTensor* tensor_ptr = input_tensor(tensor_index)) { - if (tensor_ptr->type == typeToTfLiteType<T>()) { - return GetTensorData<T>(tensor_ptr); - } - } - return nullptr; - } - - TfLiteTensor* output(size_t index); - size_t outputs_size() const { - return model_->subgraphs()->Get(0)->outputs()->size(); - } - const flatbuffers::Vector<int32_t>& outputs() const { - return *model_->subgraphs()->Get(0)->outputs(); - } - TfLiteTensor* output_tensor(size_t index) { return output(index); } - template <typename T> - T* typed_output_tensor(int tensor_index) { - if (TfLiteTensor* tensor_ptr = output_tensor(tensor_index)) { - if (tensor_ptr->type == typeToTfLiteType<T>()) { - return GetTensorData<T>(tensor_ptr); - } - } - return nullptr; - } - - // Reset the state to be what you would expect when the interpreter is first - // created, i.e. after Init and Prepare are called for the very first time. - TfLiteStatus Reset(); - - // TODO(b/244457206): remove this in favor of Reset() - // Reset all variable tensors to the default value. - TfLiteStatus ResetVariableTensors(); - - TfLiteStatus initialization_status() const { return initialization_status_; } - - // Populates node and registration pointers representing the inference graph - // of the model from values inside the flatbuffer (loaded from the TfLiteModel - // instance). Persistent data (e.g. operator data) is allocated from the - // arena. - TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer(); - - // For debugging only. - // Returns the actual used arena in bytes. This method gives the optimal arena - // size. It's only available after `AllocateTensors` has been called. - // Note that normally `tensor_arena` requires 16 bytes alignment to fully - // utilize the space. If that's not the case, the optimal arena size would be - // arena_used_bytes() + 16.
- size_t arena_used_bytes() const { return allocator_.used_bytes(); } - - protected: - const MicroAllocator& allocator() const { return allocator_; } - const TfLiteContext& context() const { return context_; } - - private: - // TODO(b/158263161): Consider switching to Create() function to enable better - // error reporting during initialization. - void Init(MicroProfiler* profiler); - - // Gets the current subgraph index used from within context methods. - int get_subgraph_index() { return graph_.GetCurrentSubgraphIndex(); } - - const Model* model_; - const MicroOpResolver& op_resolver_; - ErrorReporter* error_reporter_; - TfLiteContext context_ = {}; - MicroAllocator& allocator_; - MicroGraph graph_; - bool tensors_allocated_; - - TfLiteStatus initialization_status_; - - ScratchBufferHandle* scratch_buffer_handles_ = nullptr; - - // TODO(b/162311891): Clean these pointers up when this class supports buffers - // from TfLiteEvalTensor. - TfLiteTensor** input_tensors_; - TfLiteTensor** output_tensors_; - - MicroContext micro_context_; -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_ diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_mutable_op_resolver.h b/code/components/tflite-lib/tensorflow/lite/micro/micro_mutable_op_resolver.h deleted file mode 100644 index 738cd4fb..00000000 --- a/code/components/tflite-lib/tensorflow/lite/micro/micro_mutable_op_resolver.h +++ /dev/null @@ -1,645 +0,0 @@ -/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ - -#include <cstdio> -#include <cstring> - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/kernels/add.h" -#include "tensorflow/lite/micro/kernels/conv.h" -#include "tensorflow/lite/micro/kernels/depthwise_conv.h" -#include "tensorflow/lite/micro/kernels/ethosu.h" -#include "tensorflow/lite/micro/kernels/fully_connected.h" -#include "tensorflow/lite/micro/kernels/micro_ops.h" -#include "tensorflow/lite/micro/kernels/pooling.h" -#include "tensorflow/lite/micro/kernels/reduce.h" -#include "tensorflow/lite/micro/kernels/softmax.h" -#include "tensorflow/lite/micro/micro_error_reporter.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { -TfLiteRegistration* Register_DETECTION_POSTPROCESS(); - -template <unsigned int tOpCount> -class MicroMutableOpResolver : public MicroOpResolver { - public: - TF_LITE_REMOVE_VIRTUAL_DELETE - - explicit MicroMutableOpResolver(ErrorReporter* error_reporter = nullptr) - : error_reporter_(error_reporter) {} - - const TfLiteRegistration* FindOp(tflite::BuiltinOperator op) const override { - if (op == BuiltinOperator_CUSTOM) return nullptr; - - for (unsigned int i = 0; i < registrations_len_; ++i) { - const TfLiteRegistration& registration = registrations_[i]; - if (registration.builtin_code == op) { - return &registration; - } - } - return nullptr; - } - - const TfLiteRegistration* FindOp(const char* op) const override { - for (unsigned int i = 0; i < registrations_len_; ++i) { - const TfLiteRegistration& registration = registrations_[i]; - if ((registration.builtin_code == BuiltinOperator_CUSTOM) && - (strcmp(registration.custom_name, op) == 0)) { - return &registration; - } - } - return nullptr; - } - - MicroOpResolver::BuiltinParseFunction GetOpDataParser( - BuiltinOperator op) const override { - TFLITE_DCHECK(num_buitin_ops_ <= tOpCount); - for (unsigned int i = 0; i < num_buitin_ops_; ++i) { - if (builtin_codes_[i] == op) return builtin_parsers_[i]; - } - return nullptr; - }
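Registering a custom operator through the AddCustom() method documented just below looks like this in application code; Register_MY_OP() is a hypothetical registration function:

```cpp
// Hedged sketch of custom-op registration.
tflite::MicroMutableOpResolver<1> resolver;
resolver.AddCustom("MY_OP", Register_MY_OP());
```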
- // Registers a Custom Operator with the MicroOpResolver. - // - // Only the first call for a given name will be successful, i.e. if this - // function is called again for a previously added Custom Operator, the - // MicroOpResolver will be unchanged and this function will return - // kTfLiteError. - TfLiteStatus AddCustom(const char* name, TfLiteRegistration* registration) { - if (registrations_len_ >= tOpCount) { - MicroPrintf( - "Couldn't register custom op '%s', resolver size is too " - "small (%d)", - name, tOpCount); - return kTfLiteError; - } - - if (FindOp(name) != nullptr) { - MicroPrintf("Calling AddCustom for the same op more than once "); - MicroPrintf("is not supported (Op: %s).", name); - return kTfLiteError; - } - - TfLiteRegistration* new_registration = &registrations_[registrations_len_]; - registrations_len_ += 1; - - *new_registration = *registration; - new_registration->builtin_code = BuiltinOperator_CUSTOM; - new_registration->custom_name = name; - return kTfLiteOk; - } - - // The Add* functions below add the various Builtin operators to the - // MicroMutableOpResolver object. - - TfLiteStatus AddAbs() { - return AddBuiltin(BuiltinOperator_ABS, tflite::ops::micro::Register_ABS(), - ParseAbs); - } - - TfLiteStatus AddAdd(const TfLiteRegistration& registration = Register_ADD()) { - return AddBuiltin(BuiltinOperator_ADD, registration, ParseAdd); - } - - TfLiteStatus AddAddN() { - return AddBuiltin(BuiltinOperator_ADD_N, tflite::Register_ADD_N(), - ParseAddN); - } - - TfLiteStatus AddArgMax() { - return AddBuiltin(BuiltinOperator_ARG_MAX, - tflite::ops::micro::Register_ARG_MAX(), ParseArgMax); - } - - TfLiteStatus AddArgMin() { - return AddBuiltin(BuiltinOperator_ARG_MIN, - tflite::ops::micro::Register_ARG_MIN(), ParseArgMin); - } - - TfLiteStatus AddAssignVariable() { - return AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, - tflite::Register_ASSIGN_VARIABLE(), ParseAssignVariable); - } - - TfLiteStatus AddAveragePool2D( - const TfLiteRegistration& registration = Register_AVERAGE_POOL_2D()) { - return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, registration, ParsePool); - } - - TfLiteStatus AddBatchToSpaceNd() { - return AddBuiltin(BuiltinOperator_BATCH_TO_SPACE_ND, - Register_BATCH_TO_SPACE_ND(), ParseBatchToSpaceNd); - } - - TfLiteStatus AddBroadcastArgs() { - return AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS(), - ParseBroadcastArgs); - } - - TfLiteStatus AddBroadcastTo() { - return AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(), - ParseBroadcastTo); - } - - TfLiteStatus AddCallOnce() { - return AddBuiltin(BuiltinOperator_CALL_ONCE, Register_CALL_ONCE(), - ParseCallOnce); - } - - TfLiteStatus AddCast() { - return AddBuiltin(BuiltinOperator_CAST, Register_CAST(), ParseCast); - } - - TfLiteStatus AddCeil() { - return AddBuiltin(BuiltinOperator_CEIL, tflite::ops::micro::Register_CEIL(), - ParseCeil); - } - - TfLiteStatus AddCircularBuffer() { - return AddCustom("CIRCULAR_BUFFER", tflite::Register_CIRCULAR_BUFFER()); - } - - TfLiteStatus AddConcatenation() { - return AddBuiltin(BuiltinOperator_CONCATENATION, - tflite::ops::micro::Register_CONCATENATION(), - ParseConcatenation); - } - - TfLiteStatus AddConv2D( - const TfLiteRegistration& registration = Register_CONV_2D()) { - return AddBuiltin(BuiltinOperator_CONV_2D, registration, ParseConv2D); - } - - TfLiteStatus AddCos() { - return AddBuiltin(BuiltinOperator_COS, tflite::ops::micro::Register_COS(), - ParseCos); - } - - TfLiteStatus AddCumSum() { - return AddBuiltin(BuiltinOperator_CUMSUM, tflite::Register_CUMSUM(), - ParseCumsum); - } - - TfLiteStatus AddDepthToSpace() { - return AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, - tflite::Register_DEPTH_TO_SPACE(), ParseDepthToSpace); - } - - TfLiteStatus AddDepthwiseConv2D( - const TfLiteRegistration& registration = 
Register_DEPTHWISE_CONV_2D()) { - return AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, registration, - ParseDepthwiseConv2D); - } - - TfLiteStatus AddDequantize() { - return AddBuiltin(BuiltinOperator_DEQUANTIZE, tflite::Register_DEQUANTIZE(), - ParseDequantize); - } - - TfLiteStatus AddDetectionPostprocess() { - return AddCustom("TFLite_Detection_PostProcess", - tflite::Register_DETECTION_POSTPROCESS()); - } - - TfLiteStatus AddDiv() { - return AddBuiltin(BuiltinOperator_DIV, tflite::Register_DIV(), ParseDiv); - } - - TfLiteStatus AddElu() { - return AddBuiltin(BuiltinOperator_ELU, tflite::Register_ELU(), ParseElu); - } - - TfLiteStatus AddEqual() { - return AddBuiltin(BuiltinOperator_EQUAL, - tflite::ops::micro::Register_EQUAL(), ParseEqual); - } - - TfLiteStatus AddEthosU() { - TfLiteRegistration* registration = tflite::Register_ETHOSU(); - if (registration) { - return AddCustom(tflite::GetString_ETHOSU(), registration); - } - return kTfLiteOk; - } - - TfLiteStatus AddExp() { - return AddBuiltin(BuiltinOperator_EXP, Register_EXP(), ParseExp); - } - - TfLiteStatus AddExpandDims() { - return AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS(), - ParseExpandDims); - } - - TfLiteStatus AddFill() { - return AddBuiltin(BuiltinOperator_FILL, tflite::Register_FILL(), ParseFill); - } - - TfLiteStatus AddFloor() { - return AddBuiltin(BuiltinOperator_FLOOR, - tflite::ops::micro::Register_FLOOR(), ParseFloor); - } - - TfLiteStatus AddFloorDiv() { - return AddBuiltin(BuiltinOperator_FLOOR_DIV, tflite::Register_FLOOR_DIV(), - ParseFloorDiv); - } - - TfLiteStatus AddFloorMod() { - return AddBuiltin(BuiltinOperator_FLOOR_MOD, tflite::Register_FLOOR_MOD(), - ParseFloorMod); - } - - TfLiteStatus AddFullyConnected( - const TfLiteRegistration& registration = Register_FULLY_CONNECTED()) { - return AddBuiltin(BuiltinOperator_FULLY_CONNECTED, registration, - ParseFullyConnected); - } - - TfLiteStatus AddGather() { - return AddBuiltin(BuiltinOperator_GATHER, tflite::Register_GATHER(), - ParseGather); - } - - TfLiteStatus AddGatherNd() { - return AddBuiltin(BuiltinOperator_GATHER_ND, tflite::Register_GATHER_ND(), - ParseGatherNd); - } - - TfLiteStatus AddGreater() { - return AddBuiltin(BuiltinOperator_GREATER, - tflite::ops::micro::Register_GREATER(), ParseGreater); - } - - TfLiteStatus AddGreaterEqual() { - return AddBuiltin(BuiltinOperator_GREATER_EQUAL, - tflite::ops::micro::Register_GREATER_EQUAL(), - ParseGreaterEqual); - } - - TfLiteStatus AddHardSwish() { - return AddBuiltin(BuiltinOperator_HARD_SWISH, tflite::Register_HARD_SWISH(), - ParseHardSwish); - } - - TfLiteStatus AddIf() { - return AddBuiltin(BuiltinOperator_IF, tflite::Register_IF(), ParseIf); - } - - TfLiteStatus AddL2Normalization() { - return AddBuiltin(BuiltinOperator_L2_NORMALIZATION, - tflite::ops::micro::Register_L2_NORMALIZATION(), - ParseL2Normalization); - } - - TfLiteStatus AddL2Pool2D() { - return AddBuiltin(BuiltinOperator_L2_POOL_2D, tflite::Register_L2_POOL_2D(), - ParsePool); - } - - TfLiteStatus AddLeakyRelu() { - return AddBuiltin(BuiltinOperator_LEAKY_RELU, tflite::Register_LEAKY_RELU(), - ParseLeakyRelu); - } - - TfLiteStatus AddLess() { - return AddBuiltin(BuiltinOperator_LESS, tflite::ops::micro::Register_LESS(), - ParseLess); - } - - TfLiteStatus AddLessEqual() { - return AddBuiltin(BuiltinOperator_LESS_EQUAL, - tflite::ops::micro::Register_LESS_EQUAL(), - ParseLessEqual); - } - - TfLiteStatus AddLog() { - return AddBuiltin(BuiltinOperator_LOG, tflite::ops::micro::Register_LOG(), - ParseLog); - } - - 
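In application code, tOpCount is sized to exactly the operators a model needs, keeping the registration table minimal; for a hypothetical four-op model:

```cpp
// Sketch: resolver sized for a model using four builtin ops.
tflite::MicroMutableOpResolver<4> resolver;
resolver.AddConv2D();
resolver.AddFullyConnected();
resolver.AddSoftmax();
resolver.AddReshape();
```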
TfLiteStatus AddLogicalAnd() { - return AddBuiltin(BuiltinOperator_LOGICAL_AND, - tflite::Register_LOGICAL_AND(), ParseLogicalAnd); - } - - TfLiteStatus AddLogicalNot() { - return AddBuiltin(BuiltinOperator_LOGICAL_NOT, - tflite::ops::micro::Register_LOGICAL_NOT(), - ParseLogicalNot); - } - - TfLiteStatus AddLogicalOr() { - return AddBuiltin(BuiltinOperator_LOGICAL_OR, tflite::Register_LOGICAL_OR(), - ParseLogicalOr); - } - - TfLiteStatus AddLogistic() { - return AddBuiltin(BuiltinOperator_LOGISTIC, tflite::Register_LOGISTIC(), - ParseLogistic); - } - - TfLiteStatus AddMaximum() { - return AddBuiltin(BuiltinOperator_MAXIMUM, - tflite::ops::micro::Register_MAXIMUM(), ParseMaximum); - } - - TfLiteStatus AddMaxPool2D( - const TfLiteRegistration& registration = Register_MAX_POOL_2D()) { - return AddBuiltin(BuiltinOperator_MAX_POOL_2D, registration, ParsePool); - } - - TfLiteStatus AddMirrorPad() { - return AddBuiltin(BuiltinOperator_MIRROR_PAD, tflite::Register_MIRROR_PAD(), - ParseMirrorPad); - } - - TfLiteStatus AddMean() { - return AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(), ParseReducer); - } - - TfLiteStatus AddMinimum() { - return AddBuiltin(BuiltinOperator_MINIMUM, - tflite::ops::micro::Register_MINIMUM(), ParseMinimum); - } - - TfLiteStatus AddMul(const TfLiteRegistration& registration = Register_MUL()) { - return AddBuiltin(BuiltinOperator_MUL, registration, ParseMul); - } - - TfLiteStatus AddNeg() { - return AddBuiltin(BuiltinOperator_NEG, tflite::ops::micro::Register_NEG(), - ParseNeg); - } - - TfLiteStatus AddNotEqual() { - return AddBuiltin(BuiltinOperator_NOT_EQUAL, - tflite::ops::micro::Register_NOT_EQUAL(), ParseNotEqual); - } - - TfLiteStatus AddPack() { - return AddBuiltin(BuiltinOperator_PACK, tflite::ops::micro::Register_PACK(), - ParsePack); - } - - TfLiteStatus AddPad() { - return AddBuiltin(BuiltinOperator_PAD, tflite::ops::micro::Register_PAD(), - ParsePad); - } - - TfLiteStatus AddPadV2() { - return AddBuiltin(BuiltinOperator_PADV2, - tflite::ops::micro::Register_PADV2(), ParsePadV2); - } - - TfLiteStatus AddPrelu() { - return AddBuiltin(BuiltinOperator_PRELU, tflite::Register_PRELU(), - ParsePrelu); - } - - TfLiteStatus AddQuantize() { - return AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(), - ParseQuantize); - } - - TfLiteStatus AddReadVariable() { - return AddBuiltin(BuiltinOperator_READ_VARIABLE, - tflite::Register_READ_VARIABLE(), ParseReadVariable); - } - - TfLiteStatus AddReduceMax() { - return AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(), - ParseReducer); - } - - TfLiteStatus AddRelu() { - return AddBuiltin(BuiltinOperator_RELU, tflite::Register_RELU(), ParseRelu); - } - - TfLiteStatus AddRelu6() { - return AddBuiltin(BuiltinOperator_RELU6, tflite::Register_RELU6(), - ParseRelu6); - } - - TfLiteStatus AddReshape() { - return AddBuiltin(BuiltinOperator_RESHAPE, - tflite::ops::micro::Register_RESHAPE(), ParseReshape); - } - - TfLiteStatus AddResizeBilinear() { - return AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, - Register_RESIZE_BILINEAR(), ParseResizeBilinear); - } - - TfLiteStatus AddResizeNearestNeighbor() { - return AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, - tflite::ops::micro::Register_RESIZE_NEAREST_NEIGHBOR(), - ParseResizeNearestNeighbor); - } - - TfLiteStatus AddRound() { - return AddBuiltin(BuiltinOperator_ROUND, - tflite::ops::micro::Register_ROUND(), ParseRound); - } - - TfLiteStatus AddRsqrt() { - return AddBuiltin(BuiltinOperator_RSQRT, - tflite::ops::micro::Register_RSQRT(), ParseRsqrt); - } - - 
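// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the deleted file] Typical use of the Add*
// builtin helpers in this class: the template argument fixes tOpCount, so it
// must cover every distinct op in the model. The four ops below are
// illustrative assumptions; TF_LITE_ENSURE_STATUS comes from
// tensorflow/lite/c/common.h.
tflite::MicroMutableOpResolver<4> op_resolver;

TfLiteStatus RegisterOps() {
  TF_LITE_ENSURE_STATUS(op_resolver.AddConv2D());
  TF_LITE_ENSURE_STATUS(op_resolver.AddFullyConnected());
  TF_LITE_ENSURE_STATUS(op_resolver.AddSoftmax());
  // A fifth Add* call would return kTfLiteError: the capacity is 4.
  TF_LITE_ENSURE_STATUS(op_resolver.AddReshape());
  return kTfLiteOk;
}
// ---------------------------------------------------------------------------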
TfLiteStatus AddSelectV2() { - return AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(), - ParseSelectV2); - } - - TfLiteStatus AddShape() { - return AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE(), ParseShape); - } - - TfLiteStatus AddSin() { - return AddBuiltin(BuiltinOperator_SIN, tflite::ops::micro::Register_SIN(), - ParseSin); - } - - TfLiteStatus AddSlice() { - return AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(), ParseSlice); - } - - TfLiteStatus AddSoftmax( - const TfLiteRegistration& registration = Register_SOFTMAX()) { - return AddBuiltin(BuiltinOperator_SOFTMAX, registration, ParseSoftmax); - } - - TfLiteStatus AddSpaceToBatchNd() { - return AddBuiltin(BuiltinOperator_SPACE_TO_BATCH_ND, - Register_SPACE_TO_BATCH_ND(), ParseSpaceToBatchNd); - } - - TfLiteStatus AddSpaceToDepth() { - return AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(), - ParseSpaceToDepth); - } - - TfLiteStatus AddSplit() { - return AddBuiltin(BuiltinOperator_SPLIT, - tflite::ops::micro::Register_SPLIT(), ParseSplit); - } - - TfLiteStatus AddSplitV() { - return AddBuiltin(BuiltinOperator_SPLIT_V, - tflite::ops::micro::Register_SPLIT_V(), ParseSplitV); - } - - TfLiteStatus AddSqueeze() { - return AddBuiltin(BuiltinOperator_SQUEEZE, Register_SQUEEZE(), - ParseSqueeze); - } - - TfLiteStatus AddSqrt() { - return AddBuiltin(BuiltinOperator_SQRT, tflite::ops::micro::Register_SQRT(), - ParseSqrt); - } - - TfLiteStatus AddSquare() { - return AddBuiltin(BuiltinOperator_SQUARE, - tflite::ops::micro::Register_SQUARE(), ParseSquare); - } - - TfLiteStatus AddSquaredDifference() { - return AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, - tflite::Register_SQUARED_DIFFERENCE(), - ParseSquaredDifference); - } - - TfLiteStatus AddStridedSlice() { - return AddBuiltin(BuiltinOperator_STRIDED_SLICE, - tflite::ops::micro::Register_STRIDED_SLICE(), - ParseStridedSlice); - } - - TfLiteStatus AddSub() { - return AddBuiltin(BuiltinOperator_SUB, tflite::Register_SUB(), ParseSub); - } - - TfLiteStatus AddSum() { - return AddBuiltin(BuiltinOperator_SUM, Register_SUM(), ParseReducer); - } - - TfLiteStatus AddSvdf( - const TfLiteRegistration& registration = Register_SVDF()) { - return AddBuiltin(BuiltinOperator_SVDF, registration, ParseSvdf); - } - - TfLiteStatus AddTanh() { - return AddBuiltin(BuiltinOperator_TANH, tflite::ops::micro::Register_TANH(), - ParseTanh); - } - - TfLiteStatus AddTransposeConv() { - return AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, - tflite::Register_TRANSPOSE_CONV(), ParseTransposeConv); - } - - TfLiteStatus AddTranspose() { - return AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(), - ParseTranspose); - } - - TfLiteStatus AddUnpack() { - return AddBuiltin(BuiltinOperator_UNPACK, - tflite::ops::micro::Register_UNPACK(), ParseUnpack); - } - - TfLiteStatus AddUnidirectionalSequenceLSTM() { - return AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, - Register_UNIDIRECTIONAL_SEQUENCE_LSTM(), - ParseUnidirectionalSequenceLSTM); - } - - TfLiteStatus AddVarHandle() { - return AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE(), - ParseVarHandle); - } - - TfLiteStatus AddWhile() { - return AddBuiltin(BuiltinOperator_WHILE, Register_WHILE(), ParseWhile); - } - - TfLiteStatus AddZerosLike() { - return AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE(), - ParseZerosLike); - } - - unsigned int GetRegistrationLength() { return registrations_len_; } - - private: - TfLiteStatus AddBuiltin(tflite::BuiltinOperator op, - const TfLiteRegistration& registration, 
-                          MicroOpResolver::BuiltinParseFunction parser) {
-    if (op == BuiltinOperator_CUSTOM) {
-      MicroPrintf("Invalid parameter BuiltinOperator_CUSTOM to the ");
-      MicroPrintf("AddBuiltin function.");
-      return kTfLiteError;
-    }
-
-    if (FindOp(op) != nullptr) {
-      MicroPrintf("Calling AddBuiltin with the same op more than ");
-      MicroPrintf("once is not supported (Op: #%d).", op);
-      return kTfLiteError;
-    }
-
-    if (registrations_len_ >= tOpCount) {
-      MicroPrintf("Couldn't register builtin op #%d, resolver size ", op);
-      MicroPrintf("is too small (%d).", tOpCount);
-      return kTfLiteError;
-    }
-
-    registrations_[registrations_len_] = registration;
-    // Strictly speaking, the builtin_code is not necessary for TFLM, but we
-    // fill it in regardless.
-    registrations_[registrations_len_].builtin_code = op;
-    registrations_len_++;
-
-    builtin_codes_[num_builtin_ops_] = op;
-    builtin_parsers_[num_builtin_ops_] = parser;
-    num_builtin_ops_++;
-
-    return kTfLiteOk;
-  }
-
-  TfLiteRegistration registrations_[tOpCount];
-  unsigned int registrations_len_ = 0;
-
-  // Arrays (and counter) to store the builtin codes and their corresponding
-  // parse functions as these are registered with the Op Resolver.
-  BuiltinOperator builtin_codes_[tOpCount];
-  MicroOpResolver::BuiltinParseFunction builtin_parsers_[tOpCount];
-  unsigned int num_builtin_ops_ = 0;
-
-  ErrorReporter* error_reporter_;
-};
-
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_profiler.cc b/code/components/tflite-lib/tensorflow/lite/micro/micro_profiler.cc
deleted file mode 100644
index 86b1ebbb..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/micro_profiler.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/micro/micro_profiler.h"
-
-#include <cinttypes>
-#include <cstdint>
-#include <cstring>
-
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-#include "tensorflow/lite/micro/micro_time.h"
-
-namespace tflite {
-
-uint32_t MicroProfiler::BeginEvent(const char* tag) {
-  if (num_events_ == kMaxEvents) {
-    num_events_ = 0;
-  }
-
-  tags_[num_events_] = tag;
-  start_ticks_[num_events_] = GetCurrentTimeTicks();
-  end_ticks_[num_events_] = start_ticks_[num_events_] - 1;
-  return num_events_++;
-}
-
-void MicroProfiler::EndEvent(uint32_t event_handle) {
-  TFLITE_DCHECK(event_handle < kMaxEvents);
-  end_ticks_[event_handle] = GetCurrentTimeTicks();
-}
-
-uint32_t MicroProfiler::GetTotalTicks() const {
-  int32_t ticks = 0;
-  for (int i = 0; i < num_events_; ++i) {
-    ticks += end_ticks_[i] - start_ticks_[i];
-  }
-  return ticks;
-}
-
-void MicroProfiler::Log() const {
-#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
-  for (int i = 0; i < num_events_; ++i) {
-    uint32_t ticks = end_ticks_[i] - start_ticks_[i];
-    MicroPrintf("%s took %" PRIu32 " ticks (%d ms).", tags_[i], ticks,
-                TicksToMs(ticks));
-  }
-#endif
-}
-
-void MicroProfiler::LogCsv() const {
-#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
-  MicroPrintf("\"Event\",\"Tag\",\"Ticks\"");
-  for (int i = 0; i < num_events_; ++i) {
-    uint32_t ticks = end_ticks_[i] - start_ticks_[i];
-    MicroPrintf("%d,%s,%" PRIu32, i, tags_[i], ticks);
-  }
-#endif
-}
-
-void MicroProfiler::LogTicksPerTagCsv() {
-#if !defined(TF_LITE_STRIP_ERROR_STRINGS)
-  MicroPrintf(
-      "\"Unique Tag\",\"Total ticks across all events with that tag.\"");
-  int total_ticks = 0;
-  for (int i = 0; i < num_events_; ++i) {
-    uint32_t ticks = end_ticks_[i] - start_ticks_[i];
-    TFLITE_DCHECK(tags_[i] != nullptr);
-    int position = FindExistingOrNextPosition(tags_[i]);
-    TFLITE_DCHECK(position >= 0);
-    total_ticks_per_tag[position].tag = tags_[i];
-    total_ticks_per_tag[position].ticks =
-        total_ticks_per_tag[position].ticks + ticks;
-    total_ticks += ticks;
-  }
-
-  for (int i = 0; i < num_events_; ++i) {
-    TicksPerTag each_tag_entry = total_ticks_per_tag[i];
-    if (each_tag_entry.tag == nullptr) {
-      break;
-    }
-    MicroPrintf("%s, %d", each_tag_entry.tag, each_tag_entry.ticks);
-  }
-  MicroPrintf("total number of ticks, %d", total_ticks);
-#endif
-}
-
-// Finds the element of the total_ticks_per_tag array whose tag matches the
-// tag_name passed into the method. If a matching element exists, its
-// position is returned; otherwise the next available empty position in the
-// array is returned.
-int MicroProfiler::FindExistingOrNextPosition(const char* tag_name) {
-  int pos = 0;
-  for (; pos < num_events_; pos++) {
-    TicksPerTag each_tag_entry = total_ticks_per_tag[pos];
-    if (each_tag_entry.tag == nullptr ||
-        strcmp(each_tag_entry.tag, tag_name) == 0) {
-      return pos;
-    }
-  }
-  return pos < num_events_ ? pos : -1;
-}
-}  // namespace tflite
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/micro_profiler.h b/code/components/tflite-lib/tensorflow/lite/micro/micro_profiler.h
deleted file mode 100644
index c37978a0..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/micro_profiler.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
-#define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
-
-#include <cstdint>
-
-#include "tensorflow/lite/micro/compatibility.h"
-
-namespace tflite {
-
-// MicroProfiler creates a common way to gain fine-grained insight into
-// runtime performance. Bottleneck operators can be identified, along with
-// slow code sections. This can be used in conjunction with running the
-// relevant micro benchmark to evaluate end-to-end performance.
-class MicroProfiler {
- public:
-  MicroProfiler() = default;
-  virtual ~MicroProfiler() = default;
-
-  // Marks the start of a new event and returns an event handle that can be
-  // used to mark the end of the event via EndEvent. The lifetime of the tag
-  // parameter must exceed that of the MicroProfiler.
-  virtual uint32_t BeginEvent(const char* tag);
-
-  // Marks the end of an event associated with event_handle. It is the
-  // responsibility of the caller to ensure that EndEvent is called once and
-  // only once per event_handle.
-  //
-  // If EndEvent is called more than once for the same event_handle, the last
-  // call will be used as the end of event marker. If EndEvent is called 0
-  // times for a particular event_handle, the duration of that event will be
-  // 0 ticks.
-  virtual void EndEvent(uint32_t event_handle);
-
-  // Clears all the events that have been currently profiled.
-  void ClearEvents() { num_events_ = 0; }
-
-  // Returns the sum of the ticks taken across all the events. This number
-  // is only meaningful if all of the events are disjoint (the end time of
-  // event[i] <= start time of event[i+1]).
-  uint32_t GetTotalTicks() const;
-
-  // Prints the profiling information of each of the events in human readable
-  // form.
-  void Log() const;
-
-  // Prints the profiling information of each of the events in CSV (Comma
-  // Separated Value) form.
-  void LogCsv() const;
-
-  // Prints total ticks for each unique tag in CSV format.
-  // Output will have one row for each unique tag along with the
-  // total ticks summed across all events with that particular tag.
-  void LogTicksPerTagCsv();
-
- private:
-  // Maximum number of events that this class can keep track of. If we call
-  // AddEvent more than kMaxEvents number of times, then the oldest event's
-  // profiling information will be overwritten.
-  static constexpr int kMaxEvents = 1024;
-
-  const char* tags_[kMaxEvents];
-  uint32_t start_ticks_[kMaxEvents];
-  uint32_t end_ticks_[kMaxEvents];
-  int num_events_ = 0;
-
-  struct TicksPerTag {
-    const char* tag;
-    uint32_t ticks;
-  };
-  // In practice, the number of tags will be much lower than the number of
-  // events. But it is theoretically possible for each event to be unique,
-  // so we allow total_ticks_per_tag to have kMaxEvents entries.
-  TicksPerTag total_ticks_per_tag[kMaxEvents] = {};
-
-  int FindExistingOrNextPosition(const char* tag_name);
-
-  TF_LITE_REMOVE_VIRTUAL_DELETE;
-};
-
-#if defined(TF_LITE_STRIP_ERROR_STRINGS)
-// For release builds, the ScopedMicroProfiler is a noop.
-//
-// This is done because the ScopedMicroProfiler is used as part of the
-// MicroInterpreter and we want to ensure zero overhead for the release
-// builds.
-class ScopedMicroProfiler {
- public:
-  explicit ScopedMicroProfiler(const char* tag, MicroProfiler* profiler) {}
-};
-
-#else
-
-// This class can be used to add events to a MicroProfiler object that span
-// the lifetime of the ScopedMicroProfiler object.
-// Usage example:
-//
-// MicroProfiler profiler;
-// ...
-// {
-//   ScopedMicroProfiler scoped_profiler("custom_tag", &profiler);
-//   work_to_profile();
-// }
class ScopedMicroProfiler {
- public:
-  explicit ScopedMicroProfiler(const char* tag, MicroProfiler* profiler)
-      : profiler_(profiler) {
-    if (profiler_ != nullptr) {
-      event_handle_ = profiler_->BeginEvent(tag);
-    }
-  }
-
-  ~ScopedMicroProfiler() {
-    if (profiler_ != nullptr) {
-      profiler_->EndEvent(event_handle_);
-    }
-  }
-
- private:
-  uint32_t event_handle_ = 0;
-  MicroProfiler* profiler_ = nullptr;
-};
-#endif  // !defined(TF_LITE_STRIP_ERROR_STRINGS)
-
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_
diff --git a/code/components/tflite-lib/tensorflow/lite/micro/recording_micro_allocator.cc b/code/components/tflite-lib/tensorflow/lite/micro/recording_micro_allocator.cc
deleted file mode 100644
index cdfcc272..00000000
--- a/code/components/tflite-lib/tensorflow/lite/micro/recording_micro_allocator.cc
+++ /dev/null
@@ -1,270 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/lite/micro/recording_micro_allocator.h"
-
-#include "tensorflow/lite/core/api/error_reporter.h"
-#include "tensorflow/lite/kernels/internal/compatibility.h"
-#include "tensorflow/lite/micro/arena_allocator/recording_single_arena_buffer_allocator.h"
-#include "tensorflow/lite/micro/compatibility.h"
-#include "tensorflow/lite/micro/memory_helpers.h"
-#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h"
-#include "tensorflow/lite/micro/micro_allocator.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-
-namespace tflite {
-
-size_t RecordingMicroAllocator::GetDefaultTailUsage() {
-  // RecordingMicroAllocator inherits from MicroAllocator, so its tail usage
-  // matches MicroAllocator's, except that SingleArenaBufferAllocator and
-  // MicroAllocator are replaced by their recording counterparts.
-  // TODO(b/208703041): a template version of AlignSizeUp to make expression
-  // shorter.
- return MicroAllocator::GetDefaultTailUsage( - /*is_memory_planner_given=*/false) + - AlignSizeUp(sizeof(RecordingSingleArenaBufferAllocator), - alignof(RecordingSingleArenaBufferAllocator)) - - AlignSizeUp(sizeof(SingleArenaBufferAllocator), - alignof(SingleArenaBufferAllocator)) + - AlignSizeUp(sizeof(RecordingMicroAllocator), - alignof(RecordingMicroAllocator)) - - AlignSizeUp(sizeof(MicroAllocator), alignof(MicroAllocator)); -} - -RecordingMicroAllocator::RecordingMicroAllocator( - RecordingSingleArenaBufferAllocator* recording_memory_allocator, - MicroMemoryPlanner* memory_planner, ErrorReporter* error_reporter) - : MicroAllocator(recording_memory_allocator, memory_planner, - error_reporter), - recording_memory_allocator_(recording_memory_allocator) {} - -RecordingMicroAllocator* RecordingMicroAllocator::Create( - uint8_t* tensor_arena, size_t arena_size, ErrorReporter* error_reporter) { - TFLITE_DCHECK(error_reporter != nullptr); - - RecordingSingleArenaBufferAllocator* simple_memory_allocator = - RecordingSingleArenaBufferAllocator::Create(error_reporter, tensor_arena, - arena_size); - TFLITE_DCHECK(simple_memory_allocator != nullptr); - - uint8_t* memory_planner_buffer = - simple_memory_allocator->AllocatePersistentBuffer( - sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); - GreedyMemoryPlanner* memory_planner = - new (memory_planner_buffer) GreedyMemoryPlanner(); - - uint8_t* allocator_buffer = simple_memory_allocator->AllocatePersistentBuffer( - sizeof(RecordingMicroAllocator), alignof(RecordingMicroAllocator)); - RecordingMicroAllocator* allocator = - new (allocator_buffer) RecordingMicroAllocator( - simple_memory_allocator, memory_planner, error_reporter); - return allocator; -} - -RecordedAllocation RecordingMicroAllocator::GetRecordedAllocation( - RecordedAllocationType allocation_type) const { - switch (allocation_type) { - case RecordedAllocationType::kTfLiteEvalTensorData: - return recorded_tflite_eval_tensor_data_; - case RecordedAllocationType::kPersistentTfLiteTensorData: - return recorded_persistent_tflite_tensor_data_; - case RecordedAllocationType::kPersistentTfLiteTensorQuantizationData: - return recorded_persistent_tflite_tensor_quantization_data_; - case RecordedAllocationType::kPersistentBufferData: - return recorded_persistent_buffer_data_; - case RecordedAllocationType::kTfLiteTensorVariableBufferData: - return recorded_tflite_tensor_variable_buffer_data_; - case RecordedAllocationType::kNodeAndRegistrationArray: - return recorded_node_and_registration_array_data_; - case RecordedAllocationType::kOpData: - return recorded_op_data_; - } - TF_LITE_REPORT_ERROR(error_reporter(), "Invalid allocation type supplied: %d", - allocation_type); - return RecordedAllocation(); -} - -const RecordingSingleArenaBufferAllocator* -RecordingMicroAllocator::GetSimpleMemoryAllocator() const { - return recording_memory_allocator_; -} - -void RecordingMicroAllocator::PrintAllocations() const { - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation total %d bytes", - recording_memory_allocator_->GetUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation head %d bytes", - recording_memory_allocator_->GetNonPersistentUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation tail %d bytes", - recording_memory_allocator_->GetPersistentUsedBytes()); - PrintRecordedAllocation(RecordedAllocationType::kTfLiteEvalTensorData, - "TfLiteEvalTensor data", 
"allocations"); - PrintRecordedAllocation(RecordedAllocationType::kPersistentTfLiteTensorData, - "Persistent TfLiteTensor data", "tensors"); - PrintRecordedAllocation( - RecordedAllocationType::kPersistentTfLiteTensorQuantizationData, - "Persistent TfLiteTensor quantization data", "allocations"); - PrintRecordedAllocation(RecordedAllocationType::kPersistentBufferData, - "Persistent buffer data", "allocations"); - PrintRecordedAllocation( - RecordedAllocationType::kTfLiteTensorVariableBufferData, - "TfLiteTensor variable buffer data", "allocations"); - PrintRecordedAllocation(RecordedAllocationType::kNodeAndRegistrationArray, - "NodeAndRegistration struct", - "NodeAndRegistration structs"); - PrintRecordedAllocation(RecordedAllocationType::kOpData, - "Operator runtime data", "OpData structs"); -} - -void* RecordingMicroAllocator::AllocatePersistentBuffer(size_t bytes) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - void* buffer = MicroAllocator::AllocatePersistentBuffer(bytes); - RecordAllocationUsage(allocations, recorded_persistent_buffer_data_); - - return buffer; -} - -void RecordingMicroAllocator::PrintRecordedAllocation( - RecordedAllocationType allocation_type, const char* allocation_name, - const char* allocation_description) const { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - RecordedAllocation allocation = GetRecordedAllocation(allocation_type); - if (allocation.used_bytes > 0 || allocation.requested_bytes > 0) { - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] '%s' used %d bytes with alignment overhead " - "(requested %d bytes for %d %s)", - allocation_name, allocation.used_bytes, allocation.requested_bytes, - allocation.count, allocation_description); - } -#endif -} - -TfLiteStatus RecordingMicroAllocator::AllocateNodeAndRegistrations( - const Model* model, SubgraphAllocations* subgraph_allocations) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::AllocateNodeAndRegistrations(model, subgraph_allocations); - - RecordAllocationUsage(allocations, - recorded_node_and_registration_array_data_); - - for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); - subgraph_idx++) { - // The allocation count in SingleArenaBufferAllocator will only be 1. To - // provide better logging, decrement by 1 and add in the actual number of - // operators used in the graph: The allocation for this recording will - // always be 1. This is because the parent class mallocs one large - // allocation for the number of nodes in the graph (e.g. - // sizeof(NodeAndRegistration) * num_nodes). 
To prevent extra overhead and - // potential for fragmentation, manually adjust the accounting by - // decrementing by 1 and adding the actual number of nodes used in the - // graph: - if (model->subgraphs()->Get(subgraph_idx)->operators()) { - recorded_node_and_registration_array_data_.count += - model->subgraphs()->Get(subgraph_idx)->operators()->size() - 1; - } else { - recorded_node_and_registration_array_data_.count -= 1; - } - } - return status; -} - -TfLiteStatus RecordingMicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, SubgraphAllocations* subgraph_allocations) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::AllocateTfLiteEvalTensors(model, subgraph_allocations); - - RecordAllocationUsage(allocations, recorded_tflite_eval_tensor_data_); - - for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); - subgraph_idx++) { - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of tensors in - // the graph (e.g. sizeof(TfLiteEvalTensor) * num_tensors). To prevent extra - // overhead and potential for fragmentation, manually adjust the accounting - // by decrementing by 1 and adding the actual number of tensors used in the - // graph: - recorded_tflite_eval_tensor_data_.count += - model->subgraphs()->Get(subgraph_idx)->tensors()->size() - 1; - } - return status; -} - -TfLiteStatus RecordingMicroAllocator::AllocateVariables( - const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::AllocateVariables(subgraph, eval_tensors); - - RecordAllocationUsage(allocations, - recorded_tflite_tensor_variable_buffer_data_); - return status; -} - -TfLiteTensor* -RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal() { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteTensor* result = - MicroAllocator::AllocatePersistentTfLiteTensorInternal(); - - RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_data_); - return result; -} - -TfLiteStatus RecordingMicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, TfLiteTensor* tensor, int tensor_index, - int subgraph_index, bool allocate_temp) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - model, tensor, tensor_index, subgraph_index, allocate_temp); - - RecordAllocationUsage(allocations, - recorded_persistent_tflite_tensor_quantization_data_); - return status; -} - -RecordedAllocation RecordingMicroAllocator::SnapshotAllocationUsage() const { - return {/*requested_bytes=*/recording_memory_allocator_->GetRequestedBytes(), - /*used_bytes=*/recording_memory_allocator_->GetUsedBytes(), - /*count=*/recording_memory_allocator_->GetAllocatedCount()}; -} - -void RecordingMicroAllocator::RecordAllocationUsage( - const RecordedAllocation& snapshotted_allocation, - RecordedAllocation& recorded_allocation) { - recorded_allocation.requested_bytes += - recording_memory_allocator_->GetRequestedBytes() - - snapshotted_allocation.requested_bytes; - recorded_allocation.used_bytes += - recording_memory_allocator_->GetUsedBytes() - - snapshotted_allocation.used_bytes; - recorded_allocation.count += - recording_memory_allocator_->GetAllocatedCount() - - snapshotted_allocation.count; -} - -} // namespace tflite diff --git 
a/code/components/tflite-lib/tensorflow/lite/schema/schema_generated.h b/code/components/tflite-lib/tensorflow/lite/schema/schema_generated.h deleted file mode 100644 index b3a6831a..00000000 --- a/code/components/tflite-lib/tensorflow/lite/schema/schema_generated.h +++ /dev/null @@ -1,20136 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ -#define FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ - -#include "flatbuffers/flatbuffers.h" - -// Ensure the included flatbuffers.h is the same version as when this file was -// generated, otherwise it may not be compatible. -static_assert(FLATBUFFERS_VERSION_MAJOR == 2 && - FLATBUFFERS_VERSION_MINOR == 0 && - FLATBUFFERS_VERSION_REVISION == 6, - "Non-compatible flatbuffers version included"); - -namespace tflite { - -struct CustomQuantization; -struct CustomQuantizationBuilder; -struct CustomQuantizationT; - -struct QuantizationParameters; -struct QuantizationParametersBuilder; -struct QuantizationParametersT; - -struct Int32Vector; -struct Int32VectorBuilder; -struct Int32VectorT; - -struct Uint16Vector; -struct Uint16VectorBuilder; -struct Uint16VectorT; - -struct Uint8Vector; -struct Uint8VectorBuilder; -struct Uint8VectorT; - -struct DimensionMetadata; -struct DimensionMetadataBuilder; -struct DimensionMetadataT; - -struct SparsityParameters; -struct SparsityParametersBuilder; -struct SparsityParametersT; - -struct VariantSubType; -struct VariantSubTypeBuilder; -struct VariantSubTypeT; - -struct Tensor; -struct TensorBuilder; -struct TensorT; - -struct Conv2DOptions; -struct Conv2DOptionsBuilder; -struct Conv2DOptionsT; - -struct Conv3DOptions; -struct Conv3DOptionsBuilder; -struct Conv3DOptionsT; - -struct Pool2DOptions; -struct Pool2DOptionsBuilder; -struct Pool2DOptionsT; - -struct DepthwiseConv2DOptions; -struct DepthwiseConv2DOptionsBuilder; -struct DepthwiseConv2DOptionsT; - -struct ConcatEmbeddingsOptions; -struct ConcatEmbeddingsOptionsBuilder; -struct ConcatEmbeddingsOptionsT; - -struct LSHProjectionOptions; -struct LSHProjectionOptionsBuilder; -struct LSHProjectionOptionsT; - -struct SVDFOptions; -struct SVDFOptionsBuilder; -struct SVDFOptionsT; - -struct RNNOptions; -struct RNNOptionsBuilder; -struct RNNOptionsT; - -struct SequenceRNNOptions; -struct SequenceRNNOptionsBuilder; -struct SequenceRNNOptionsT; - -struct BidirectionalSequenceRNNOptions; -struct BidirectionalSequenceRNNOptionsBuilder; -struct BidirectionalSequenceRNNOptionsT; - -struct FullyConnectedOptions; -struct FullyConnectedOptionsBuilder; -struct FullyConnectedOptionsT; - -struct SoftmaxOptions; -struct SoftmaxOptionsBuilder; -struct SoftmaxOptionsT; - -struct ConcatenationOptions; -struct ConcatenationOptionsBuilder; -struct ConcatenationOptionsT; - -struct AddOptions; -struct AddOptionsBuilder; -struct AddOptionsT; - -struct MulOptions; -struct MulOptionsBuilder; -struct MulOptionsT; - -struct L2NormOptions; -struct L2NormOptionsBuilder; -struct L2NormOptionsT; - -struct LocalResponseNormalizationOptions; -struct LocalResponseNormalizationOptionsBuilder; -struct LocalResponseNormalizationOptionsT; - -struct LSTMOptions; -struct LSTMOptionsBuilder; -struct LSTMOptionsT; - -struct UnidirectionalSequenceLSTMOptions; -struct UnidirectionalSequenceLSTMOptionsBuilder; -struct UnidirectionalSequenceLSTMOptionsT; - -struct BidirectionalSequenceLSTMOptions; -struct BidirectionalSequenceLSTMOptionsBuilder; -struct BidirectionalSequenceLSTMOptionsT; - -struct ResizeBilinearOptions; -struct 
ResizeBilinearOptionsBuilder; -struct ResizeBilinearOptionsT; - -struct ResizeNearestNeighborOptions; -struct ResizeNearestNeighborOptionsBuilder; -struct ResizeNearestNeighborOptionsT; - -struct CallOptions; -struct CallOptionsBuilder; -struct CallOptionsT; - -struct PadOptions; -struct PadOptionsBuilder; -struct PadOptionsT; - -struct PadV2Options; -struct PadV2OptionsBuilder; -struct PadV2OptionsT; - -struct ReshapeOptions; -struct ReshapeOptionsBuilder; -struct ReshapeOptionsT; - -struct SpaceToBatchNDOptions; -struct SpaceToBatchNDOptionsBuilder; -struct SpaceToBatchNDOptionsT; - -struct BatchToSpaceNDOptions; -struct BatchToSpaceNDOptionsBuilder; -struct BatchToSpaceNDOptionsT; - -struct SkipGramOptions; -struct SkipGramOptionsBuilder; -struct SkipGramOptionsT; - -struct SpaceToDepthOptions; -struct SpaceToDepthOptionsBuilder; -struct SpaceToDepthOptionsT; - -struct DepthToSpaceOptions; -struct DepthToSpaceOptionsBuilder; -struct DepthToSpaceOptionsT; - -struct SubOptions; -struct SubOptionsBuilder; -struct SubOptionsT; - -struct DivOptions; -struct DivOptionsBuilder; -struct DivOptionsT; - -struct TopKV2Options; -struct TopKV2OptionsBuilder; -struct TopKV2OptionsT; - -struct EmbeddingLookupSparseOptions; -struct EmbeddingLookupSparseOptionsBuilder; -struct EmbeddingLookupSparseOptionsT; - -struct GatherOptions; -struct GatherOptionsBuilder; -struct GatherOptionsT; - -struct TransposeOptions; -struct TransposeOptionsBuilder; -struct TransposeOptionsT; - -struct ExpOptions; -struct ExpOptionsBuilder; -struct ExpOptionsT; - -struct CosOptions; -struct CosOptionsBuilder; -struct CosOptionsT; - -struct ReducerOptions; -struct ReducerOptionsBuilder; -struct ReducerOptionsT; - -struct SqueezeOptions; -struct SqueezeOptionsBuilder; -struct SqueezeOptionsT; - -struct SplitOptions; -struct SplitOptionsBuilder; -struct SplitOptionsT; - -struct SplitVOptions; -struct SplitVOptionsBuilder; -struct SplitVOptionsT; - -struct StridedSliceOptions; -struct StridedSliceOptionsBuilder; -struct StridedSliceOptionsT; - -struct LogSoftmaxOptions; -struct LogSoftmaxOptionsBuilder; -struct LogSoftmaxOptionsT; - -struct CastOptions; -struct CastOptionsBuilder; -struct CastOptionsT; - -struct DequantizeOptions; -struct DequantizeOptionsBuilder; -struct DequantizeOptionsT; - -struct MaximumMinimumOptions; -struct MaximumMinimumOptionsBuilder; -struct MaximumMinimumOptionsT; - -struct TileOptions; -struct TileOptionsBuilder; -struct TileOptionsT; - -struct ArgMaxOptions; -struct ArgMaxOptionsBuilder; -struct ArgMaxOptionsT; - -struct ArgMinOptions; -struct ArgMinOptionsBuilder; -struct ArgMinOptionsT; - -struct GreaterOptions; -struct GreaterOptionsBuilder; -struct GreaterOptionsT; - -struct GreaterEqualOptions; -struct GreaterEqualOptionsBuilder; -struct GreaterEqualOptionsT; - -struct LessOptions; -struct LessOptionsBuilder; -struct LessOptionsT; - -struct LessEqualOptions; -struct LessEqualOptionsBuilder; -struct LessEqualOptionsT; - -struct NegOptions; -struct NegOptionsBuilder; -struct NegOptionsT; - -struct SelectOptions; -struct SelectOptionsBuilder; -struct SelectOptionsT; - -struct SliceOptions; -struct SliceOptionsBuilder; -struct SliceOptionsT; - -struct TransposeConvOptions; -struct TransposeConvOptionsBuilder; -struct TransposeConvOptionsT; - -struct ExpandDimsOptions; -struct ExpandDimsOptionsBuilder; -struct ExpandDimsOptionsT; - -struct SparseToDenseOptions; -struct SparseToDenseOptionsBuilder; -struct SparseToDenseOptionsT; - -struct EqualOptions; -struct EqualOptionsBuilder; -struct 
EqualOptionsT; - -struct NotEqualOptions; -struct NotEqualOptionsBuilder; -struct NotEqualOptionsT; - -struct ShapeOptions; -struct ShapeOptionsBuilder; -struct ShapeOptionsT; - -struct RankOptions; -struct RankOptionsBuilder; -struct RankOptionsT; - -struct PowOptions; -struct PowOptionsBuilder; -struct PowOptionsT; - -struct FakeQuantOptions; -struct FakeQuantOptionsBuilder; -struct FakeQuantOptionsT; - -struct PackOptions; -struct PackOptionsBuilder; -struct PackOptionsT; - -struct LogicalOrOptions; -struct LogicalOrOptionsBuilder; -struct LogicalOrOptionsT; - -struct OneHotOptions; -struct OneHotOptionsBuilder; -struct OneHotOptionsT; - -struct AbsOptions; -struct AbsOptionsBuilder; -struct AbsOptionsT; - -struct HardSwishOptions; -struct HardSwishOptionsBuilder; -struct HardSwishOptionsT; - -struct LogicalAndOptions; -struct LogicalAndOptionsBuilder; -struct LogicalAndOptionsT; - -struct LogicalNotOptions; -struct LogicalNotOptionsBuilder; -struct LogicalNotOptionsT; - -struct UnpackOptions; -struct UnpackOptionsBuilder; -struct UnpackOptionsT; - -struct FloorDivOptions; -struct FloorDivOptionsBuilder; -struct FloorDivOptionsT; - -struct SquareOptions; -struct SquareOptionsBuilder; -struct SquareOptionsT; - -struct ZerosLikeOptions; -struct ZerosLikeOptionsBuilder; -struct ZerosLikeOptionsT; - -struct FillOptions; -struct FillOptionsBuilder; -struct FillOptionsT; - -struct FloorModOptions; -struct FloorModOptionsBuilder; -struct FloorModOptionsT; - -struct RangeOptions; -struct RangeOptionsBuilder; -struct RangeOptionsT; - -struct LeakyReluOptions; -struct LeakyReluOptionsBuilder; -struct LeakyReluOptionsT; - -struct SquaredDifferenceOptions; -struct SquaredDifferenceOptionsBuilder; -struct SquaredDifferenceOptionsT; - -struct MirrorPadOptions; -struct MirrorPadOptionsBuilder; -struct MirrorPadOptionsT; - -struct UniqueOptions; -struct UniqueOptionsBuilder; -struct UniqueOptionsT; - -struct ReverseV2Options; -struct ReverseV2OptionsBuilder; -struct ReverseV2OptionsT; - -struct AddNOptions; -struct AddNOptionsBuilder; -struct AddNOptionsT; - -struct GatherNdOptions; -struct GatherNdOptionsBuilder; -struct GatherNdOptionsT; - -struct WhereOptions; -struct WhereOptionsBuilder; -struct WhereOptionsT; - -struct ReverseSequenceOptions; -struct ReverseSequenceOptionsBuilder; -struct ReverseSequenceOptionsT; - -struct MatrixDiagOptions; -struct MatrixDiagOptionsBuilder; -struct MatrixDiagOptionsT; - -struct QuantizeOptions; -struct QuantizeOptionsBuilder; -struct QuantizeOptionsT; - -struct MatrixSetDiagOptions; -struct MatrixSetDiagOptionsBuilder; -struct MatrixSetDiagOptionsT; - -struct IfOptions; -struct IfOptionsBuilder; -struct IfOptionsT; - -struct CallOnceOptions; -struct CallOnceOptionsBuilder; -struct CallOnceOptionsT; - -struct WhileOptions; -struct WhileOptionsBuilder; -struct WhileOptionsT; - -struct NonMaxSuppressionV4Options; -struct NonMaxSuppressionV4OptionsBuilder; -struct NonMaxSuppressionV4OptionsT; - -struct NonMaxSuppressionV5Options; -struct NonMaxSuppressionV5OptionsBuilder; -struct NonMaxSuppressionV5OptionsT; - -struct ScatterNdOptions; -struct ScatterNdOptionsBuilder; -struct ScatterNdOptionsT; - -struct SelectV2Options; -struct SelectV2OptionsBuilder; -struct SelectV2OptionsT; - -struct DensifyOptions; -struct DensifyOptionsBuilder; -struct DensifyOptionsT; - -struct SegmentSumOptions; -struct SegmentSumOptionsBuilder; -struct SegmentSumOptionsT; - -struct BatchMatMulOptions; -struct BatchMatMulOptionsBuilder; -struct BatchMatMulOptionsT; - -struct CumsumOptions; 
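// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the deleted file] The forward declarations
// above all follow the same generated pattern: a table type (e.g.
// CumsumOptions), its builder, and an object-API type (CumsumOptionsT). A
// typical read-only use of this header walks a model's operator codes;
// model_data is an assumed, already-verified flatbuffer, and GetModel() /
// EnumNameBuiltinOperator() are defined further down in this generated file.
#include <cstdio>

void DumpOperatorCodes(const void* model_data) {
  const tflite::Model* model = tflite::GetModel(model_data);
  if (model == nullptr || model->operator_codes() == nullptr) return;
  for (const tflite::OperatorCode* oc : *model->operator_codes()) {
    std::printf("%s\n", tflite::EnumNameBuiltinOperator(oc->builtin_code()));
  }
}
// ---------------------------------------------------------------------------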
-struct CumsumOptionsBuilder; -struct CumsumOptionsT; - -struct BroadcastToOptions; -struct BroadcastToOptionsBuilder; -struct BroadcastToOptionsT; - -struct Rfft2dOptions; -struct Rfft2dOptionsBuilder; -struct Rfft2dOptionsT; - -struct HashtableOptions; -struct HashtableOptionsBuilder; -struct HashtableOptionsT; - -struct HashtableFindOptions; -struct HashtableFindOptionsBuilder; -struct HashtableFindOptionsT; - -struct HashtableImportOptions; -struct HashtableImportOptionsBuilder; -struct HashtableImportOptionsT; - -struct HashtableSizeOptions; -struct HashtableSizeOptionsBuilder; -struct HashtableSizeOptionsT; - -struct VarHandleOptions; -struct VarHandleOptionsBuilder; -struct VarHandleOptionsT; - -struct ReadVariableOptions; -struct ReadVariableOptionsBuilder; -struct ReadVariableOptionsT; - -struct AssignVariableOptions; -struct AssignVariableOptionsBuilder; -struct AssignVariableOptionsT; - -struct RandomOptions; -struct RandomOptionsBuilder; -struct RandomOptionsT; - -struct BucketizeOptions; -struct BucketizeOptionsBuilder; -struct BucketizeOptionsT; - -struct GeluOptions; -struct GeluOptionsBuilder; -struct GeluOptionsT; - -struct DynamicUpdateSliceOptions; -struct DynamicUpdateSliceOptionsBuilder; -struct DynamicUpdateSliceOptionsT; - -struct UnsortedSegmentProdOptions; -struct UnsortedSegmentProdOptionsBuilder; -struct UnsortedSegmentProdOptionsT; - -struct UnsortedSegmentMaxOptions; -struct UnsortedSegmentMaxOptionsBuilder; -struct UnsortedSegmentMaxOptionsT; - -struct UnsortedSegmentSumOptions; -struct UnsortedSegmentSumOptionsBuilder; -struct UnsortedSegmentSumOptionsT; - -struct ATan2Options; -struct ATan2OptionsBuilder; -struct ATan2OptionsT; - -struct UnsortedSegmentMinOptions; -struct UnsortedSegmentMinOptionsBuilder; -struct UnsortedSegmentMinOptionsT; - -struct SignOptions; -struct SignOptionsBuilder; -struct SignOptionsT; - -struct OperatorCode; -struct OperatorCodeBuilder; -struct OperatorCodeT; - -struct Operator; -struct OperatorBuilder; -struct OperatorT; - -struct SubGraph; -struct SubGraphBuilder; -struct SubGraphT; - -struct Buffer; -struct BufferBuilder; -struct BufferT; - -struct Metadata; -struct MetadataBuilder; -struct MetadataT; - -struct TensorMap; -struct TensorMapBuilder; -struct TensorMapT; - -struct SignatureDef; -struct SignatureDefBuilder; -struct SignatureDefT; - -struct Model; -struct ModelBuilder; -struct ModelT; - -enum TensorType : int8_t { - TensorType_FLOAT32 = 0, - TensorType_FLOAT16 = 1, - TensorType_INT32 = 2, - TensorType_UINT8 = 3, - TensorType_INT64 = 4, - TensorType_STRING = 5, - TensorType_BOOL = 6, - TensorType_INT16 = 7, - TensorType_COMPLEX64 = 8, - TensorType_INT8 = 9, - TensorType_FLOAT64 = 10, - TensorType_COMPLEX128 = 11, - TensorType_UINT64 = 12, - TensorType_RESOURCE = 13, - TensorType_VARIANT = 14, - TensorType_UINT32 = 15, - TensorType_UINT16 = 16, - TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_UINT16 -}; - -inline const TensorType (&EnumValuesTensorType())[17] { - static const TensorType values[] = { - TensorType_FLOAT32, - TensorType_FLOAT16, - TensorType_INT32, - TensorType_UINT8, - TensorType_INT64, - TensorType_STRING, - TensorType_BOOL, - TensorType_INT16, - TensorType_COMPLEX64, - TensorType_INT8, - TensorType_FLOAT64, - TensorType_COMPLEX128, - TensorType_UINT64, - TensorType_RESOURCE, - TensorType_VARIANT, - TensorType_UINT32, - TensorType_UINT16 - }; - return values; -} - -inline const char * const *EnumNamesTensorType() { - static const char * const names[18] = { - "FLOAT32", - "FLOAT16", - 
"INT32", - "UINT8", - "INT64", - "STRING", - "BOOL", - "INT16", - "COMPLEX64", - "INT8", - "FLOAT64", - "COMPLEX128", - "UINT64", - "RESOURCE", - "VARIANT", - "UINT32", - "UINT16", - nullptr - }; - return names; -} - -inline const char *EnumNameTensorType(TensorType e) { - if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_UINT16)) return ""; - const size_t index = static_cast(e); - return EnumNamesTensorType()[index]; -} - -enum QuantizationDetails : uint8_t { - QuantizationDetails_NONE = 0, - QuantizationDetails_CustomQuantization = 1, - QuantizationDetails_MIN = QuantizationDetails_NONE, - QuantizationDetails_MAX = QuantizationDetails_CustomQuantization -}; - -inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] { - static const QuantizationDetails values[] = { - QuantizationDetails_NONE, - QuantizationDetails_CustomQuantization - }; - return values; -} - -inline const char * const *EnumNamesQuantizationDetails() { - static const char * const names[3] = { - "NONE", - "CustomQuantization", - nullptr - }; - return names; -} - -inline const char *EnumNameQuantizationDetails(QuantizationDetails e) { - if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return ""; - const size_t index = static_cast(e); - return EnumNamesQuantizationDetails()[index]; -} - -template struct QuantizationDetailsTraits { - static const QuantizationDetails enum_value = QuantizationDetails_NONE; -}; - -template<> struct QuantizationDetailsTraits { - static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; -}; - -template struct QuantizationDetailsUnionTraits { - static const QuantizationDetails enum_value = QuantizationDetails_NONE; -}; - -template<> struct QuantizationDetailsUnionTraits { - static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; -}; - -struct QuantizationDetailsUnion { - QuantizationDetails type; - void *value; - - QuantizationDetailsUnion() : type(QuantizationDetails_NONE), value(nullptr) {} - QuantizationDetailsUnion(QuantizationDetailsUnion&& u) FLATBUFFERS_NOEXCEPT : - type(QuantizationDetails_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - QuantizationDetailsUnion(const QuantizationDetailsUnion &); - QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u) - { QuantizationDetailsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~QuantizationDetailsUnion() { Reset(); } - - void Reset(); - - template - void Set(T&& val) { - typedef typename std::remove_reference::type RT; - Reset(); - type = QuantizationDetailsUnionTraits::enum_value; - if (type != QuantizationDetails_NONE) { - value = new RT(std::forward(val)); - } - } - - static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::CustomQuantizationT *AsCustomQuantization() { - return type == QuantizationDetails_CustomQuantization ? - reinterpret_cast(value) : nullptr; - } - const tflite::CustomQuantizationT *AsCustomQuantization() const { - return type == QuantizationDetails_CustomQuantization ? 
- reinterpret_cast(value) : nullptr; - } -}; - -bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); -bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum DimensionType : int8_t { - DimensionType_DENSE = 0, - DimensionType_SPARSE_CSR = 1, - DimensionType_MIN = DimensionType_DENSE, - DimensionType_MAX = DimensionType_SPARSE_CSR -}; - -inline const DimensionType (&EnumValuesDimensionType())[2] { - static const DimensionType values[] = { - DimensionType_DENSE, - DimensionType_SPARSE_CSR - }; - return values; -} - -inline const char * const *EnumNamesDimensionType() { - static const char * const names[3] = { - "DENSE", - "SPARSE_CSR", - nullptr - }; - return names; -} - -inline const char *EnumNameDimensionType(DimensionType e) { - if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return ""; - const size_t index = static_cast(e); - return EnumNamesDimensionType()[index]; -} - -enum SparseIndexVector : uint8_t { - SparseIndexVector_NONE = 0, - SparseIndexVector_Int32Vector = 1, - SparseIndexVector_Uint16Vector = 2, - SparseIndexVector_Uint8Vector = 3, - SparseIndexVector_MIN = SparseIndexVector_NONE, - SparseIndexVector_MAX = SparseIndexVector_Uint8Vector -}; - -inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4] { - static const SparseIndexVector values[] = { - SparseIndexVector_NONE, - SparseIndexVector_Int32Vector, - SparseIndexVector_Uint16Vector, - SparseIndexVector_Uint8Vector - }; - return values; -} - -inline const char * const *EnumNamesSparseIndexVector() { - static const char * const names[5] = { - "NONE", - "Int32Vector", - "Uint16Vector", - "Uint8Vector", - nullptr - }; - return names; -} - -inline const char *EnumNameSparseIndexVector(SparseIndexVector e) { - if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return ""; - const size_t index = static_cast(e); - return EnumNamesSparseIndexVector()[index]; -} - -template struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_NONE; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; -}; - -template struct SparseIndexVectorUnionTraits { - static const SparseIndexVector enum_value = SparseIndexVector_NONE; -}; - -template<> struct SparseIndexVectorUnionTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; -}; - -template<> struct SparseIndexVectorUnionTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; -}; - -template<> struct SparseIndexVectorUnionTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; -}; - -struct SparseIndexVectorUnion { - SparseIndexVector type; - void *value; - - SparseIndexVectorUnion() : type(SparseIndexVector_NONE), value(nullptr) {} - SparseIndexVectorUnion(SparseIndexVectorUnion&& u) FLATBUFFERS_NOEXCEPT : - type(SparseIndexVector_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - SparseIndexVectorUnion(const SparseIndexVectorUnion &); - SparseIndexVectorUnion &operator=(const 
SparseIndexVectorUnion &u) - { SparseIndexVectorUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - SparseIndexVectorUnion &operator=(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~SparseIndexVectorUnion() { Reset(); } - - void Reset(); - - template - void Set(T&& val) { - typedef typename std::remove_reference::type RT; - Reset(); - type = SparseIndexVectorUnionTraits::enum_value; - if (type != SparseIndexVector_NONE) { - value = new RT(std::forward(val)); - } - } - - static void *UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::Int32VectorT *AsInt32Vector() { - return type == SparseIndexVector_Int32Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Int32VectorT *AsInt32Vector() const { - return type == SparseIndexVector_Int32Vector ? - reinterpret_cast(value) : nullptr; - } - tflite::Uint16VectorT *AsUint16Vector() { - return type == SparseIndexVector_Uint16Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Uint16VectorT *AsUint16Vector() const { - return type == SparseIndexVector_Uint16Vector ? - reinterpret_cast(value) : nullptr; - } - tflite::Uint8VectorT *AsUint8Vector() { - return type == SparseIndexVector_Uint8Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Uint8VectorT *AsUint8Vector() const { - return type == SparseIndexVector_Uint8Vector ? - reinterpret_cast(value) : nullptr; - } -}; - -bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type); -bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum BuiltinOperator : int32_t { - BuiltinOperator_ADD = 0, - BuiltinOperator_AVERAGE_POOL_2D = 1, - BuiltinOperator_CONCATENATION = 2, - BuiltinOperator_CONV_2D = 3, - BuiltinOperator_DEPTHWISE_CONV_2D = 4, - BuiltinOperator_DEPTH_TO_SPACE = 5, - BuiltinOperator_DEQUANTIZE = 6, - BuiltinOperator_EMBEDDING_LOOKUP = 7, - BuiltinOperator_FLOOR = 8, - BuiltinOperator_FULLY_CONNECTED = 9, - BuiltinOperator_HASHTABLE_LOOKUP = 10, - BuiltinOperator_L2_NORMALIZATION = 11, - BuiltinOperator_L2_POOL_2D = 12, - BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13, - BuiltinOperator_LOGISTIC = 14, - BuiltinOperator_LSH_PROJECTION = 15, - BuiltinOperator_LSTM = 16, - BuiltinOperator_MAX_POOL_2D = 17, - BuiltinOperator_MUL = 18, - BuiltinOperator_RELU = 19, - BuiltinOperator_RELU_N1_TO_1 = 20, - BuiltinOperator_RELU6 = 21, - BuiltinOperator_RESHAPE = 22, - BuiltinOperator_RESIZE_BILINEAR = 23, - BuiltinOperator_RNN = 24, - BuiltinOperator_SOFTMAX = 25, - BuiltinOperator_SPACE_TO_DEPTH = 26, - BuiltinOperator_SVDF = 27, - BuiltinOperator_TANH = 28, - BuiltinOperator_CONCAT_EMBEDDINGS = 29, - BuiltinOperator_SKIP_GRAM = 30, - BuiltinOperator_CALL = 31, - BuiltinOperator_CUSTOM = 32, - BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33, - BuiltinOperator_PAD = 34, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35, - BuiltinOperator_GATHER = 36, - BuiltinOperator_BATCH_TO_SPACE_ND = 37, - BuiltinOperator_SPACE_TO_BATCH_ND = 38, - BuiltinOperator_TRANSPOSE = 39, - BuiltinOperator_MEAN = 40, - BuiltinOperator_SUB = 41, - BuiltinOperator_DIV = 42, - BuiltinOperator_SQUEEZE = 43, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44, - 
BuiltinOperator_STRIDED_SLICE = 45, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46, - BuiltinOperator_EXP = 47, - BuiltinOperator_TOPK_V2 = 48, - BuiltinOperator_SPLIT = 49, - BuiltinOperator_LOG_SOFTMAX = 50, - BuiltinOperator_DELEGATE = 51, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52, - BuiltinOperator_CAST = 53, - BuiltinOperator_PRELU = 54, - BuiltinOperator_MAXIMUM = 55, - BuiltinOperator_ARG_MAX = 56, - BuiltinOperator_MINIMUM = 57, - BuiltinOperator_LESS = 58, - BuiltinOperator_NEG = 59, - BuiltinOperator_PADV2 = 60, - BuiltinOperator_GREATER = 61, - BuiltinOperator_GREATER_EQUAL = 62, - BuiltinOperator_LESS_EQUAL = 63, - BuiltinOperator_SELECT = 64, - BuiltinOperator_SLICE = 65, - BuiltinOperator_SIN = 66, - BuiltinOperator_TRANSPOSE_CONV = 67, - BuiltinOperator_SPARSE_TO_DENSE = 68, - BuiltinOperator_TILE = 69, - BuiltinOperator_EXPAND_DIMS = 70, - BuiltinOperator_EQUAL = 71, - BuiltinOperator_NOT_EQUAL = 72, - BuiltinOperator_LOG = 73, - BuiltinOperator_SUM = 74, - BuiltinOperator_SQRT = 75, - BuiltinOperator_RSQRT = 76, - BuiltinOperator_SHAPE = 77, - BuiltinOperator_POW = 78, - BuiltinOperator_ARG_MIN = 79, - BuiltinOperator_FAKE_QUANT = 80, - BuiltinOperator_REDUCE_PROD = 81, - BuiltinOperator_REDUCE_MAX = 82, - BuiltinOperator_PACK = 83, - BuiltinOperator_LOGICAL_OR = 84, - BuiltinOperator_ONE_HOT = 85, - BuiltinOperator_LOGICAL_AND = 86, - BuiltinOperator_LOGICAL_NOT = 87, - BuiltinOperator_UNPACK = 88, - BuiltinOperator_REDUCE_MIN = 89, - BuiltinOperator_FLOOR_DIV = 90, - BuiltinOperator_REDUCE_ANY = 91, - BuiltinOperator_SQUARE = 92, - BuiltinOperator_ZEROS_LIKE = 93, - BuiltinOperator_FILL = 94, - BuiltinOperator_FLOOR_MOD = 95, - BuiltinOperator_RANGE = 96, - BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97, - BuiltinOperator_LEAKY_RELU = 98, - BuiltinOperator_SQUARED_DIFFERENCE = 99, - BuiltinOperator_MIRROR_PAD = 100, - BuiltinOperator_ABS = 101, - BuiltinOperator_SPLIT_V = 102, - BuiltinOperator_UNIQUE = 103, - BuiltinOperator_CEIL = 104, - BuiltinOperator_REVERSE_V2 = 105, - BuiltinOperator_ADD_N = 106, - BuiltinOperator_GATHER_ND = 107, - BuiltinOperator_COS = 108, - BuiltinOperator_WHERE = 109, - BuiltinOperator_RANK = 110, - BuiltinOperator_ELU = 111, - BuiltinOperator_REVERSE_SEQUENCE = 112, - BuiltinOperator_MATRIX_DIAG = 113, - BuiltinOperator_QUANTIZE = 114, - BuiltinOperator_MATRIX_SET_DIAG = 115, - BuiltinOperator_ROUND = 116, - BuiltinOperator_HARD_SWISH = 117, - BuiltinOperator_IF = 118, - BuiltinOperator_WHILE = 119, - BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120, - BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121, - BuiltinOperator_SCATTER_ND = 122, - BuiltinOperator_SELECT_V2 = 123, - BuiltinOperator_DENSIFY = 124, - BuiltinOperator_SEGMENT_SUM = 125, - BuiltinOperator_BATCH_MATMUL = 126, - BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127, - BuiltinOperator_CUMSUM = 128, - BuiltinOperator_CALL_ONCE = 129, - BuiltinOperator_BROADCAST_TO = 130, - BuiltinOperator_RFFT2D = 131, - BuiltinOperator_CONV_3D = 132, - BuiltinOperator_IMAG = 133, - BuiltinOperator_REAL = 134, - BuiltinOperator_COMPLEX_ABS = 135, - BuiltinOperator_HASHTABLE = 136, - BuiltinOperator_HASHTABLE_FIND = 137, - BuiltinOperator_HASHTABLE_IMPORT = 138, - BuiltinOperator_HASHTABLE_SIZE = 139, - BuiltinOperator_REDUCE_ALL = 140, - BuiltinOperator_CONV_3D_TRANSPOSE = 141, - BuiltinOperator_VAR_HANDLE = 142, - BuiltinOperator_READ_VARIABLE = 143, - BuiltinOperator_ASSIGN_VARIABLE = 144, - BuiltinOperator_BROADCAST_ARGS = 145, - BuiltinOperator_RANDOM_STANDARD_NORMAL = 146, - 
-  BuiltinOperator_BUCKETIZE = 147,
-  BuiltinOperator_RANDOM_UNIFORM = 148,
-  BuiltinOperator_MULTINOMIAL = 149,
-  BuiltinOperator_GELU = 150,
-  BuiltinOperator_DYNAMIC_UPDATE_SLICE = 151,
-  BuiltinOperator_RELU_0_TO_1 = 152,
-  BuiltinOperator_UNSORTED_SEGMENT_PROD = 153,
-  BuiltinOperator_UNSORTED_SEGMENT_MAX = 154,
-  BuiltinOperator_UNSORTED_SEGMENT_SUM = 155,
-  BuiltinOperator_ATAN2 = 156,
-  BuiltinOperator_UNSORTED_SEGMENT_MIN = 157,
-  BuiltinOperator_SIGN = 158,
-  BuiltinOperator_MIN = BuiltinOperator_ADD,
-  BuiltinOperator_MAX = BuiltinOperator_SIGN
-};
-
-inline const BuiltinOperator (&EnumValuesBuiltinOperator())[159] {
-  static const BuiltinOperator values[] = {
-    BuiltinOperator_ADD,
   [... the remaining 157 BuiltinOperator enumerators, in declaration order ...]
-    BuiltinOperator_SIGN
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesBuiltinOperator() {
-  static const char * const names[160] = {
-    "ADD",
   [... one quoted name per operator, matching the enumerators above ...]
-    "SIGN",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameBuiltinOperator(BuiltinOperator e) {
-  if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_SIGN)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesBuiltinOperator()[index];
-}
-
-enum BuiltinOptions : uint8_t {
-  BuiltinOptions_NONE = 0,
-  BuiltinOptions_Conv2DOptions = 1,
-  BuiltinOptions_DepthwiseConv2DOptions = 2,
-  BuiltinOptions_ConcatEmbeddingsOptions = 3,
-  BuiltinOptions_LSHProjectionOptions = 4,
-  BuiltinOptions_Pool2DOptions = 5,
-  BuiltinOptions_SVDFOptions = 6,
-  BuiltinOptions_RNNOptions = 7,
-  BuiltinOptions_FullyConnectedOptions = 8,
-  BuiltinOptions_SoftmaxOptions = 9,
-  BuiltinOptions_ConcatenationOptions = 10,
-  BuiltinOptions_AddOptions = 11,
-  BuiltinOptions_L2NormOptions = 12,
-  BuiltinOptions_LocalResponseNormalizationOptions = 13,
-  BuiltinOptions_LSTMOptions = 14,
-  BuiltinOptions_ResizeBilinearOptions = 15,
-  BuiltinOptions_CallOptions = 16,
-  BuiltinOptions_ReshapeOptions = 17,
-  BuiltinOptions_SkipGramOptions = 18,
-  BuiltinOptions_SpaceToDepthOptions = 19,
-  BuiltinOptions_EmbeddingLookupSparseOptions = 20,
-  BuiltinOptions_MulOptions = 21,
-  BuiltinOptions_PadOptions = 22,
-  BuiltinOptions_GatherOptions = 23,
-  BuiltinOptions_BatchToSpaceNDOptions = 24,
-  BuiltinOptions_SpaceToBatchNDOptions = 25,
-  BuiltinOptions_TransposeOptions = 26,
-  BuiltinOptions_ReducerOptions = 27,
-  BuiltinOptions_SubOptions = 28,
-  BuiltinOptions_DivOptions = 29,
-  BuiltinOptions_SqueezeOptions = 30,
-  BuiltinOptions_SequenceRNNOptions = 31,
-  BuiltinOptions_StridedSliceOptions = 32,
-  BuiltinOptions_ExpOptions = 33,
-  BuiltinOptions_TopKV2Options = 34,
-  BuiltinOptions_SplitOptions = 35,
-  BuiltinOptions_LogSoftmaxOptions = 36,
-  BuiltinOptions_CastOptions = 37,
-  BuiltinOptions_DequantizeOptions = 38,
-  BuiltinOptions_MaximumMinimumOptions = 39,
-  BuiltinOptions_ArgMaxOptions = 40,
-  BuiltinOptions_LessOptions = 41,
-  BuiltinOptions_NegOptions = 42,
-  BuiltinOptions_PadV2Options = 43,
-  BuiltinOptions_GreaterOptions = 44,
-  BuiltinOptions_GreaterEqualOptions = 45,
-  BuiltinOptions_LessEqualOptions = 46,
-  BuiltinOptions_SelectOptions = 47,
-  BuiltinOptions_SliceOptions = 48,
-  BuiltinOptions_TransposeConvOptions = 49,
-  BuiltinOptions_SparseToDenseOptions = 50,
-  BuiltinOptions_TileOptions = 51,
-  BuiltinOptions_ExpandDimsOptions = 52,
-  BuiltinOptions_EqualOptions = 53,
-  BuiltinOptions_NotEqualOptions = 54,
-  BuiltinOptions_ShapeOptions = 55,
-  BuiltinOptions_PowOptions = 56,
-  BuiltinOptions_ArgMinOptions = 57,
-  BuiltinOptions_FakeQuantOptions = 58,
-  BuiltinOptions_PackOptions = 59,
-  BuiltinOptions_LogicalOrOptions = 60,
-  BuiltinOptions_OneHotOptions = 61,
-  BuiltinOptions_LogicalAndOptions = 62,
-  BuiltinOptions_LogicalNotOptions = 63,
-  BuiltinOptions_UnpackOptions = 64,
-  BuiltinOptions_FloorDivOptions = 65,
-  BuiltinOptions_SquareOptions = 66,
-  BuiltinOptions_ZerosLikeOptions = 67,
-  BuiltinOptions_FillOptions = 68,
-  BuiltinOptions_BidirectionalSequenceLSTMOptions = 69,
-  BuiltinOptions_BidirectionalSequenceRNNOptions = 70,
-  BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71,
-  BuiltinOptions_FloorModOptions = 72,
-  BuiltinOptions_RangeOptions = 73,
-  BuiltinOptions_ResizeNearestNeighborOptions = 74,
-  BuiltinOptions_LeakyReluOptions = 75,
-  BuiltinOptions_SquaredDifferenceOptions = 76,
-  BuiltinOptions_MirrorPadOptions = 77,
-  BuiltinOptions_AbsOptions = 78,
-  BuiltinOptions_SplitVOptions = 79,
-  BuiltinOptions_UniqueOptions = 80,
-  BuiltinOptions_ReverseV2Options = 81,
-  BuiltinOptions_AddNOptions = 82,
-  BuiltinOptions_GatherNdOptions = 83,
-  BuiltinOptions_CosOptions = 84,
-  BuiltinOptions_WhereOptions = 85,
-  BuiltinOptions_RankOptions = 86,
-  BuiltinOptions_ReverseSequenceOptions = 87,
-  BuiltinOptions_MatrixDiagOptions = 88,
-  BuiltinOptions_QuantizeOptions = 89,
-  BuiltinOptions_MatrixSetDiagOptions = 90,
-  BuiltinOptions_HardSwishOptions = 91,
-  BuiltinOptions_IfOptions = 92,
-  BuiltinOptions_WhileOptions = 93,
-  BuiltinOptions_DepthToSpaceOptions = 94,
-  BuiltinOptions_NonMaxSuppressionV4Options = 95,
-  BuiltinOptions_NonMaxSuppressionV5Options = 96,
-  BuiltinOptions_ScatterNdOptions = 97,
-  BuiltinOptions_SelectV2Options = 98,
-  BuiltinOptions_DensifyOptions = 99,
-  BuiltinOptions_SegmentSumOptions = 100,
-  BuiltinOptions_BatchMatMulOptions = 101,
-  BuiltinOptions_CumsumOptions = 102,
-  BuiltinOptions_CallOnceOptions = 103,
-  BuiltinOptions_BroadcastToOptions = 104,
-  BuiltinOptions_Rfft2dOptions = 105,
-  BuiltinOptions_Conv3DOptions = 106,
-  BuiltinOptions_HashtableOptions = 107,
-  BuiltinOptions_HashtableFindOptions = 108,
-  BuiltinOptions_HashtableImportOptions = 109,
-  BuiltinOptions_HashtableSizeOptions = 110,
-  BuiltinOptions_VarHandleOptions = 111,
-  BuiltinOptions_ReadVariableOptions = 112,
-  BuiltinOptions_AssignVariableOptions = 113,
-  BuiltinOptions_RandomOptions = 114,
-  BuiltinOptions_BucketizeOptions = 115,
-  BuiltinOptions_GeluOptions = 116,
-  BuiltinOptions_DynamicUpdateSliceOptions = 117,
-  BuiltinOptions_UnsortedSegmentProdOptions = 118,
-  BuiltinOptions_UnsortedSegmentMaxOptions = 119,
-  BuiltinOptions_UnsortedSegmentMinOptions = 120,
-  BuiltinOptions_UnsortedSegmentSumOptions = 121,
-  BuiltinOptions_ATan2Options = 122,
-  BuiltinOptions_SignOptions = 123,
-  BuiltinOptions_MIN = BuiltinOptions_NONE,
-  BuiltinOptions_MAX = BuiltinOptions_SignOptions
-};
-
-inline const BuiltinOptions (&EnumValuesBuiltinOptions())[124] {
-  static const BuiltinOptions values[] = {
-    BuiltinOptions_NONE,
   [... the remaining 122 BuiltinOptions enumerators, in declaration order ...]
-    BuiltinOptions_SignOptions
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesBuiltinOptions() {
-  static const char * const names[125] = {
-    "NONE",
   [... one quoted name per option type, matching the enumerators above ...]
-    "SignOptions",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameBuiltinOptions(BuiltinOptions e) {
-  if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_SignOptions)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesBuiltinOptions()[index];
-}
-
-template<typename T> struct BuiltinOptionsTraits {
-  static const BuiltinOptions enum_value = BuiltinOptions_NONE;
-};
-
-template<> struct BuiltinOptionsTraits<tflite::Conv2DOptions> {
-  static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
-};
-
-template<> struct BuiltinOptionsTraits<tflite::DepthwiseConv2DOptions> {
-  static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions;
-};
-
   [... one analogous BuiltinOptionsTraits<T> specialization for each remaining option table type, in enum order, through BuiltinOptionsTraits<tflite::SignOptions> ...]
-
-template<typename T> struct BuiltinOptionsUnionTraits {
-  static const BuiltinOptions enum_value = BuiltinOptions_NONE;
-};
-
-template<> struct BuiltinOptionsUnionTraits<tflite::Conv2DOptionsT> {
-  static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions;
-};
-
   [... one analogous BuiltinOptionsUnionTraits<T> specialization for each remaining object-API type (tflite::*OptionsT), in enum order, through BuiltinOptionsUnionTraits<tflite::SignOptionsT> ...]
-
-struct BuiltinOptionsUnion {
-  BuiltinOptions type;
-  void *value;
-
-  BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {}
-  BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT :
-    type(BuiltinOptions_NONE), value(nullptr)
-    { std::swap(type, u.type); std::swap(value, u.value); }
-  BuiltinOptionsUnion(const BuiltinOptionsUnion &);
-  BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u)
-    { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; }
-  BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT
std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~BuiltinOptionsUnion() { Reset(); } - - void Reset(); - - template - void Set(T&& val) { - typedef typename std::remove_reference::type RT; - Reset(); - type = BuiltinOptionsUnionTraits::enum_value; - if (type != BuiltinOptions_NONE) { - value = new RT(std::forward(val)); - } - } - - static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::Conv2DOptionsT *AsConv2DOptions() { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Conv2DOptionsT *AsConv2DOptions() const { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() const { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Pool2DOptionsT *AsPool2DOptions() { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Pool2DOptionsT *AsPool2DOptions() const { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SVDFOptionsT *AsSVDFOptions() { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SVDFOptionsT *AsSVDFOptions() const { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RNNOptionsT *AsRNNOptions() { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RNNOptionsT *AsRNNOptions() const { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() const { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SoftmaxOptionsT *AsSoftmaxOptions() { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SoftmaxOptionsT *AsSoftmaxOptions() const { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatenationOptionsT *AsConcatenationOptions() { - return type == BuiltinOptions_ConcatenationOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::ConcatenationOptionsT *AsConcatenationOptions() const { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::AddOptionsT *AsAddOptions() { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AddOptionsT *AsAddOptions() const { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::L2NormOptionsT *AsL2NormOptions() { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::L2NormOptionsT *AsL2NormOptions() const { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSTMOptionsT *AsLSTMOptions() { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSTMOptionsT *AsLSTMOptions() const { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() { - return type == BuiltinOptions_ResizeBilinearOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() const { - return type == BuiltinOptions_ResizeBilinearOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CallOptionsT *AsCallOptions() { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CallOptionsT *AsCallOptions() const { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReshapeOptionsT *AsReshapeOptions() { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReshapeOptionsT *AsReshapeOptions() const { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SkipGramOptionsT *AsSkipGramOptions() { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SkipGramOptionsT *AsSkipGramOptions() const { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MulOptionsT *AsMulOptions() { - return type == BuiltinOptions_MulOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MulOptionsT *AsMulOptions() const { - return type == BuiltinOptions_MulOptions ? 
-        reinterpret_cast<const tflite::MulOptionsT *>(value) : nullptr;
-  }
-  tflite::PadOptionsT *AsPadOptions() {
-    return type == BuiltinOptions_PadOptions ?
-        reinterpret_cast<tflite::PadOptionsT *>(value) : nullptr;
-  }
-  const tflite::PadOptionsT *AsPadOptions() const {
-    return type == BuiltinOptions_PadOptions ?
-        reinterpret_cast<const tflite::PadOptionsT *>(value) : nullptr;
-  }
-  tflite::GatherOptionsT *AsGatherOptions() {
-    return type == BuiltinOptions_GatherOptions ?
-        reinterpret_cast<tflite::GatherOptionsT *>(value) : nullptr;
-  }
-  const tflite::GatherOptionsT *AsGatherOptions() const {
-    return type == BuiltinOptions_GatherOptions ?
-        reinterpret_cast<const tflite::GatherOptionsT *>(value) : nullptr;
-  }
-  tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() {
-    return type == BuiltinOptions_BatchToSpaceNDOptions ?
-        reinterpret_cast<tflite::BatchToSpaceNDOptionsT *>(value) : nullptr;
-  }
-  const tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const {
-    return type == BuiltinOptions_BatchToSpaceNDOptions ?
-        reinterpret_cast<const tflite::BatchToSpaceNDOptionsT *>(value) : nullptr;
-  }
-  tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() {
-    return type == BuiltinOptions_SpaceToBatchNDOptions ?
-        reinterpret_cast<tflite::SpaceToBatchNDOptionsT *>(value) : nullptr;
-  }
-  const tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const {
-    return type == BuiltinOptions_SpaceToBatchNDOptions ?
-        reinterpret_cast<const tflite::SpaceToBatchNDOptionsT *>(value) : nullptr;
-  }
-  tflite::TransposeOptionsT *AsTransposeOptions() {
-    return type == BuiltinOptions_TransposeOptions ?
-        reinterpret_cast<tflite::TransposeOptionsT *>(value) : nullptr;
-  }
-  const tflite::TransposeOptionsT *AsTransposeOptions() const {
-    return type == BuiltinOptions_TransposeOptions ?
-        reinterpret_cast<const tflite::TransposeOptionsT *>(value) : nullptr;
-  }
-  tflite::ReducerOptionsT *AsReducerOptions() {
-    return type == BuiltinOptions_ReducerOptions ?
-        reinterpret_cast<tflite::ReducerOptionsT *>(value) : nullptr;
-  }
-  const tflite::ReducerOptionsT *AsReducerOptions() const {
-    return type == BuiltinOptions_ReducerOptions ?
-        reinterpret_cast<const tflite::ReducerOptionsT *>(value) : nullptr;
-  }
-  tflite::SubOptionsT *AsSubOptions() {
-    return type == BuiltinOptions_SubOptions ?
-        reinterpret_cast<tflite::SubOptionsT *>(value) : nullptr;
-  }
-  const tflite::SubOptionsT *AsSubOptions() const {
-    return type == BuiltinOptions_SubOptions ?
-        reinterpret_cast<const tflite::SubOptionsT *>(value) : nullptr;
-  }
-  tflite::DivOptionsT *AsDivOptions() {
-    return type == BuiltinOptions_DivOptions ?
-        reinterpret_cast<tflite::DivOptionsT *>(value) : nullptr;
-  }
-  const tflite::DivOptionsT *AsDivOptions() const {
-    return type == BuiltinOptions_DivOptions ?
-        reinterpret_cast<const tflite::DivOptionsT *>(value) : nullptr;
-  }
-  tflite::SqueezeOptionsT *AsSqueezeOptions() {
-    return type == BuiltinOptions_SqueezeOptions ?
-        reinterpret_cast<tflite::SqueezeOptionsT *>(value) : nullptr;
-  }
-  const tflite::SqueezeOptionsT *AsSqueezeOptions() const {
-    return type == BuiltinOptions_SqueezeOptions ?
-        reinterpret_cast<const tflite::SqueezeOptionsT *>(value) : nullptr;
-  }
-  tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() {
-    return type == BuiltinOptions_SequenceRNNOptions ?
-        reinterpret_cast<tflite::SequenceRNNOptionsT *>(value) : nullptr;
-  }
-  const tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() const {
-    return type == BuiltinOptions_SequenceRNNOptions ?
-        reinterpret_cast<const tflite::SequenceRNNOptionsT *>(value) : nullptr;
-  }
-  tflite::StridedSliceOptionsT *AsStridedSliceOptions() {
-    return type == BuiltinOptions_StridedSliceOptions ?
-        reinterpret_cast<tflite::StridedSliceOptionsT *>(value) : nullptr;
-  }
-  const tflite::StridedSliceOptionsT *AsStridedSliceOptions() const {
-    return type == BuiltinOptions_StridedSliceOptions ?
-        reinterpret_cast<const tflite::StridedSliceOptionsT *>(value) : nullptr;
-  }
-  tflite::ExpOptionsT *AsExpOptions() {
-    return type == BuiltinOptions_ExpOptions ?
-        reinterpret_cast<tflite::ExpOptionsT *>(value) : nullptr;
-  }
-  const tflite::ExpOptionsT *AsExpOptions() const {
-    return type == BuiltinOptions_ExpOptions ?
-        reinterpret_cast<const tflite::ExpOptionsT *>(value) : nullptr;
-  }
-  tflite::TopKV2OptionsT *AsTopKV2Options() {
-    return type == BuiltinOptions_TopKV2Options ?
-        reinterpret_cast<tflite::TopKV2OptionsT *>(value) : nullptr;
-  }
-  const tflite::TopKV2OptionsT *AsTopKV2Options() const {
-    return type == BuiltinOptions_TopKV2Options ?
-        reinterpret_cast<const tflite::TopKV2OptionsT *>(value) : nullptr;
-  }
-  tflite::SplitOptionsT *AsSplitOptions() {
-    return type == BuiltinOptions_SplitOptions ?
-        reinterpret_cast<tflite::SplitOptionsT *>(value) : nullptr;
-  }
-  const tflite::SplitOptionsT *AsSplitOptions() const {
-    return type == BuiltinOptions_SplitOptions ?
-        reinterpret_cast<const tflite::SplitOptionsT *>(value) : nullptr;
-  }
-  tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() {
-    return type == BuiltinOptions_LogSoftmaxOptions ?
-        reinterpret_cast<tflite::LogSoftmaxOptionsT *>(value) : nullptr;
-  }
-  const tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const {
-    return type == BuiltinOptions_LogSoftmaxOptions ?
-        reinterpret_cast<const tflite::LogSoftmaxOptionsT *>(value) : nullptr;
-  }
-  tflite::CastOptionsT *AsCastOptions() {
-    return type == BuiltinOptions_CastOptions ?
-        reinterpret_cast<tflite::CastOptionsT *>(value) : nullptr;
-  }
-  const tflite::CastOptionsT *AsCastOptions() const {
-    return type == BuiltinOptions_CastOptions ?
-        reinterpret_cast<const tflite::CastOptionsT *>(value) : nullptr;
-  }
-  tflite::DequantizeOptionsT *AsDequantizeOptions() {
-    return type == BuiltinOptions_DequantizeOptions ?
-        reinterpret_cast<tflite::DequantizeOptionsT *>(value) : nullptr;
-  }
-  const tflite::DequantizeOptionsT *AsDequantizeOptions() const {
-    return type == BuiltinOptions_DequantizeOptions ?
-        reinterpret_cast<const tflite::DequantizeOptionsT *>(value) : nullptr;
-  }
-  tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() {
-    return type == BuiltinOptions_MaximumMinimumOptions ?
-        reinterpret_cast<tflite::MaximumMinimumOptionsT *>(value) : nullptr;
-  }
-  const tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const {
-    return type == BuiltinOptions_MaximumMinimumOptions ?
-        reinterpret_cast<const tflite::MaximumMinimumOptionsT *>(value) : nullptr;
-  }
-  tflite::ArgMaxOptionsT *AsArgMaxOptions() {
-    return type == BuiltinOptions_ArgMaxOptions ?
-        reinterpret_cast<tflite::ArgMaxOptionsT *>(value) : nullptr;
-  }
-  const tflite::ArgMaxOptionsT *AsArgMaxOptions() const {
-    return type == BuiltinOptions_ArgMaxOptions ?
-        reinterpret_cast<const tflite::ArgMaxOptionsT *>(value) : nullptr;
-  }
-  tflite::LessOptionsT *AsLessOptions() {
-    return type == BuiltinOptions_LessOptions ?
-        reinterpret_cast<tflite::LessOptionsT *>(value) : nullptr;
-  }
-  const tflite::LessOptionsT *AsLessOptions() const {
-    return type == BuiltinOptions_LessOptions ?
-        reinterpret_cast<const tflite::LessOptionsT *>(value) : nullptr;
-  }
-  tflite::NegOptionsT *AsNegOptions() {
-    return type == BuiltinOptions_NegOptions ?
-        reinterpret_cast<tflite::NegOptionsT *>(value) : nullptr;
-  }
-  const tflite::NegOptionsT *AsNegOptions() const {
-    return type == BuiltinOptions_NegOptions ?
-        reinterpret_cast<const tflite::NegOptionsT *>(value) : nullptr;
-  }
-  tflite::PadV2OptionsT *AsPadV2Options() {
-    return type == BuiltinOptions_PadV2Options ?
-        reinterpret_cast<tflite::PadV2OptionsT *>(value) : nullptr;
-  }
-  const tflite::PadV2OptionsT *AsPadV2Options() const {
-    return type == BuiltinOptions_PadV2Options ?
-        reinterpret_cast<const tflite::PadV2OptionsT *>(value) : nullptr;
-  }
-  tflite::GreaterOptionsT *AsGreaterOptions() {
-    return type == BuiltinOptions_GreaterOptions ?
-        reinterpret_cast<tflite::GreaterOptionsT *>(value) : nullptr;
-  }
-  const tflite::GreaterOptionsT *AsGreaterOptions() const {
-    return type == BuiltinOptions_GreaterOptions ?
-        reinterpret_cast<const tflite::GreaterOptionsT *>(value) : nullptr;
-  }
-  tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() {
-    return type == BuiltinOptions_GreaterEqualOptions ?
-        reinterpret_cast<tflite::GreaterEqualOptionsT *>(value) : nullptr;
-  }
-  const tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() const {
-    return type == BuiltinOptions_GreaterEqualOptions ?
-        reinterpret_cast<const tflite::GreaterEqualOptionsT *>(value) : nullptr;
-  }
-  tflite::LessEqualOptionsT *AsLessEqualOptions() {
-    return type == BuiltinOptions_LessEqualOptions ?
-        reinterpret_cast<tflite::LessEqualOptionsT *>(value) : nullptr;
-  }
-  const tflite::LessEqualOptionsT *AsLessEqualOptions() const {
-    return type == BuiltinOptions_LessEqualOptions ?
-        reinterpret_cast<const tflite::LessEqualOptionsT *>(value) : nullptr;
-  }
-  tflite::SelectOptionsT *AsSelectOptions() {
-    return type == BuiltinOptions_SelectOptions ?
-        reinterpret_cast<tflite::SelectOptionsT *>(value) : nullptr;
-  }
-  const tflite::SelectOptionsT *AsSelectOptions() const {
-    return type == BuiltinOptions_SelectOptions ?
-        reinterpret_cast<const tflite::SelectOptionsT *>(value) : nullptr;
-  }
-  tflite::SliceOptionsT *AsSliceOptions() {
-    return type == BuiltinOptions_SliceOptions ?
-        reinterpret_cast<tflite::SliceOptionsT *>(value) : nullptr;
-  }
-  const tflite::SliceOptionsT *AsSliceOptions() const {
-    return type == BuiltinOptions_SliceOptions ?
-        reinterpret_cast<const tflite::SliceOptionsT *>(value) : nullptr;
-  }
-  tflite::TransposeConvOptionsT *AsTransposeConvOptions() {
-    return type == BuiltinOptions_TransposeConvOptions ?
-        reinterpret_cast<tflite::TransposeConvOptionsT *>(value) : nullptr;
-  }
-  const tflite::TransposeConvOptionsT *AsTransposeConvOptions() const {
-    return type == BuiltinOptions_TransposeConvOptions ?
-        reinterpret_cast<const tflite::TransposeConvOptionsT *>(value) : nullptr;
-  }
-  tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() {
-    return type == BuiltinOptions_SparseToDenseOptions ?
-        reinterpret_cast<tflite::SparseToDenseOptionsT *>(value) : nullptr;
-  }
-  const tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() const {
-    return type == BuiltinOptions_SparseToDenseOptions ?
-        reinterpret_cast<const tflite::SparseToDenseOptionsT *>(value) : nullptr;
-  }
-  tflite::TileOptionsT *AsTileOptions() {
-    return type == BuiltinOptions_TileOptions ?
-        reinterpret_cast<tflite::TileOptionsT *>(value) : nullptr;
-  }
-  const tflite::TileOptionsT *AsTileOptions() const {
-    return type == BuiltinOptions_TileOptions ?
-        reinterpret_cast<const tflite::TileOptionsT *>(value) : nullptr;
-  }
-  tflite::ExpandDimsOptionsT *AsExpandDimsOptions() {
-    return type == BuiltinOptions_ExpandDimsOptions ?
-        reinterpret_cast<tflite::ExpandDimsOptionsT *>(value) : nullptr;
-  }
-  const tflite::ExpandDimsOptionsT *AsExpandDimsOptions() const {
-    return type == BuiltinOptions_ExpandDimsOptions ?
-        reinterpret_cast<const tflite::ExpandDimsOptionsT *>(value) : nullptr;
-  }
-  tflite::EqualOptionsT *AsEqualOptions() {
-    return type == BuiltinOptions_EqualOptions ?
-        reinterpret_cast<tflite::EqualOptionsT *>(value) : nullptr;
-  }
-  const tflite::EqualOptionsT *AsEqualOptions() const {
-    return type == BuiltinOptions_EqualOptions ?
-        reinterpret_cast<const tflite::EqualOptionsT *>(value) : nullptr;
-  }
-  tflite::NotEqualOptionsT *AsNotEqualOptions() {
-    return type == BuiltinOptions_NotEqualOptions ?
-        reinterpret_cast<tflite::NotEqualOptionsT *>(value) : nullptr;
-  }
-  const tflite::NotEqualOptionsT *AsNotEqualOptions() const {
-    return type == BuiltinOptions_NotEqualOptions ?
-        reinterpret_cast<const tflite::NotEqualOptionsT *>(value) : nullptr;
-  }
-  tflite::ShapeOptionsT *AsShapeOptions() {
-    return type == BuiltinOptions_ShapeOptions ?
-        reinterpret_cast<tflite::ShapeOptionsT *>(value) : nullptr;
-  }
-  const tflite::ShapeOptionsT *AsShapeOptions() const {
-    return type == BuiltinOptions_ShapeOptions ?
-        reinterpret_cast<const tflite::ShapeOptionsT *>(value) : nullptr;
-  }
-  tflite::PowOptionsT *AsPowOptions() {
-    return type == BuiltinOptions_PowOptions ?
-        reinterpret_cast<tflite::PowOptionsT *>(value) : nullptr;
-  }
-  const tflite::PowOptionsT *AsPowOptions() const {
-    return type == BuiltinOptions_PowOptions ?
-        reinterpret_cast<const tflite::PowOptionsT *>(value) : nullptr;
-  }
-  tflite::ArgMinOptionsT *AsArgMinOptions() {
-    return type == BuiltinOptions_ArgMinOptions ?
-        reinterpret_cast<tflite::ArgMinOptionsT *>(value) : nullptr;
-  }
-  const tflite::ArgMinOptionsT *AsArgMinOptions() const {
-    return type == BuiltinOptions_ArgMinOptions ?
-        reinterpret_cast<const tflite::ArgMinOptionsT *>(value) : nullptr;
-  }
-  tflite::FakeQuantOptionsT *AsFakeQuantOptions() {
-    return type == BuiltinOptions_FakeQuantOptions ?
-        reinterpret_cast<tflite::FakeQuantOptionsT *>(value) : nullptr;
-  }
-  const tflite::FakeQuantOptionsT *AsFakeQuantOptions() const {
-    return type == BuiltinOptions_FakeQuantOptions ?
-        reinterpret_cast<const tflite::FakeQuantOptionsT *>(value) : nullptr;
-  }
-  tflite::PackOptionsT *AsPackOptions() {
-    return type == BuiltinOptions_PackOptions ?
-        reinterpret_cast<tflite::PackOptionsT *>(value) : nullptr;
-  }
-  const tflite::PackOptionsT *AsPackOptions() const {
-    return type == BuiltinOptions_PackOptions ?
-        reinterpret_cast<const tflite::PackOptionsT *>(value) : nullptr;
-  }
-  tflite::LogicalOrOptionsT *AsLogicalOrOptions() {
-    return type == BuiltinOptions_LogicalOrOptions ?
-        reinterpret_cast<tflite::LogicalOrOptionsT *>(value) : nullptr;
-  }
-  const tflite::LogicalOrOptionsT *AsLogicalOrOptions() const {
-    return type == BuiltinOptions_LogicalOrOptions ?
-        reinterpret_cast<const tflite::LogicalOrOptionsT *>(value) : nullptr;
-  }
-  tflite::OneHotOptionsT *AsOneHotOptions() {
-    return type == BuiltinOptions_OneHotOptions ?
-        reinterpret_cast<tflite::OneHotOptionsT *>(value) : nullptr;
-  }
-  const tflite::OneHotOptionsT *AsOneHotOptions() const {
-    return type == BuiltinOptions_OneHotOptions ?
-        reinterpret_cast<const tflite::OneHotOptionsT *>(value) : nullptr;
-  }
-  tflite::LogicalAndOptionsT *AsLogicalAndOptions() {
-    return type == BuiltinOptions_LogicalAndOptions ?
-        reinterpret_cast<tflite::LogicalAndOptionsT *>(value) : nullptr;
-  }
-  const tflite::LogicalAndOptionsT *AsLogicalAndOptions() const {
-    return type == BuiltinOptions_LogicalAndOptions ?
-        reinterpret_cast<const tflite::LogicalAndOptionsT *>(value) : nullptr;
-  }
-  tflite::LogicalNotOptionsT *AsLogicalNotOptions() {
-    return type == BuiltinOptions_LogicalNotOptions ?
-        reinterpret_cast<tflite::LogicalNotOptionsT *>(value) : nullptr;
-  }
-  const tflite::LogicalNotOptionsT *AsLogicalNotOptions() const {
-    return type == BuiltinOptions_LogicalNotOptions ?
-        reinterpret_cast<const tflite::LogicalNotOptionsT *>(value) : nullptr;
-  }
-  tflite::UnpackOptionsT *AsUnpackOptions() {
-    return type == BuiltinOptions_UnpackOptions ?
-        reinterpret_cast<tflite::UnpackOptionsT *>(value) : nullptr;
-  }
-  const tflite::UnpackOptionsT *AsUnpackOptions() const {
-    return type == BuiltinOptions_UnpackOptions ?
-        reinterpret_cast<const tflite::UnpackOptionsT *>(value) : nullptr;
-  }
-  tflite::FloorDivOptionsT *AsFloorDivOptions() {
-    return type == BuiltinOptions_FloorDivOptions ?
-        reinterpret_cast<tflite::FloorDivOptionsT *>(value) : nullptr;
-  }
-  const tflite::FloorDivOptionsT *AsFloorDivOptions() const {
-    return type == BuiltinOptions_FloorDivOptions ?
-        reinterpret_cast<const tflite::FloorDivOptionsT *>(value) : nullptr;
-  }
-  tflite::SquareOptionsT *AsSquareOptions() {
-    return type == BuiltinOptions_SquareOptions ?
-        reinterpret_cast<tflite::SquareOptionsT *>(value) : nullptr;
-  }
-  const tflite::SquareOptionsT *AsSquareOptions() const {
-    return type == BuiltinOptions_SquareOptions ?
-        reinterpret_cast<const tflite::SquareOptionsT *>(value) : nullptr;
-  }
-  tflite::ZerosLikeOptionsT *AsZerosLikeOptions() {
-    return type == BuiltinOptions_ZerosLikeOptions ?
-        reinterpret_cast<tflite::ZerosLikeOptionsT *>(value) : nullptr;
-  }
-  const tflite::ZerosLikeOptionsT *AsZerosLikeOptions() const {
-    return type == BuiltinOptions_ZerosLikeOptions ?
-        reinterpret_cast<const tflite::ZerosLikeOptionsT *>(value) : nullptr;
-  }
-  tflite::FillOptionsT *AsFillOptions() {
-    return type == BuiltinOptions_FillOptions ?
-        reinterpret_cast<tflite::FillOptionsT *>(value) : nullptr;
-  }
-  const tflite::FillOptionsT *AsFillOptions() const {
-    return type == BuiltinOptions_FillOptions ?
-        reinterpret_cast<const tflite::FillOptionsT *>(value) : nullptr;
-  }
-  tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() {
-    return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ?
-        reinterpret_cast<tflite::BidirectionalSequenceLSTMOptionsT *>(value) : nullptr;
-  }
-  const tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const {
-    return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ?
-        reinterpret_cast<const tflite::BidirectionalSequenceLSTMOptionsT *>(value) : nullptr;
-  }
-  tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() {
-    return type == BuiltinOptions_BidirectionalSequenceRNNOptions ?
-        reinterpret_cast<tflite::BidirectionalSequenceRNNOptionsT *>(value) : nullptr;
-  }
-  const tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const {
-    return type == BuiltinOptions_BidirectionalSequenceRNNOptions ?
-        reinterpret_cast<const tflite::BidirectionalSequenceRNNOptionsT *>(value) : nullptr;
-  }
-  tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() {
-    return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ?
-        reinterpret_cast<tflite::UnidirectionalSequenceLSTMOptionsT *>(value) : nullptr;
-  }
-  const tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const {
-    return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ?
-        reinterpret_cast<const tflite::UnidirectionalSequenceLSTMOptionsT *>(value) : nullptr;
-  }
-  tflite::FloorModOptionsT *AsFloorModOptions() {
-    return type == BuiltinOptions_FloorModOptions ?
-        reinterpret_cast<tflite::FloorModOptionsT *>(value) : nullptr;
-  }
-  const tflite::FloorModOptionsT *AsFloorModOptions() const {
-    return type == BuiltinOptions_FloorModOptions ?
-        reinterpret_cast<const tflite::FloorModOptionsT *>(value) : nullptr;
-  }
-  tflite::RangeOptionsT *AsRangeOptions() {
-    return type == BuiltinOptions_RangeOptions ?
-        reinterpret_cast<tflite::RangeOptionsT *>(value) : nullptr;
-  }
-  const tflite::RangeOptionsT *AsRangeOptions() const {
-    return type == BuiltinOptions_RangeOptions ?
-        reinterpret_cast<const tflite::RangeOptionsT *>(value) : nullptr;
-  }
-  tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() {
-    return type == BuiltinOptions_ResizeNearestNeighborOptions ?
-        reinterpret_cast<tflite::ResizeNearestNeighborOptionsT *>(value) : nullptr;
-  }
-  const tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const {
-    return type == BuiltinOptions_ResizeNearestNeighborOptions ?
-        reinterpret_cast<const tflite::ResizeNearestNeighborOptionsT *>(value) : nullptr;
-  }
-  tflite::LeakyReluOptionsT *AsLeakyReluOptions() {
-    return type == BuiltinOptions_LeakyReluOptions ?
-        reinterpret_cast<tflite::LeakyReluOptionsT *>(value) : nullptr;
-  }
-  const tflite::LeakyReluOptionsT *AsLeakyReluOptions() const {
-    return type == BuiltinOptions_LeakyReluOptions ?
-        reinterpret_cast<const tflite::LeakyReluOptionsT *>(value) : nullptr;
-  }
-  tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() {
-    return type == BuiltinOptions_SquaredDifferenceOptions ?
-        reinterpret_cast<tflite::SquaredDifferenceOptionsT *>(value) : nullptr;
-  }
-  const tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const {
-    return type == BuiltinOptions_SquaredDifferenceOptions ?
-        reinterpret_cast<const tflite::SquaredDifferenceOptionsT *>(value) : nullptr;
-  }
-  tflite::MirrorPadOptionsT *AsMirrorPadOptions() {
-    return type == BuiltinOptions_MirrorPadOptions ?
-        reinterpret_cast<tflite::MirrorPadOptionsT *>(value) : nullptr;
-  }
-  const tflite::MirrorPadOptionsT *AsMirrorPadOptions() const {
-    return type == BuiltinOptions_MirrorPadOptions ?
-        reinterpret_cast<const tflite::MirrorPadOptionsT *>(value) : nullptr;
-  }
-  tflite::AbsOptionsT *AsAbsOptions() {
-    return type == BuiltinOptions_AbsOptions ?
-        reinterpret_cast<tflite::AbsOptionsT *>(value) : nullptr;
-  }
-  const tflite::AbsOptionsT *AsAbsOptions() const {
-    return type == BuiltinOptions_AbsOptions ?
-        reinterpret_cast<const tflite::AbsOptionsT *>(value) : nullptr;
-  }
-  tflite::SplitVOptionsT *AsSplitVOptions() {
-    return type == BuiltinOptions_SplitVOptions ?
-        reinterpret_cast<tflite::SplitVOptionsT *>(value) : nullptr;
-  }
-  const tflite::SplitVOptionsT *AsSplitVOptions() const {
-    return type == BuiltinOptions_SplitVOptions ?
-        reinterpret_cast<const tflite::SplitVOptionsT *>(value) : nullptr;
-  }
-  tflite::UniqueOptionsT *AsUniqueOptions() {
-    return type == BuiltinOptions_UniqueOptions ?
-        reinterpret_cast<tflite::UniqueOptionsT *>(value) : nullptr;
-  }
-  const tflite::UniqueOptionsT *AsUniqueOptions() const {
-    return type == BuiltinOptions_UniqueOptions ?
-        reinterpret_cast<const tflite::UniqueOptionsT *>(value) : nullptr;
-  }
-  tflite::ReverseV2OptionsT *AsReverseV2Options() {
-    return type == BuiltinOptions_ReverseV2Options ?
-        reinterpret_cast<tflite::ReverseV2OptionsT *>(value) : nullptr;
-  }
-  const tflite::ReverseV2OptionsT *AsReverseV2Options() const {
-    return type == BuiltinOptions_ReverseV2Options ?
-        reinterpret_cast<const tflite::ReverseV2OptionsT *>(value) : nullptr;
-  }
-  tflite::AddNOptionsT *AsAddNOptions() {
-    return type == BuiltinOptions_AddNOptions ?
-        reinterpret_cast<tflite::AddNOptionsT *>(value) : nullptr;
-  }
-  const tflite::AddNOptionsT *AsAddNOptions() const {
-    return type == BuiltinOptions_AddNOptions ?
-        reinterpret_cast<const tflite::AddNOptionsT *>(value) : nullptr;
-  }
-  tflite::GatherNdOptionsT *AsGatherNdOptions() {
-    return type == BuiltinOptions_GatherNdOptions ?
-        reinterpret_cast<tflite::GatherNdOptionsT *>(value) : nullptr;
-  }
-  const tflite::GatherNdOptionsT *AsGatherNdOptions() const {
-    return type == BuiltinOptions_GatherNdOptions ?
-        reinterpret_cast<const tflite::GatherNdOptionsT *>(value) : nullptr;
-  }
-  tflite::CosOptionsT *AsCosOptions() {
-    return type == BuiltinOptions_CosOptions ?
-        reinterpret_cast<tflite::CosOptionsT *>(value) : nullptr;
-  }
-  const tflite::CosOptionsT *AsCosOptions() const {
-    return type == BuiltinOptions_CosOptions ?
-        reinterpret_cast<const tflite::CosOptionsT *>(value) : nullptr;
-  }
-  tflite::WhereOptionsT *AsWhereOptions() {
-    return type == BuiltinOptions_WhereOptions ?
-        reinterpret_cast<tflite::WhereOptionsT *>(value) : nullptr;
-  }
-  const tflite::WhereOptionsT *AsWhereOptions() const {
-    return type == BuiltinOptions_WhereOptions ?
-        reinterpret_cast<const tflite::WhereOptionsT *>(value) : nullptr;
-  }
-  tflite::RankOptionsT *AsRankOptions() {
-    return type == BuiltinOptions_RankOptions ?
-        reinterpret_cast<tflite::RankOptionsT *>(value) : nullptr;
-  }
-  const tflite::RankOptionsT *AsRankOptions() const {
-    return type == BuiltinOptions_RankOptions ?
-        reinterpret_cast<const tflite::RankOptionsT *>(value) : nullptr;
-  }
-  tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() {
-    return type == BuiltinOptions_ReverseSequenceOptions ?
-        reinterpret_cast<tflite::ReverseSequenceOptionsT *>(value) : nullptr;
-  }
-  const tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() const {
-    return type == BuiltinOptions_ReverseSequenceOptions ?
-        reinterpret_cast<const tflite::ReverseSequenceOptionsT *>(value) : nullptr;
-  }
-  tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() {
-    return type == BuiltinOptions_MatrixDiagOptions ?
-        reinterpret_cast<tflite::MatrixDiagOptionsT *>(value) : nullptr;
-  }
-  const tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() const {
-    return type == BuiltinOptions_MatrixDiagOptions ?
-        reinterpret_cast<const tflite::MatrixDiagOptionsT *>(value) : nullptr;
-  }
-  tflite::QuantizeOptionsT *AsQuantizeOptions() {
-    return type == BuiltinOptions_QuantizeOptions ?
-        reinterpret_cast<tflite::QuantizeOptionsT *>(value) : nullptr;
-  }
-  const tflite::QuantizeOptionsT *AsQuantizeOptions() const {
-    return type == BuiltinOptions_QuantizeOptions ?
-        reinterpret_cast<const tflite::QuantizeOptionsT *>(value) : nullptr;
-  }
-  tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() {
-    return type == BuiltinOptions_MatrixSetDiagOptions ?
-        reinterpret_cast<tflite::MatrixSetDiagOptionsT *>(value) : nullptr;
-  }
-  const tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const {
-    return type == BuiltinOptions_MatrixSetDiagOptions ?
-        reinterpret_cast<const tflite::MatrixSetDiagOptionsT *>(value) : nullptr;
-  }
-  tflite::HardSwishOptionsT *AsHardSwishOptions() {
-    return type == BuiltinOptions_HardSwishOptions ?
-        reinterpret_cast<tflite::HardSwishOptionsT *>(value) : nullptr;
-  }
-  const tflite::HardSwishOptionsT *AsHardSwishOptions() const {
-    return type == BuiltinOptions_HardSwishOptions ?
-        reinterpret_cast<const tflite::HardSwishOptionsT *>(value) : nullptr;
-  }
-  tflite::IfOptionsT *AsIfOptions() {
-    return type == BuiltinOptions_IfOptions ?
-        reinterpret_cast<tflite::IfOptionsT *>(value) : nullptr;
-  }
-  const tflite::IfOptionsT *AsIfOptions() const {
-    return type == BuiltinOptions_IfOptions ?
-        reinterpret_cast<const tflite::IfOptionsT *>(value) : nullptr;
-  }
-  tflite::WhileOptionsT *AsWhileOptions() {
-    return type == BuiltinOptions_WhileOptions ?
-        reinterpret_cast<tflite::WhileOptionsT *>(value) : nullptr;
-  }
-  const tflite::WhileOptionsT *AsWhileOptions() const {
-    return type == BuiltinOptions_WhileOptions ?
-        reinterpret_cast<const tflite::WhileOptionsT *>(value) : nullptr;
-  }
-  tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() {
-    return type == BuiltinOptions_DepthToSpaceOptions ?
-        reinterpret_cast<tflite::DepthToSpaceOptionsT *>(value) : nullptr;
-  }
-  const tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const {
-    return type == BuiltinOptions_DepthToSpaceOptions ?
-        reinterpret_cast<const tflite::DepthToSpaceOptionsT *>(value) : nullptr;
-  }
-  tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() {
-    return type == BuiltinOptions_NonMaxSuppressionV4Options ?
-        reinterpret_cast<tflite::NonMaxSuppressionV4OptionsT *>(value) : nullptr;
-  }
-  const tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const {
-    return type == BuiltinOptions_NonMaxSuppressionV4Options ?
-        reinterpret_cast<const tflite::NonMaxSuppressionV4OptionsT *>(value) : nullptr;
-  }
-  tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() {
-    return type == BuiltinOptions_NonMaxSuppressionV5Options ?
-        reinterpret_cast<tflite::NonMaxSuppressionV5OptionsT *>(value) : nullptr;
-  }
-  const tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const {
-    return type == BuiltinOptions_NonMaxSuppressionV5Options ?
-        reinterpret_cast<const tflite::NonMaxSuppressionV5OptionsT *>(value) : nullptr;
-  }
-  tflite::ScatterNdOptionsT *AsScatterNdOptions() {
-    return type == BuiltinOptions_ScatterNdOptions ?
-        reinterpret_cast<tflite::ScatterNdOptionsT *>(value) : nullptr;
-  }
-  const tflite::ScatterNdOptionsT *AsScatterNdOptions() const {
-    return type == BuiltinOptions_ScatterNdOptions ?
-        reinterpret_cast<const tflite::ScatterNdOptionsT *>(value) : nullptr;
-  }
-  tflite::SelectV2OptionsT *AsSelectV2Options() {
-    return type == BuiltinOptions_SelectV2Options ?
-        reinterpret_cast<tflite::SelectV2OptionsT *>(value) : nullptr;
-  }
-  const tflite::SelectV2OptionsT *AsSelectV2Options() const {
-    return type == BuiltinOptions_SelectV2Options ?
-        reinterpret_cast<const tflite::SelectV2OptionsT *>(value) : nullptr;
-  }
-  tflite::DensifyOptionsT *AsDensifyOptions() {
-    return type == BuiltinOptions_DensifyOptions ?
-        reinterpret_cast<tflite::DensifyOptionsT *>(value) : nullptr;
-  }
-  const tflite::DensifyOptionsT *AsDensifyOptions() const {
-    return type == BuiltinOptions_DensifyOptions ?
-        reinterpret_cast<const tflite::DensifyOptionsT *>(value) : nullptr;
-  }
-  tflite::SegmentSumOptionsT *AsSegmentSumOptions() {
-    return type == BuiltinOptions_SegmentSumOptions ?
-        reinterpret_cast<tflite::SegmentSumOptionsT *>(value) : nullptr;
-  }
-  const tflite::SegmentSumOptionsT *AsSegmentSumOptions() const {
-    return type == BuiltinOptions_SegmentSumOptions ?
-        reinterpret_cast<const tflite::SegmentSumOptionsT *>(value) : nullptr;
-  }
-  tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() {
-    return type == BuiltinOptions_BatchMatMulOptions ?
-        reinterpret_cast<tflite::BatchMatMulOptionsT *>(value) : nullptr;
-  }
-  const tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() const {
-    return type == BuiltinOptions_BatchMatMulOptions ?
-        reinterpret_cast<const tflite::BatchMatMulOptionsT *>(value) : nullptr;
-  }
-  tflite::CumsumOptionsT *AsCumsumOptions() {
-    return type == BuiltinOptions_CumsumOptions ?
-        reinterpret_cast<tflite::CumsumOptionsT *>(value) : nullptr;
-  }
-  const tflite::CumsumOptionsT *AsCumsumOptions() const {
-    return type == BuiltinOptions_CumsumOptions ?
-        reinterpret_cast<const tflite::CumsumOptionsT *>(value) : nullptr;
-  }
-  tflite::CallOnceOptionsT *AsCallOnceOptions() {
-    return type == BuiltinOptions_CallOnceOptions ?
-        reinterpret_cast<tflite::CallOnceOptionsT *>(value) : nullptr;
-  }
-  const tflite::CallOnceOptionsT *AsCallOnceOptions() const {
-    return type == BuiltinOptions_CallOnceOptions ?
-        reinterpret_cast<const tflite::CallOnceOptionsT *>(value) : nullptr;
-  }
-  tflite::BroadcastToOptionsT *AsBroadcastToOptions() {
-    return type == BuiltinOptions_BroadcastToOptions ?
-        reinterpret_cast<tflite::BroadcastToOptionsT *>(value) : nullptr;
-  }
-  const tflite::BroadcastToOptionsT *AsBroadcastToOptions() const {
-    return type == BuiltinOptions_BroadcastToOptions ?
-        reinterpret_cast<const tflite::BroadcastToOptionsT *>(value) : nullptr;
-  }
-  tflite::Rfft2dOptionsT *AsRfft2dOptions() {
-    return type == BuiltinOptions_Rfft2dOptions ?
-        reinterpret_cast<tflite::Rfft2dOptionsT *>(value) : nullptr;
-  }
-  const tflite::Rfft2dOptionsT *AsRfft2dOptions() const {
-    return type == BuiltinOptions_Rfft2dOptions ?
-        reinterpret_cast<const tflite::Rfft2dOptionsT *>(value) : nullptr;
-  }
-  tflite::Conv3DOptionsT *AsConv3DOptions() {
-    return type == BuiltinOptions_Conv3DOptions ?
-        reinterpret_cast<tflite::Conv3DOptionsT *>(value) : nullptr;
-  }
-  const tflite::Conv3DOptionsT *AsConv3DOptions() const {
-    return type == BuiltinOptions_Conv3DOptions ?
-        reinterpret_cast<const tflite::Conv3DOptionsT *>(value) : nullptr;
-  }
-  tflite::HashtableOptionsT *AsHashtableOptions() {
-    return type == BuiltinOptions_HashtableOptions ?
-        reinterpret_cast<tflite::HashtableOptionsT *>(value) : nullptr;
-  }
-  const tflite::HashtableOptionsT *AsHashtableOptions() const {
-    return type == BuiltinOptions_HashtableOptions ?
-        reinterpret_cast<const tflite::HashtableOptionsT *>(value) : nullptr;
-  }
-  tflite::HashtableFindOptionsT *AsHashtableFindOptions() {
-    return type == BuiltinOptions_HashtableFindOptions ?
-        reinterpret_cast<tflite::HashtableFindOptionsT *>(value) : nullptr;
-  }
-  const tflite::HashtableFindOptionsT *AsHashtableFindOptions() const {
-    return type == BuiltinOptions_HashtableFindOptions ?
-        reinterpret_cast<const tflite::HashtableFindOptionsT *>(value) : nullptr;
-  }
-  tflite::HashtableImportOptionsT *AsHashtableImportOptions() {
-    return type == BuiltinOptions_HashtableImportOptions ?
-        reinterpret_cast<tflite::HashtableImportOptionsT *>(value) : nullptr;
-  }
-  const tflite::HashtableImportOptionsT *AsHashtableImportOptions() const {
-    return type == BuiltinOptions_HashtableImportOptions ?
-        reinterpret_cast<const tflite::HashtableImportOptionsT *>(value) : nullptr;
-  }
-  tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() {
-    return type == BuiltinOptions_HashtableSizeOptions ?
-        reinterpret_cast<tflite::HashtableSizeOptionsT *>(value) : nullptr;
-  }
-  const tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() const {
-    return type == BuiltinOptions_HashtableSizeOptions ?
-        reinterpret_cast<const tflite::HashtableSizeOptionsT *>(value) : nullptr;
-  }
-  tflite::VarHandleOptionsT *AsVarHandleOptions() {
-    return type == BuiltinOptions_VarHandleOptions ?
-        reinterpret_cast<tflite::VarHandleOptionsT *>(value) : nullptr;
-  }
-  const tflite::VarHandleOptionsT *AsVarHandleOptions() const {
-    return type == BuiltinOptions_VarHandleOptions ?
-        reinterpret_cast<const tflite::VarHandleOptionsT *>(value) : nullptr;
-  }
-  tflite::ReadVariableOptionsT *AsReadVariableOptions() {
-    return type == BuiltinOptions_ReadVariableOptions ?
-        reinterpret_cast<tflite::ReadVariableOptionsT *>(value) : nullptr;
-  }
-  const tflite::ReadVariableOptionsT *AsReadVariableOptions() const {
-    return type == BuiltinOptions_ReadVariableOptions ?
-        reinterpret_cast<const tflite::ReadVariableOptionsT *>(value) : nullptr;
-  }
-  tflite::AssignVariableOptionsT *AsAssignVariableOptions() {
-    return type == BuiltinOptions_AssignVariableOptions ?
-        reinterpret_cast<tflite::AssignVariableOptionsT *>(value) : nullptr;
-  }
-  const tflite::AssignVariableOptionsT *AsAssignVariableOptions() const {
-    return type == BuiltinOptions_AssignVariableOptions ?
-        reinterpret_cast<const tflite::AssignVariableOptionsT *>(value) : nullptr;
-  }
-  tflite::RandomOptionsT *AsRandomOptions() {
-    return type == BuiltinOptions_RandomOptions ?
-        reinterpret_cast<tflite::RandomOptionsT *>(value) : nullptr;
-  }
-  const tflite::RandomOptionsT *AsRandomOptions() const {
-    return type == BuiltinOptions_RandomOptions ?
-        reinterpret_cast<const tflite::RandomOptionsT *>(value) : nullptr;
-  }
-  tflite::BucketizeOptionsT *AsBucketizeOptions() {
-    return type == BuiltinOptions_BucketizeOptions ?
-        reinterpret_cast<tflite::BucketizeOptionsT *>(value) : nullptr;
-  }
-  const tflite::BucketizeOptionsT *AsBucketizeOptions() const {
-    return type == BuiltinOptions_BucketizeOptions ?
-        reinterpret_cast<const tflite::BucketizeOptionsT *>(value) : nullptr;
-  }
-  tflite::GeluOptionsT *AsGeluOptions() {
-    return type == BuiltinOptions_GeluOptions ?
-        reinterpret_cast<tflite::GeluOptionsT *>(value) : nullptr;
-  }
-  const tflite::GeluOptionsT *AsGeluOptions() const {
-    return type == BuiltinOptions_GeluOptions ?
-        reinterpret_cast<const tflite::GeluOptionsT *>(value) : nullptr;
-  }
-  tflite::DynamicUpdateSliceOptionsT *AsDynamicUpdateSliceOptions() {
-    return type == BuiltinOptions_DynamicUpdateSliceOptions ?
-        reinterpret_cast<tflite::DynamicUpdateSliceOptionsT *>(value) : nullptr;
-  }
-  const tflite::DynamicUpdateSliceOptionsT *AsDynamicUpdateSliceOptions() const {
-    return type == BuiltinOptions_DynamicUpdateSliceOptions ?
-        reinterpret_cast<const tflite::DynamicUpdateSliceOptionsT *>(value) : nullptr;
-  }
-  tflite::UnsortedSegmentProdOptionsT *AsUnsortedSegmentProdOptions() {
-    return type == BuiltinOptions_UnsortedSegmentProdOptions ?
-        reinterpret_cast<tflite::UnsortedSegmentProdOptionsT *>(value) : nullptr;
-  }
-  const tflite::UnsortedSegmentProdOptionsT *AsUnsortedSegmentProdOptions() const {
-    return type == BuiltinOptions_UnsortedSegmentProdOptions ?
-        reinterpret_cast<const tflite::UnsortedSegmentProdOptionsT *>(value) : nullptr;
-  }
-  tflite::UnsortedSegmentMaxOptionsT *AsUnsortedSegmentMaxOptions() {
-    return type == BuiltinOptions_UnsortedSegmentMaxOptions ?
-        reinterpret_cast<tflite::UnsortedSegmentMaxOptionsT *>(value) : nullptr;
-  }
-  const tflite::UnsortedSegmentMaxOptionsT *AsUnsortedSegmentMaxOptions() const {
-    return type == BuiltinOptions_UnsortedSegmentMaxOptions ?
-        reinterpret_cast<const tflite::UnsortedSegmentMaxOptionsT *>(value) : nullptr;
-  }
-  tflite::UnsortedSegmentMinOptionsT *AsUnsortedSegmentMinOptions() {
-    return type == BuiltinOptions_UnsortedSegmentMinOptions ?
-        reinterpret_cast<tflite::UnsortedSegmentMinOptionsT *>(value) : nullptr;
-  }
-  const tflite::UnsortedSegmentMinOptionsT *AsUnsortedSegmentMinOptions() const {
-    return type == BuiltinOptions_UnsortedSegmentMinOptions ?
-        reinterpret_cast<const tflite::UnsortedSegmentMinOptionsT *>(value) : nullptr;
-  }
-  tflite::UnsortedSegmentSumOptionsT *AsUnsortedSegmentSumOptions() {
-    return type == BuiltinOptions_UnsortedSegmentSumOptions ?
-        reinterpret_cast<tflite::UnsortedSegmentSumOptionsT *>(value) : nullptr;
-  }
-  const tflite::UnsortedSegmentSumOptionsT *AsUnsortedSegmentSumOptions() const {
-    return type == BuiltinOptions_UnsortedSegmentSumOptions ?
-        reinterpret_cast<const tflite::UnsortedSegmentSumOptionsT *>(value) : nullptr;
-  }
-  tflite::ATan2OptionsT *AsATan2Options() {
-    return type == BuiltinOptions_ATan2Options ?
-        reinterpret_cast<tflite::ATan2OptionsT *>(value) : nullptr;
-  }
-  const tflite::ATan2OptionsT *AsATan2Options() const {
-    return type == BuiltinOptions_ATan2Options ?
-        reinterpret_cast<const tflite::ATan2OptionsT *>(value) : nullptr;
-  }
-  tflite::SignOptionsT *AsSignOptions() {
-    return type == BuiltinOptions_SignOptions ?
-        reinterpret_cast<tflite::SignOptionsT *>(value) : nullptr;
-  }
-  const tflite::SignOptionsT *AsSignOptions() const {
-    return type == BuiltinOptions_SignOptions ?
-        reinterpret_cast<const tflite::SignOptionsT *>(value) : nullptr;
-  }
-};
-
-bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
-bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
-
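Aside, illustrative only and not part of the deleted file: the As*() accessors above are the checked way into the object-API union. A minimal sketch, assuming an unpacked tflite::OperatorT named op from this same schema:

    // Non-null only when op->builtin_options.type == BuiltinOptions_Conv2DOptions.
    if (const tflite::Conv2DOptionsT *conv = op->builtin_options.AsConv2DOptions()) {
      int32_t stride_w = conv->stride_w;  // fields as generated from the .fbs schema
      int32_t stride_h = conv->stride_h;
      (void)stride_w; (void)stride_h;
    }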
-enum Padding : int8_t {
-  Padding_SAME = 0,
-  Padding_VALID = 1,
-  Padding_MIN = Padding_SAME,
-  Padding_MAX = Padding_VALID
-};
-
-inline const Padding (&EnumValuesPadding())[2] {
-  static const Padding values[] = {
-    Padding_SAME,
-    Padding_VALID
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesPadding() {
-  static const char * const names[3] = {
-    "SAME",
-    "VALID",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNamePadding(Padding e) {
-  if (flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesPadding()[index];
-}
-
-enum ActivationFunctionType : int8_t {
-  ActivationFunctionType_NONE = 0,
-  ActivationFunctionType_RELU = 1,
-  ActivationFunctionType_RELU_N1_TO_1 = 2,
-  ActivationFunctionType_RELU6 = 3,
-  ActivationFunctionType_TANH = 4,
-  ActivationFunctionType_SIGN_BIT = 5,
-  ActivationFunctionType_MIN = ActivationFunctionType_NONE,
-  ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT
-};
-
-inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] {
-  static const ActivationFunctionType values[] = {
-    ActivationFunctionType_NONE,
-    ActivationFunctionType_RELU,
-    ActivationFunctionType_RELU_N1_TO_1,
-    ActivationFunctionType_RELU6,
-    ActivationFunctionType_TANH,
-    ActivationFunctionType_SIGN_BIT
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesActivationFunctionType() {
-  static const char * const names[7] = {
-    "NONE",
-    "RELU",
-    "RELU_N1_TO_1",
-    "RELU6",
-    "TANH",
-    "SIGN_BIT",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) {
-  if (flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesActivationFunctionType()[index];
-}
-
-enum LSHProjectionType : int8_t {
-  LSHProjectionType_UNKNOWN = 0,
-  LSHProjectionType_SPARSE = 1,
-  LSHProjectionType_DENSE = 2,
-  LSHProjectionType_MIN = LSHProjectionType_UNKNOWN,
-  LSHProjectionType_MAX = LSHProjectionType_DENSE
-};
-
-inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] {
-  static const LSHProjectionType values[] = {
-    LSHProjectionType_UNKNOWN,
-    LSHProjectionType_SPARSE,
-    LSHProjectionType_DENSE
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesLSHProjectionType() {
-  static const char * const names[4] = {
-    "UNKNOWN",
-    "SPARSE",
-    "DENSE",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameLSHProjectionType(LSHProjectionType e) {
-  if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesLSHProjectionType()[index];
-}
-
-enum FullyConnectedOptionsWeightsFormat : int8_t {
-  FullyConnectedOptionsWeightsFormat_DEFAULT = 0,
-  FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1,
-  FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT,
-  FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
-};
-
-inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] {
-  static const FullyConnectedOptionsWeightsFormat values[] = {
-    FullyConnectedOptionsWeightsFormat_DEFAULT,
-    FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesFullyConnectedOptionsWeightsFormat() {
-  static const char * const names[3] = {
-    "DEFAULT",
-    "SHUFFLED4x16INT8",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) {
-  if (flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesFullyConnectedOptionsWeightsFormat()[index];
-}
-
-enum LSTMKernelType : int8_t {
-  LSTMKernelType_FULL = 0,
-  LSTMKernelType_BASIC = 1,
-  LSTMKernelType_MIN = LSTMKernelType_FULL,
-  LSTMKernelType_MAX = LSTMKernelType_BASIC
-};
-
-inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] {
-  static const LSTMKernelType values[] = {
-    LSTMKernelType_FULL,
-    LSTMKernelType_BASIC
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesLSTMKernelType() {
-  static const char * const names[3] = {
-    "FULL",
-    "BASIC",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameLSTMKernelType(LSTMKernelType e) {
-  if (flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesLSTMKernelType()[index];
-}
-
-enum CombinerType : int8_t {
-  CombinerType_SUM = 0,
-  CombinerType_MEAN = 1,
-  CombinerType_SQRTN = 2,
-  CombinerType_MIN = CombinerType_SUM,
-  CombinerType_MAX = CombinerType_SQRTN
-};
-
-inline const CombinerType (&EnumValuesCombinerType())[3] {
-  static const CombinerType values[] = {
-    CombinerType_SUM,
-    CombinerType_MEAN,
-    CombinerType_SQRTN
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesCombinerType() {
-  static const char * const names[4] = {
-    "SUM",
-    "MEAN",
-    "SQRTN",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameCombinerType(CombinerType e) {
-  if (flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesCombinerType()[index];
-}
-
-enum MirrorPadMode : int8_t {
-  MirrorPadMode_REFLECT = 0,
-  MirrorPadMode_SYMMETRIC = 1,
-  MirrorPadMode_MIN = MirrorPadMode_REFLECT,
-  MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC
-};
-
-inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] {
-  static const MirrorPadMode values[] = {
-    MirrorPadMode_REFLECT,
-    MirrorPadMode_SYMMETRIC
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesMirrorPadMode() {
-  static const char * const names[3] = {
-    "REFLECT",
-    "SYMMETRIC",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameMirrorPadMode(MirrorPadMode e) {
-  if (flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesMirrorPadMode()[index];
-}
-
-enum CustomOptionsFormat : int8_t {
-  CustomOptionsFormat_FLEXBUFFERS = 0,
-  CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS,
-  CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS
-};
-
-inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] {
-  static const CustomOptionsFormat values[] = {
-    CustomOptionsFormat_FLEXBUFFERS
-  };
-  return values;
-}
-
-inline const char * const *EnumNamesCustomOptionsFormat() {
-  static const char * const names[2] = {
-    "FLEXBUFFERS",
-    nullptr
-  };
-  return names;
-}
-
-inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) {
-  if (flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return "";
-  const size_t index = static_cast<size_t>(e);
-  return EnumNamesCustomOptionsFormat()[index];
-}
-
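Aside, illustrative only: the generated EnumName*() helpers are range-checked and return an empty string for out-of-range input, which makes them safe for logging values read from raw model bytes:

    const char *pad = tflite::EnumNamePadding(tflite::Padding_SAME);   // "SAME"
    const char *act = tflite::EnumNameActivationFunctionType(
        tflite::ActivationFunctionType_RELU6);                         // "RELU6"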
-struct CustomQuantizationT : public flatbuffers::NativeTable {
-  typedef CustomQuantization TableType;
-  std::vector<uint8_t> custom{};
-};
-
-struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef CustomQuantizationT NativeTableType;
-  typedef CustomQuantizationBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_CUSTOM = 4
-  };
-  const flatbuffers::Vector<uint8_t> *custom() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_CUSTOM) &&
-           verifier.VerifyVector(custom()) &&
-           verifier.EndTable();
-  }
-  CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CustomQuantization> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct CustomQuantizationBuilder {
-  typedef CustomQuantization Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom) {
-    fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
-  }
-  explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<CustomQuantization> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CustomQuantization>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0) {
-  CustomQuantizationBuilder builder_(_fbb);
-  builder_.add_custom(custom);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantizationDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<uint8_t> *custom = nullptr) {
-  if (custom) { _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16); }
-  auto custom__ = custom ? _fbb.CreateVector<uint8_t>(*custom) : 0;
-  return tflite::CreateCustomQuantization(
-      _fbb,
-      custom__);
-}
-
-flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct QuantizationParametersT : public flatbuffers::NativeTable {
-  typedef QuantizationParameters TableType;
-  std::vector<float> min{};
-  std::vector<float> max{};
-  std::vector<float> scale{};
-  std::vector<int64_t> zero_point{};
-  tflite::QuantizationDetailsUnion details{};
-  int32_t quantized_dimension = 0;
-};
-
-struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef QuantizationParametersT NativeTableType;
-  typedef QuantizationParametersBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_MIN = 4,
-    VT_MAX = 6,
-    VT_SCALE = 8,
-    VT_ZERO_POINT = 10,
-    VT_DETAILS_TYPE = 12,
-    VT_DETAILS = 14,
-    VT_QUANTIZED_DIMENSION = 16
-  };
-  const flatbuffers::Vector<float> *min() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_MIN);
-  }
-  const flatbuffers::Vector<float> *max() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_MAX);
-  }
-  const flatbuffers::Vector<float> *scale() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_SCALE);
-  }
-  const flatbuffers::Vector<int64_t> *zero_point() const {
-    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_ZERO_POINT);
-  }
-  tflite::QuantizationDetails details_type() const {
-    return static_cast<tflite::QuantizationDetails>(GetField<uint8_t>(VT_DETAILS_TYPE, 0));
-  }
-  const void *details() const {
-    return GetPointer<const void *>(VT_DETAILS);
-  }
-  template<typename T> const T *details_as() const;
-  const tflite::CustomQuantization *details_as_CustomQuantization() const {
-    return details_type() == tflite::QuantizationDetails_CustomQuantization ? static_cast<const tflite::CustomQuantization *>(details()) : nullptr;
-  }
-  int32_t quantized_dimension() const {
-    return GetField<int32_t>(VT_QUANTIZED_DIMENSION, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_MIN) &&
-           verifier.VerifyVector(min()) &&
-           VerifyOffset(verifier, VT_MAX) &&
-           verifier.VerifyVector(max()) &&
-           VerifyOffset(verifier, VT_SCALE) &&
-           verifier.VerifyVector(scale()) &&
-           VerifyOffset(verifier, VT_ZERO_POINT) &&
-           verifier.VerifyVector(zero_point()) &&
-           VerifyField<uint8_t>(verifier, VT_DETAILS_TYPE, 1) &&
-           VerifyOffset(verifier, VT_DETAILS) &&
-           VerifyQuantizationDetails(verifier, details(), details_type()) &&
-           VerifyField<int32_t>(verifier, VT_QUANTIZED_DIMENSION, 4) &&
-           verifier.EndTable();
-  }
-  QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<QuantizationParameters> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-template<> inline const tflite::CustomQuantization *QuantizationParameters::details_as<tflite::CustomQuantization>() const {
-  return details_as_CustomQuantization();
-}
-
-struct QuantizationParametersBuilder {
-  typedef QuantizationParameters Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_min(flatbuffers::Offset<flatbuffers::Vector<float>> min) {
-    fbb_.AddOffset(QuantizationParameters::VT_MIN, min);
-  }
-  void add_max(flatbuffers::Offset<flatbuffers::Vector<float>> max) {
-    fbb_.AddOffset(QuantizationParameters::VT_MAX, max);
-  }
-  void add_scale(flatbuffers::Offset<flatbuffers::Vector<float>> scale) {
-    fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale);
-  }
-  void add_zero_point(flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point) {
-    fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point);
-  }
-  void add_details_type(tflite::QuantizationDetails details_type) {
-    fbb_.AddElement<uint8_t>(QuantizationParameters::VT_DETAILS_TYPE, static_cast<uint8_t>(details_type), 0);
-  }
-  void add_details(flatbuffers::Offset<void> details) {
-    fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details);
-  }
-  void add_quantized_dimension(int32_t quantized_dimension) {
-    fbb_.AddElement<int32_t>(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0);
-  }
-  explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<QuantizationParameters> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<QuantizationParameters>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<float>> min = 0,
-    flatbuffers::Offset<flatbuffers::Vector<float>> max = 0,
-    flatbuffers::Offset<flatbuffers::Vector<float>> scale = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point = 0,
-    tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE,
-    flatbuffers::Offset<void> details = 0,
-    int32_t quantized_dimension = 0) {
-  QuantizationParametersBuilder builder_(_fbb);
-  builder_.add_quantized_dimension(quantized_dimension);
-  builder_.add_details(details);
-  builder_.add_zero_point(zero_point);
-  builder_.add_scale(scale);
-  builder_.add_max(max);
-  builder_.add_min(min);
-  builder_.add_details_type(details_type);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<QuantizationParameters> CreateQuantizationParametersDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<float> *min = nullptr,
-    const std::vector<float> *max = nullptr,
-    const std::vector<float> *scale = nullptr,
-    const std::vector<int64_t> *zero_point = nullptr,
-    tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE,
-    flatbuffers::Offset<void> details = 0,
-    int32_t quantized_dimension = 0) {
-  auto min__ = min ? _fbb.CreateVector<float>(*min) : 0;
-  auto max__ = max ? _fbb.CreateVector<float>(*max) : 0;
-  auto scale__ = scale ? _fbb.CreateVector<float>(*scale) : 0;
-  auto zero_point__ = zero_point ? _fbb.CreateVector<int64_t>(*zero_point) : 0;
-  return tflite::CreateQuantizationParameters(
-      _fbb,
-      min__,
-      max__,
-      scale__,
-      zero_point__,
-      details_type,
-      details,
-      quantized_dimension);
-}
-
-flatbuffers::Offset<QuantizationParameters> CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
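Aside, illustrative only, local names are made up for the sketch: building per-tensor quantization parameters with the *Direct helper, which copies the given vectors into the builder:

    flatbuffers::FlatBufferBuilder fbb;
    std::vector<float> scale{0.0078f};
    std::vector<int64_t> zero_point{128};
    auto qparams = tflite::CreateQuantizationParametersDirect(
        fbb, /*min=*/nullptr, /*max=*/nullptr, &scale, &zero_point);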
-struct Int32VectorT : public flatbuffers::NativeTable {
-  typedef Int32Vector TableType;
-  std::vector<int32_t> values{};
-};
-
-struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef Int32VectorT NativeTableType;
-  typedef Int32VectorBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_VALUES = 4
-  };
-  const flatbuffers::Vector<int32_t> *values() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_VALUES);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_VALUES) &&
-           verifier.VerifyVector(values()) &&
-           verifier.EndTable();
-  }
-  Int32VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Int32Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct Int32VectorBuilder {
-  typedef Int32Vector Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<int32_t>> values) {
-    fbb_.AddOffset(Int32Vector::VT_VALUES, values);
-  }
-  explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Int32Vector> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Int32Vector>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Int32Vector> CreateInt32Vector(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> values = 0) {
-  Int32VectorBuilder builder_(_fbb);
-  builder_.add_values(values);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Int32Vector> CreateInt32VectorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<int32_t> *values = nullptr) {
-  auto values__ = values ? _fbb.CreateVector<int32_t>(*values) : 0;
-  return tflite::CreateInt32Vector(
-      _fbb,
-      values__);
-}
-
-flatbuffers::Offset<Int32Vector> CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct Uint16VectorT : public flatbuffers::NativeTable {
-  typedef Uint16Vector TableType;
-  std::vector<uint16_t> values{};
-};
-
-struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef Uint16VectorT NativeTableType;
-  typedef Uint16VectorBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_VALUES = 4
-  };
-  const flatbuffers::Vector<uint16_t> *values() const {
-    return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_VALUES) &&
-           verifier.VerifyVector(values()) &&
-           verifier.EndTable();
-  }
-  Uint16VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Uint16Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct Uint16VectorBuilder {
-  typedef Uint16Vector Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values) {
-    fbb_.AddOffset(Uint16Vector::VT_VALUES, values);
-  }
-  explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Uint16Vector> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Uint16Vector>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Uint16Vector> CreateUint16Vector(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0) {
-  Uint16VectorBuilder builder_(_fbb);
-  builder_.add_values(values);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Uint16Vector> CreateUint16VectorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<uint16_t> *values = nullptr) {
-  if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4); }
-  auto values__ = values ? _fbb.CreateVector<uint16_t>(*values) : 0;
-  return tflite::CreateUint16Vector(
-      _fbb,
-      values__);
-}
-
-flatbuffers::Offset<Uint16Vector> CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct Uint8VectorT : public flatbuffers::NativeTable {
-  typedef Uint8Vector TableType;
-  std::vector<uint8_t> values{};
-};
-
-struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef Uint8VectorT NativeTableType;
-  typedef Uint8VectorBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_VALUES = 4
-  };
-  const flatbuffers::Vector<uint8_t> *values() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_VALUES) &&
-           verifier.VerifyVector(values()) &&
-           verifier.EndTable();
-  }
-  Uint8VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Uint8Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct Uint8VectorBuilder {
-  typedef Uint8Vector Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values) {
-    fbb_.AddOffset(Uint8Vector::VT_VALUES, values);
-  }
-  explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Uint8Vector> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Uint8Vector>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Uint8Vector> CreateUint8Vector(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0) {
-  Uint8VectorBuilder builder_(_fbb);
-  builder_.add_values(values);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Uint8Vector> CreateUint8VectorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<uint8_t> *values = nullptr) {
-  if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4); }
-  auto values__ = values ? _fbb.CreateVector<uint8_t>(*values) : 0;
-  return tflite::CreateUint8Vector(
-      _fbb,
-      values__);
-}
-
-flatbuffers::Offset<Uint8Vector> CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct DimensionMetadataT : public flatbuffers::NativeTable {
-  typedef DimensionMetadata TableType;
-  tflite::DimensionType format = tflite::DimensionType_DENSE;
-  int32_t dense_size = 0;
-  tflite::SparseIndexVectorUnion array_segments{};
-  tflite::SparseIndexVectorUnion array_indices{};
-};
-
-struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef DimensionMetadataT NativeTableType;
-  typedef DimensionMetadataBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_FORMAT = 4,
-    VT_DENSE_SIZE = 6,
-    VT_ARRAY_SEGMENTS_TYPE = 8,
-    VT_ARRAY_SEGMENTS = 10,
-    VT_ARRAY_INDICES_TYPE = 12,
-    VT_ARRAY_INDICES = 14
-  };
-  tflite::DimensionType format() const {
-    return static_cast<tflite::DimensionType>(GetField<int8_t>(VT_FORMAT, 0));
-  }
-  int32_t dense_size() const {
-    return GetField<int32_t>(VT_DENSE_SIZE, 0);
-  }
-  tflite::SparseIndexVector array_segments_type() const {
-    return static_cast<tflite::SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_SEGMENTS_TYPE, 0));
-  }
-  const void *array_segments() const {
-    return GetPointer<const void *>(VT_ARRAY_SEGMENTS);
-  }
-  template<typename T> const T *array_segments_as() const;
-  const tflite::Int32Vector *array_segments_as_Int32Vector() const {
-    return array_segments_type() == tflite::SparseIndexVector_Int32Vector ? static_cast<const tflite::Int32Vector *>(array_segments()) : nullptr;
-  }
-  const tflite::Uint16Vector *array_segments_as_Uint16Vector() const {
-    return array_segments_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast<const tflite::Uint16Vector *>(array_segments()) : nullptr;
-  }
-  const tflite::Uint8Vector *array_segments_as_Uint8Vector() const {
-    return array_segments_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast<const tflite::Uint8Vector *>(array_segments()) : nullptr;
-  }
-  tflite::SparseIndexVector array_indices_type() const {
-    return static_cast<tflite::SparseIndexVector>(GetField<uint8_t>(VT_ARRAY_INDICES_TYPE, 0));
-  }
-  const void *array_indices() const {
-    return GetPointer<const void *>(VT_ARRAY_INDICES);
-  }
-  template<typename T> const T *array_indices_as() const;
-  const tflite::Int32Vector *array_indices_as_Int32Vector() const {
-    return array_indices_type() == tflite::SparseIndexVector_Int32Vector ? static_cast<const tflite::Int32Vector *>(array_indices()) : nullptr;
-  }
-  const tflite::Uint16Vector *array_indices_as_Uint16Vector() const {
-    return array_indices_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast<const tflite::Uint16Vector *>(array_indices()) : nullptr;
-  }
-  const tflite::Uint8Vector *array_indices_as_Uint8Vector() const {
-    return array_indices_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast<const tflite::Uint8Vector *>(array_indices()) : nullptr;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_FORMAT, 1) &&
-           VerifyField<int32_t>(verifier, VT_DENSE_SIZE, 4) &&
-           VerifyField<uint8_t>(verifier, VT_ARRAY_SEGMENTS_TYPE, 1) &&
-           VerifyOffset(verifier, VT_ARRAY_SEGMENTS) &&
-           VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) &&
-           VerifyField<uint8_t>(verifier, VT_ARRAY_INDICES_TYPE, 1) &&
-           VerifyOffset(verifier, VT_ARRAY_INDICES) &&
-           VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) &&
-           verifier.EndTable();
-  }
-  DimensionMetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DimensionMetadata> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-template<> inline const tflite::Int32Vector *DimensionMetadata::array_segments_as<tflite::Int32Vector>() const {
-  return array_segments_as_Int32Vector();
-}
-
-template<> inline const tflite::Uint16Vector *DimensionMetadata::array_segments_as<tflite::Uint16Vector>() const {
-  return array_segments_as_Uint16Vector();
-}
-
-template<> inline const tflite::Uint8Vector *DimensionMetadata::array_segments_as<tflite::Uint8Vector>() const {
-  return array_segments_as_Uint8Vector();
-}
-
-template<> inline const tflite::Int32Vector *DimensionMetadata::array_indices_as<tflite::Int32Vector>() const {
-  return array_indices_as_Int32Vector();
-}
-
-template<> inline const tflite::Uint16Vector *DimensionMetadata::array_indices_as<tflite::Uint16Vector>() const {
-  return array_indices_as_Uint16Vector();
-}
-
-template<> inline const tflite::Uint8Vector *DimensionMetadata::array_indices_as<tflite::Uint8Vector>() const {
-  return array_indices_as_Uint8Vector();
-}
-
-struct DimensionMetadataBuilder {
-  typedef DimensionMetadata Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_format(tflite::DimensionType format) {
-    fbb_.AddElement<int8_t>(DimensionMetadata::VT_FORMAT, static_cast<int8_t>(format), 0);
-  }
-  void add_dense_size(int32_t dense_size) {
-    fbb_.AddElement<int32_t>(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0);
-  }
-  void add_array_segments_type(tflite::SparseIndexVector array_segments_type) {
-    fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, static_cast<uint8_t>(array_segments_type), 0);
-  }
-  void add_array_segments(flatbuffers::Offset<void> array_segments) {
-    fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments);
-  }
-  void add_array_indices_type(tflite::SparseIndexVector array_indices_type) {
-    fbb_.AddElement<uint8_t>(DimensionMetadata::VT_ARRAY_INDICES_TYPE, static_cast<uint8_t>(array_indices_type), 0);
-  }
-  void add_array_indices(flatbuffers::Offset<void> array_indices) {
-    fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices);
-  }
-  explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<DimensionMetadata> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DimensionMetadata>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::DimensionType format = tflite::DimensionType_DENSE,
-    int32_t dense_size = 0,
-    tflite::SparseIndexVector array_segments_type = tflite::SparseIndexVector_NONE,
-    flatbuffers::Offset<void> array_segments = 0,
-    tflite::SparseIndexVector array_indices_type = tflite::SparseIndexVector_NONE,
-    flatbuffers::Offset<void> array_indices = 0) {
-  DimensionMetadataBuilder builder_(_fbb);
-  builder_.add_array_indices(array_indices);
-  builder_.add_array_segments(array_segments);
-  builder_.add_dense_size(dense_size);
-  builder_.add_array_indices_type(array_indices_type);
-  builder_.add_array_segments_type(array_segments_type);
-  builder_.add_format(format);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
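Aside, illustrative only, the example values are made up: encoding one sparse dimension CSR-style with the smallest index vectors that fit, then passing each union by its type tag plus a type-erased offset:

    flatbuffers::FlatBufferBuilder fbb;
    std::vector<uint8_t> seg{0, 2, 3};   // segment (row-pointer) array
    std::vector<uint8_t> idx{0, 3, 1};   // index array
    auto segments = tflite::CreateUint8VectorDirect(fbb, &seg);
    auto indices = tflite::CreateUint8VectorDirect(fbb, &idx);
    auto md = tflite::CreateDimensionMetadata(
        fbb, tflite::DimensionType_SPARSE_CSR, /*dense_size=*/0,
        tflite::SparseIndexVector_Uint8Vector, segments.Union(),
        tflite::SparseIndexVector_Uint8Vector, indices.Union());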
-  DimensionMetadataBuilder builder_(_fbb);
-  builder_.add_array_indices(array_indices);
-  builder_.add_array_segments(array_segments);
-  builder_.add_dense_size(dense_size);
-  builder_.add_array_indices_type(array_indices_type);
-  builder_.add_array_segments_type(array_segments_type);
-  builder_.add_format(format);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SparsityParametersT : public flatbuffers::NativeTable {
-  typedef SparsityParameters TableType;
-  std::vector<int32_t> traversal_order{};
-  std::vector<int32_t> block_map{};
-  std::vector<std::unique_ptr<tflite::DimensionMetadataT>> dim_metadata{};
-  SparsityParametersT() = default;
-  SparsityParametersT(const SparsityParametersT &o);
-  SparsityParametersT(SparsityParametersT&&) FLATBUFFERS_NOEXCEPT = default;
-  SparsityParametersT &operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT;
-};
-
-struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SparsityParametersT NativeTableType;
-  typedef SparsityParametersBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_TRAVERSAL_ORDER = 4,
-    VT_BLOCK_MAP = 6,
-    VT_DIM_METADATA = 8
-  };
-  const flatbuffers::Vector<int32_t> *traversal_order() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_TRAVERSAL_ORDER);
-  }
-  const flatbuffers::Vector<int32_t> *block_map() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BLOCK_MAP);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>> *dim_metadata() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>> *>(VT_DIM_METADATA);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_TRAVERSAL_ORDER) &&
-           verifier.VerifyVector(traversal_order()) &&
-           VerifyOffset(verifier, VT_BLOCK_MAP) &&
-           verifier.VerifyVector(block_map()) &&
-           VerifyOffset(verifier, VT_DIM_METADATA) &&
-           verifier.VerifyVector(dim_metadata()) &&
-           verifier.VerifyVectorOfTables(dim_metadata()) &&
-           verifier.EndTable();
-  }
-  SparsityParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SparsityParameters> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SparsityParametersBuilder {
-  typedef SparsityParameters Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_traversal_order(flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order) {
-    fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order);
-  }
-  void add_block_map(flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map) {
-    fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map);
-  }
-  void add_dim_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>>> dim_metadata) {
-    fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata);
-  }
-  explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SparsityParameters> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SparsityParameters>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> traversal_order = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> block_map = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::DimensionMetadata>>> dim_metadata = 0) {
-  SparsityParametersBuilder builder_(_fbb);
-  builder_.add_dim_metadata(dim_metadata);
-  builder_.add_block_map(block_map);
-  builder_.add_traversal_order(traversal_order);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SparsityParameters> CreateSparsityParametersDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<int32_t> *traversal_order = nullptr,
-    const std::vector<int32_t> *block_map = nullptr,
-    const std::vector<flatbuffers::Offset<tflite::DimensionMetadata>> *dim_metadata = nullptr) {
-  auto traversal_order__ = traversal_order ? _fbb.CreateVector(*traversal_order) : 0;
-  auto block_map__ = block_map ? _fbb.CreateVector(*block_map) : 0;
-  auto dim_metadata__ = dim_metadata ? _fbb.CreateVector<flatbuffers::Offset<tflite::DimensionMetadata>>(*dim_metadata) : 0;
-  return tflite::CreateSparsityParameters(
-      _fbb,
-      traversal_order__,
-      block_map__,
-      dim_metadata__);
-}
-
-flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct VariantSubTypeT : public flatbuffers::NativeTable {
-  typedef VariantSubType TableType;
-  std::vector<int32_t> shape{};
-  tflite::TensorType type = tflite::TensorType_FLOAT32;
-  bool has_rank = false;
-};
-
-struct VariantSubType FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef VariantSubTypeT NativeTableType;
-  typedef VariantSubTypeBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_SHAPE = 4,
-    VT_TYPE = 6,
-    VT_HAS_RANK = 8
-  };
-  const flatbuffers::Vector<int32_t> *shape() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
-  }
-  tflite::TensorType type() const {
-    return static_cast<tflite::TensorType>(GetField<int8_t>(VT_TYPE, 0));
-  }
-  bool has_rank() const {
-    return GetField<uint8_t>(VT_HAS_RANK, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_SHAPE) &&
-           verifier.VerifyVector(shape()) &&
-           VerifyField<int8_t>(verifier, VT_TYPE, 1) &&
-           VerifyField<uint8_t>(verifier, VT_HAS_RANK, 1) &&
-           verifier.EndTable();
-  }
-  VariantSubTypeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(VariantSubTypeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<VariantSubType> Pack(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct VariantSubTypeBuilder {
-  typedef VariantSubType Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
-    fbb_.AddOffset(VariantSubType::VT_SHAPE, shape);
-  }
-  void add_type(tflite::TensorType type) {
-    fbb_.AddElement<int8_t>(VariantSubType::VT_TYPE, static_cast<int8_t>(type), 0);
-  }
-  void add_has_rank(bool has_rank) {
-    fbb_.AddElement<uint8_t>(VariantSubType::VT_HAS_RANK, static_cast<uint8_t>(has_rank), 0);
-  }
-  explicit VariantSubTypeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<VariantSubType> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<VariantSubType>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<VariantSubType> CreateVariantSubType(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
-    tflite::TensorType type = tflite::TensorType_FLOAT32,
-    bool has_rank = false) {
-  VariantSubTypeBuilder builder_(_fbb);
-  builder_.add_shape(shape);
-  builder_.add_has_rank(has_rank);
-  builder_.add_type(type);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<VariantSubType> CreateVariantSubTypeDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<int32_t> *shape = nullptr,
-    tflite::TensorType type = tflite::TensorType_FLOAT32,
-    bool has_rank = false) {
-  auto shape__ = shape ? _fbb.CreateVector(*shape) : 0;
-  return tflite::CreateVariantSubType(
-      _fbb,
-      shape__,
-      type,
-      has_rank);
-}
-
-flatbuffers::Offset<VariantSubType> CreateVariantSubType(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct TensorT : public flatbuffers::NativeTable {
-  typedef Tensor TableType;
-  std::vector<int32_t> shape{};
-  tflite::TensorType type = tflite::TensorType_FLOAT32;
-  uint32_t buffer = 0;
-  std::string name{};
-  std::unique_ptr<tflite::QuantizationParametersT> quantization{};
-  bool is_variable = false;
-  std::unique_ptr<tflite::SparsityParametersT> sparsity{};
-  std::vector<int32_t> shape_signature{};
-  bool has_rank = false;
-  std::vector<std::unique_ptr<tflite::VariantSubTypeT>> variant_tensors{};
-  TensorT() = default;
-  TensorT(const TensorT &o);
-  TensorT(TensorT&&) FLATBUFFERS_NOEXCEPT = default;
-  TensorT &operator=(TensorT o) FLATBUFFERS_NOEXCEPT;
-};
-
-struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef TensorT NativeTableType;
-  typedef TensorBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_SHAPE = 4,
-    VT_TYPE = 6,
-    VT_BUFFER = 8,
-    VT_NAME = 10,
-    VT_QUANTIZATION = 12,
-    VT_IS_VARIABLE = 14,
-    VT_SPARSITY = 16,
-    VT_SHAPE_SIGNATURE = 18,
-    VT_HAS_RANK = 20,
-    VT_VARIANT_TENSORS = 22
-  };
-  const flatbuffers::Vector<int32_t> *shape() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
-  }
-  tflite::TensorType type() const {
-    return static_cast<tflite::TensorType>(GetField<int8_t>(VT_TYPE, 0));
-  }
-  uint32_t buffer() const {
-    return GetField<uint32_t>(VT_BUFFER, 0);
-  }
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  const tflite::QuantizationParameters *quantization() const {
-    return GetPointer<const tflite::QuantizationParameters *>(VT_QUANTIZATION);
-  }
-  bool is_variable() const {
-    return GetField<uint8_t>(VT_IS_VARIABLE, 0) != 0;
-  }
-  const tflite::SparsityParameters *sparsity() const {
-    return GetPointer<const tflite::SparsityParameters *>(VT_SPARSITY);
-  }
-  const flatbuffers::Vector<int32_t> *shape_signature() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE_SIGNATURE);
-  }
-  bool has_rank() const {
-    return GetField<uint8_t>(VT_HAS_RANK, 0) != 0;
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>> *variant_tensors() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>> *>(VT_VARIANT_TENSORS);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_SHAPE) &&
-           verifier.VerifyVector(shape()) &&
-           VerifyField<int8_t>(verifier, VT_TYPE, 1) &&
-           VerifyField<uint32_t>(verifier, VT_BUFFER, 4) &&
-           VerifyOffset(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyOffset(verifier, VT_QUANTIZATION) &&
-           verifier.VerifyTable(quantization()) &&
-           VerifyField<uint8_t>(verifier, VT_IS_VARIABLE, 1) &&
-           VerifyOffset(verifier, VT_SPARSITY) &&
-           verifier.VerifyTable(sparsity()) &&
-           VerifyOffset(verifier, VT_SHAPE_SIGNATURE) &&
-           verifier.VerifyVector(shape_signature()) &&
-           VerifyField<uint8_t>(verifier, VT_HAS_RANK, 1) &&
-           VerifyOffset(verifier, VT_VARIANT_TENSORS) &&
-           verifier.VerifyVector(variant_tensors()) &&
-           verifier.VerifyVectorOfTables(variant_tensors()) &&
-           verifier.EndTable();
-  }
-  TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Tensor> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct TensorBuilder {
-  typedef Tensor Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
-    fbb_.AddOffset(Tensor::VT_SHAPE, shape);
-  }
-  void add_type(tflite::TensorType type) {
-    fbb_.AddElement<int8_t>(Tensor::VT_TYPE, static_cast<int8_t>(type), 0);
-  }
-  void add_buffer(uint32_t buffer) {
-    fbb_.AddElement<uint32_t>(Tensor::VT_BUFFER, buffer, 0);
-  }
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(Tensor::VT_NAME, name);
-  }
-  void add_quantization(flatbuffers::Offset<tflite::QuantizationParameters> quantization) {
-    fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization);
-  }
-  void add_is_variable(bool is_variable) {
-    fbb_.AddElement<uint8_t>(Tensor::VT_IS_VARIABLE, static_cast<uint8_t>(is_variable), 0);
-  }
-  void add_sparsity(flatbuffers::Offset<tflite::SparsityParameters> sparsity) {
-    fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity);
-  }
-  void add_shape_signature(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature) {
-    fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature);
-  }
-  void add_has_rank(bool has_rank) {
-    fbb_.AddElement<uint8_t>(Tensor::VT_HAS_RANK, static_cast<uint8_t>(has_rank), 0);
-  }
-  void add_variant_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>>> variant_tensors) {
-    fbb_.AddOffset(Tensor::VT_VARIANT_TENSORS, variant_tensors);
-  }
-  explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Tensor> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Tensor>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Tensor> CreateTensor(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
-    tflite::TensorType type = tflite::TensorType_FLOAT32,
-    uint32_t buffer = 0,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    flatbuffers::Offset<tflite::QuantizationParameters> quantization = 0,
-    bool is_variable = false,
-    flatbuffers::Offset<tflite::SparsityParameters> sparsity = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape_signature = 0,
-    bool has_rank = false,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::VariantSubType>>> variant_tensors = 0) {
-  TensorBuilder builder_(_fbb);
-  builder_.add_variant_tensors(variant_tensors);
-  builder_.add_shape_signature(shape_signature);
-  builder_.add_sparsity(sparsity);
-  builder_.add_quantization(quantization);
-  builder_.add_name(name);
-  builder_.add_buffer(buffer);
-  builder_.add_shape(shape);
-  builder_.add_has_rank(has_rank);
-  builder_.add_is_variable(is_variable);
-  builder_.add_type(type);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Tensor> CreateTensorDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<int32_t> *shape = nullptr,
-    tflite::TensorType type = tflite::TensorType_FLOAT32,
-    uint32_t buffer = 0,
-    const char *name = nullptr,
-    flatbuffers::Offset<tflite::QuantizationParameters> quantization = 0,
-    bool is_variable = false,
-    flatbuffers::Offset<tflite::SparsityParameters> sparsity = 0,
-    const std::vector<int32_t> *shape_signature = nullptr,
-    bool has_rank = false,
-    const std::vector<flatbuffers::Offset<tflite::VariantSubType>> *variant_tensors = nullptr) {
-  auto shape__ = shape ? _fbb.CreateVector(*shape) : 0;
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  auto shape_signature__ = shape_signature ? _fbb.CreateVector(*shape_signature) : 0;
-  auto variant_tensors__ = variant_tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::VariantSubType>>(*variant_tensors) : 0;
-  return tflite::CreateTensor(
-      _fbb,
-      shape__,
-      type,
-      buffer,
-      name__,
-      quantization,
-      is_variable,
-      sparsity,
-      shape_signature__,
-      has_rank,
-      variant_tensors__);
-}
-
-flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct Conv2DOptionsT : public flatbuffers::NativeTable {
-  typedef Conv2DOptions TableType;
-  tflite::Padding padding = tflite::Padding_SAME;
-  int32_t stride_w = 0;
-  int32_t stride_h = 0;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  int32_t dilation_w_factor = 1;
-  int32_t dilation_h_factor = 1;
-};
-
-struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef Conv2DOptionsT NativeTableType;
-  typedef Conv2DOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_PADDING = 4,
-    VT_STRIDE_W = 6,
-    VT_STRIDE_H = 8,
-    VT_FUSED_ACTIVATION_FUNCTION = 10,
-    VT_DILATION_W_FACTOR = 12,
-    VT_DILATION_H_FACTOR = 14
-  };
-  tflite::Padding padding() const {
-    return static_cast<tflite::Padding>(GetField<int8_t>(VT_PADDING, 0));
-  }
-  int32_t stride_w() const {
-    return GetField<int32_t>(VT_STRIDE_W, 0);
-  }
-  int32_t stride_h() const {
-    return GetField<int32_t>(VT_STRIDE_H, 0);
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  int32_t dilation_w_factor() const {
-    return GetField<int32_t>(VT_DILATION_W_FACTOR, 1);
-  }
-  int32_t dilation_h_factor() const {
-    return GetField<int32_t>(VT_DILATION_H_FACTOR, 1);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_H, 4) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR, 4) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR, 4) &&
-           verifier.EndTable();
-  }
-  Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Conv2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct Conv2DOptionsBuilder {
-  typedef Conv2DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_padding(tflite::Padding padding) {
-    fbb_.AddElement<int8_t>(Conv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
-  }
-  void add_stride_w(int32_t stride_w) {
-    fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_W, stride_w, 0);
-  }
-  void add_stride_h(int32_t stride_h) {
-    fbb_.AddElement<int32_t>(Conv2DOptions::VT_STRIDE_H, stride_h, 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_dilation_w_factor(int32_t dilation_w_factor) {
-    fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
-  }
-  void add_dilation_h_factor(int32_t dilation_h_factor) {
-    fbb_.AddElement<int32_t>(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
-  }
-  explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Conv2DOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Conv2DOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::Padding padding = tflite::Padding_SAME,
-    int32_t stride_w = 0,
-    int32_t stride_h = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    int32_t dilation_w_factor = 1,
-    int32_t dilation_h_factor = 1) {
-  Conv2DOptionsBuilder builder_(_fbb);
-  builder_.add_dilation_h_factor(dilation_h_factor);
-  builder_.add_dilation_w_factor(dilation_w_factor);
-  builder_.add_stride_h(stride_h);
-  builder_.add_stride_w(stride_w);
-  builder_.add_fused_activation_function(fused_activation_function);
-  builder_.add_padding(padding);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct Conv3DOptionsT : public flatbuffers::NativeTable {
-  typedef Conv3DOptions TableType;
-  tflite::Padding padding = tflite::Padding_SAME;
-  int32_t stride_d = 0;
-  int32_t stride_w = 0;
-  int32_t stride_h = 0;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  int32_t dilation_d_factor = 1;
-  int32_t dilation_w_factor = 1;
-  int32_t dilation_h_factor = 1;
-};
-
-struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef Conv3DOptionsT NativeTableType;
-  typedef Conv3DOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_PADDING = 4,
-    VT_STRIDE_D = 6,
-    VT_STRIDE_W = 8,
-    VT_STRIDE_H = 10,
-    VT_FUSED_ACTIVATION_FUNCTION = 12,
-    VT_DILATION_D_FACTOR = 14,
-    VT_DILATION_W_FACTOR = 16,
-    VT_DILATION_H_FACTOR = 18
-  };
-  tflite::Padding padding() const {
-    return static_cast<tflite::Padding>(GetField<int8_t>(VT_PADDING, 0));
-  }
-  int32_t stride_d() const {
-    return GetField<int32_t>(VT_STRIDE_D, 0);
-  }
-  int32_t stride_w() const {
-    return GetField<int32_t>(VT_STRIDE_W, 0);
-  }
-  int32_t stride_h() const {
-    return GetField<int32_t>(VT_STRIDE_H, 0);
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  int32_t dilation_d_factor() const {
-    return GetField<int32_t>(VT_DILATION_D_FACTOR, 1);
-  }
-  int32_t dilation_w_factor() const {
-    return GetField<int32_t>(VT_DILATION_W_FACTOR, 1);
-  }
-  int32_t dilation_h_factor() const {
-    return GetField<int32_t>(VT_DILATION_H_FACTOR, 1);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_D, 4) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_H, 4) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_D_FACTOR, 4) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR, 4) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR, 4) &&
-           verifier.EndTable();
-  }
-  Conv3DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Conv3DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct Conv3DOptionsBuilder {
-  typedef Conv3DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_padding(tflite::Padding padding) {
-    fbb_.AddElement<int8_t>(Conv3DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
-  }
-  void add_stride_d(int32_t stride_d) {
-    fbb_.AddElement<int32_t>(Conv3DOptions::VT_STRIDE_D, stride_d, 0);
-  }
-  void add_stride_w(int32_t stride_w) {
-    fbb_.AddElement<int32_t>(Conv3DOptions::VT_STRIDE_W, stride_w, 0);
-  }
-  void add_stride_h(int32_t stride_h) {
-    fbb_.AddElement<int32_t>(Conv3DOptions::VT_STRIDE_H, stride_h, 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(Conv3DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_dilation_d_factor(int32_t dilation_d_factor) {
-    fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_D_FACTOR, dilation_d_factor, 1);
-  }
-  void add_dilation_w_factor(int32_t dilation_w_factor) {
-    fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
-  }
-  void add_dilation_h_factor(int32_t dilation_h_factor) {
-    fbb_.AddElement<int32_t>(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
-  }
-  explicit Conv3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Conv3DOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Conv3DOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::Padding padding = tflite::Padding_SAME,
-    int32_t stride_d = 0,
-    int32_t stride_w = 0,
-    int32_t stride_h = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    int32_t dilation_d_factor = 1,
-    int32_t dilation_w_factor = 1,
-    int32_t dilation_h_factor = 1) {
-  Conv3DOptionsBuilder builder_(_fbb);
-  builder_.add_dilation_h_factor(dilation_h_factor);
-  builder_.add_dilation_w_factor(dilation_w_factor);
-  builder_.add_dilation_d_factor(dilation_d_factor);
-  builder_.add_stride_h(stride_h);
-  builder_.add_stride_w(stride_w);
-  builder_.add_stride_d(stride_d);
-  builder_.add_fused_activation_function(fused_activation_function);
-  builder_.add_padding(padding);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct Pool2DOptionsT : public flatbuffers::NativeTable {
-  typedef Pool2DOptions TableType;
-  tflite::Padding padding = tflite::Padding_SAME;
-  int32_t stride_w = 0;
-  int32_t stride_h = 0;
-  int32_t filter_width = 0;
-  int32_t filter_height = 0;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-};
-
-struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef Pool2DOptionsT NativeTableType;
-  typedef Pool2DOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_PADDING = 4,
-    VT_STRIDE_W = 6,
-    VT_STRIDE_H = 8,
-    VT_FILTER_WIDTH = 10,
-    VT_FILTER_HEIGHT = 12,
-    VT_FUSED_ACTIVATION_FUNCTION = 14
-  };
-  tflite::Padding padding() const {
-    return static_cast<tflite::Padding>(GetField<int8_t>(VT_PADDING, 0));
-  }
-  int32_t stride_w() const {
-    return GetField<int32_t>(VT_STRIDE_W, 0);
-  }
-  int32_t stride_h() const {
-    return GetField<int32_t>(VT_STRIDE_H, 0);
-  }
-  int32_t filter_width() const {
-    return GetField<int32_t>(VT_FILTER_WIDTH, 0);
-  }
-  int32_t filter_height() const {
-    return GetField<int32_t>(VT_FILTER_HEIGHT, 0);
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_H, 4) &&
-           VerifyField<int32_t>(verifier, VT_FILTER_WIDTH, 4) &&
-           VerifyField<int32_t>(verifier, VT_FILTER_HEIGHT, 4) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           verifier.EndTable();
-  }
-  Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Pool2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct Pool2DOptionsBuilder {
-  typedef Pool2DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_padding(tflite::Padding padding) {
-    fbb_.AddElement<int8_t>(Pool2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
-  }
-  void add_stride_w(int32_t stride_w) {
-    fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_W, stride_w, 0);
-  }
-  void add_stride_h(int32_t stride_h) {
-    fbb_.AddElement<int32_t>(Pool2DOptions::VT_STRIDE_H, stride_h, 0);
-  }
-  void add_filter_width(int32_t filter_width) {
-    fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0);
-  }
-  void add_filter_height(int32_t filter_height) {
-    fbb_.AddElement<int32_t>(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Pool2DOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Pool2DOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::Padding padding = tflite::Padding_SAME,
-    int32_t stride_w = 0,
-    int32_t stride_h = 0,
-    int32_t filter_width = 0,
-    int32_t filter_height = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
-  Pool2DOptionsBuilder builder_(_fbb);
-  builder_.add_filter_height(filter_height);
-  builder_.add_filter_width(filter_width);
-  builder_.add_stride_h(stride_h);
-  builder_.add_stride_w(stride_w);
-  builder_.add_fused_activation_function(fused_activation_function);
-  builder_.add_padding(padding);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable {
-  typedef DepthwiseConv2DOptions TableType;
-  tflite::Padding padding = tflite::Padding_SAME;
-  int32_t stride_w = 0;
-  int32_t stride_h = 0;
-  int32_t depth_multiplier = 0;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  int32_t dilation_w_factor = 1;
-  int32_t dilation_h_factor = 1;
-};
-
-struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef DepthwiseConv2DOptionsT NativeTableType;
-  typedef DepthwiseConv2DOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_PADDING = 4,
-    VT_STRIDE_W = 6,
-    VT_STRIDE_H = 8,
-    VT_DEPTH_MULTIPLIER = 10,
-    VT_FUSED_ACTIVATION_FUNCTION = 12,
-    VT_DILATION_W_FACTOR = 14,
-    VT_DILATION_H_FACTOR = 16
-  };
-  tflite::Padding padding() const {
-    return static_cast<tflite::Padding>(GetField<int8_t>(VT_PADDING, 0));
-  }
-  int32_t stride_w() const {
-    return GetField<int32_t>(VT_STRIDE_W, 0);
-  }
-  int32_t stride_h() const {
-    return GetField<int32_t>(VT_STRIDE_H, 0);
-  }
-  int32_t depth_multiplier() const {
-    return GetField<int32_t>(VT_DEPTH_MULTIPLIER, 0);
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  int32_t dilation_w_factor() const {
-    return GetField<int32_t>(VT_DILATION_W_FACTOR, 1);
-  }
-  int32_t dilation_h_factor() const {
-    return GetField<int32_t>(VT_DILATION_H_FACTOR, 1);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_PADDING, 1) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_W, 4) &&
-           VerifyField<int32_t>(verifier, VT_STRIDE_H, 4) &&
-           VerifyField<int32_t>(verifier, VT_DEPTH_MULTIPLIER, 4) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_W_FACTOR, 4) &&
-           VerifyField<int32_t>(verifier, VT_DILATION_H_FACTOR, 4) &&
-           verifier.EndTable();
-  }
-  DepthwiseConv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DepthwiseConv2DOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct DepthwiseConv2DOptionsBuilder {
-  typedef DepthwiseConv2DOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_padding(tflite::Padding padding) {
-    fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_PADDING, static_cast<int8_t>(padding), 0);
-  }
-  void add_stride_w(int32_t stride_w) {
-    fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0);
-  }
-  void add_stride_h(int32_t stride_h) {
-    fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0);
-  }
-  void add_depth_multiplier(int32_t depth_multiplier) {
-    fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_dilation_w_factor(int32_t dilation_w_factor) {
-    fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1);
-  }
-  void add_dilation_h_factor(int32_t dilation_h_factor) {
-    fbb_.AddElement<int32_t>(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1);
-  }
-  explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<DepthwiseConv2DOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DepthwiseConv2DOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::Padding padding = tflite::Padding_SAME,
-    int32_t stride_w = 0,
-    int32_t stride_h = 0,
-    int32_t depth_multiplier = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    int32_t dilation_w_factor = 1,
-    int32_t dilation_h_factor = 1) {
-  DepthwiseConv2DOptionsBuilder builder_(_fbb);
-  builder_.add_dilation_h_factor(dilation_h_factor);
-  builder_.add_dilation_w_factor(dilation_w_factor);
-  builder_.add_depth_multiplier(depth_multiplier);
-  builder_.add_stride_h(stride_h);
-  builder_.add_stride_w(stride_w);
-  builder_.add_fused_activation_function(fused_activation_function);
-  builder_.add_padding(padding);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable {
-  typedef ConcatEmbeddingsOptions TableType;
-  int32_t num_channels = 0;
-  std::vector<int32_t> num_columns_per_channel{};
-  std::vector<int32_t> embedding_dim_per_channel{};
-};
-
-struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ConcatEmbeddingsOptionsT NativeTableType;
-  typedef ConcatEmbeddingsOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NUM_CHANNELS = 4,
-    VT_NUM_COLUMNS_PER_CHANNEL = 6,
-    VT_EMBEDDING_DIM_PER_CHANNEL = 8
-  };
-  int32_t num_channels() const {
-    return GetField<int32_t>(VT_NUM_CHANNELS, 0);
-  }
-  const flatbuffers::Vector<int32_t> *num_columns_per_channel() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
-  }
-  const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_NUM_CHANNELS, 4) &&
-           VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
-           verifier.VerifyVector(num_columns_per_channel()) &&
-           VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
-           verifier.VerifyVector(embedding_dim_per_channel()) &&
-           verifier.EndTable();
-  }
-  ConcatEmbeddingsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ConcatEmbeddingsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ConcatEmbeddingsOptionsBuilder {
-  typedef ConcatEmbeddingsOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_num_channels(int32_t num_channels) {
-    fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
-  }
-  void add_num_columns_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel) {
-    fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
-  }
-  void add_embedding_dim_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel) {
-    fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel);
-  }
-  explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t num_channels = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
-    flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) {
-  ConcatEmbeddingsOptionsBuilder builder_(_fbb);
-  builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
-  builder_.add_num_columns_per_channel(num_columns_per_channel);
-  builder_.add_num_channels(num_channels);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t num_channels = 0,
-    const std::vector<int32_t> *num_columns_per_channel = nullptr,
-    const std::vector<int32_t> *embedding_dim_per_channel = nullptr) {
-  auto num_columns_per_channel__ = num_columns_per_channel ? _fbb.CreateVector(*num_columns_per_channel) : 0;
-  auto embedding_dim_per_channel__ = embedding_dim_per_channel ? _fbb.CreateVector(*embedding_dim_per_channel) : 0;
-  return tflite::CreateConcatEmbeddingsOptions(
-      _fbb,
-      num_channels,
-      num_columns_per_channel__,
-      embedding_dim_per_channel__);
-}
-
-flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct LSHProjectionOptionsT : public flatbuffers::NativeTable {
-  typedef LSHProjectionOptions TableType;
-  tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN;
-};
-
-struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef LSHProjectionOptionsT NativeTableType;
-  typedef LSHProjectionOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_TYPE = 4
-  };
-  tflite::LSHProjectionType type() const {
-    return static_cast<tflite::LSHProjectionType>(GetField<int8_t>(VT_TYPE, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_TYPE, 1) &&
-           verifier.EndTable();
-  }
-  LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LSHProjectionOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct LSHProjectionOptionsBuilder {
-  typedef LSHProjectionOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_type(tflite::LSHProjectionType type) {
-    fbb_.AddElement<int8_t>(LSHProjectionOptions::VT_TYPE, static_cast<int8_t>(type), 0);
-  }
-  explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<LSHProjectionOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LSHProjectionOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN) {
-  LSHProjectionOptionsBuilder builder_(_fbb);
-  builder_.add_type(type);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SVDFOptionsT : public flatbuffers::NativeTable {
-  typedef SVDFOptions TableType;
-  int32_t rank = 0;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  bool asymmetric_quantize_inputs = false;
-};
-
-struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SVDFOptionsT NativeTableType;
-  typedef SVDFOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_RANK = 4,
-    VT_FUSED_ACTIVATION_FUNCTION = 6,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
-  };
-  int32_t rank() const {
-    return GetField<int32_t>(VT_RANK, 0);
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  bool asymmetric_quantize_inputs() const {
-    return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_RANK, 4) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
-           verifier.EndTable();
-  }
-  SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SVDFOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SVDFOptionsBuilder {
-  typedef SVDFOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_rank(int32_t rank) {
-    fbb_.AddElement<int32_t>(SVDFOptions::VT_RANK, rank, 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
-    fbb_.AddElement<uint8_t>(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
-  }
-  explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SVDFOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SVDFOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t rank = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    bool asymmetric_quantize_inputs = false) {
-  SVDFOptionsBuilder builder_(_fbb);
-  builder_.add_rank(rank);
-  builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
-  builder_.add_fused_activation_function(fused_activation_function);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct RNNOptionsT : public flatbuffers::NativeTable {
-  typedef RNNOptions TableType;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  bool asymmetric_quantize_inputs = false;
-};
-
-struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef RNNOptionsT NativeTableType;
-  typedef RNNOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_FUSED_ACTIVATION_FUNCTION = 4,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 6
-  };
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  bool asymmetric_quantize_inputs() const {
-    return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
-           verifier.EndTable();
-  }
-  RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct RNNOptionsBuilder {
-  typedef RNNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
-    fbb_.AddElement<uint8_t>(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
-  }
-  explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<RNNOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RNNOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    bool asymmetric_quantize_inputs = false) {
-  RNNOptionsBuilder builder_(_fbb);
-  builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
-  builder_.add_fused_activation_function(fused_activation_function);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<RNNOptions> CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SequenceRNNOptionsT : public flatbuffers::NativeTable {
-  typedef SequenceRNNOptions TableType;
-  bool time_major = false;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  bool asymmetric_quantize_inputs = false;
-};
-
-struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SequenceRNNOptionsT NativeTableType;
-  typedef SequenceRNNOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_TIME_MAJOR = 4,
-    VT_FUSED_ACTIVATION_FUNCTION = 6,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
-  };
-  bool time_major() const {
-    return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0;
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  bool asymmetric_quantize_inputs() const {
-    return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint8_t>(verifier, VT_TIME_MAJOR, 1) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
-           verifier.EndTable();
-  }
-  SequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SequenceRNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SequenceRNNOptionsBuilder {
-  typedef SequenceRNNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_time_major(bool time_major) {
-    fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
-    fbb_.AddElement<uint8_t>(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
-  }
-  explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SequenceRNNOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SequenceRNNOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    bool time_major = false,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    bool asymmetric_quantize_inputs = false) {
-  SequenceRNNOptionsBuilder builder_(_fbb);
-  builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
-  builder_.add_fused_activation_function(fused_activation_function);
-  builder_.add_time_major(time_major);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable {
-  typedef BidirectionalSequenceRNNOptions TableType;
-  bool time_major = false;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  bool merge_outputs = false;
-  bool asymmetric_quantize_inputs = false;
-};
-
-struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef BidirectionalSequenceRNNOptionsT NativeTableType;
-  typedef BidirectionalSequenceRNNOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_TIME_MAJOR = 4,
-    VT_FUSED_ACTIVATION_FUNCTION = 6,
-    VT_MERGE_OUTPUTS = 8,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
-  };
-  bool time_major() const {
-    return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0;
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  bool merge_outputs() const {
-    return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0;
-  }
-  bool asymmetric_quantize_inputs() const {
-    return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint8_t>(verifier, VT_TIME_MAJOR, 1) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
-           verifier.EndTable();
-  }
-  BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BidirectionalSequenceRNNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct BidirectionalSequenceRNNOptionsBuilder {
-  typedef BidirectionalSequenceRNNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_time_major(bool time_major) {
-    fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_merge_outputs(bool merge_outputs) {
-    fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast<uint8_t>(merge_outputs), 0);
-  }
-  void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
-    fbb_.AddElement<uint8_t>(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
-  }
-  explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<BidirectionalSequenceRNNOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BidirectionalSequenceRNNOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    bool time_major = false,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    bool merge_outputs = false,
-    bool asymmetric_quantize_inputs = false) {
-  BidirectionalSequenceRNNOptionsBuilder builder_(_fbb);
-  builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
-  builder_.add_merge_outputs(merge_outputs);
-  builder_.add_fused_activation_function(fused_activation_function);
-  builder_.add_time_major(time_major);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct FullyConnectedOptionsT : public flatbuffers::NativeTable {
-  typedef FullyConnectedOptions TableType;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT;
-  bool keep_num_dims = false;
-  bool asymmetric_quantize_inputs = false;
-};
-
-struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef FullyConnectedOptionsT NativeTableType;
-  typedef FullyConnectedOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_FUSED_ACTIVATION_FUNCTION = 4,
-    VT_WEIGHTS_FORMAT = 6,
-    VT_KEEP_NUM_DIMS = 8,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 10
-  };
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  tflite::FullyConnectedOptionsWeightsFormat weights_format() const {
-    return static_cast<tflite::FullyConnectedOptionsWeightsFormat>(GetField<int8_t>(VT_WEIGHTS_FORMAT, 0));
-  }
-  bool keep_num_dims() const {
-    return GetField<uint8_t>(VT_KEEP_NUM_DIMS, 0) != 0;
-  }
-  bool asymmetric_quantize_inputs() const {
-    return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           VerifyField<int8_t>(verifier, VT_WEIGHTS_FORMAT, 1) &&
-           VerifyField<uint8_t>(verifier, VT_KEEP_NUM_DIMS, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
-           verifier.EndTable();
-  }
-  FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FullyConnectedOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct FullyConnectedOptionsBuilder {
-  typedef FullyConnectedOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  void add_weights_format(tflite::FullyConnectedOptionsWeightsFormat weights_format) {
-    fbb_.AddElement<int8_t>(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast<int8_t>(weights_format), 0);
-  }
-  void add_keep_num_dims(bool keep_num_dims) {
-    fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast<uint8_t>(keep_num_dims), 0);
-  }
-  void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
-    fbb_.AddElement<uint8_t>(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
-  }
-  explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<FullyConnectedOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FullyConnectedOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE,
-    tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT,
-    bool keep_num_dims = false,
-    bool asymmetric_quantize_inputs = false) {
-  FullyConnectedOptionsBuilder builder_(_fbb);
-  builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
-  builder_.add_keep_num_dims(keep_num_dims);
-  builder_.add_weights_format(weights_format);
-  builder_.add_fused_activation_function(fused_activation_function);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SoftmaxOptionsT : public flatbuffers::NativeTable {
-  typedef SoftmaxOptions TableType;
-  float beta = 0.0f;
-};
-
-struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SoftmaxOptionsT NativeTableType;
-  typedef SoftmaxOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_BETA = 4
-  };
-  float beta() const {
-    return GetField<float>(VT_BETA, 0.0f);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<float>(verifier, VT_BETA, 4) &&
-           verifier.EndTable();
-  }
-  SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SoftmaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SoftmaxOptionsBuilder {
-  typedef SoftmaxOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_beta(float beta) {
-    fbb_.AddElement<float>(SoftmaxOptions::VT_BETA, beta, 0.0f);
-  }
-  explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SoftmaxOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SoftmaxOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    float beta = 0.0f) {
-  SoftmaxOptionsBuilder builder_(_fbb);
-  builder_.add_beta(beta);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ConcatenationOptionsT : public flatbuffers::NativeTable {
-  typedef ConcatenationOptions TableType;
-  int32_t axis = 0;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-};
-
-struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ConcatenationOptionsT NativeTableType;
-  typedef ConcatenationOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_AXIS = 4,
-    VT_FUSED_ACTIVATION_FUNCTION = 6
-  };
-  int32_t axis() const {
-    return GetField<int32_t>(VT_AXIS, 0);
-  }
-  tflite::ActivationFunctionType fused_activation_function() const {
-    return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
-           VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) &&
-           verifier.EndTable();
-  }
-  ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ConcatenationOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ConcatenationOptionsBuilder {
-  typedef ConcatenationOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_axis(int32_t axis) {
-    fbb_.AddElement<int32_t>(ConcatenationOptions::VT_AXIS, axis, 0);
-  }
-  void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) {
-    fbb_.AddElement<int8_t>(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0);
-  }
-  explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ConcatenationOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ConcatenationOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t axis = 0,
-    tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) {
-  ConcatenationOptionsBuilder builder_(_fbb);
-  builder_.add_axis(axis);
-  builder_.add_fused_activation_function(fused_activation_function);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct AddOptionsT : public flatbuffers::NativeTable {
-  typedef AddOptions TableType;
-  tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE;
-  bool pot_scale_int16 = true;
-};
-
-struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef AddOptionsT NativeTableType;
-  typedef AddOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_FUSED_ACTIVATION_FUNCTION = 4,
-    VT_POT_SCALE_INT16 = 6
-  };
-  tflite::ActivationFunctionType fused_activation_function() const {
static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16, 1) && - verifier.EndTable(); - } - AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<AddOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AddOptionsBuilder { - typedef AddOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement<uint8_t>(AddOptions::VT_POT_SCALE_INT16, static_cast<uint8_t>(pot_scale_int16), 1); - } - explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<AddOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<AddOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<AddOptions> CreateAddOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - AddOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<AddOptions> CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MulOptionsT : public flatbuffers::NativeTable { - typedef MulOptions TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; -}; - -struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MulOptionsT NativeTableType; - typedef MulOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - verifier.EndTable(); - } - MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<MulOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MulOptionsBuilder { - typedef MulOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : 
fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<MulOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<MulOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<MulOptions> CreateMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - MulOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<MulOptions> CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct L2NormOptionsT : public flatbuffers::NativeTable { - typedef L2NormOptions TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; -}; - -struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef L2NormOptionsT NativeTableType; - typedef L2NormOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - verifier.EndTable(); - } - L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<L2NormOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct L2NormOptionsBuilder { - typedef L2NormOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<L2NormOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<L2NormOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - L2NormOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable { - typedef LocalResponseNormalizationOptions TableType; - int32_t radius = 0; - float bias = 0.0f; - float alpha = 0.0f; - float beta = 0.0f; -}; - -struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LocalResponseNormalizationOptionsT NativeTableType; - typedef LocalResponseNormalizationOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_RADIUS = 4, - VT_BIAS = 6, - VT_ALPHA = 8, - VT_BETA = 10 - }; - int32_t radius() const { - return GetField<int32_t>(VT_RADIUS, 0); - 
} - float bias() const { - return GetField<float>(VT_BIAS, 0.0f); - } - float alpha() const { - return GetField<float>(VT_ALPHA, 0.0f); - } - float beta() const { - return GetField<float>(VT_BETA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int32_t>(verifier, VT_RADIUS, 4) && - VerifyField<float>(verifier, VT_BIAS, 4) && - VerifyField<float>(verifier, VT_ALPHA, 4) && - VerifyField<float>(verifier, VT_BETA, 4) && - verifier.EndTable(); - } - LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<LocalResponseNormalizationOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LocalResponseNormalizationOptionsBuilder { - typedef LocalResponseNormalizationOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_radius(int32_t radius) { - fbb_.AddElement<int32_t>(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); - } - void add_bias(float bias) { - fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); - } - void add_alpha(float alpha) { - fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); - } - void add_beta(float beta) { - fbb_.AddElement<float>(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); - } - explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<LocalResponseNormalizationOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<LocalResponseNormalizationOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t radius = 0, - float bias = 0.0f, - float alpha = 0.0f, - float beta = 0.0f) { - LocalResponseNormalizationOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); - builder_.add_alpha(alpha); - builder_.add_bias(bias); - builder_.add_radius(radius); - return builder_.Finish(); -} - -flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LSTMOptionsT : public flatbuffers::NativeTable { - typedef LSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; - float cell_clip = 0.0f; - float proj_clip = 0.0f; - tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL; - bool asymmetric_quantize_inputs = false; -}; - -struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSTMOptionsT NativeTableType; - typedef LSTMOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_KERNEL_TYPE = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField<float>(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField<float>(VT_PROJ_CLIP, 0.0f); - } - tflite::LSTMKernelType kernel_type() const { - return static_cast<tflite::LSTMKernelType>(GetField<int8_t>(VT_KERNEL_TYPE, 0)); - } - bool asymmetric_quantize_inputs() const { - return 
GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - VerifyField<float>(verifier, VT_CELL_CLIP, 4) && - VerifyField<float>(verifier, VT_PROJ_CLIP, 4) && - VerifyField<int8_t>(verifier, VT_KERNEL_TYPE, 1) && - VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && - verifier.EndTable(); - } - LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<LSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LSTMOptionsBuilder { - typedef LSTMOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement<float>(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement<float>(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_kernel_type(tflite::LSTMKernelType kernel_type) { - fbb_.AddElement<int8_t>(LSTMOptions::VT_KERNEL_TYPE, static_cast<int8_t>(kernel_type), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement<uint8_t>(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0); - } - explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<LSTMOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<LSTMOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL, - bool asymmetric_quantize_inputs = false) { - LSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_kernel_type(kernel_type); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef UnidirectionalSequenceLSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; - float cell_clip = 0.0f; - float proj_clip = 0.0f; - bool time_major = false; - bool asymmetric_quantize_inputs = false; -}; - -struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; - typedef UnidirectionalSequenceLSTMOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_TIME_MAJOR = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - }; - 
tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField<float>(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField<float>(VT_PROJ_CLIP, 0.0f); - } - bool time_major() const { - return GetField<uint8_t>(VT_TIME_MAJOR, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - VerifyField<float>(verifier, VT_CELL_CLIP, 4) && - VerifyField<float>(verifier, VT_PROJ_CLIP, 4) && - VerifyField<uint8_t>(verifier, VT_TIME_MAJOR, 1) && - VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && - verifier.EndTable(); - } - UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct UnidirectionalSequenceLSTMOptionsBuilder { - typedef UnidirectionalSequenceLSTMOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement<float>(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_time_major(bool time_major) { - fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement<uint8_t>(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0); - } - explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<UnidirectionalSequenceLSTMOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool time_major = false, - bool asymmetric_quantize_inputs = false) { - UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceLSTMOptions 
TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; - float cell_clip = 0.0f; - float proj_clip = 0.0f; - bool merge_outputs = false; - bool time_major = true; - bool asymmetric_quantize_inputs = false; -}; - -struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceLSTMOptionsT NativeTableType; - typedef BidirectionalSequenceLSTMOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_MERGE_OUTPUTS = 10, - VT_TIME_MAJOR = 12, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField<float>(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField<float>(VT_PROJ_CLIP, 0.0f); - } - bool merge_outputs() const { - return GetField<uint8_t>(VT_MERGE_OUTPUTS, 0) != 0; - } - bool time_major() const { - return GetField<uint8_t>(VT_TIME_MAJOR, 1) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - VerifyField<float>(verifier, VT_CELL_CLIP, 4) && - VerifyField<float>(verifier, VT_PROJ_CLIP, 4) && - VerifyField<uint8_t>(verifier, VT_MERGE_OUTPUTS, 1) && - VerifyField<uint8_t>(verifier, VT_TIME_MAJOR, 1) && - VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && - verifier.EndTable(); - } - BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BidirectionalSequenceLSTMOptionsBuilder { - typedef BidirectionalSequenceLSTMOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement<float>(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_merge_outputs(bool merge_outputs) { - fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast<uint8_t>(merge_outputs), 0); - } - void add_time_major(bool time_major) { - fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast<uint8_t>(time_major), 1); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement<uint8_t>(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0); - } - explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<BidirectionalSequenceLSTMOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<BidirectionalSequenceLSTMOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> 
CreateBidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool merge_outputs = false, - bool time_major = true, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { - typedef ResizeBilinearOptions TableType; - bool align_corners = false; - bool half_pixel_centers = false; -}; - -struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeBilinearOptionsT NativeTableType; - typedef ResizeBilinearOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 8, - VT_HALF_PIXEL_CENTERS = 10 - }; - bool align_corners() const { - return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS, 1) && - VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS, 1) && - verifier.EndTable(); - } - ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<ResizeBilinearOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeBilinearOptionsBuilder { - typedef ResizeBilinearOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast<uint8_t>(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement<uint8_t>(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast<uint8_t>(half_pixel_centers), 0); - } - explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<ResizeBilinearOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<ResizeBilinearOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeBilinearOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); - return builder_.Finish(); -} - -flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable { - typedef 
ResizeNearestNeighborOptions TableType; - bool align_corners = false; - bool half_pixel_centers = false; -}; - -struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeNearestNeighborOptionsT NativeTableType; - typedef ResizeNearestNeighborOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 4, - VT_HALF_PIXEL_CENTERS = 6 - }; - bool align_corners() const { - return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS, 1) && - VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS, 1) && - verifier.EndTable(); - } - ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<ResizeNearestNeighborOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeNearestNeighborOptionsBuilder { - typedef ResizeNearestNeighborOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast<uint8_t>(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast<uint8_t>(half_pixel_centers), 0); - } - explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<ResizeNearestNeighborOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeNearestNeighborOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); - return builder_.Finish(); -} - -flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CallOptionsT : public flatbuffers::NativeTable { - typedef CallOptions TableType; - uint32_t subgraph = 0; -}; - -struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CallOptionsT NativeTableType; - typedef CallOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SUBGRAPH = 4 - }; - uint32_t subgraph() const { - return GetField<uint32_t>(VT_SUBGRAPH, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<uint32_t>(verifier, VT_SUBGRAPH, 4) && - verifier.EndTable(); - } - CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<CallOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CallOptionsBuilder { - typedef 
CallOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_subgraph(uint32_t subgraph) { - fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0); - } - explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<CallOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<CallOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<CallOptions> CreateCallOptions( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t subgraph = 0) { - CallOptionsBuilder builder_(_fbb); - builder_.add_subgraph(subgraph); - return builder_.Finish(); -} - -flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PadOptionsT : public flatbuffers::NativeTable { - typedef PadOptions TableType; -}; - -struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PadOptionsT NativeTableType; - typedef PadOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<PadOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PadOptionsBuilder { - typedef PadOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<PadOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<PadOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<PadOptions> CreatePadOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - PadOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PadV2OptionsT : public flatbuffers::NativeTable { - typedef PadV2Options TableType; -}; - -struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PadV2OptionsT NativeTableType; - typedef PadV2OptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<PadV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PadV2OptionsBuilder { - typedef PadV2Options Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<PadV2Options> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<PadV2Options>(end); - return o; - } -}; - -inline flatbuffers::Offset<PadV2Options> CreatePadV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - PadV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - 
-flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReshapeOptionsT : public flatbuffers::NativeTable { - typedef ReshapeOptions TableType; - std::vector<int32_t> new_shape{}; -}; - -struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReshapeOptionsT NativeTableType; - typedef ReshapeOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NEW_SHAPE = 4 - }; - const flatbuffers::Vector<int32_t> *new_shape() const { - return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NEW_SHAPE) && - verifier.VerifyVector(new_shape()) && - verifier.EndTable(); - } - ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<ReshapeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReshapeOptionsBuilder { - typedef ReshapeOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape) { - fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); - } - explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<ReshapeOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<ReshapeOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0) { - ReshapeOptionsBuilder builder_(_fbb); - builder_.add_new_shape(new_shape); - return builder_.Finish(); -} - -inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector<int32_t> *new_shape = nullptr) { - auto new_shape__ = new_shape ? 
_fbb.CreateVector<int32_t>(*new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - new_shape__); -} - -flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable { - typedef SpaceToBatchNDOptions TableType; -}; - -struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToBatchNDOptionsT NativeTableType; - typedef SpaceToBatchNDOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<SpaceToBatchNDOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceToBatchNDOptionsBuilder { - typedef SpaceToBatchNDOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<SpaceToBatchNDOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<SpaceToBatchNDOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SpaceToBatchNDOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable { - typedef BatchToSpaceNDOptions TableType; -}; - -struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchToSpaceNDOptionsT NativeTableType; - typedef BatchToSpaceNDOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<BatchToSpaceNDOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BatchToSpaceNDOptionsBuilder { - typedef BatchToSpaceNDOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<BatchToSpaceNDOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<BatchToSpaceNDOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BatchToSpaceNDOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SkipGramOptionsT : public flatbuffers::NativeTable { - typedef SkipGramOptions TableType; - int32_t 
ngram_size = 0; - int32_t max_skip_size = 0; - bool include_all_ngrams = false; -}; - -struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SkipGramOptionsT NativeTableType; - typedef SkipGramOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NGRAM_SIZE = 4, - VT_MAX_SKIP_SIZE = 6, - VT_INCLUDE_ALL_NGRAMS = 8 - }; - int32_t ngram_size() const { - return GetField<int32_t>(VT_NGRAM_SIZE, 0); - } - int32_t max_skip_size() const { - return GetField<int32_t>(VT_MAX_SKIP_SIZE, 0); - } - bool include_all_ngrams() const { - return GetField<uint8_t>(VT_INCLUDE_ALL_NGRAMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int32_t>(verifier, VT_NGRAM_SIZE, 4) && - VerifyField<int32_t>(verifier, VT_MAX_SKIP_SIZE, 4) && - VerifyField<uint8_t>(verifier, VT_INCLUDE_ALL_NGRAMS, 1) && - verifier.EndTable(); - } - SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<SkipGramOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SkipGramOptionsBuilder { - typedef SkipGramOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_ngram_size(int32_t ngram_size) { - fbb_.AddElement<int32_t>(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); - } - void add_max_skip_size(int32_t max_skip_size) { - fbb_.AddElement<int32_t>(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); - } - void add_include_all_ngrams(bool include_all_ngrams) { - fbb_.AddElement<uint8_t>(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast<uint8_t>(include_all_ngrams), 0); - } - explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<SkipGramOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<SkipGramOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t ngram_size = 0, - int32_t max_skip_size = 0, - bool include_all_ngrams = false) { - SkipGramOptionsBuilder builder_(_fbb); - builder_.add_max_skip_size(max_skip_size); - builder_.add_ngram_size(ngram_size); - builder_.add_include_all_ngrams(include_all_ngrams); - return builder_.Finish(); -} - -flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceToDepthOptionsT : public flatbuffers::NativeTable { - typedef SpaceToDepthOptions TableType; - int32_t block_size = 0; -}; - -struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToDepthOptionsT NativeTableType; - typedef SpaceToDepthOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField<int32_t>(VT_BLOCK_SIZE, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int32_t>(verifier, VT_BLOCK_SIZE, 4) && - verifier.EndTable(); - } - SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<SpaceToDepthOptions> Pack(flatbuffers::FlatBufferBuilder 
&_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceToDepthOptionsBuilder { - typedef SpaceToDepthOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement<int32_t>(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<SpaceToDepthOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<SpaceToDepthOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - SpaceToDepthOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); - return builder_.Finish(); -} - -flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DepthToSpaceOptionsT : public flatbuffers::NativeTable { - typedef DepthToSpaceOptions TableType; - int32_t block_size = 0; -}; - -struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DepthToSpaceOptionsT NativeTableType; - typedef DepthToSpaceOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField<int32_t>(VT_BLOCK_SIZE, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int32_t>(verifier, VT_BLOCK_SIZE, 4) && - verifier.EndTable(); - } - DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<DepthToSpaceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DepthToSpaceOptionsBuilder { - typedef DepthToSpaceOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement<int32_t>(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<DepthToSpaceOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<DepthToSpaceOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - DepthToSpaceOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); - return builder_.Finish(); -} - -flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubOptionsT : public flatbuffers::NativeTable { - typedef SubOptions TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; - bool pot_scale_int16 = true; -}; - -struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubOptionsT NativeTableType; - typedef SubOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - 
tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField<uint8_t>(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - VerifyField<uint8_t>(verifier, VT_POT_SCALE_INT16, 1) && - verifier.EndTable(); - } - SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<SubOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubOptionsBuilder { - typedef SubOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement<uint8_t>(SubOptions::VT_POT_SCALE_INT16, static_cast<uint8_t>(pot_scale_int16), 1); - } - explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<SubOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<SubOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<SubOptions> CreateSubOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - SubOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<SubOptions> CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DivOptionsT : public flatbuffers::NativeTable { - typedef DivOptions TableType; - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; -}; - -struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DivOptionsT NativeTableType; - typedef DivOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast<tflite::ActivationFunctionType>(GetField<int8_t>(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && - verifier.EndTable(); - } - DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<DivOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DivOptionsBuilder { - typedef DivOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement<int8_t>(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast<int8_t>(fused_activation_function), 0); - } 
- explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<DivOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<DivOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<DivOptions> CreateDivOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - DivOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset<DivOptions> CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TopKV2OptionsT : public flatbuffers::NativeTable { - typedef TopKV2Options TableType; -}; - -struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TopKV2OptionsT NativeTableType; - typedef TopKV2OptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<TopKV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TopKV2OptionsBuilder { - typedef TopKV2Options Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<TopKV2Options> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<TopKV2Options>(end); - return o; - } -}; - -inline flatbuffers::Offset<TopKV2Options> CreateTopKV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - TopKV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable { - typedef EmbeddingLookupSparseOptions TableType; - tflite::CombinerType combiner = tflite::CombinerType_SUM; -}; - -struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EmbeddingLookupSparseOptionsT NativeTableType; - typedef EmbeddingLookupSparseOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COMBINER = 4 - }; - tflite::CombinerType combiner() const { - return static_cast<tflite::CombinerType>(GetField<int8_t>(VT_COMBINER, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_COMBINER, 1) && - verifier.EndTable(); - } - EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<EmbeddingLookupSparseOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EmbeddingLookupSparseOptionsBuilder { - typedef EmbeddingLookupSparseOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void 
add_combiner(tflite::CombinerType combiner) { - fbb_.AddElement<int8_t>(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast<int8_t>(combiner), 0); - } - explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<EmbeddingLookupSparseOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<EmbeddingLookupSparseOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::CombinerType combiner = tflite::CombinerType_SUM) { - EmbeddingLookupSparseOptionsBuilder builder_(_fbb); - builder_.add_combiner(combiner); - return builder_.Finish(); -} - -flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GatherOptionsT : public flatbuffers::NativeTable { - typedef GatherOptions TableType; - int32_t axis = 0; - int32_t batch_dims = 0; -}; - -struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherOptionsT NativeTableType; - typedef GatherOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_BATCH_DIMS = 6 - }; - int32_t axis() const { - return GetField<int32_t>(VT_AXIS, 0); - } - int32_t batch_dims() const { - return GetField<int32_t>(VT_BATCH_DIMS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField<int32_t>(verifier, VT_AXIS, 4) && - VerifyField<int32_t>(verifier, VT_BATCH_DIMS, 4) && - verifier.EndTable(); - } - GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<GatherOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GatherOptionsBuilder { - typedef GatherOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement<int32_t>(GatherOptions::VT_AXIS, axis, 0); - } - void add_batch_dims(int32_t batch_dims) { - fbb_.AddElement<int32_t>(GatherOptions::VT_BATCH_DIMS, batch_dims, 0); - } - explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<GatherOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<GatherOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<GatherOptions> CreateGatherOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - int32_t batch_dims = 0) { - GatherOptionsBuilder builder_(_fbb); - builder_.add_batch_dims(batch_dims); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeOptionsT : public flatbuffers::NativeTable { - typedef TransposeOptions TableType; -}; - -struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeOptionsT NativeTableType; - typedef TransposeOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void 
UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<TransposeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeOptionsBuilder { - typedef TransposeOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<TransposeOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<TransposeOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<TransposeOptions> CreateTransposeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TransposeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExpOptionsT : public flatbuffers::NativeTable { - typedef ExpOptions TableType; -}; - -struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpOptionsT NativeTableType; - typedef ExpOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<ExpOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExpOptionsBuilder { - typedef ExpOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<ExpOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<ExpOptions>(end); - return o; - } -}; - -inline flatbuffers::Offset<ExpOptions> CreateExpOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CosOptionsT : public flatbuffers::NativeTable { - typedef CosOptions TableType; -}; - -struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CosOptionsT NativeTableType; - typedef CosOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<CosOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CosOptionsBuilder { - typedef CosOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset<CosOptions> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<CosOptions>(end); - return o; - } -}; - -inline 
flatbuffers::Offset CreateCosOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - CosOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReducerOptionsT : public flatbuffers::NativeTable { - typedef ReducerOptions TableType; - bool keep_dims = false; -}; - -struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReducerOptionsT NativeTableType; - typedef ReducerOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KEEP_DIMS = 4 - }; - bool keep_dims() const { - return GetField(VT_KEEP_DIMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KEEP_DIMS, 1) && - verifier.EndTable(); - } - ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReducerOptionsBuilder { - typedef ReducerOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_keep_dims(bool keep_dims) { - fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); - } - explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReducerOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool keep_dims = false) { - ReducerOptionsBuilder builder_(_fbb); - builder_.add_keep_dims(keep_dims); - return builder_.Finish(); -} - -flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SqueezeOptionsT : public flatbuffers::NativeTable { - typedef SqueezeOptions TableType; - std::vector squeeze_dims{}; -}; - -struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SqueezeOptionsT NativeTableType; - typedef SqueezeOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SQUEEZE_DIMS = 4 - }; - const flatbuffers::Vector *squeeze_dims() const { - return GetPointer *>(VT_SQUEEZE_DIMS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SQUEEZE_DIMS) && - verifier.VerifyVector(squeeze_dims()) && - verifier.EndTable(); - } - SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SqueezeOptionsBuilder { - typedef SqueezeOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_squeeze_dims(flatbuffers::Offset> squeeze_dims) { - fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); - } - explicit 
SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSqueezeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> squeeze_dims = 0) { - SqueezeOptionsBuilder builder_(_fbb); - builder_.add_squeeze_dims(squeeze_dims); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSqueezeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *squeeze_dims = nullptr) { - auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector(*squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - squeeze_dims__); -} - -flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SplitOptionsT : public flatbuffers::NativeTable { - typedef SplitOptions TableType; - int32_t num_splits = 0; -}; - -struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitOptionsT NativeTableType; - typedef SplitOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 - }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS, 4) && - verifier.EndTable(); - } - SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SplitOptionsBuilder { - typedef SplitOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitOptions::VT_NUM_SPLITS, num_splits, 0); - } - explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSplitOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SplitVOptionsT : public flatbuffers::NativeTable { - typedef SplitVOptions TableType; - int32_t num_splits = 0; -}; - -struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitVOptionsT NativeTableType; - typedef SplitVOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 - }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS, 4) && - verifier.EndTable(); - } - SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void 
UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SplitVOptionsBuilder { - typedef SplitVOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); - } - explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSplitVOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitVOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct StridedSliceOptionsT : public flatbuffers::NativeTable { - typedef StridedSliceOptions TableType; - int32_t begin_mask = 0; - int32_t end_mask = 0; - int32_t ellipsis_mask = 0; - int32_t new_axis_mask = 0; - int32_t shrink_axis_mask = 0; -}; - -struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef StridedSliceOptionsT NativeTableType; - typedef StridedSliceOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BEGIN_MASK = 4, - VT_END_MASK = 6, - VT_ELLIPSIS_MASK = 8, - VT_NEW_AXIS_MASK = 10, - VT_SHRINK_AXIS_MASK = 12 - }; - int32_t begin_mask() const { - return GetField(VT_BEGIN_MASK, 0); - } - int32_t end_mask() const { - return GetField(VT_END_MASK, 0); - } - int32_t ellipsis_mask() const { - return GetField(VT_ELLIPSIS_MASK, 0); - } - int32_t new_axis_mask() const { - return GetField(VT_NEW_AXIS_MASK, 0); - } - int32_t shrink_axis_mask() const { - return GetField(VT_SHRINK_AXIS_MASK, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BEGIN_MASK, 4) && - VerifyField(verifier, VT_END_MASK, 4) && - VerifyField(verifier, VT_ELLIPSIS_MASK, 4) && - VerifyField(verifier, VT_NEW_AXIS_MASK, 4) && - VerifyField(verifier, VT_SHRINK_AXIS_MASK, 4) && - verifier.EndTable(); - } - StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct StridedSliceOptionsBuilder { - typedef StridedSliceOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_begin_mask(int32_t begin_mask) { - fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); - } - void add_end_mask(int32_t end_mask) { - fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); - } - void add_ellipsis_mask(int32_t ellipsis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); - } - void add_new_axis_mask(int32_t new_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); - } - void 
add_shrink_axis_mask(int32_t shrink_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); - } - explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateStridedSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t begin_mask = 0, - int32_t end_mask = 0, - int32_t ellipsis_mask = 0, - int32_t new_axis_mask = 0, - int32_t shrink_axis_mask = 0) { - StridedSliceOptionsBuilder builder_(_fbb); - builder_.add_shrink_axis_mask(shrink_axis_mask); - builder_.add_new_axis_mask(new_axis_mask); - builder_.add_ellipsis_mask(ellipsis_mask); - builder_.add_end_mask(end_mask); - builder_.add_begin_mask(begin_mask); - return builder_.Finish(); -} - -flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogSoftmaxOptionsT : public flatbuffers::NativeTable { - typedef LogSoftmaxOptions TableType; -}; - -struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogSoftmaxOptionsT NativeTableType; - typedef LogSoftmaxOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogSoftmaxOptionsBuilder { - typedef LogSoftmaxOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLogSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogSoftmaxOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CastOptionsT : public flatbuffers::NativeTable { - typedef CastOptions TableType; - tflite::TensorType in_data_type = tflite::TensorType_FLOAT32; - tflite::TensorType out_data_type = tflite::TensorType_FLOAT32; -}; - -struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CastOptionsT NativeTableType; - typedef CastOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_IN_DATA_TYPE = 4, - VT_OUT_DATA_TYPE = 6 - }; - tflite::TensorType in_data_type() const { - return static_cast(GetField(VT_IN_DATA_TYPE, 0)); - } - tflite::TensorType out_data_type() const { - return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_IN_DATA_TYPE, 1) && - VerifyField(verifier, VT_OUT_DATA_TYPE, 1) && - verifier.EndTable(); - } - 
CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CastOptionsBuilder { - typedef CastOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_in_data_type(tflite::TensorType in_data_type) { - fbb_.AddElement(CastOptions::VT_IN_DATA_TYPE, static_cast(in_data_type), 0); - } - void add_out_data_type(tflite::TensorType out_data_type) { - fbb_.AddElement(CastOptions::VT_OUT_DATA_TYPE, static_cast(out_data_type), 0); - } - explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCastOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType in_data_type = tflite::TensorType_FLOAT32, - tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) { - CastOptionsBuilder builder_(_fbb); - builder_.add_out_data_type(out_data_type); - builder_.add_in_data_type(in_data_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DequantizeOptionsT : public flatbuffers::NativeTable { - typedef DequantizeOptions TableType; -}; - -struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DequantizeOptionsT NativeTableType; - typedef DequantizeOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DequantizeOptionsBuilder { - typedef DequantizeOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDequantizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - DequantizeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MaximumMinimumOptionsT : public flatbuffers::NativeTable { - typedef MaximumMinimumOptions TableType; -}; - -struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MaximumMinimumOptionsT NativeTableType; - typedef MaximumMinimumOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MaximumMinimumOptionsT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MaximumMinimumOptionsBuilder { - typedef MaximumMinimumOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMaximumMinimumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MaximumMinimumOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TileOptionsT : public flatbuffers::NativeTable { - typedef TileOptions TableType; -}; - -struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TileOptionsT NativeTableType; - typedef TileOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TileOptionsBuilder { - typedef TileOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTileOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TileOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMaxOptionsT : public flatbuffers::NativeTable { - typedef ArgMaxOptions TableType; - tflite::TensorType output_type = tflite::TensorType_FLOAT32; -}; - -struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMaxOptionsT NativeTableType; - typedef ArgMaxOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE, 1) && - verifier.EndTable(); - } - ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMaxOptionsBuilder { - typedef ArgMaxOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMaxOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMinOptionsT : public flatbuffers::NativeTable { - typedef ArgMinOptions TableType; - tflite::TensorType output_type = tflite::TensorType_FLOAT32; -}; - -struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMinOptionsT NativeTableType; - typedef ArgMinOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE, 1) && - verifier.EndTable(); - } - ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMinOptionsBuilder { - typedef ArgMinOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMinOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMinOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMinOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GreaterOptionsT : public flatbuffers::NativeTable { - typedef GreaterOptions TableType; -}; - -struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterOptionsT NativeTableType; - typedef GreaterOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterOptionsT *_o, 
const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GreaterOptionsBuilder { - typedef GreaterOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGreaterOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GreaterEqualOptionsT : public flatbuffers::NativeTable { - typedef GreaterEqualOptions TableType; -}; - -struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterEqualOptionsT NativeTableType; - typedef GreaterEqualOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GreaterEqualOptionsBuilder { - typedef GreaterEqualOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGreaterEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LessOptionsT : public flatbuffers::NativeTable { - typedef LessOptions TableType; -}; - -struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessOptionsT NativeTableType; - typedef LessOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LessOptionsBuilder { - typedef LessOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = 
fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLessOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LessEqualOptionsT : public flatbuffers::NativeTable { - typedef LessEqualOptions TableType; -}; - -struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessEqualOptionsT NativeTableType; - typedef LessEqualOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LessEqualOptionsBuilder { - typedef LessEqualOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLessEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NegOptionsT : public flatbuffers::NativeTable { - typedef NegOptions TableType; -}; - -struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NegOptionsT NativeTableType; - typedef NegOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NegOptionsBuilder { - typedef NegOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNegOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - NegOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SelectOptionsT : public flatbuffers::NativeTable { - typedef SelectOptions TableType; -}; - -struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SelectOptionsT NativeTableType; 
- typedef SelectOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SelectOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SelectOptionsBuilder { - typedef SelectOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSelectOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SelectOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SliceOptionsT : public flatbuffers::NativeTable { - typedef SliceOptions TableType; -}; - -struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SliceOptionsT NativeTableType; - typedef SliceOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SliceOptionsBuilder { - typedef SliceOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SliceOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeConvOptionsT : public flatbuffers::NativeTable { - typedef TransposeConvOptions TableType; - tflite::Padding padding = tflite::Padding_SAME; - int32_t stride_w = 0; - int32_t stride_h = 0; -}; - -struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeConvOptionsT NativeTableType; - typedef TransposeConvOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - 
VerifyField(verifier, VT_PADDING, 1) && - VerifyField(verifier, VT_STRIDE_W, 4) && - VerifyField(verifier, VT_STRIDE_H, 4) && - verifier.EndTable(); - } - TransposeConvOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeConvOptionsBuilder { - typedef TransposeConvOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(TransposeConvOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); - } - explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTransposeConvOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0) { - TransposeConvOptionsBuilder builder_(_fbb); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExpandDimsOptionsT : public flatbuffers::NativeTable { - typedef ExpandDimsOptions TableType; -}; - -struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpandDimsOptionsT NativeTableType; - typedef ExpandDimsOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ExpandDimsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExpandDimsOptionsBuilder { - typedef ExpandDimsOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateExpandDimsOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpandDimsOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SparseToDenseOptionsT : public flatbuffers::NativeTable { - typedef SparseToDenseOptions TableType; - bool validate_indices = false; -}; - -struct 
SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SparseToDenseOptionsT NativeTableType; - typedef SparseToDenseOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALIDATE_INDICES = 4 - }; - bool validate_indices() const { - return GetField(VT_VALIDATE_INDICES, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALIDATE_INDICES, 1) && - verifier.EndTable(); - } - SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SparseToDenseOptionsBuilder { - typedef SparseToDenseOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_validate_indices(bool validate_indices) { - fbb_.AddElement(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast(validate_indices), 0); - } - explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSparseToDenseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool validate_indices = false) { - SparseToDenseOptionsBuilder builder_(_fbb); - builder_.add_validate_indices(validate_indices); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EqualOptionsT : public flatbuffers::NativeTable { - typedef EqualOptions TableType; -}; - -struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EqualOptionsT NativeTableType; - typedef EqualOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - EqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EqualOptionsBuilder { - typedef EqualOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - EqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NotEqualOptionsT : public flatbuffers::NativeTable { - typedef NotEqualOptions TableType; -}; - -struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { 
- typedef NotEqualOptionsT NativeTableType; - typedef NotEqualOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NotEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NotEqualOptionsBuilder { - typedef NotEqualOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNotEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - NotEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ShapeOptionsT : public flatbuffers::NativeTable { - typedef ShapeOptions TableType; - tflite::TensorType out_type = tflite::TensorType_FLOAT32; -}; - -struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ShapeOptionsT NativeTableType; - typedef ShapeOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUT_TYPE = 4 - }; - tflite::TensorType out_type() const { - return static_cast(GetField(VT_OUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUT_TYPE, 1) && - verifier.EndTable(); - } - ShapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ShapeOptionsBuilder { - typedef ShapeOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_out_type(tflite::TensorType out_type) { - fbb_.AddElement(ShapeOptions::VT_OUT_TYPE, static_cast(out_type), 0); - } - explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateShapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType out_type = tflite::TensorType_FLOAT32) { - ShapeOptionsBuilder builder_(_fbb); - builder_.add_out_type(out_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RankOptionsT : public flatbuffers::NativeTable { - typedef RankOptions TableType; -}; - -struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RankOptionsT NativeTableType; - typedef RankOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier 
&verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RankOptionsBuilder { - typedef RankOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRankOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - RankOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PowOptionsT : public flatbuffers::NativeTable { - typedef PowOptions TableType; -}; - -struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PowOptionsT NativeTableType; - typedef PowOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PowOptionsBuilder { - typedef PowOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePowOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - PowOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FakeQuantOptionsT : public flatbuffers::NativeTable { - typedef FakeQuantOptions TableType; - float min = 0.0f; - float max = 0.0f; - int32_t num_bits = 0; - bool narrow_range = false; -}; - -struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FakeQuantOptionsT NativeTableType; - typedef FakeQuantOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MIN = 4, - VT_MAX = 6, - VT_NUM_BITS = 8, - VT_NARROW_RANGE = 10 - }; - float min() const { - return GetField(VT_MIN, 0.0f); - } - float max() const { - return GetField(VT_MAX, 0.0f); - } - int32_t num_bits() const { - return GetField(VT_NUM_BITS, 0); - } - bool narrow_range() const { - return GetField(VT_NARROW_RANGE, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MIN, 4) && - VerifyField(verifier, VT_MAX, 4) && - VerifyField(verifier, VT_NUM_BITS, 4) 
&& - VerifyField(verifier, VT_NARROW_RANGE, 1) && - verifier.EndTable(); - } - FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FakeQuantOptionsBuilder { - typedef FakeQuantOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_min(float min) { - fbb_.AddElement(FakeQuantOptions::VT_MIN, min, 0.0f); - } - void add_max(float max) { - fbb_.AddElement(FakeQuantOptions::VT_MAX, max, 0.0f); - } - void add_num_bits(int32_t num_bits) { - fbb_.AddElement(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); - } - void add_narrow_range(bool narrow_range) { - fbb_.AddElement(FakeQuantOptions::VT_NARROW_RANGE, static_cast(narrow_range), 0); - } - explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFakeQuantOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float min = 0.0f, - float max = 0.0f, - int32_t num_bits = 0, - bool narrow_range = false) { - FakeQuantOptionsBuilder builder_(_fbb); - builder_.add_num_bits(num_bits); - builder_.add_max(max); - builder_.add_min(min); - builder_.add_narrow_range(narrow_range); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PackOptionsT : public flatbuffers::NativeTable { - typedef PackOptions TableType; - int32_t values_count = 0; - int32_t axis = 0; -}; - -struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PackOptionsT NativeTableType; - typedef PackOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES_COUNT = 4, - VT_AXIS = 6 - }; - int32_t values_count() const { - return GetField(VT_VALUES_COUNT, 0); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALUES_COUNT, 4) && - VerifyField(verifier, VT_AXIS, 4) && - verifier.EndTable(); - } - PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PackOptionsBuilder { - typedef PackOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values_count(int32_t values_count) { - fbb_.AddElement(PackOptions::VT_VALUES_COUNT, values_count, 0); - } - void add_axis(int32_t axis) { - fbb_.AddElement(PackOptions::VT_AXIS, axis, 0); - } - explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset 
CreatePackOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t values_count = 0,
-    int32_t axis = 0) {
-  PackOptionsBuilder builder_(_fbb);
-  builder_.add_axis(axis);
-  builder_.add_values_count(values_count);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<PackOptions> CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct LogicalOrOptionsT : public flatbuffers::NativeTable {
-  typedef LogicalOrOptions TableType;
-};
-
-struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef LogicalOrOptionsT NativeTableType;
-  typedef LogicalOrOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogicalOrOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct LogicalOrOptionsBuilder {
-  typedef LogicalOrOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<LogicalOrOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogicalOrOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  LogicalOrOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct OneHotOptionsT : public flatbuffers::NativeTable {
-  typedef OneHotOptions TableType;
-  int32_t axis = 0;
-};
-
-struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef OneHotOptionsT NativeTableType;
-  typedef OneHotOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_AXIS = 4
-  };
-  int32_t axis() const {
-    return GetField<int32_t>(VT_AXIS, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
-           verifier.EndTable();
-  }
-  OneHotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<OneHotOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct OneHotOptionsBuilder {
-  typedef OneHotOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_axis(int32_t axis) {
-    fbb_.AddElement<int32_t>(OneHotOptions::VT_AXIS, axis, 0);
-  }
-  explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<OneHotOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<OneHotOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t axis = 0) {
-  OneHotOptionsBuilder builder_(_fbb);
-  builder_.add_axis(axis);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct AbsOptionsT : public flatbuffers::NativeTable {
-  typedef AbsOptions TableType;
-};
-
-struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef AbsOptionsT NativeTableType;
-  typedef AbsOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AbsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct AbsOptionsBuilder {
-  typedef AbsOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<AbsOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AbsOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  AbsOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct HardSwishOptionsT : public flatbuffers::NativeTable {
-  typedef HardSwishOptions TableType;
-};
-
-struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef HardSwishOptionsT NativeTableType;
-  typedef HardSwishOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HardSwishOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct HardSwishOptionsBuilder {
-  typedef HardSwishOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<HardSwishOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HardSwishOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  HardSwishOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct LogicalAndOptionsT : public flatbuffers::NativeTable {
-  typedef LogicalAndOptions TableType;
-};
-
-struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef LogicalAndOptionsT NativeTableType;
-  typedef LogicalAndOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  LogicalAndOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogicalAndOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct LogicalAndOptionsBuilder {
-  typedef LogicalAndOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<LogicalAndOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogicalAndOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  LogicalAndOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct LogicalNotOptionsT : public flatbuffers::NativeTable {
-  typedef LogicalNotOptions TableType;
-};
-
-struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef LogicalNotOptionsT NativeTableType;
-  typedef LogicalNotOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  LogicalNotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LogicalNotOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct LogicalNotOptionsBuilder {
-  typedef LogicalNotOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<LogicalNotOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LogicalNotOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  LogicalNotOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct UnpackOptionsT : public flatbuffers::NativeTable {
-  typedef UnpackOptions TableType;
-  int32_t num = 0;
-  int32_t axis = 0;
-};
-
-struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef UnpackOptionsT NativeTableType;
-  typedef UnpackOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NUM = 4,
-    VT_AXIS = 6
-  };
-  int32_t num() const {
-    return GetField<int32_t>(VT_NUM, 0);
-  }
-  int32_t axis() const {
-    return GetField<int32_t>(VT_AXIS, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_NUM, 4) &&
-           VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
-           verifier.EndTable();
-  }
-  UnpackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnpackOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct UnpackOptionsBuilder {
-  typedef UnpackOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_num(int32_t num) {
-    fbb_.AddElement<int32_t>(UnpackOptions::VT_NUM, num, 0);
-  }
-  void add_axis(int32_t axis) {
-    fbb_.AddElement<int32_t>(UnpackOptions::VT_AXIS, axis, 0);
-  }
-  explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<UnpackOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnpackOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t num = 0,
-    int32_t axis = 0) {
-  UnpackOptionsBuilder builder_(_fbb);
-  builder_.add_axis(axis);
-  builder_.add_num(num);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct FloorDivOptionsT : public flatbuffers::NativeTable {
-  typedef FloorDivOptions TableType;
-};
-
-struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef FloorDivOptionsT NativeTableType;
-  typedef FloorDivOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  FloorDivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FloorDivOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct FloorDivOptionsBuilder {
-  typedef FloorDivOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<FloorDivOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FloorDivOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  FloorDivOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SquareOptionsT : public flatbuffers::NativeTable {
-  typedef SquareOptions TableType;
-};
-
-struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SquareOptionsT NativeTableType;
-  typedef SquareOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  SquareOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SquareOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SquareOptionsBuilder {
-  typedef SquareOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SquareOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SquareOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  SquareOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ZerosLikeOptionsT : public flatbuffers::NativeTable {
-  typedef ZerosLikeOptions TableType;
-};
-
-struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ZerosLikeOptionsT NativeTableType;
-  typedef ZerosLikeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  ZerosLikeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ZerosLikeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ZerosLikeOptionsBuilder {
-  typedef ZerosLikeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ZerosLikeOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ZerosLikeOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  ZerosLikeOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct FillOptionsT : public flatbuffers::NativeTable {
-  typedef FillOptions TableType;
-};
-
-struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef FillOptionsT NativeTableType;
-  typedef FillOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  FillOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FillOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct FillOptionsBuilder {
-  typedef FillOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<FillOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FillOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  FillOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct FloorModOptionsT : public flatbuffers::NativeTable {
-  typedef FloorModOptions TableType;
-};
-
-struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef FloorModOptionsT NativeTableType;
-  typedef FloorModOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<FloorModOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct FloorModOptionsBuilder {
-  typedef FloorModOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<FloorModOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<FloorModOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  FloorModOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct RangeOptionsT : public flatbuffers::NativeTable {
-  typedef RangeOptions TableType;
-};
-
-struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef RangeOptionsT NativeTableType;
-  typedef RangeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  RangeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RangeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct RangeOptionsBuilder {
-  typedef RangeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<RangeOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RangeOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  RangeOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct LeakyReluOptionsT : public flatbuffers::NativeTable {
-  typedef LeakyReluOptions TableType;
-  float alpha = 0.0f;
-};
-
-struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef LeakyReluOptionsT NativeTableType;
-  typedef LeakyReluOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_ALPHA = 4
-  };
-  float alpha() const {
-    return GetField<float>(VT_ALPHA, 0.0f);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<float>(verifier, VT_ALPHA, 4) &&
-           verifier.EndTable();
-  }
-  LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<LeakyReluOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct LeakyReluOptionsBuilder {
-  typedef LeakyReluOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_alpha(float alpha) {
-    fbb_.AddElement<float>(LeakyReluOptions::VT_ALPHA, alpha, 0.0f);
-  }
-  explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<LeakyReluOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<LeakyReluOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    float alpha = 0.0f) {
-  LeakyReluOptionsBuilder builder_(_fbb);
-  builder_.add_alpha(alpha);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable {
-  typedef SquaredDifferenceOptions TableType;
-};
-
-struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SquaredDifferenceOptionsT NativeTableType;
-  typedef SquaredDifferenceOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  SquaredDifferenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SquaredDifferenceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SquaredDifferenceOptionsBuilder {
-  typedef SquaredDifferenceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SquaredDifferenceOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SquaredDifferenceOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  SquaredDifferenceOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct MirrorPadOptionsT : public flatbuffers::NativeTable {
-  typedef MirrorPadOptions TableType;
-  tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT;
-};
-
-struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef MirrorPadOptionsT NativeTableType;
-  typedef MirrorPadOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_MODE = 4
-  };
-  tflite::MirrorPadMode mode() const {
-    return static_cast<tflite::MirrorPadMode>(GetField<int8_t>(VT_MODE, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_MODE, 1) &&
-           verifier.EndTable();
-  }
-  MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MirrorPadOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct MirrorPadOptionsBuilder {
-  typedef MirrorPadOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_mode(tflite::MirrorPadMode mode) {
-    fbb_.AddElement<int8_t>(MirrorPadOptions::VT_MODE, static_cast<int8_t>(mode), 0);
-  }
-  explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<MirrorPadOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MirrorPadOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT) {
-  MirrorPadOptionsBuilder builder_(_fbb);
-  builder_.add_mode(mode);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct UniqueOptionsT : public flatbuffers::NativeTable {
-  typedef UniqueOptions TableType;
-  tflite::TensorType idx_out_type = tflite::TensorType_INT32;
-};
-
-struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef UniqueOptionsT NativeTableType;
-  typedef UniqueOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_IDX_OUT_TYPE = 4
-  };
-  tflite::TensorType idx_out_type() const {
-    return static_cast<tflite::TensorType>(GetField<int8_t>(VT_IDX_OUT_TYPE, 2));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_IDX_OUT_TYPE, 1) &&
-           verifier.EndTable();
-  }
-  UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UniqueOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct UniqueOptionsBuilder {
-  typedef UniqueOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_idx_out_type(tflite::TensorType idx_out_type) {
-    fbb_.AddElement<int8_t>(UniqueOptions::VT_IDX_OUT_TYPE, static_cast<int8_t>(idx_out_type), 2);
-  }
-  explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<UniqueOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UniqueOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    tflite::TensorType idx_out_type = tflite::TensorType_INT32) {
-  UniqueOptionsBuilder builder_(_fbb);
-  builder_.add_idx_out_type(idx_out_type);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ReverseV2OptionsT : public flatbuffers::NativeTable {
-  typedef ReverseV2Options TableType;
-};
-
-struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ReverseV2OptionsT NativeTableType;
-  typedef ReverseV2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReverseV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ReverseV2OptionsBuilder {
-  typedef ReverseV2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ReverseV2Options> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReverseV2Options>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  ReverseV2OptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct AddNOptionsT : public flatbuffers::NativeTable {
-  typedef AddNOptions TableType;
-};
-
-struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef AddNOptionsT NativeTableType;
-  typedef AddNOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AddNOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct AddNOptionsBuilder {
-  typedef AddNOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<AddNOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AddNOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  AddNOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct GatherNdOptionsT : public flatbuffers::NativeTable {
-  typedef GatherNdOptions TableType;
-};
-
-struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef GatherNdOptionsT NativeTableType;
-  typedef GatherNdOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GatherNdOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct GatherNdOptionsBuilder {
-  typedef GatherNdOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<GatherNdOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GatherNdOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  GatherNdOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct WhereOptionsT : public flatbuffers::NativeTable {
-  typedef WhereOptions TableType;
-};
-
-struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef WhereOptionsT NativeTableType;
-  typedef WhereOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<WhereOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct WhereOptionsBuilder {
-  typedef WhereOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<WhereOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<WhereOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  WhereOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ReverseSequenceOptionsT : public flatbuffers::NativeTable {
-  typedef ReverseSequenceOptions TableType;
-  int32_t seq_dim = 0;
-  int32_t batch_dim = 0;
-};
-
-struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ReverseSequenceOptionsT NativeTableType;
-  typedef ReverseSequenceOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_SEQ_DIM = 4,
-    VT_BATCH_DIM = 6
-  };
-  int32_t seq_dim() const {
-    return GetField<int32_t>(VT_SEQ_DIM, 0);
-  }
-  int32_t batch_dim() const {
-    return GetField<int32_t>(VT_BATCH_DIM, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_SEQ_DIM, 4) &&
-           VerifyField<int32_t>(verifier, VT_BATCH_DIM, 4) &&
-           verifier.EndTable();
-  }
-  ReverseSequenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReverseSequenceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ReverseSequenceOptionsBuilder {
-  typedef ReverseSequenceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_seq_dim(int32_t seq_dim) {
-    fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0);
-  }
-  void add_batch_dim(int32_t batch_dim) {
-    fbb_.AddElement<int32_t>(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0);
-  }
-  explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ReverseSequenceOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReverseSequenceOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t seq_dim = 0,
-    int32_t batch_dim = 0) {
-  ReverseSequenceOptionsBuilder builder_(_fbb);
-  builder_.add_batch_dim(batch_dim);
-  builder_.add_seq_dim(seq_dim);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct MatrixDiagOptionsT : public flatbuffers::NativeTable {
-  typedef MatrixDiagOptions TableType;
-};
-
-struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef MatrixDiagOptionsT NativeTableType;
-  typedef MatrixDiagOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MatrixDiagOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct MatrixDiagOptionsBuilder {
-  typedef MatrixDiagOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<MatrixDiagOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MatrixDiagOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  MatrixDiagOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct QuantizeOptionsT : public flatbuffers::NativeTable {
-  typedef QuantizeOptions TableType;
-};
-
-struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef QuantizeOptionsT NativeTableType;
-  typedef QuantizeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  QuantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<QuantizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct QuantizeOptionsBuilder {
-  typedef QuantizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<QuantizeOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<QuantizeOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  QuantizeOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<QuantizeOptions> CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable {
-  typedef MatrixSetDiagOptions TableType;
-};
-
-struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef MatrixSetDiagOptionsT NativeTableType;
-  typedef MatrixSetDiagOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<MatrixSetDiagOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct MatrixSetDiagOptionsBuilder {
-  typedef MatrixSetDiagOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<MatrixSetDiagOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<MatrixSetDiagOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  MatrixSetDiagOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<MatrixSetDiagOptions> CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct IfOptionsT : public flatbuffers::NativeTable {
-  typedef IfOptions TableType;
-  int32_t then_subgraph_index = 0;
-  int32_t else_subgraph_index = 0;
-};
-
-struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef IfOptionsT NativeTableType;
-  typedef IfOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_THEN_SUBGRAPH_INDEX = 4,
-    VT_ELSE_SUBGRAPH_INDEX = 6
-  };
-  int32_t then_subgraph_index() const {
-    return GetField<int32_t>(VT_THEN_SUBGRAPH_INDEX, 0);
-  }
-  int32_t else_subgraph_index() const {
-    return GetField<int32_t>(VT_ELSE_SUBGRAPH_INDEX, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_THEN_SUBGRAPH_INDEX, 4) &&
-           VerifyField<int32_t>(verifier, VT_ELSE_SUBGRAPH_INDEX, 4) &&
-           verifier.EndTable();
-  }
-  IfOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<IfOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct IfOptionsBuilder {
-  typedef IfOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_then_subgraph_index(int32_t then_subgraph_index) {
-    fbb_.AddElement<int32_t>(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0);
-  }
-  void add_else_subgraph_index(int32_t else_subgraph_index) {
-    fbb_.AddElement<int32_t>(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0);
-  }
-  explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<IfOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<IfOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<IfOptions> CreateIfOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t then_subgraph_index = 0,
-    int32_t else_subgraph_index = 0) {
-  IfOptionsBuilder builder_(_fbb);
-  builder_.add_else_subgraph_index(else_subgraph_index);
-  builder_.add_then_subgraph_index(then_subgraph_index);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct CallOnceOptionsT : public flatbuffers::NativeTable {
-  typedef CallOnceOptions TableType;
-  int32_t init_subgraph_index = 0;
-};
-
-struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef CallOnceOptionsT NativeTableType;
-  typedef CallOnceOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_INIT_SUBGRAPH_INDEX = 4
-  };
-  int32_t init_subgraph_index() const {
-    return GetField<int32_t>(VT_INIT_SUBGRAPH_INDEX, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_INIT_SUBGRAPH_INDEX, 4) &&
-           verifier.EndTable();
-  }
-  CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CallOnceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct CallOnceOptionsBuilder {
-  typedef CallOnceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_init_subgraph_index(int32_t init_subgraph_index) {
-    fbb_.AddElement<int32_t>(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0);
-  }
-  explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<CallOnceOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CallOnceOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t init_subgraph_index = 0) {
-  CallOnceOptionsBuilder builder_(_fbb);
-  builder_.add_init_subgraph_index(init_subgraph_index);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct WhileOptionsT : public flatbuffers::NativeTable {
-  typedef WhileOptions TableType;
-  int32_t cond_subgraph_index = 0;
-  int32_t body_subgraph_index = 0;
-};
-
-struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef WhileOptionsT NativeTableType;
-  typedef WhileOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_COND_SUBGRAPH_INDEX = 4,
-    VT_BODY_SUBGRAPH_INDEX = 6
-  };
-  int32_t cond_subgraph_index() const {
-    return GetField<int32_t>(VT_COND_SUBGRAPH_INDEX, 0);
-  }
-  int32_t body_subgraph_index() const {
-    return GetField<int32_t>(VT_BODY_SUBGRAPH_INDEX, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_COND_SUBGRAPH_INDEX, 4) &&
-           VerifyField<int32_t>(verifier, VT_BODY_SUBGRAPH_INDEX, 4) &&
-           verifier.EndTable();
-  }
-  WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<WhileOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct WhileOptionsBuilder {
-  typedef WhileOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_cond_subgraph_index(int32_t cond_subgraph_index) {
-    fbb_.AddElement<int32_t>(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0);
-  }
-  void add_body_subgraph_index(int32_t body_subgraph_index) {
-    fbb_.AddElement<int32_t>(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0);
-  }
-  explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<WhileOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<WhileOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t cond_subgraph_index = 0,
-    int32_t body_subgraph_index = 0) {
-  WhileOptionsBuilder builder_(_fbb);
-  builder_.add_body_subgraph_index(body_subgraph_index);
-  builder_.add_cond_subgraph_index(cond_subgraph_index);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable {
-  typedef NonMaxSuppressionV4Options TableType;
-};
-
-struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef NonMaxSuppressionV4OptionsT NativeTableType;
-  typedef NonMaxSuppressionV4OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  NonMaxSuppressionV4OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<NonMaxSuppressionV4Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct NonMaxSuppressionV4OptionsBuilder {
-  typedef NonMaxSuppressionV4Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<NonMaxSuppressionV4Options> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<NonMaxSuppressionV4Options>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  NonMaxSuppressionV4OptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable {
-  typedef NonMaxSuppressionV5Options TableType;
-};
-
-struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef NonMaxSuppressionV5OptionsT NativeTableType;
-  typedef NonMaxSuppressionV5OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  NonMaxSuppressionV5OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<NonMaxSuppressionV5Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct NonMaxSuppressionV5OptionsBuilder {
-  typedef NonMaxSuppressionV5Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<NonMaxSuppressionV5Options> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<NonMaxSuppressionV5Options>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  NonMaxSuppressionV5OptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ScatterNdOptionsT : public flatbuffers::NativeTable {
-  typedef ScatterNdOptions TableType;
-};
-
-struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ScatterNdOptionsT NativeTableType;
-  typedef ScatterNdOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ScatterNdOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ScatterNdOptionsBuilder {
-  typedef ScatterNdOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ScatterNdOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ScatterNdOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  ScatterNdOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SelectV2OptionsT : public flatbuffers::NativeTable {
-  typedef SelectV2Options TableType;
-};
-
-struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SelectV2OptionsT NativeTableType;
-  typedef SelectV2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SelectV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SelectV2OptionsBuilder {
-  typedef SelectV2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SelectV2Options> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SelectV2Options>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  SelectV2OptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct DensifyOptionsT : public flatbuffers::NativeTable {
-  typedef DensifyOptions TableType;
-};
-
-struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef DensifyOptionsT NativeTableType;
-  typedef DensifyOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DensifyOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct DensifyOptionsBuilder {
-  typedef DensifyOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<DensifyOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DensifyOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  DensifyOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SegmentSumOptionsT : public flatbuffers::NativeTable {
-  typedef SegmentSumOptions TableType;
-};
-
-struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SegmentSumOptionsT NativeTableType;
-  typedef SegmentSumOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  SegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SegmentSumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SegmentSumOptionsBuilder {
-  typedef SegmentSumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SegmentSumOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SegmentSumOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  SegmentSumOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct BatchMatMulOptionsT : public flatbuffers::NativeTable {
-  typedef BatchMatMulOptions TableType;
-  bool adj_x = false;
-  bool adj_y = false;
-  bool asymmetric_quantize_inputs = false;
-};
-
-struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef BatchMatMulOptionsT NativeTableType;
-  typedef BatchMatMulOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_ADJ_X = 4,
-    VT_ADJ_Y = 6,
-    VT_ASYMMETRIC_QUANTIZE_INPUTS = 8
-  };
-  bool adj_x() const {
-    return GetField<uint8_t>(VT_ADJ_X, 0) != 0;
-  }
-  bool adj_y() const {
-    return GetField<uint8_t>(VT_ADJ_Y, 0) != 0;
-  }
-  bool asymmetric_quantize_inputs() const {
-    return GetField<uint8_t>(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint8_t>(verifier, VT_ADJ_X, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ADJ_Y, 1) &&
-           VerifyField<uint8_t>(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) &&
-           verifier.EndTable();
-  }
-  BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BatchMatMulOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct BatchMatMulOptionsBuilder {
-  typedef BatchMatMulOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_adj_x(bool adj_x) {
-    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJ_X, static_cast<uint8_t>(adj_x), 0);
-  }
-  void add_adj_y(bool adj_y) {
-    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ADJ_Y, static_cast<uint8_t>(adj_y), 0);
-  }
-  void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) {
-    fbb_.AddElement<uint8_t>(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast<uint8_t>(asymmetric_quantize_inputs), 0);
-  }
-  explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<BatchMatMulOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BatchMatMulOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    bool adj_x = false,
-    bool adj_y = false,
-    bool asymmetric_quantize_inputs = false) {
-  BatchMatMulOptionsBuilder builder_(_fbb);
-  builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs);
-  builder_.add_adj_y(adj_y);
-  builder_.add_adj_x(adj_x);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct CumsumOptionsT : public flatbuffers::NativeTable {
-  typedef CumsumOptions TableType;
-  bool exclusive = false;
-  bool reverse = false;
-};
-
-struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef CumsumOptionsT NativeTableType;
-  typedef CumsumOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_EXCLUSIVE = 4,
-    VT_REVERSE = 6
-  };
-  bool exclusive() const {
-    return GetField<uint8_t>(VT_EXCLUSIVE, 0) != 0;
-  }
-  bool reverse() const {
-    return GetField<uint8_t>(VT_REVERSE, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint8_t>(verifier, VT_EXCLUSIVE, 1) &&
-           VerifyField<uint8_t>(verifier, VT_REVERSE, 1) &&
-           verifier.EndTable();
-  }
-  CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CumsumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct CumsumOptionsBuilder {
-  typedef CumsumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_exclusive(bool exclusive) {
-    fbb_.AddElement<uint8_t>(CumsumOptions::VT_EXCLUSIVE, static_cast<uint8_t>(exclusive), 0);
-  }
-  void add_reverse(bool reverse) {
-    fbb_.AddElement<uint8_t>(CumsumOptions::VT_REVERSE, static_cast<uint8_t>(reverse), 0);
-  }
-  explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<CumsumOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CumsumOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    bool exclusive = false,
-    bool reverse = false) {
-  CumsumOptionsBuilder builder_(_fbb);
-  builder_.add_reverse(reverse);
-  builder_.add_exclusive(exclusive);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct BroadcastToOptionsT : public flatbuffers::NativeTable {
-  typedef BroadcastToOptions TableType;
-};
-
-struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef BroadcastToOptionsT NativeTableType;
-  typedef BroadcastToOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BroadcastToOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct BroadcastToOptionsBuilder {
-  typedef BroadcastToOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<BroadcastToOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BroadcastToOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  BroadcastToOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct Rfft2dOptionsT : public flatbuffers::NativeTable {
-  typedef Rfft2dOptions TableType;
-};
-
-struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
flatbuffers::Table { - typedef Rfft2dOptionsT NativeTableType; - typedef Rfft2dOptionsBuilder Builder; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - Rfft2dOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Rfft2dOptionsBuilder { - typedef Rfft2dOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - Rfft2dOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct HashtableOptionsT : public flatbuffers::NativeTable { - typedef HashtableOptions TableType; - int32_t table_id = 0; - tflite::TensorType key_dtype = tflite::TensorType_FLOAT32; - tflite::TensorType value_dtype = tflite::TensorType_FLOAT32; -}; - -struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HashtableOptionsT NativeTableType; - typedef HashtableOptionsBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TABLE_ID = 4, - VT_KEY_DTYPE = 6, - VT_VALUE_DTYPE = 8 - }; - int32_t table_id() const { - return GetField(VT_TABLE_ID, 0); - } - tflite::TensorType key_dtype() const { - return static_cast(GetField(VT_KEY_DTYPE, 0)); - } - tflite::TensorType value_dtype() const { - return static_cast(GetField(VT_VALUE_DTYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TABLE_ID, 4) && - VerifyField(verifier, VT_KEY_DTYPE, 1) && - VerifyField(verifier, VT_VALUE_DTYPE, 1) && - verifier.EndTable(); - } - HashtableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HashtableOptionsBuilder { - typedef HashtableOptions Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_table_id(int32_t table_id) { - fbb_.AddElement(HashtableOptions::VT_TABLE_ID, table_id, 0); - } - void add_key_dtype(tflite::TensorType key_dtype) { - fbb_.AddElement(HashtableOptions::VT_KEY_DTYPE, static_cast(key_dtype), 0); - } - void add_value_dtype(tflite::TensorType value_dtype) { - fbb_.AddElement(HashtableOptions::VT_VALUE_DTYPE, static_cast(value_dtype), 0); - } - explicit HashtableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline 
-inline flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int32_t table_id = 0,
-    tflite::TensorType key_dtype = tflite::TensorType_FLOAT32,
-    tflite::TensorType value_dtype = tflite::TensorType_FLOAT32) {
-  HashtableOptionsBuilder builder_(_fbb);
-  builder_.add_table_id(table_id);
-  builder_.add_value_dtype(value_dtype);
-  builder_.add_key_dtype(key_dtype);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct HashtableFindOptionsT : public flatbuffers::NativeTable {
-  typedef HashtableFindOptions TableType;
-};
-
-struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef HashtableFindOptionsT NativeTableType;
-  typedef HashtableFindOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  HashtableFindOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableFindOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct HashtableFindOptionsBuilder {
-  typedef HashtableFindOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HashtableFindOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<HashtableFindOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableFindOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  HashtableFindOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct HashtableImportOptionsT : public flatbuffers::NativeTable {
-  typedef HashtableImportOptions TableType;
-};
-
-struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef HashtableImportOptionsT NativeTableType;
-  typedef HashtableImportOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  HashtableImportOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableImportOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct HashtableImportOptionsBuilder {
-  typedef HashtableImportOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HashtableImportOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<HashtableImportOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableImportOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  HashtableImportOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct HashtableSizeOptionsT : public flatbuffers::NativeTable {
-  typedef HashtableSizeOptions TableType;
-};
-
-struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef HashtableSizeOptionsT NativeTableType;
-  typedef HashtableSizeOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  HashtableSizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<HashtableSizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct HashtableSizeOptionsBuilder {
-  typedef HashtableSizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit HashtableSizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<HashtableSizeOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<HashtableSizeOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  HashtableSizeOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct VarHandleOptionsT : public flatbuffers::NativeTable {
-  typedef VarHandleOptions TableType;
-  std::string container{};
-  std::string shared_name{};
-};
-
-struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef VarHandleOptionsT NativeTableType;
-  typedef VarHandleOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_CONTAINER = 4,
-    VT_SHARED_NAME = 6
-  };
-  const flatbuffers::String *container() const {
-    return GetPointer<const flatbuffers::String *>(VT_CONTAINER);
-  }
-  const flatbuffers::String *shared_name() const {
-    return GetPointer<const flatbuffers::String *>(VT_SHARED_NAME);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_CONTAINER) &&
-           verifier.VerifyString(container()) &&
-           VerifyOffset(verifier, VT_SHARED_NAME) &&
-           verifier.VerifyString(shared_name()) &&
-           verifier.EndTable();
-  }
-  VarHandleOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(VarHandleOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<VarHandleOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct VarHandleOptionsBuilder {
-  typedef VarHandleOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_container(flatbuffers::Offset<flatbuffers::String> container) {
-    fbb_.AddOffset(VarHandleOptions::VT_CONTAINER, container);
-  }
-  void add_shared_name(flatbuffers::Offset<flatbuffers::String> shared_name) {
-    fbb_.AddOffset(VarHandleOptions::VT_SHARED_NAME, shared_name);
-  }
-  explicit VarHandleOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<VarHandleOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<VarHandleOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> container = 0,
-    flatbuffers::Offset<flatbuffers::String> shared_name = 0) {
-  VarHandleOptionsBuilder builder_(_fbb);
-  builder_.add_shared_name(shared_name);
-  builder_.add_container(container);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *container = nullptr,
-    const char *shared_name = nullptr) {
-  auto container__ = container ? _fbb.CreateString(container) : 0;
-  auto shared_name__ = shared_name ? _fbb.CreateString(shared_name) : 0;
-  return tflite::CreateVarHandleOptions(
-      _fbb,
-      container__,
-      shared_name__);
-}
-
-flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ReadVariableOptionsT : public flatbuffers::NativeTable {
-  typedef ReadVariableOptions TableType;
-};
-
-struct ReadVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ReadVariableOptionsT NativeTableType;
-  typedef ReadVariableOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  ReadVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ReadVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ReadVariableOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ReadVariableOptionsBuilder {
-  typedef ReadVariableOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ReadVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ReadVariableOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ReadVariableOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  ReadVariableOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct AssignVariableOptionsT : public flatbuffers::NativeTable {
-  typedef AssignVariableOptions TableType;
-};
-
-struct AssignVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef AssignVariableOptionsT NativeTableType;
-  typedef AssignVariableOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  AssignVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(AssignVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<AssignVariableOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct AssignVariableOptionsBuilder {
-  typedef AssignVariableOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit AssignVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<AssignVariableOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<AssignVariableOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  AssignVariableOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct RandomOptionsT : public flatbuffers::NativeTable {
-  typedef RandomOptions TableType;
-  int64_t seed = 0;
-  int64_t seed2 = 0;
-};
-
-struct RandomOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef RandomOptionsT NativeTableType;
-  typedef RandomOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_SEED = 4,
-    VT_SEED2 = 6
-  };
-  int64_t seed() const {
-    return GetField<int64_t>(VT_SEED, 0);
-  }
-  int64_t seed2() const {
-    return GetField<int64_t>(VT_SEED2, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int64_t>(verifier, VT_SEED, 8) &&
-           VerifyField<int64_t>(verifier, VT_SEED2, 8) &&
-           verifier.EndTable();
-  }
-  RandomOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(RandomOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<RandomOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct RandomOptionsBuilder {
-  typedef RandomOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_seed(int64_t seed) {
-    fbb_.AddElement<int64_t>(RandomOptions::VT_SEED, seed, 0);
-  }
-  void add_seed2(int64_t seed2) {
-    fbb_.AddElement<int64_t>(RandomOptions::VT_SEED2, seed2, 0);
-  }
-  explicit RandomOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<RandomOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<RandomOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<RandomOptions> CreateRandomOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int64_t seed = 0,
-    int64_t seed2 = 0) {
-  RandomOptionsBuilder builder_(_fbb);
-  builder_.add_seed2(seed2);
-  builder_.add_seed(seed);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<RandomOptions> CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct BucketizeOptionsT : public flatbuffers::NativeTable {
-  typedef BucketizeOptions TableType;
-  std::vector<float> boundaries{};
-};
-
-struct BucketizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef BucketizeOptionsT NativeTableType;
-  typedef BucketizeOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_BOUNDARIES = 4
-  };
-  const flatbuffers::Vector<float> *boundaries() const {
-    return GetPointer<const flatbuffers::Vector<float> *>(VT_BOUNDARIES);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_BOUNDARIES) &&
-           verifier.VerifyVector(boundaries()) &&
-           verifier.EndTable();
-  }
-  BucketizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BucketizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BucketizeOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct BucketizeOptionsBuilder {
-  typedef BucketizeOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_boundaries(flatbuffers::Offset<flatbuffers::Vector<float>> boundaries) {
-    fbb_.AddOffset(BucketizeOptions::VT_BOUNDARIES, boundaries);
-  }
-  explicit BucketizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<BucketizeOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BucketizeOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<float>> boundaries = 0) {
-  BucketizeOptionsBuilder builder_(_fbb);
-  builder_.add_boundaries(boundaries);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptionsDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<float> *boundaries = nullptr) {
-  auto boundaries__ = boundaries ? _fbb.CreateVector<float>(*boundaries) : 0;
-  return tflite::CreateBucketizeOptions(
-      _fbb,
-      boundaries__);
-}
-
-flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct GeluOptionsT : public flatbuffers::NativeTable {
-  typedef GeluOptions TableType;
-  bool approximate = false;
-};
-
-struct GeluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef GeluOptionsT NativeTableType;
-  typedef GeluOptionsBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_APPROXIMATE = 4
-  };
-  bool approximate() const {
-    return GetField<uint8_t>(VT_APPROXIMATE, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint8_t>(verifier, VT_APPROXIMATE, 1) &&
-           verifier.EndTable();
-  }
-  GeluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(GeluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<GeluOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct GeluOptionsBuilder {
-  typedef GeluOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_approximate(bool approximate) {
-    fbb_.AddElement<uint8_t>(GeluOptions::VT_APPROXIMATE, static_cast<uint8_t>(approximate), 0);
-  }
-  explicit GeluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<GeluOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<GeluOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<GeluOptions> CreateGeluOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    bool approximate = false) {
-  GeluOptionsBuilder builder_(_fbb);
-  builder_.add_approximate(approximate);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<GeluOptions> CreateGeluOptions(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct DynamicUpdateSliceOptionsT : public flatbuffers::NativeTable {
-  typedef DynamicUpdateSliceOptions TableType;
-};
-
-struct DynamicUpdateSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef DynamicUpdateSliceOptionsT NativeTableType;
-  typedef DynamicUpdateSliceOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  DynamicUpdateSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DynamicUpdateSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DynamicUpdateSliceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct DynamicUpdateSliceOptionsBuilder {
-  typedef DynamicUpdateSliceOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit DynamicUpdateSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<DynamicUpdateSliceOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DynamicUpdateSliceOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  DynamicUpdateSliceOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct UnsortedSegmentProdOptionsT : public flatbuffers::NativeTable {
-  typedef UnsortedSegmentProdOptions TableType;
-};
-
-struct UnsortedSegmentProdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef UnsortedSegmentProdOptionsT NativeTableType;
-  typedef UnsortedSegmentProdOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  UnsortedSegmentProdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentProdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentProdOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct UnsortedSegmentProdOptionsBuilder {
-  typedef UnsortedSegmentProdOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentProdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<UnsortedSegmentProdOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentProdOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  UnsortedSegmentProdOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct UnsortedSegmentMaxOptionsT : public flatbuffers::NativeTable {
-  typedef UnsortedSegmentMaxOptions TableType;
-};
-
-struct UnsortedSegmentMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef UnsortedSegmentMaxOptionsT NativeTableType;
-  typedef UnsortedSegmentMaxOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  UnsortedSegmentMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentMaxOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct UnsortedSegmentMaxOptionsBuilder {
-  typedef UnsortedSegmentMaxOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<UnsortedSegmentMaxOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentMaxOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  UnsortedSegmentMaxOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct UnsortedSegmentSumOptionsT : public flatbuffers::NativeTable {
-  typedef UnsortedSegmentSumOptions TableType;
-};
-
-struct UnsortedSegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef UnsortedSegmentSumOptionsT NativeTableType;
-  typedef UnsortedSegmentSumOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  UnsortedSegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentSumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct UnsortedSegmentSumOptionsBuilder {
-  typedef UnsortedSegmentSumOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<UnsortedSegmentSumOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentSumOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  UnsortedSegmentSumOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct ATan2OptionsT : public flatbuffers::NativeTable {
-  typedef ATan2Options TableType;
-};
-
-struct ATan2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ATan2OptionsT NativeTableType;
-  typedef ATan2OptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  ATan2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(ATan2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<ATan2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct ATan2OptionsBuilder {
-  typedef ATan2Options Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit ATan2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<ATan2Options> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<ATan2Options>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<ATan2Options> CreateATan2Options(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  ATan2OptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<ATan2Options> CreateATan2Options(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct UnsortedSegmentMinOptionsT : public flatbuffers::NativeTable {
-  typedef UnsortedSegmentMinOptions TableType;
-};
-
-struct UnsortedSegmentMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef UnsortedSegmentMinOptionsT NativeTableType;
-  typedef UnsortedSegmentMinOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  UnsortedSegmentMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(UnsortedSegmentMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<UnsortedSegmentMinOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct UnsortedSegmentMinOptionsBuilder {
-  typedef UnsortedSegmentMinOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit UnsortedSegmentMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<UnsortedSegmentMinOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<UnsortedSegmentMinOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  UnsortedSegmentMinOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct SignOptionsT : public flatbuffers::NativeTable {
-  typedef SignOptions TableType;
-};
-
-struct SignOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef SignOptionsT NativeTableType;
-  typedef SignOptionsBuilder Builder;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  SignOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(SignOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<SignOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct SignOptionsBuilder {
-  typedef SignOptions Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit SignOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<SignOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<SignOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<SignOptions> CreateSignOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  SignOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<SignOptions> CreateSignOptions(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct OperatorCodeT : public flatbuffers::NativeTable {
-  typedef OperatorCode TableType;
-  int8_t deprecated_builtin_code = 0;
-  std::string custom_code{};
-  int32_t version = 1;
-  tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD;
-};
-
-struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef OperatorCodeT NativeTableType;
-  typedef OperatorCodeBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_DEPRECATED_BUILTIN_CODE = 4,
-    VT_CUSTOM_CODE = 6,
-    VT_VERSION = 8,
-    VT_BUILTIN_CODE = 10
-  };
-  int8_t deprecated_builtin_code() const {
-    return GetField<int8_t>(VT_DEPRECATED_BUILTIN_CODE, 0);
-  }
-  const flatbuffers::String *custom_code() const {
-    return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE);
-  }
-  int32_t version() const {
-    return GetField<int32_t>(VT_VERSION, 1);
-  }
-  tflite::BuiltinOperator builtin_code() const {
-    return static_cast<tflite::BuiltinOperator>(GetField<int32_t>(VT_BUILTIN_CODE, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_DEPRECATED_BUILTIN_CODE, 1) &&
-           VerifyOffset(verifier, VT_CUSTOM_CODE) &&
-           verifier.VerifyString(custom_code()) &&
-           VerifyField<int32_t>(verifier, VT_VERSION, 4) &&
-           VerifyField<int32_t>(verifier, VT_BUILTIN_CODE, 4) &&
-           verifier.EndTable();
-  }
-  OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<OperatorCode> Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct OperatorCodeBuilder {
-  typedef OperatorCode Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_deprecated_builtin_code(int8_t deprecated_builtin_code) {
-    fbb_.AddElement<int8_t>(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0);
-  }
-  void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code) {
-    fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code);
-  }
-  void add_version(int32_t version) {
-    fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1);
-  }
-  void add_builtin_code(tflite::BuiltinOperator builtin_code) {
-    fbb_.AddElement<int32_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int32_t>(builtin_code), 0);
-  }
-  explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<OperatorCode> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<OperatorCode>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int8_t deprecated_builtin_code = 0,
-    flatbuffers::Offset<flatbuffers::String> custom_code = 0,
-    int32_t version = 1,
-    tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) {
-  OperatorCodeBuilder builder_(_fbb);
-  builder_.add_builtin_code(builtin_code);
-  builder_.add_version(version);
-  builder_.add_custom_code(custom_code);
-  builder_.add_deprecated_builtin_code(deprecated_builtin_code);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    int8_t deprecated_builtin_code = 0,
-    const char *custom_code = nullptr,
-    int32_t version = 1,
-    tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) {
-  auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0;
-  return tflite::CreateOperatorCode(
-      _fbb,
-      deprecated_builtin_code,
-      custom_code__,
-      version,
-      builtin_code);
-}
-
-flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct OperatorT : public flatbuffers::NativeTable {
-  typedef Operator TableType;
-  uint32_t opcode_index = 0;
-  std::vector<int32_t> inputs{};
-  std::vector<int32_t> outputs{};
-  tflite::BuiltinOptionsUnion builtin_options{};
-  std::vector<uint8_t> custom_options{};
-  tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS;
-  std::vector<bool> mutating_variable_inputs{};
-  std::vector<int32_t> intermediates{};
-};
-
-struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef OperatorT NativeTableType;
-  typedef OperatorBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_OPCODE_INDEX = 4,
-    VT_INPUTS = 6,
-    VT_OUTPUTS = 8,
-    VT_BUILTIN_OPTIONS_TYPE = 10,
-    VT_BUILTIN_OPTIONS = 12,
-    VT_CUSTOM_OPTIONS = 14,
-    VT_CUSTOM_OPTIONS_FORMAT = 16,
-    VT_MUTATING_VARIABLE_INPUTS = 18,
-    VT_INTERMEDIATES = 20
-  };
-  uint32_t opcode_index() const {
-    return GetField<uint32_t>(VT_OPCODE_INDEX, 0);
-  }
-  const flatbuffers::Vector<int32_t> *inputs() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTS);
-  }
-  const flatbuffers::Vector<int32_t> *outputs() const {
-    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTS);
-  }
-  tflite::BuiltinOptions builtin_options_type() const {
-    return static_cast<tflite::BuiltinOptions>(GetField<uint8_t>(VT_BUILTIN_OPTIONS_TYPE, 0));
-  }
-  const void *builtin_options() const {
-    return GetPointer<const void *>(VT_BUILTIN_OPTIONS);
-  }
-  template<typename T> const T *builtin_options_as() const;
-  const tflite::Conv2DOptions *builtin_options_as_Conv2DOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_Conv2DOptions ? static_cast<const tflite::Conv2DOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_DepthwiseConv2DOptions ? static_cast<const tflite::DepthwiseConv2DOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ConcatEmbeddingsOptions ? static_cast<const tflite::ConcatEmbeddingsOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LSHProjectionOptions ? static_cast<const tflite::LSHProjectionOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::Pool2DOptions *builtin_options_as_Pool2DOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_Pool2DOptions ? static_cast<const tflite::Pool2DOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SVDFOptions *builtin_options_as_SVDFOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SVDFOptions ? static_cast<const tflite::SVDFOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::RNNOptions *builtin_options_as_RNNOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_RNNOptions ? static_cast<const tflite::RNNOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_FullyConnectedOptions ? static_cast<const tflite::FullyConnectedOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SoftmaxOptions ? static_cast<const tflite::SoftmaxOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ConcatenationOptions ? static_cast<const tflite::ConcatenationOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::AddOptions *builtin_options_as_AddOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_AddOptions ? static_cast<const tflite::AddOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::L2NormOptions *builtin_options_as_L2NormOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_L2NormOptions ? static_cast<const tflite::L2NormOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LocalResponseNormalizationOptions ? static_cast<const tflite::LocalResponseNormalizationOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LSTMOptions *builtin_options_as_LSTMOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LSTMOptions ? static_cast<const tflite::LSTMOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ResizeBilinearOptions ? static_cast<const tflite::ResizeBilinearOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::CallOptions *builtin_options_as_CallOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_CallOptions ? static_cast<const tflite::CallOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ReshapeOptions *builtin_options_as_ReshapeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ReshapeOptions ? static_cast<const tflite::ReshapeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SkipGramOptions *builtin_options_as_SkipGramOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SkipGramOptions ? static_cast<const tflite::SkipGramOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SpaceToDepthOptions ? static_cast<const tflite::SpaceToDepthOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast<const tflite::EmbeddingLookupSparseOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::MulOptions *builtin_options_as_MulOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_MulOptions ? static_cast<const tflite::MulOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::PadOptions *builtin_options_as_PadOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_PadOptions ? static_cast<const tflite::PadOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::GatherOptions *builtin_options_as_GatherOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_GatherOptions ? static_cast<const tflite::GatherOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_BatchToSpaceNDOptions ? static_cast<const tflite::BatchToSpaceNDOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SpaceToBatchNDOptions ? static_cast<const tflite::SpaceToBatchNDOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::TransposeOptions *builtin_options_as_TransposeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_TransposeOptions ? static_cast<const tflite::TransposeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ReducerOptions *builtin_options_as_ReducerOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ReducerOptions ? static_cast<const tflite::ReducerOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SubOptions *builtin_options_as_SubOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SubOptions ? static_cast<const tflite::SubOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::DivOptions *builtin_options_as_DivOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_DivOptions ? static_cast<const tflite::DivOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SqueezeOptions *builtin_options_as_SqueezeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SqueezeOptions ? static_cast<const tflite::SqueezeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SequenceRNNOptions ? static_cast<const tflite::SequenceRNNOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_StridedSliceOptions ? static_cast<const tflite::StridedSliceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ExpOptions *builtin_options_as_ExpOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ExpOptions ? static_cast<const tflite::ExpOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::TopKV2Options *builtin_options_as_TopKV2Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_TopKV2Options ? static_cast<const tflite::TopKV2Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::SplitOptions *builtin_options_as_SplitOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SplitOptions ? static_cast<const tflite::SplitOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LogSoftmaxOptions ? static_cast<const tflite::LogSoftmaxOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::CastOptions *builtin_options_as_CastOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_CastOptions ? static_cast<const tflite::CastOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::DequantizeOptions *builtin_options_as_DequantizeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_DequantizeOptions ? static_cast<const tflite::DequantizeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_MaximumMinimumOptions ? static_cast<const tflite::MaximumMinimumOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ArgMaxOptions ? static_cast<const tflite::ArgMaxOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LessOptions *builtin_options_as_LessOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LessOptions ? static_cast<const tflite::LessOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::NegOptions *builtin_options_as_NegOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_NegOptions ? static_cast<const tflite::NegOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::PadV2Options *builtin_options_as_PadV2Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_PadV2Options ? static_cast<const tflite::PadV2Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::GreaterOptions *builtin_options_as_GreaterOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_GreaterOptions ? static_cast<const tflite::GreaterOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_GreaterEqualOptions ? static_cast<const tflite::GreaterEqualOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LessEqualOptions *builtin_options_as_LessEqualOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LessEqualOptions ? static_cast<const tflite::LessEqualOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SelectOptions *builtin_options_as_SelectOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SelectOptions ? static_cast<const tflite::SelectOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SliceOptions *builtin_options_as_SliceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SliceOptions ? static_cast<const tflite::SliceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_TransposeConvOptions ? static_cast<const tflite::TransposeConvOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SparseToDenseOptions ? static_cast<const tflite::SparseToDenseOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::TileOptions *builtin_options_as_TileOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_TileOptions ? static_cast<const tflite::TileOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ExpandDimsOptions ? static_cast<const tflite::ExpandDimsOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::EqualOptions *builtin_options_as_EqualOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_EqualOptions ? static_cast<const tflite::EqualOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::NotEqualOptions *builtin_options_as_NotEqualOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_NotEqualOptions ? static_cast<const tflite::NotEqualOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ShapeOptions *builtin_options_as_ShapeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ShapeOptions ? static_cast<const tflite::ShapeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::PowOptions *builtin_options_as_PowOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_PowOptions ? static_cast<const tflite::PowOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ArgMinOptions *builtin_options_as_ArgMinOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ArgMinOptions ? static_cast<const tflite::ArgMinOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_FakeQuantOptions ? static_cast<const tflite::FakeQuantOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::PackOptions *builtin_options_as_PackOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_PackOptions ? static_cast<const tflite::PackOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LogicalOrOptions ? static_cast<const tflite::LogicalOrOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::OneHotOptions *builtin_options_as_OneHotOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_OneHotOptions ? static_cast<const tflite::OneHotOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LogicalAndOptions ? static_cast<const tflite::LogicalAndOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LogicalNotOptions ? static_cast<const tflite::LogicalNotOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UnpackOptions *builtin_options_as_UnpackOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UnpackOptions ? static_cast<const tflite::UnpackOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::FloorDivOptions *builtin_options_as_FloorDivOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_FloorDivOptions ? static_cast<const tflite::FloorDivOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SquareOptions *builtin_options_as_SquareOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SquareOptions ? static_cast<const tflite::SquareOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ZerosLikeOptions ? static_cast<const tflite::ZerosLikeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::FillOptions *builtin_options_as_FillOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_FillOptions ? static_cast<const tflite::FillOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions ? static_cast<const tflite::BidirectionalSequenceLSTMOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast<const tflite::BidirectionalSequenceRNNOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions ? static_cast<const tflite::UnidirectionalSequenceLSTMOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::FloorModOptions *builtin_options_as_FloorModOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_FloorModOptions ? static_cast<const tflite::FloorModOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::RangeOptions *builtin_options_as_RangeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_RangeOptions ? static_cast<const tflite::RangeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ResizeNearestNeighborOptions ? static_cast<const tflite::ResizeNearestNeighborOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_LeakyReluOptions ? static_cast<const tflite::LeakyReluOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SquaredDifferenceOptions ? static_cast<const tflite::SquaredDifferenceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_MirrorPadOptions ? static_cast<const tflite::MirrorPadOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::AbsOptions *builtin_options_as_AbsOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_AbsOptions ? static_cast<const tflite::AbsOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SplitVOptions *builtin_options_as_SplitVOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SplitVOptions ? static_cast<const tflite::SplitVOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UniqueOptions *builtin_options_as_UniqueOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UniqueOptions ? static_cast<const tflite::UniqueOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ReverseV2Options *builtin_options_as_ReverseV2Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ReverseV2Options ? static_cast<const tflite::ReverseV2Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::AddNOptions *builtin_options_as_AddNOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_AddNOptions ? static_cast<const tflite::AddNOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::GatherNdOptions *builtin_options_as_GatherNdOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_GatherNdOptions ? static_cast<const tflite::GatherNdOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::CosOptions *builtin_options_as_CosOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_CosOptions ? static_cast<const tflite::CosOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::WhereOptions *builtin_options_as_WhereOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_WhereOptions ? static_cast<const tflite::WhereOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::RankOptions *builtin_options_as_RankOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_RankOptions ? static_cast<const tflite::RankOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ReverseSequenceOptions ? static_cast<const tflite::ReverseSequenceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_MatrixDiagOptions ? static_cast<const tflite::MatrixDiagOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::QuantizeOptions *builtin_options_as_QuantizeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_QuantizeOptions ? static_cast<const tflite::QuantizeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_MatrixSetDiagOptions ? static_cast<const tflite::MatrixSetDiagOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::HardSwishOptions *builtin_options_as_HardSwishOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_HardSwishOptions ? static_cast<const tflite::HardSwishOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::IfOptions *builtin_options_as_IfOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_IfOptions ? static_cast<const tflite::IfOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::WhileOptions *builtin_options_as_WhileOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_WhileOptions ? static_cast<const tflite::WhileOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_DepthToSpaceOptions ? static_cast<const tflite::DepthToSpaceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV4Options ? static_cast<const tflite::NonMaxSuppressionV4Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV5Options ? static_cast<const tflite::NonMaxSuppressionV5Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ScatterNdOptions ? static_cast<const tflite::ScatterNdOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SelectV2Options *builtin_options_as_SelectV2Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SelectV2Options ? static_cast<const tflite::SelectV2Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::DensifyOptions *builtin_options_as_DensifyOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_DensifyOptions ? static_cast<const tflite::DensifyOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_SegmentSumOptions ? static_cast<const tflite::SegmentSumOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast<const tflite::BatchMatMulOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast<const tflite::CumsumOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast<const tflite::CallOnceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast<const tflite::BroadcastToOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions ? static_cast<const tflite::Rfft2dOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::Conv3DOptions *builtin_options_as_Conv3DOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_Conv3DOptions ? static_cast<const tflite::Conv3DOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::HashtableOptions *builtin_options_as_HashtableOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_HashtableOptions ? static_cast<const tflite::HashtableOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::HashtableFindOptions *builtin_options_as_HashtableFindOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_HashtableFindOptions ? static_cast<const tflite::HashtableFindOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::HashtableImportOptions *builtin_options_as_HashtableImportOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_HashtableImportOptions ? static_cast<const tflite::HashtableImportOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::HashtableSizeOptions *builtin_options_as_HashtableSizeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_HashtableSizeOptions ? static_cast<const tflite::HashtableSizeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::VarHandleOptions *builtin_options_as_VarHandleOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_VarHandleOptions ? static_cast<const tflite::VarHandleOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ReadVariableOptions *builtin_options_as_ReadVariableOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ReadVariableOptions ? static_cast<const tflite::ReadVariableOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::AssignVariableOptions *builtin_options_as_AssignVariableOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_AssignVariableOptions ? static_cast<const tflite::AssignVariableOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::RandomOptions *builtin_options_as_RandomOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_RandomOptions ? static_cast<const tflite::RandomOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::BucketizeOptions *builtin_options_as_BucketizeOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_BucketizeOptions ? static_cast<const tflite::BucketizeOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::GeluOptions *builtin_options_as_GeluOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_GeluOptions ? static_cast<const tflite::GeluOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::DynamicUpdateSliceOptions *builtin_options_as_DynamicUpdateSliceOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_DynamicUpdateSliceOptions ? static_cast<const tflite::DynamicUpdateSliceOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UnsortedSegmentProdOptions *builtin_options_as_UnsortedSegmentProdOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentProdOptions ? static_cast<const tflite::UnsortedSegmentProdOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UnsortedSegmentMaxOptions *builtin_options_as_UnsortedSegmentMaxOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentMaxOptions ? static_cast<const tflite::UnsortedSegmentMaxOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UnsortedSegmentMinOptions *builtin_options_as_UnsortedSegmentMinOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentMinOptions ? static_cast<const tflite::UnsortedSegmentMinOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::UnsortedSegmentSumOptions *builtin_options_as_UnsortedSegmentSumOptions() const {
-    return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentSumOptions ? static_cast<const tflite::UnsortedSegmentSumOptions *>(builtin_options()) : nullptr;
-  }
-  const tflite::ATan2Options *builtin_options_as_ATan2Options() const {
-    return builtin_options_type() == tflite::BuiltinOptions_ATan2Options ? static_cast<const tflite::ATan2Options *>(builtin_options()) : nullptr;
-  }
-  const tflite::SignOptions *builtin_options_as_SignOptions() const {
static_cast(builtin_options()) : nullptr; - } - const flatbuffers::Vector *custom_options() const { - return GetPointer *>(VT_CUSTOM_OPTIONS); - } - tflite::CustomOptionsFormat custom_options_format() const { - return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); - } - const flatbuffers::Vector *mutating_variable_inputs() const { - return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); - } - const flatbuffers::Vector *intermediates() const { - return GetPointer *>(VT_INTERMEDIATES); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OPCODE_INDEX, 4) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE, 1) && - VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && - VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && - VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && - verifier.VerifyVector(custom_options()) && - VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT, 1) && - VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && - verifier.VerifyVector(mutating_variable_inputs()) && - VerifyOffset(verifier, VT_INTERMEDIATES) && - verifier.VerifyVector(intermediates()) && - verifier.EndTable(); - } - OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv2DOptions(); -} - -template<> inline const tflite::DepthwiseConv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthwiseConv2DOptions(); -} - -template<> inline const tflite::ConcatEmbeddingsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatEmbeddingsOptions(); -} - -template<> inline const tflite::LSHProjectionOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSHProjectionOptions(); -} - -template<> inline const tflite::Pool2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Pool2DOptions(); -} - -template<> inline const tflite::SVDFOptions *Operator::builtin_options_as() const { - return builtin_options_as_SVDFOptions(); -} - -template<> inline const tflite::RNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_RNNOptions(); -} - -template<> inline const tflite::FullyConnectedOptions *Operator::builtin_options_as() const { - return builtin_options_as_FullyConnectedOptions(); -} - -template<> inline const tflite::SoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_SoftmaxOptions(); -} - -template<> inline const tflite::ConcatenationOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatenationOptions(); -} - -template<> inline const tflite::AddOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddOptions(); -} - -template<> inline const tflite::L2NormOptions *Operator::builtin_options_as() const { - return builtin_options_as_L2NormOptions(); -} - -template<> inline const tflite::LocalResponseNormalizationOptions *Operator::builtin_options_as() const { - return 
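
The accessors above implement FlatBuffers' tagged-union pattern: builtin_options() returns an untyped table pointer, and each builtin_options_as_X() helper checks builtin_options_type() first, returning nullptr on a tag mismatch. A read-side usage sketch (not from the deleted file; subgraph is a hypothetical const tflite::SubGraph * taken from a loaded model):

    const tflite::Operator *op = subgraph->operators()->Get(0);
    // Typed accessor: non-null only when the union tag matches.
    if (const tflite::Conv2DOptions *conv = op->builtin_options_as_Conv2DOptions()) {
      int32_t stride_w = conv->stride_w();
      (void)stride_w;
    }
    // Equivalent generic form, resolved by the template specializations that follow:
    const tflite::Conv2DOptions *same = op->builtin_options_as<tflite::Conv2DOptions>();
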
builtin_options_as_LocalResponseNormalizationOptions(); -} - -template<> inline const tflite::LSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSTMOptions(); -} - -template<> inline const tflite::ResizeBilinearOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeBilinearOptions(); -} - -template<> inline const tflite::CallOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOptions(); -} - -template<> inline const tflite::ReshapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReshapeOptions(); -} - -template<> inline const tflite::SkipGramOptions *Operator::builtin_options_as() const { - return builtin_options_as_SkipGramOptions(); -} - -template<> inline const tflite::SpaceToDepthOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToDepthOptions(); -} - -template<> inline const tflite::EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { - return builtin_options_as_EmbeddingLookupSparseOptions(); -} - -template<> inline const tflite::MulOptions *Operator::builtin_options_as() const { - return builtin_options_as_MulOptions(); -} - -template<> inline const tflite::PadOptions *Operator::builtin_options_as() const { - return builtin_options_as_PadOptions(); -} - -template<> inline const tflite::GatherOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherOptions(); -} - -template<> inline const tflite::BatchToSpaceNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchToSpaceNDOptions(); -} - -template<> inline const tflite::SpaceToBatchNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToBatchNDOptions(); -} - -template<> inline const tflite::TransposeOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeOptions(); -} - -template<> inline const tflite::ReducerOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReducerOptions(); -} - -template<> inline const tflite::SubOptions *Operator::builtin_options_as() const { - return builtin_options_as_SubOptions(); -} - -template<> inline const tflite::DivOptions *Operator::builtin_options_as() const { - return builtin_options_as_DivOptions(); -} - -template<> inline const tflite::SqueezeOptions *Operator::builtin_options_as() const { - return builtin_options_as_SqueezeOptions(); -} - -template<> inline const tflite::SequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_SequenceRNNOptions(); -} - -template<> inline const tflite::StridedSliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_StridedSliceOptions(); -} - -template<> inline const tflite::ExpOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpOptions(); -} - -template<> inline const tflite::TopKV2Options *Operator::builtin_options_as() const { - return builtin_options_as_TopKV2Options(); -} - -template<> inline const tflite::SplitOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitOptions(); -} - -template<> inline const tflite::LogSoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogSoftmaxOptions(); -} - -template<> inline const tflite::CastOptions *Operator::builtin_options_as() const { - return builtin_options_as_CastOptions(); -} - -template<> inline const tflite::DequantizeOptions *Operator::builtin_options_as() const { - return 
builtin_options_as_DequantizeOptions(); -} - -template<> inline const tflite::MaximumMinimumOptions *Operator::builtin_options_as() const { - return builtin_options_as_MaximumMinimumOptions(); -} - -template<> inline const tflite::ArgMaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMaxOptions(); -} - -template<> inline const tflite::LessOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessOptions(); -} - -template<> inline const tflite::NegOptions *Operator::builtin_options_as() const { - return builtin_options_as_NegOptions(); -} - -template<> inline const tflite::PadV2Options *Operator::builtin_options_as() const { - return builtin_options_as_PadV2Options(); -} - -template<> inline const tflite::GreaterOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterOptions(); -} - -template<> inline const tflite::GreaterEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterEqualOptions(); -} - -template<> inline const tflite::LessEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessEqualOptions(); -} - -template<> inline const tflite::SelectOptions *Operator::builtin_options_as() const { - return builtin_options_as_SelectOptions(); -} - -template<> inline const tflite::SliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SliceOptions(); -} - -template<> inline const tflite::TransposeConvOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeConvOptions(); -} - -template<> inline const tflite::SparseToDenseOptions *Operator::builtin_options_as() const { - return builtin_options_as_SparseToDenseOptions(); -} - -template<> inline const tflite::TileOptions *Operator::builtin_options_as() const { - return builtin_options_as_TileOptions(); -} - -template<> inline const tflite::ExpandDimsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpandDimsOptions(); -} - -template<> inline const tflite::EqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_EqualOptions(); -} - -template<> inline const tflite::NotEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_NotEqualOptions(); -} - -template<> inline const tflite::ShapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ShapeOptions(); -} - -template<> inline const tflite::PowOptions *Operator::builtin_options_as() const { - return builtin_options_as_PowOptions(); -} - -template<> inline const tflite::ArgMinOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMinOptions(); -} - -template<> inline const tflite::FakeQuantOptions *Operator::builtin_options_as() const { - return builtin_options_as_FakeQuantOptions(); -} - -template<> inline const tflite::PackOptions *Operator::builtin_options_as() const { - return builtin_options_as_PackOptions(); -} - -template<> inline const tflite::LogicalOrOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalOrOptions(); -} - -template<> inline const tflite::OneHotOptions *Operator::builtin_options_as() const { - return builtin_options_as_OneHotOptions(); -} - -template<> inline const tflite::LogicalAndOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalAndOptions(); -} - -template<> inline const tflite::LogicalNotOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalNotOptions(); -} - -template<> 
inline const tflite::UnpackOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnpackOptions(); -} - -template<> inline const tflite::FloorDivOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorDivOptions(); -} - -template<> inline const tflite::SquareOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquareOptions(); -} - -template<> inline const tflite::ZerosLikeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ZerosLikeOptions(); -} - -template<> inline const tflite::FillOptions *Operator::builtin_options_as() const { - return builtin_options_as_FillOptions(); -} - -template<> inline const tflite::BidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_BidirectionalSequenceLSTMOptions(); -} - -template<> inline const tflite::BidirectionalSequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_BidirectionalSequenceRNNOptions(); -} - -template<> inline const tflite::UnidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnidirectionalSequenceLSTMOptions(); -} - -template<> inline const tflite::FloorModOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorModOptions(); -} - -template<> inline const tflite::RangeOptions *Operator::builtin_options_as() const { - return builtin_options_as_RangeOptions(); -} - -template<> inline const tflite::ResizeNearestNeighborOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeNearestNeighborOptions(); -} - -template<> inline const tflite::LeakyReluOptions *Operator::builtin_options_as() const { - return builtin_options_as_LeakyReluOptions(); -} - -template<> inline const tflite::SquaredDifferenceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquaredDifferenceOptions(); -} - -template<> inline const tflite::MirrorPadOptions *Operator::builtin_options_as() const { - return builtin_options_as_MirrorPadOptions(); -} - -template<> inline const tflite::AbsOptions *Operator::builtin_options_as() const { - return builtin_options_as_AbsOptions(); -} - -template<> inline const tflite::SplitVOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitVOptions(); -} - -template<> inline const tflite::UniqueOptions *Operator::builtin_options_as() const { - return builtin_options_as_UniqueOptions(); -} - -template<> inline const tflite::ReverseV2Options *Operator::builtin_options_as() const { - return builtin_options_as_ReverseV2Options(); -} - -template<> inline const tflite::AddNOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddNOptions(); -} - -template<> inline const tflite::GatherNdOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherNdOptions(); -} - -template<> inline const tflite::CosOptions *Operator::builtin_options_as() const { - return builtin_options_as_CosOptions(); -} - -template<> inline const tflite::WhereOptions *Operator::builtin_options_as() const { - return builtin_options_as_WhereOptions(); -} - -template<> inline const tflite::RankOptions *Operator::builtin_options_as() const { - return builtin_options_as_RankOptions(); -} - -template<> inline const tflite::ReverseSequenceOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReverseSequenceOptions(); -} - -template<> inline const tflite::MatrixDiagOptions *Operator::builtin_options_as() 
const { - return builtin_options_as_MatrixDiagOptions(); -} - -template<> inline const tflite::QuantizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_QuantizeOptions(); -} - -template<> inline const tflite::MatrixSetDiagOptions *Operator::builtin_options_as() const { - return builtin_options_as_MatrixSetDiagOptions(); -} - -template<> inline const tflite::HardSwishOptions *Operator::builtin_options_as() const { - return builtin_options_as_HardSwishOptions(); -} - -template<> inline const tflite::IfOptions *Operator::builtin_options_as() const { - return builtin_options_as_IfOptions(); -} - -template<> inline const tflite::WhileOptions *Operator::builtin_options_as() const { - return builtin_options_as_WhileOptions(); -} - -template<> inline const tflite::DepthToSpaceOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthToSpaceOptions(); -} - -template<> inline const tflite::NonMaxSuppressionV4Options *Operator::builtin_options_as() const { - return builtin_options_as_NonMaxSuppressionV4Options(); -} - -template<> inline const tflite::NonMaxSuppressionV5Options *Operator::builtin_options_as() const { - return builtin_options_as_NonMaxSuppressionV5Options(); -} - -template<> inline const tflite::ScatterNdOptions *Operator::builtin_options_as() const { - return builtin_options_as_ScatterNdOptions(); -} - -template<> inline const tflite::SelectV2Options *Operator::builtin_options_as() const { - return builtin_options_as_SelectV2Options(); -} - -template<> inline const tflite::DensifyOptions *Operator::builtin_options_as() const { - return builtin_options_as_DensifyOptions(); -} - -template<> inline const tflite::SegmentSumOptions *Operator::builtin_options_as() const { - return builtin_options_as_SegmentSumOptions(); -} - -template<> inline const tflite::BatchMatMulOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchMatMulOptions(); -} - -template<> inline const tflite::CumsumOptions *Operator::builtin_options_as() const { - return builtin_options_as_CumsumOptions(); -} - -template<> inline const tflite::CallOnceOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOnceOptions(); -} - -template<> inline const tflite::BroadcastToOptions *Operator::builtin_options_as() const { - return builtin_options_as_BroadcastToOptions(); -} - -template<> inline const tflite::Rfft2dOptions *Operator::builtin_options_as() const { - return builtin_options_as_Rfft2dOptions(); -} - -template<> inline const tflite::Conv3DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv3DOptions(); -} - -template<> inline const tflite::HashtableOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableOptions(); -} - -template<> inline const tflite::HashtableFindOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableFindOptions(); -} - -template<> inline const tflite::HashtableImportOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableImportOptions(); -} - -template<> inline const tflite::HashtableSizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableSizeOptions(); -} - -template<> inline const tflite::VarHandleOptions *Operator::builtin_options_as() const { - return builtin_options_as_VarHandleOptions(); -} - -template<> inline const tflite::ReadVariableOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReadVariableOptions(); -} - -template<> 
inline const tflite::AssignVariableOptions *Operator::builtin_options_as() const { - return builtin_options_as_AssignVariableOptions(); -} - -template<> inline const tflite::RandomOptions *Operator::builtin_options_as() const { - return builtin_options_as_RandomOptions(); -} - -template<> inline const tflite::BucketizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_BucketizeOptions(); -} - -template<> inline const tflite::GeluOptions *Operator::builtin_options_as() const { - return builtin_options_as_GeluOptions(); -} - -template<> inline const tflite::DynamicUpdateSliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_DynamicUpdateSliceOptions(); -} - -template<> inline const tflite::UnsortedSegmentProdOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnsortedSegmentProdOptions(); -} - -template<> inline const tflite::UnsortedSegmentMaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnsortedSegmentMaxOptions(); -} - -template<> inline const tflite::UnsortedSegmentMinOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnsortedSegmentMinOptions(); -} - -template<> inline const tflite::UnsortedSegmentSumOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnsortedSegmentSumOptions(); -} - -template<> inline const tflite::ATan2Options *Operator::builtin_options_as() const { - return builtin_options_as_ATan2Options(); -} - -template<> inline const tflite::SignOptions *Operator::builtin_options_as() const { - return builtin_options_as_SignOptions(); -} - -struct OperatorBuilder { - typedef Operator Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_opcode_index(uint32_t opcode_index) { - fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(Operator::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); - } - void add_builtin_options_type(tflite::BuiltinOptions builtin_options_type) { - fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); - } - void add_builtin_options(flatbuffers::Offset builtin_options) { - fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); - } - void add_custom_options(flatbuffers::Offset> custom_options) { - fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); - } - void add_custom_options_format(tflite::CustomOptionsFormat custom_options_format) { - fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); - } - void add_mutating_variable_inputs(flatbuffers::Offset> mutating_variable_inputs) { - fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); - } - void add_intermediates(flatbuffers::Offset> intermediates) { - fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); - } - explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOperator( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, 
- flatbuffers::Offset> custom_options = 0, - tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, - flatbuffers::Offset> mutating_variable_inputs = 0, - flatbuffers::Offset> intermediates = 0) { - OperatorBuilder builder_(_fbb); - builder_.add_intermediates(intermediates); - builder_.add_mutating_variable_inputs(mutating_variable_inputs); - builder_.add_custom_options(custom_options); - builder_.add_builtin_options(builtin_options); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_opcode_index(opcode_index); - builder_.add_custom_options_format(custom_options_format); - builder_.add_builtin_options_type(builtin_options_type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateOperatorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - const std::vector *custom_options = nullptr, - tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, - const std::vector *mutating_variable_inputs = nullptr, - const std::vector *intermediates = nullptr) { - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto custom_options__ = custom_options ? _fbb.CreateVector(*custom_options) : 0; - auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; - auto intermediates__ = intermediates ? _fbb.CreateVector(*intermediates) : 0; - return tflite::CreateOperator( - _fbb, - opcode_index, - inputs__, - outputs__, - builtin_options_type, - builtin_options, - custom_options__, - custom_options_format, - mutating_variable_inputs__, - intermediates__); -} - -flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubGraphT : public flatbuffers::NativeTable { - typedef SubGraph TableType; - std::vector> tensors{}; - std::vector inputs{}; - std::vector outputs{}; - std::vector> operators{}; - std::string name{}; - SubGraphT() = default; - SubGraphT(const SubGraphT &o); - SubGraphT(SubGraphT&&) FLATBUFFERS_NOEXCEPT = default; - SubGraphT &operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT; -}; - -struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubGraphT NativeTableType; - typedef SubGraphBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TENSORS = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_OPERATORS = 10, - VT_NAME = 12 - }; - const flatbuffers::Vector> *tensors() const { - return GetPointer> *>(VT_TENSORS); - } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); - } - const flatbuffers::Vector *outputs() const { - return GetPointer *>(VT_OUTPUTS); - } - const flatbuffers::Vector> *operators() const { - return GetPointer> *>(VT_OPERATORS); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TENSORS) && - verifier.VerifyVector(tensors()) && - verifier.VerifyVectorOfTables(tensors()) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - 
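
The CreateOperator/CreateOperatorDirect pair above shows the generated write path: CreateOperator feeds an OperatorBuilder field by field, while the Direct variant first converts the caller's std::vector arguments into FlatBuffer vectors. A minimal write-side sketch (not from the deleted file; values are illustrative):

    flatbuffers::FlatBufferBuilder fbb;
    std::vector<int32_t> op_inputs{0, 1};
    std::vector<int32_t> op_outputs{2};
    // Remaining parameters keep their defaults (BuiltinOptions_NONE, no custom options).
    auto op = tflite::CreateOperatorDirect(fbb, /*opcode_index=*/0, &op_inputs, &op_outputs);
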
verifier.VerifyVector(outputs()) && - VerifyOffset(verifier, VT_OPERATORS) && - verifier.VerifyVector(operators()) && - verifier.VerifyVectorOfTables(operators()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); - } - SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubGraphBuilder { - typedef SubGraph Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_tensors(flatbuffers::Offset>> tensors) { - fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); - } - void add_operators(flatbuffers::Offset>> operators) { - fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(SubGraph::VT_NAME, name); - } - explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSubGraph( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> tensors = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - flatbuffers::Offset>> operators = 0, - flatbuffers::Offset name = 0) { - SubGraphBuilder builder_(_fbb); - builder_.add_name(name); - builder_.add_operators(operators); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_tensors(tensors); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSubGraphDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *tensors = nullptr, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - const std::vector> *operators = nullptr, - const char *name = nullptr) { - auto tensors__ = tensors ? _fbb.CreateVector>(*tensors) : 0; - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto operators__ = operators ? _fbb.CreateVector>(*operators) : 0; - auto name__ = name ? 
_fbb.CreateString(name) : 0;
-  return tflite::CreateSubGraph(
-      _fbb,
-      tensors__,
-      inputs__,
-      outputs__,
-      operators__,
-      name__);
-}
-
-flatbuffers::Offset<SubGraph> CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct BufferT : public flatbuffers::NativeTable {
-  typedef Buffer TableType;
-  std::vector<uint8_t> data{};
-};
-
-struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef BufferT NativeTableType;
-  typedef BufferBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_DATA = 4
-  };
-  const flatbuffers::Vector<uint8_t> *data() const {
-    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_DATA) &&
-           verifier.VerifyVector(data()) &&
-           verifier.EndTable();
-  }
-  BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Buffer> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct BufferBuilder {
-  typedef Buffer Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
-    fbb_.AddOffset(Buffer::VT_DATA, data);
-  }
-  explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Buffer> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Buffer>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Buffer> CreateBuffer(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0) {
-  BufferBuilder builder_(_fbb);
-  builder_.add_data(data);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Buffer> CreateBufferDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const std::vector<uint8_t> *data = nullptr) {
-  if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); }
-  auto data__ = data ? _fbb.CreateVector(*data) : 0;
-  return tflite::CreateBuffer(
-      _fbb,
-      data__);
-}
-
-flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct MetadataT : public flatbuffers::NativeTable {
-  typedef Metadata TableType;
-  std::string name{};
-  uint32_t buffer = 0;
-};
-
-struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef MetadataT NativeTableType;
-  typedef MetadataBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_BUFFER = 6
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  uint32_t buffer() const {
-    return GetField<uint32_t>(VT_BUFFER, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyField<uint32_t>(verifier, VT_BUFFER, 4) &&
-           verifier.EndTable();
-  }
-  MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Metadata> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct MetadataBuilder {
-  typedef Metadata Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(Metadata::VT_NAME, name);
-  }
-  void add_buffer(uint32_t buffer) {
-    fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0);
-  }
-  explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<Metadata> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Metadata>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Metadata> CreateMetadata(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    uint32_t buffer = 0) {
-  MetadataBuilder builder_(_fbb);
-  builder_.add_buffer(buffer);
-  builder_.add_name(name);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Metadata> CreateMetadataDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    uint32_t buffer = 0) {
-  auto name__ = name ? _fbb.CreateString(name) : 0;
-  return tflite::CreateMetadata(
-      _fbb,
-      name__,
-      buffer);
-}
-
-flatbuffers::Offset<Metadata> CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct TensorMapT : public flatbuffers::NativeTable {
-  typedef TensorMap TableType;
-  std::string name{};
-  uint32_t tensor_index = 0;
-};
-
-struct TensorMap FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef TensorMapT NativeTableType;
-  typedef TensorMapBuilder Builder;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_NAME = 4,
-    VT_TENSOR_INDEX = 6
-  };
-  const flatbuffers::String *name() const {
-    return GetPointer<const flatbuffers::String *>(VT_NAME);
-  }
-  uint32_t tensor_index() const {
-    return GetField<uint32_t>(VT_TENSOR_INDEX, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_NAME) &&
-           verifier.VerifyString(name()) &&
-           VerifyField<uint32_t>(verifier, VT_TENSOR_INDEX, 4) &&
-           verifier.EndTable();
-  }
-  TensorMapT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<TensorMap> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct TensorMapBuilder {
-  typedef TensorMap Table;
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
-    fbb_.AddOffset(TensorMap::VT_NAME, name);
-  }
-  void add_tensor_index(uint32_t tensor_index) {
-    fbb_.AddElement<uint32_t>(TensorMap::VT_TENSOR_INDEX, tensor_index, 0);
-  }
-  explicit TensorMapBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  flatbuffers::Offset<TensorMap> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<TensorMap>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<TensorMap> CreateTensorMap(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> name = 0,
-    uint32_t tensor_index = 0) {
-  TensorMapBuilder builder_(_fbb);
-  builder_.add_tensor_index(tensor_index);
-  builder_.add_name(name);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TensorMap> CreateTensorMapDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *name = nullptr,
-    uint32_t tensor_index = 0) {
-  auto name__ = name ?
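
Buffer, Metadata, and TensorMap also get mirror object-API types (BufferT, MetadataT, TensorMapT): mutable NativeTable structs that UnPack() into owned std::string/std::vector storage and Pack() back into a builder. A round-trip sketch (not from the deleted file; model is a hypothetical const tflite::Model *; note the heap copies this implies, which is why tflite-micro normally reads the flatbuffer in place):

    const tflite::Metadata *md = model->metadata()->Get(0);
    std::unique_ptr<tflite::MetadataT> obj(md->UnPack());  // deep copy out of the buffer
    obj->name = "edited";
    flatbuffers::FlatBufferBuilder fbb;
    fbb.Finish(tflite::Metadata::Pack(fbb, obj.get()));    // serialize the edited copy
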
_fbb.CreateString(name) : 0; - return tflite::CreateTensorMap( - _fbb, - name__, - tensor_index); -} - -flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SignatureDefT : public flatbuffers::NativeTable { - typedef SignatureDef TableType; - std::vector> inputs{}; - std::vector> outputs{}; - std::string signature_key{}; - uint32_t subgraph_index = 0; - SignatureDefT() = default; - SignatureDefT(const SignatureDefT &o); - SignatureDefT(SignatureDefT&&) FLATBUFFERS_NOEXCEPT = default; - SignatureDefT &operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT; -}; - -struct SignatureDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SignatureDefT NativeTableType; - typedef SignatureDefBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUTS = 4, - VT_OUTPUTS = 6, - VT_SIGNATURE_KEY = 8, - VT_SUBGRAPH_INDEX = 12 - }; - const flatbuffers::Vector> *inputs() const { - return GetPointer> *>(VT_INPUTS); - } - const flatbuffers::Vector> *outputs() const { - return GetPointer> *>(VT_OUTPUTS); - } - const flatbuffers::String *signature_key() const { - return GetPointer(VT_SIGNATURE_KEY); - } - uint32_t subgraph_index() const { - return GetField(VT_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - verifier.VerifyVectorOfTables(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - verifier.VerifyVectorOfTables(outputs()) && - VerifyOffset(verifier, VT_SIGNATURE_KEY) && - verifier.VerifyString(signature_key()) && - VerifyField(verifier, VT_SUBGRAPH_INDEX, 4) && - verifier.EndTable(); - } - SignatureDefT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SignatureDefBuilder { - typedef SignatureDef Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_inputs(flatbuffers::Offset>> inputs) { - fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset>> outputs) { - fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs); - } - void add_signature_key(flatbuffers::Offset signature_key) { - fbb_.AddOffset(SignatureDef::VT_SIGNATURE_KEY, signature_key); - } - void add_subgraph_index(uint32_t subgraph_index) { - fbb_.AddElement(SignatureDef::VT_SUBGRAPH_INDEX, subgraph_index, 0); - } - explicit SignatureDefBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSignatureDef( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> inputs = 0, - flatbuffers::Offset>> outputs = 0, - flatbuffers::Offset signature_key = 0, - uint32_t subgraph_index = 0) { - SignatureDefBuilder builder_(_fbb); - builder_.add_subgraph_index(subgraph_index); - builder_.add_signature_key(signature_key); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - return builder_.Finish(); -} - -inline 
flatbuffers::Offset CreateSignatureDefDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *inputs = nullptr, - const std::vector> *outputs = nullptr, - const char *signature_key = nullptr, - uint32_t subgraph_index = 0) { - auto inputs__ = inputs ? _fbb.CreateVector>(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector>(*outputs) : 0; - auto signature_key__ = signature_key ? _fbb.CreateString(signature_key) : 0; - return tflite::CreateSignatureDef( - _fbb, - inputs__, - outputs__, - signature_key__, - subgraph_index); -} - -flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ModelT : public flatbuffers::NativeTable { - typedef Model TableType; - uint32_t version = 0; - std::vector> operator_codes{}; - std::vector> subgraphs{}; - std::string description{}; - std::vector> buffers{}; - std::vector metadata_buffer{}; - std::vector> metadata{}; - std::vector> signature_defs{}; - ModelT() = default; - ModelT(const ModelT &o); - ModelT(ModelT&&) FLATBUFFERS_NOEXCEPT = default; - ModelT &operator=(ModelT o) FLATBUFFERS_NOEXCEPT; -}; - -struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ModelT NativeTableType; - typedef ModelBuilder Builder; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VERSION = 4, - VT_OPERATOR_CODES = 6, - VT_SUBGRAPHS = 8, - VT_DESCRIPTION = 10, - VT_BUFFERS = 12, - VT_METADATA_BUFFER = 14, - VT_METADATA = 16, - VT_SIGNATURE_DEFS = 18 - }; - uint32_t version() const { - return GetField(VT_VERSION, 0); - } - const flatbuffers::Vector> *operator_codes() const { - return GetPointer> *>(VT_OPERATOR_CODES); - } - const flatbuffers::Vector> *subgraphs() const { - return GetPointer> *>(VT_SUBGRAPHS); - } - const flatbuffers::String *description() const { - return GetPointer(VT_DESCRIPTION); - } - const flatbuffers::Vector> *buffers() const { - return GetPointer> *>(VT_BUFFERS); - } - const flatbuffers::Vector *metadata_buffer() const { - return GetPointer *>(VT_METADATA_BUFFER); - } - const flatbuffers::Vector> *metadata() const { - return GetPointer> *>(VT_METADATA); - } - const flatbuffers::Vector> *signature_defs() const { - return GetPointer> *>(VT_SIGNATURE_DEFS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VERSION, 4) && - VerifyOffset(verifier, VT_OPERATOR_CODES) && - verifier.VerifyVector(operator_codes()) && - verifier.VerifyVectorOfTables(operator_codes()) && - VerifyOffset(verifier, VT_SUBGRAPHS) && - verifier.VerifyVector(subgraphs()) && - verifier.VerifyVectorOfTables(subgraphs()) && - VerifyOffset(verifier, VT_DESCRIPTION) && - verifier.VerifyString(description()) && - VerifyOffset(verifier, VT_BUFFERS) && - verifier.VerifyVector(buffers()) && - verifier.VerifyVectorOfTables(buffers()) && - VerifyOffset(verifier, VT_METADATA_BUFFER) && - verifier.VerifyVector(metadata_buffer()) && - VerifyOffset(verifier, VT_METADATA) && - verifier.VerifyVector(metadata()) && - verifier.VerifyVectorOfTables(metadata()) && - VerifyOffset(verifier, VT_SIGNATURE_DEFS) && - verifier.VerifyVector(signature_defs()) && - verifier.VerifyVectorOfTables(signature_defs()) && - verifier.EndTable(); - } - ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
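
Two details in the SignatureDef and Model tables above are worth noting. SignatureDef's vtable jumps from VT_SIGNATURE_KEY = 8 to VT_SUBGRAPH_INDEX = 12: slot 10 belonged to a field the upstream schema has since deprecated, and FlatBuffers keeps the slot reserved for binary compatibility. Model is the root table of a .tflite file, so reading a blob starts here (sketch, not from the deleted file; buf/len are hypothetical raw bytes):

    const tflite::Model *model = tflite::GetModel(buf);
    flatbuffers::Verifier verifier(buf, len);
    if (!model->Verify(verifier)) {
      // Reject the file; this walks the same generated Verify() chain shown above.
    }
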
Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ModelBuilder { - typedef Model Table; - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_version(uint32_t version) { - fbb_.AddElement(Model::VT_VERSION, version, 0); - } - void add_operator_codes(flatbuffers::Offset>> operator_codes) { - fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); - } - void add_subgraphs(flatbuffers::Offset>> subgraphs) { - fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); - } - void add_description(flatbuffers::Offset description) { - fbb_.AddOffset(Model::VT_DESCRIPTION, description); - } - void add_buffers(flatbuffers::Offset>> buffers) { - fbb_.AddOffset(Model::VT_BUFFERS, buffers); - } - void add_metadata_buffer(flatbuffers::Offset> metadata_buffer) { - fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); - } - void add_metadata(flatbuffers::Offset>> metadata) { - fbb_.AddOffset(Model::VT_METADATA, metadata); - } - void add_signature_defs(flatbuffers::Offset>> signature_defs) { - fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs); - } - explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateModel( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - flatbuffers::Offset>> operator_codes = 0, - flatbuffers::Offset>> subgraphs = 0, - flatbuffers::Offset description = 0, - flatbuffers::Offset>> buffers = 0, - flatbuffers::Offset> metadata_buffer = 0, - flatbuffers::Offset>> metadata = 0, - flatbuffers::Offset>> signature_defs = 0) { - ModelBuilder builder_(_fbb); - builder_.add_signature_defs(signature_defs); - builder_.add_metadata(metadata); - builder_.add_metadata_buffer(metadata_buffer); - builder_.add_buffers(buffers); - builder_.add_description(description); - builder_.add_subgraphs(subgraphs); - builder_.add_operator_codes(operator_codes); - builder_.add_version(version); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateModelDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - const std::vector> *operator_codes = nullptr, - const std::vector> *subgraphs = nullptr, - const char *description = nullptr, - const std::vector> *buffers = nullptr, - const std::vector *metadata_buffer = nullptr, - const std::vector> *metadata = nullptr, - const std::vector> *signature_defs = nullptr) { - auto operator_codes__ = operator_codes ? _fbb.CreateVector>(*operator_codes) : 0; - auto subgraphs__ = subgraphs ? _fbb.CreateVector>(*subgraphs) : 0; - auto description__ = description ? _fbb.CreateString(description) : 0; - auto buffers__ = buffers ? _fbb.CreateVector>(*buffers) : 0; - auto metadata_buffer__ = metadata_buffer ? _fbb.CreateVector(*metadata_buffer) : 0; - auto metadata__ = metadata ? _fbb.CreateVector>(*metadata) : 0; - auto signature_defs__ = signature_defs ? 
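
CreateModelDirect above converts every pointer argument into an offset before delegating to CreateModel, so a complete (if empty) model can be serialized in a few lines. A sketch (not from the deleted file; the schema version for this generated header is 3):

    flatbuffers::FlatBufferBuilder fbb;
    auto model = tflite::CreateModelDirect(fbb, /*version=*/3);
    tflite::FinishModelBuffer(fbb, model);  // finishes with the "TFL3" file identifier
    // fbb.GetBufferPointer() / fbb.GetSize() now hold a minimal .tflite payload.
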
_fbb.CreateVector>(*signature_defs) : 0; - return tflite::CreateModel( - _fbb, - version, - operator_codes__, - subgraphs__, - description__, - buffers__, - metadata_buffer__, - metadata__, - signature_defs__); -} - -flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline CustomQuantizationT *CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new CustomQuantizationT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom.begin()); } } -} - -inline flatbuffers::Offset CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCustomQuantization(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16); - auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0; - return tflite::CreateCustomQuantization( - _fbb, - _custom); -} - -inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new QuantizationParametersT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } } - { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } } - { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } } - { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } } - { auto _e = details_type(); _o->details.type = _e; } - { auto _e = details(); if (_e) _o->details.value = tflite::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); } - { auto _e = quantized_dimension(); _o->quantized_dimension = _e; } -} - -inline flatbuffers::Offset QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizationParameters(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder 
*__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0;
-  auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0;
-  auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0;
-  auto _zero_point = _o->zero_point.size() ? _fbb.CreateVector(_o->zero_point) : 0;
-  auto _details_type = _o->details.type;
-  auto _details = _o->details.Pack(_fbb);
-  auto _quantized_dimension = _o->quantized_dimension;
-  return tflite::CreateQuantizationParameters(
-      _fbb,
-      _min,
-      _max,
-      _scale,
-      _zero_point,
-      _details_type,
-      _details,
-      _quantized_dimension);
-}
-
-inline Int32VectorT *Int32Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Int32VectorT>(new Int32VectorT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Int32Vector::UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<Int32Vector> Int32Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateInt32Vector(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Int32Vector> CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
-  return tflite::CreateInt32Vector(
-      _fbb,
-      _values);
-}
-
-inline Uint16VectorT *Uint16Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Uint16VectorT>(new Uint16VectorT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<Uint16Vector> Uint16Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUint16Vector(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Uint16Vector> CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4);
-  auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
-  return tflite::CreateUint16Vector(
-      _fbb,
-      _values);
-}
-
-inline Uint8VectorT *Uint8Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Uint8VectorT>(new Uint8VectorT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->values.begin()); } }
-}
-
-inline flatbuffers::Offset<Uint8Vector> Uint8Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUint8Vector(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Uint8Vector> CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4);
-  auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
-  return tflite::CreateUint8Vector(
-      _fbb,
-      _values);
-}
-
-inline DimensionMetadataT *DimensionMetadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DimensionMetadataT>(new DimensionMetadataT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = format(); _o->format = _e; }
-  { auto _e = dense_size(); _o->dense_size = _e; }
-  { auto _e = array_segments_type(); _o->array_segments.type = _e; }
-  { auto _e = array_segments(); if (_e) _o->array_segments.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_segments_type(), _resolver); }
-  { auto _e = array_indices_type(); _o->array_indices.type = _e; }
-  { auto _e = array_indices(); if (_e) _o->array_indices.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver); }
-}
-
-inline flatbuffers::Offset<DimensionMetadata> DimensionMetadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDimensionMetadata(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DimensionMetadata> CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _format = _o->format;
-  auto _dense_size = _o->dense_size;
-  auto _array_segments_type = _o->array_segments.type;
-  auto _array_segments = _o->array_segments.Pack(_fbb);
-  auto _array_indices_type = _o->array_indices.type;
-  auto _array_indices = _o->array_indices.Pack(_fbb);
-  return tflite::CreateDimensionMetadata(
-      _fbb,
-      _format,
-      _dense_size,
-      _array_segments_type,
-      _array_segments,
-      _array_indices_type,
-      _array_indices);
-}
-
-inline SparsityParametersT::SparsityParametersT(const SparsityParametersT &o)
-      : traversal_order(o.traversal_order),
-        block_map(o.block_map) {
-  dim_metadata.reserve(o.dim_metadata.size());
-
for (const auto &dim_metadata_ : o.dim_metadata) { dim_metadata.emplace_back((dim_metadata_) ? new tflite::DimensionMetadataT(*dim_metadata_) : nullptr); } -} - -inline SparsityParametersT &SparsityParametersT::operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT { - std::swap(traversal_order, o.traversal_order); - std::swap(block_map, o.block_map); - std::swap(dim_metadata, o.dim_metadata); - return *this; -} - -inline SparsityParametersT *SparsityParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new SparsityParametersT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void SparsityParameters::UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } } - { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } } - { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->dim_metadata[_i]) { _e->Get(_i)->UnPackTo(_o->dim_metadata[_i].get(), _resolver); } else { _o->dim_metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } -} - -inline flatbuffers::Offset SparsityParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparsityParameters(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0; - auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0; - auto _dim_metadata = _o->dim_metadata.size() ? 
_fbb.CreateVector> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - return tflite::CreateSparsityParameters( - _fbb, - _traversal_order, - _block_map, - _dim_metadata); -} - -inline VariantSubTypeT *VariantSubType::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new VariantSubTypeT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void VariantSubType::UnPackTo(VariantSubTypeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } } - { auto _e = type(); _o->type = _e; } - { auto _e = has_rank(); _o->has_rank = _e; } -} - -inline flatbuffers::Offset VariantSubType::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateVariantSubType(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateVariantSubType(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const VariantSubTypeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; - auto _type = _o->type; - auto _has_rank = _o->has_rank; - return tflite::CreateVariantSubType( - _fbb, - _shape, - _type, - _has_rank); -} - -inline TensorT::TensorT(const TensorT &o) - : shape(o.shape), - type(o.type), - buffer(o.buffer), - name(o.name), - quantization((o.quantization) ? new tflite::QuantizationParametersT(*o.quantization) : nullptr), - is_variable(o.is_variable), - sparsity((o.sparsity) ? new tflite::SparsityParametersT(*o.sparsity) : nullptr), - shape_signature(o.shape_signature), - has_rank(o.has_rank) { - variant_tensors.reserve(o.variant_tensors.size()); - for (const auto &variant_tensors_ : o.variant_tensors) { variant_tensors.emplace_back((variant_tensors_) ? 
-      new tflite::VariantSubTypeT(*variant_tensors_) : nullptr); }
-}
-
-inline TensorT &TensorT::operator=(TensorT o) FLATBUFFERS_NOEXCEPT {
-  std::swap(shape, o.shape);
-  std::swap(type, o.type);
-  std::swap(buffer, o.buffer);
-  std::swap(name, o.name);
-  std::swap(quantization, o.quantization);
-  std::swap(is_variable, o.is_variable);
-  std::swap(sparsity, o.sparsity);
-  std::swap(shape_signature, o.shape_signature);
-  std::swap(has_rank, o.has_rank);
-  std::swap(variant_tensors, o.variant_tensors);
-  return *this;
-}
-
-inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<TensorT>(new TensorT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } }
-  { auto _e = type(); _o->type = _e; }
-  { auto _e = buffer(); _o->buffer = _e; }
-  { auto _e = name(); if (_e) _o->name = _e->str(); }
-  { auto _e = quantization(); if (_e) { if(_o->quantization) { _e->UnPackTo(_o->quantization.get(), _resolver); } else { _o->quantization = std::unique_ptr<tflite::QuantizationParametersT>(_e->UnPack(_resolver)); } } }
-  { auto _e = is_variable(); _o->is_variable = _e; }
-  { auto _e = sparsity(); if (_e) { if(_o->sparsity) { _e->UnPackTo(_o->sparsity.get(), _resolver); } else { _o->sparsity = std::unique_ptr<tflite::SparsityParametersT>(_e->UnPack(_resolver)); } } }
-  { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } }
-  { auto _e = has_rank(); _o->has_rank = _e; }
-  { auto _e = variant_tensors(); if (_e) { _o->variant_tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->variant_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->variant_tensors[_i].get(), _resolver); } else { _o->variant_tensors[_i] = std::unique_ptr<tflite::VariantSubTypeT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-}
-
-inline flatbuffers::Offset<Tensor> Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTensor(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Tensor> CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0;
-  auto _type = _o->type;
-  auto _buffer = _o->buffer;
-  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
-  auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0;
-  auto _is_variable = _o->is_variable;
-  auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0;
-  auto _shape_signature = _o->shape_signature.size() ? _fbb.CreateVector(_o->shape_signature) : 0;
-  auto _has_rank = _o->has_rank;
-  auto _variant_tensors = _o->variant_tensors.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::VariantSubType>> (_o->variant_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateVariantSubType(*__va->__fbb, __va->__o->variant_tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
-  return tflite::CreateTensor(
-      _fbb,
-      _shape,
-      _type,
-      _buffer,
-      _name,
-      _quantization,
-      _is_variable,
-      _sparsity,
-      _shape_signature,
-      _has_rank,
-      _variant_tensors);
-}
-
-inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Conv2DOptionsT>(new Conv2DOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = padding(); _o->padding = _e; }
-  { auto _e = stride_w(); _o->stride_w = _e; }
-  { auto _e = stride_h(); _o->stride_h = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }
-  { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }
-}
-
-inline flatbuffers::Offset<Conv2DOptions> Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateConv2DOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Conv2DOptions> CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _padding = _o->padding;
-  auto _stride_w = _o->stride_w;
-  auto _stride_h = _o->stride_h;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _dilation_w_factor = _o->dilation_w_factor;
-  auto _dilation_h_factor = _o->dilation_h_factor;
-  return tflite::CreateConv2DOptions(
-      _fbb,
-      _padding,
-      _stride_w,
-      _stride_h,
-      _fused_activation_function,
-      _dilation_w_factor,
-      _dilation_h_factor);
-}
-
-inline Conv3DOptionsT *Conv3DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Conv3DOptionsT>(new Conv3DOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = padding(); _o->padding = _e; }
-  { auto _e = stride_d(); _o->stride_d = _e; }
-  { auto _e = stride_w(); _o->stride_w = _e; }
-  { auto _e = stride_h(); _o->stride_h = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = dilation_d_factor(); _o->dilation_d_factor = _e; }
-  { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }
-  { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }
-}
-
-inline flatbuffers::Offset<Conv3DOptions> Conv3DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateConv3DOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Conv3DOptions> CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _padding = _o->padding;
-  auto _stride_d = _o->stride_d;
-  auto _stride_w = _o->stride_w;
-  auto _stride_h = _o->stride_h;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _dilation_d_factor = _o->dilation_d_factor;
-  auto _dilation_w_factor = _o->dilation_w_factor;
-  auto _dilation_h_factor = _o->dilation_h_factor;
-  return tflite::CreateConv3DOptions(
-      _fbb,
-      _padding,
-      _stride_d,
-      _stride_w,
-      _stride_h,
-      _fused_activation_function,
-      _dilation_d_factor,
-      _dilation_w_factor,
-      _dilation_h_factor);
-}
-
-inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Pool2DOptionsT>(new Pool2DOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = padding(); _o->padding = _e; }
-  { auto _e = stride_w(); _o->stride_w = _e; }
-  { auto _e = stride_h(); _o->stride_h = _e; }
-  { auto _e = filter_width(); _o->filter_width = _e; }
-  { auto _e = filter_height(); _o->filter_height = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-}
-
-inline flatbuffers::Offset<Pool2DOptions> Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreatePool2DOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Pool2DOptions> CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _padding = _o->padding;
-  auto _stride_w = _o->stride_w;
-  auto _stride_h = _o->stride_h;
-  auto _filter_width = _o->filter_width;
-  auto _filter_height = _o->filter_height;
-  auto _fused_activation_function = _o->fused_activation_function;
-  return tflite::CreatePool2DOptions(
-      _fbb,
-      _padding,
-      _stride_w,
-      _stride_h,
-      _filter_width,
-      _filter_height,
-      _fused_activation_function);
-}
-
-inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DepthwiseConv2DOptionsT>(new DepthwiseConv2DOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = padding(); _o->padding = _e; }
-  { auto _e = stride_w(); _o->stride_w = _e; }
-  { auto _e = stride_h(); _o->stride_h = _e; }
-  { auto _e = depth_multiplier(); _o->depth_multiplier = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; }
-  { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; }
-}
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _padding = _o->padding;
-  auto _stride_w = _o->stride_w;
-  auto _stride_h = _o->stride_h;
-  auto _depth_multiplier = _o->depth_multiplier;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _dilation_w_factor = _o->dilation_w_factor;
-  auto _dilation_h_factor = _o->dilation_h_factor;
-  return tflite::CreateDepthwiseConv2DOptions(
-      _fbb,
-      _padding,
-      _stride_w,
-      _stride_h,
-      _depth_multiplier,
-      _fused_activation_function,
-      _dilation_w_factor,
-      _dilation_h_factor);
-}
-
-inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ConcatEmbeddingsOptionsT>(new ConcatEmbeddingsOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = num_channels(); _o->num_channels = _e; }
-  { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } }
-  { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _num_channels = _o->num_channels;
-  auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0;
-  auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? _fbb.CreateVector(_o->embedding_dim_per_channel) : 0;
-  return tflite::CreateConcatEmbeddingsOptions(
-      _fbb,
-      _num_channels,
-      _num_columns_per_channel,
-      _embedding_dim_per_channel);
-}
-
-inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LSHProjectionOptionsT>(new LSHProjectionOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = type(); _o->type = _e; }
-}
-
-inline flatbuffers::Offset<LSHProjectionOptions> LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLSHProjectionOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LSHProjectionOptions> CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _type = _o->type;
-  return tflite::CreateLSHProjectionOptions(
-      _fbb,
-      _type);
-}
-
-inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SVDFOptionsT>(new SVDFOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = rank(); _o->rank = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<SVDFOptions> SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSVDFOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SVDFOptions> CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _rank = _o->rank;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateSVDFOptions(
-      _fbb,
-      _rank,
-      _fused_activation_function,
-      _asymmetric_quantize_inputs);
-}
-
-inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<RNNOptionsT>(new RNNOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<RNNOptions> RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateRNNOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<RNNOptions> CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateRNNOptions(
-      _fbb,
-      _fused_activation_function,
-      _asymmetric_quantize_inputs);
-}
-
-inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SequenceRNNOptionsT>(new SequenceRNNOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = time_major(); _o->time_major = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<SequenceRNNOptions> SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSequenceRNNOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SequenceRNNOptions> CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _time_major = _o->time_major;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateSequenceRNNOptions(
-      _fbb,
-      _time_major,
-      _fused_activation_function,
-      _asymmetric_quantize_inputs);
-}
-
-inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BidirectionalSequenceRNNOptionsT>(new BidirectionalSequenceRNNOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = time_major(); _o->time_major = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = merge_outputs(); _o->merge_outputs = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BidirectionalSequenceRNNOptions> CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher;
-      } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _time_major = _o->time_major;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _merge_outputs = _o->merge_outputs;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateBidirectionalSequenceRNNOptions(
-      _fbb,
-      _time_major,
-      _fused_activation_function,
-      _merge_outputs,
-      _asymmetric_quantize_inputs);
-}
-
-inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<FullyConnectedOptionsT>(new FullyConnectedOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = weights_format(); _o->weights_format = _e; }
-  { auto _e = keep_num_dims(); _o->keep_num_dims = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<FullyConnectedOptions> FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateFullyConnectedOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<FullyConnectedOptions> CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _weights_format = _o->weights_format;
-  auto _keep_num_dims = _o->keep_num_dims;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateFullyConnectedOptions(
-      _fbb,
-      _fused_activation_function,
-      _weights_format,
-      _keep_num_dims,
-      _asymmetric_quantize_inputs);
-}
-
-inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SoftmaxOptionsT>(new SoftmaxOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = beta(); _o->beta = _e; }
-}
-
-inline flatbuffers::Offset<SoftmaxOptions> SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSoftmaxOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SoftmaxOptions> CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _beta = _o->beta;
-  return tflite::CreateSoftmaxOptions(
-      _fbb,
-      _beta);
-}
-
-inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ConcatenationOptionsT>(new ConcatenationOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = axis(); _o->axis = _e; }
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-}
-
-inline flatbuffers::Offset<ConcatenationOptions> ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateConcatenationOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ConcatenationOptions> CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _axis = _o->axis;
-  auto _fused_activation_function = _o->fused_activation_function;
-  return tflite::CreateConcatenationOptions(
-      _fbb,
-      _axis,
-      _fused_activation_function);
-}
-
-inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<AddOptionsT>(new AddOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; }
-}
-
-inline flatbuffers::Offset<AddOptions> AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateAddOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<AddOptions> CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _pot_scale_int16 = _o->pot_scale_int16;
-  return tflite::CreateAddOptions(
-      _fbb,
-      _fused_activation_function,
-      _pot_scale_int16);
-}
-
-inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<MulOptionsT>(new MulOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-}
-
-inline flatbuffers::Offset<MulOptions> MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateMulOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<MulOptions> CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  return tflite::CreateMulOptions(
-      _fbb,
-      _fused_activation_function);
-}
-
-inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<L2NormOptionsT>(new L2NormOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-}
-
-inline flatbuffers::Offset<L2NormOptions> L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateL2NormOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<L2NormOptions> CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  return tflite::CreateL2NormOptions(
-      _fbb,
-      _fused_activation_function);
-}
-
-inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LocalResponseNormalizationOptionsT>(new LocalResponseNormalizationOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = radius(); _o->radius = _e; }
-  { auto _e = bias(); _o->bias = _e; }
-  { auto _e = alpha(); _o->alpha = _e; }
-  { auto _e = beta(); _o->beta = _e; }
-}
-
-inline flatbuffers::Offset<LocalResponseNormalizationOptions> LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LocalResponseNormalizationOptions> CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _radius = _o->radius;
-  auto _bias = _o->bias;
-  auto _alpha = _o->alpha;
-  auto _beta = _o->beta;
-  return tflite::CreateLocalResponseNormalizationOptions(
-      _fbb,
-      _radius,
-      _bias,
-      _alpha,
-      _beta);
-}
-
-inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LSTMOptionsT>(new LSTMOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = cell_clip(); _o->cell_clip = _e; }
-  { auto _e = proj_clip(); _o->proj_clip = _e; }
-  { auto _e = kernel_type(); _o->kernel_type = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<LSTMOptions>
-    LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLSTMOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LSTMOptions> CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _cell_clip = _o->cell_clip;
-  auto _proj_clip = _o->proj_clip;
-  auto _kernel_type = _o->kernel_type;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateLSTMOptions(
-      _fbb,
-      _fused_activation_function,
-      _cell_clip,
-      _proj_clip,
-      _kernel_type,
-      _asymmetric_quantize_inputs);
-}
-
-inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UnidirectionalSequenceLSTMOptionsT>(new UnidirectionalSequenceLSTMOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = cell_clip(); _o->cell_clip = _e; }
-  { auto _e = proj_clip(); _o->proj_clip = _e; }
-  { auto _e = time_major(); _o->time_major = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UnidirectionalSequenceLSTMOptions> CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _cell_clip = _o->cell_clip;
-  auto _proj_clip = _o->proj_clip;
-  auto _time_major = _o->time_major;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateUnidirectionalSequenceLSTMOptions(
-      _fbb,
-      _fused_activation_function,
-      _cell_clip,
-      _proj_clip,
-      _time_major,
-      _asymmetric_quantize_inputs);
-}
-
-inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BidirectionalSequenceLSTMOptionsT>(new BidirectionalSequenceLSTMOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = cell_clip(); _o->cell_clip = _e; }
-  { auto _e = proj_clip(); _o->proj_clip = _e; }
-  { auto _e = merge_outputs(); _o->merge_outputs = _e; }
-  { auto _e = time_major(); _o->time_major = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BidirectionalSequenceLSTMOptions> CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _cell_clip = _o->cell_clip;
-  auto _proj_clip = _o->proj_clip;
-  auto _merge_outputs = _o->merge_outputs;
-  auto _time_major = _o->time_major;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateBidirectionalSequenceLSTMOptions(
-      _fbb,
-      _fused_activation_function,
-      _cell_clip,
-      _proj_clip,
-      _merge_outputs,
-      _time_major,
-      _asymmetric_quantize_inputs);
-}
-
-inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ResizeBilinearOptionsT>(new ResizeBilinearOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = align_corners(); _o->align_corners = _e; }
-  { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; }
-}
-
-inline flatbuffers::Offset<ResizeBilinearOptions> ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateResizeBilinearOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _align_corners = _o->align_corners;
-  auto _half_pixel_centers = _o->half_pixel_centers;
-  return tflite::CreateResizeBilinearOptions(
-      _fbb,
-      _align_corners,
-      _half_pixel_centers);
-}
-
-inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ResizeNearestNeighborOptionsT>(new ResizeNearestNeighborOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = align_corners(); _o->align_corners = _e; }
-  { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; }
-}
-
-inline flatbuffers::Offset<ResizeNearestNeighborOptions> ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _align_corners = _o->align_corners;
-  auto _half_pixel_centers = _o->half_pixel_centers;
-  return tflite::CreateResizeNearestNeighborOptions(
-      _fbb,
-      _align_corners,
-      _half_pixel_centers);
-}
-
-inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<CallOptionsT>(new CallOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = subgraph(); _o->subgraph = _e; }
-}
-
-inline flatbuffers::Offset<CallOptions> CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateCallOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _subgraph = _o->subgraph;
-  return tflite::CreateCallOptions(
-      _fbb,
-      _subgraph);
-}
-
-inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<PadOptionsT>(new PadOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<PadOptions> PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreatePadOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreatePadOptions(
-      _fbb);
-}
-
-inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<PadV2OptionsT>(new PadV2OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<PadV2Options> PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreatePadV2Options(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<PadV2Options> CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs {
-      flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreatePadV2Options(
-      _fbb);
-}
-
-inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ReshapeOptionsT>(new ReshapeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<ReshapeOptions> ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateReshapeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ReshapeOptions> CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0;
-  return tflite::CreateReshapeOptions(
-      _fbb,
-      _new_shape);
-}
-
-inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SpaceToBatchNDOptionsT>(new SpaceToBatchNDOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SpaceToBatchNDOptions> SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SpaceToBatchNDOptions> CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSpaceToBatchNDOptions(
-      _fbb);
-}
-
-inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BatchToSpaceNDOptionsT>(new BatchToSpaceNDOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<BatchToSpaceNDOptions> BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BatchToSpaceNDOptions> CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateBatchToSpaceNDOptions(
-      _fbb);
-}
-
-inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SkipGramOptionsT>(new SkipGramOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = ngram_size(); _o->ngram_size = _e; }
-  { auto _e = max_skip_size(); _o->max_skip_size = _e; }
-  { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; }
-}
-
-inline flatbuffers::Offset<SkipGramOptions> SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSkipGramOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SkipGramOptions> CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _ngram_size = _o->ngram_size;
-  auto _max_skip_size = _o->max_skip_size;
-  auto _include_all_ngrams = _o->include_all_ngrams;
-  return tflite::CreateSkipGramOptions(
-      _fbb,
-      _ngram_size,
-      _max_skip_size,
-      _include_all_ngrams);
-}
-
-inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SpaceToDepthOptionsT>(new SpaceToDepthOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = block_size(); _o->block_size = _e; }
-}
-
-inline flatbuffers::Offset<SpaceToDepthOptions> SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSpaceToDepthOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SpaceToDepthOptions> CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _block_size = _o->block_size;
-  return tflite::CreateSpaceToDepthOptions(
-      _fbb,
-      _block_size);
-}
-
-inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DepthToSpaceOptionsT>(new DepthToSpaceOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = block_size(); _o->block_size = _e; }
-}
-
-inline flatbuffers::Offset<DepthToSpaceOptions> DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDepthToSpaceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DepthToSpaceOptions> CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _block_size = _o->block_size;
-  return tflite::CreateDepthToSpaceOptions(
-      _fbb,
-      _block_size);
-}
-
-inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SubOptionsT>(new SubOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-  { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; }
-}
-
-inline flatbuffers::Offset<SubOptions> SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSubOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SubOptions> CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  auto _pot_scale_int16 = _o->pot_scale_int16;
-  return tflite::CreateSubOptions(
-      _fbb,
-      _fused_activation_function,
-      _pot_scale_int16);
-}
-
-inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DivOptionsT>(new DivOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = fused_activation_function(); _o->fused_activation_function = _e; }
-}
-
-inline flatbuffers::Offset<DivOptions> DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDivOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DivOptions> CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _fused_activation_function = _o->fused_activation_function;
-  return tflite::CreateDivOptions(
-      _fbb,
-      _fused_activation_function);
-}
-
-inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<TopKV2OptionsT>(new TopKV2OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<TopKV2Options> TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTopKV2Options(_fbb, _o, _rehasher);
-}
-
-inline
-    flatbuffers::Offset<TopKV2Options> CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateTopKV2Options(
-      _fbb);
-}
-
-inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<EmbeddingLookupSparseOptionsT>(new EmbeddingLookupSparseOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = combiner(); _o->combiner = _e; }
-}
-
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions> EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<EmbeddingLookupSparseOptions> CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _combiner = _o->combiner;
-  return tflite::CreateEmbeddingLookupSparseOptions(
-      _fbb,
-      _combiner);
-}
-
-inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<GatherOptionsT>(new GatherOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = axis(); _o->axis = _e; }
-  { auto _e = batch_dims(); _o->batch_dims = _e; }
-}
-
-inline flatbuffers::Offset<GatherOptions> GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateGatherOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<GatherOptions> CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _axis = _o->axis;
-  auto _batch_dims = _o->batch_dims;
-  return tflite::CreateGatherOptions(
-      _fbb,
-      _axis,
-      _batch_dims);
-}
-
-inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<TransposeOptionsT>(new TransposeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<TransposeOptions> TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTransposeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<TransposeOptions> CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateTransposeOptions(
-      _fbb);
-}
-
-inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ExpOptionsT>(new ExpOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ExpOptions> ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateExpOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ExpOptions> CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateExpOptions(
-      _fbb);
-}
-
-inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<CosOptionsT>(new CosOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<CosOptions> CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateCosOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<CosOptions> CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateCosOptions(
-      _fbb);
-}
-
-inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ReducerOptionsT>(new ReducerOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = keep_dims(); _o->keep_dims = _e; }
-}
-
-inline flatbuffers::Offset<ReducerOptions> ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateReducerOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ReducerOptions> CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _keep_dims = _o->keep_dims;
-  return tflite::CreateReducerOptions(
-      _fbb,
-      _keep_dims);
-}
-
-inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SqueezeOptionsT>(new SqueezeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<SqueezeOptions> SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSqueezeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SqueezeOptions> CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0;
-  return tflite::CreateSqueezeOptions(
-      _fbb,
-      _squeeze_dims);
-}
-
-inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SplitOptionsT>(new SplitOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = num_splits(); _o->num_splits = _e; }
-}
-
-inline flatbuffers::Offset<SplitOptions> SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSplitOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SplitOptions> CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _num_splits = _o->num_splits;
-  return tflite::CreateSplitOptions(
-      _fbb,
-      _num_splits);
-}
-
-inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SplitVOptionsT>(new SplitVOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = num_splits(); _o->num_splits = _e; }
-}
-
-inline flatbuffers::Offset<SplitVOptions> SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSplitVOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SplitVOptions> CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _num_splits = _o->num_splits;
-  return tflite::CreateSplitVOptions(
-      _fbb,
-      _num_splits);
-}
-
-inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<StridedSliceOptionsT>(new StridedSliceOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = begin_mask(); _o->begin_mask = _e; }
-  { auto _e = end_mask(); _o->end_mask = _e; }
-  { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; }
-  { auto _e = new_axis_mask(); _o->new_axis_mask = _e; }
-  { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; }
-}
-
-inline flatbuffers::Offset<StridedSliceOptions> StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateStridedSliceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<StridedSliceOptions> CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _begin_mask = _o->begin_mask;
-  auto _end_mask = _o->end_mask;
-  auto _ellipsis_mask = _o->ellipsis_mask;
-  auto _new_axis_mask = _o->new_axis_mask;
-  auto _shrink_axis_mask = _o->shrink_axis_mask;
-  return tflite::CreateStridedSliceOptions(
-      _fbb,
-      _begin_mask,
-      _end_mask,
-      _ellipsis_mask,
-      _new_axis_mask,
-      _shrink_axis_mask);
-}
-
-inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LogSoftmaxOptionsT>(new LogSoftmaxOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<LogSoftmaxOptions> LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLogSoftmaxOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LogSoftmaxOptions> CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateLogSoftmaxOptions(
-      _fbb);
-}
-
-inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<CastOptionsT>(new CastOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = in_data_type(); _o->in_data_type = _e; }
-  { auto _e = out_data_type(); _o->out_data_type = _e; }
-}
-
-inline flatbuffers::Offset<CastOptions> CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateCastOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<CastOptions> CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _in_data_type = _o->in_data_type;
-  auto _out_data_type = _o->out_data_type;
-  return tflite::CreateCastOptions(
-      _fbb,
-      _in_data_type,
-      _out_data_type);
-}
-
-inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DequantizeOptionsT>(new DequantizeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<DequantizeOptions> DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDequantizeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DequantizeOptions> CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateDequantizeOptions(
-      _fbb);
-}
-
-inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<MaximumMinimumOptionsT>(new MaximumMinimumOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<MaximumMinimumOptions> MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateMaximumMinimumOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<MaximumMinimumOptions> CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateMaximumMinimumOptions(
-      _fbb);
-}
-
-inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<TileOptionsT>(new TileOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<TileOptions> TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTileOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<TileOptions> CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateTileOptions(
-      _fbb);
-}
-
-inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ArgMaxOptionsT>(new ArgMaxOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = output_type(); _o->output_type = _e; }
-}
-
-inline flatbuffers::Offset<ArgMaxOptions> ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateArgMaxOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ArgMaxOptions> CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _output_type = _o->output_type;
-  return tflite::CreateArgMaxOptions(
-      _fbb,
-      _output_type);
-}
-
-inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ArgMinOptionsT>(new ArgMinOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = output_type(); _o->output_type = _e; }
-}
-
-inline flatbuffers::Offset<ArgMinOptions> ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateArgMinOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ArgMinOptions> CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _output_type = _o->output_type;
-  return tflite::CreateArgMinOptions(
-      _fbb,
-      _output_type);
-}
-
-inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<GreaterOptionsT>(new GreaterOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<GreaterOptions> GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateGreaterOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<GreaterOptions> CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateGreaterOptions(
-      _fbb);
-}
-
-inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<GreaterEqualOptionsT>(new GreaterEqualOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<GreaterEqualOptions> GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateGreaterEqualOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<GreaterEqualOptions> CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateGreaterEqualOptions(
-      _fbb);
-}
-
-inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LessOptionsT>(new LessOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<LessOptions> LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLessOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LessOptions> CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateLessOptions(
-      _fbb);
-}
-
-inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LessEqualOptionsT>(new LessEqualOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<LessEqualOptions> LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLessEqualOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LessEqualOptions> CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateLessEqualOptions(
-      _fbb);
-}
-
-inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<NegOptionsT>(new NegOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<NegOptions> NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateNegOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateNegOptions(
-      _fbb);
-}
-
-inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SelectOptionsT>(new SelectOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SelectOptions> SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSelectOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SelectOptions> CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSelectOptions(
-      _fbb);
-}
-
-inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SliceOptionsT>(new SliceOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SliceOptions> SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSliceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SliceOptions> CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSliceOptions(
-      _fbb);
-}
-
-inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<TransposeConvOptionsT>(new TransposeConvOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = padding(); _o->padding = _e; }
-  { auto _e = stride_w(); _o->stride_w = _e; }
-  { auto _e = stride_h(); _o->stride_h = _e; }
-}
-
-inline flatbuffers::Offset<TransposeConvOptions> TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTransposeConvOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<TransposeConvOptions> CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _padding = _o->padding;
-  auto _stride_w = _o->stride_w;
-  auto _stride_h = _o->stride_h;
-  return tflite::CreateTransposeConvOptions(
-      _fbb,
-      _padding,
-      _stride_w,
-      _stride_h);
-}
-
-inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ExpandDimsOptionsT>(new ExpandDimsOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ExpandDimsOptions> ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateExpandDimsOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ExpandDimsOptions> CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateExpandDimsOptions(
-      _fbb);
-}
-
-inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SparseToDenseOptionsT>(new SparseToDenseOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = validate_indices(); _o->validate_indices = _e; }
-}
-
-inline flatbuffers::Offset<SparseToDenseOptions> SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSparseToDenseOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SparseToDenseOptions> CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _validate_indices = _o->validate_indices;
-  return tflite::CreateSparseToDenseOptions(
-      _fbb,
-      _validate_indices);
-}
-
-inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<EqualOptionsT>(new EqualOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<EqualOptions> EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateEqualOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<EqualOptions> CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateEqualOptions(
-      _fbb);
-}
-
-inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<NotEqualOptionsT>(new NotEqualOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<NotEqualOptions> NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateNotEqualOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<NotEqualOptions> CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateNotEqualOptions(
-      _fbb);
-}
-
-inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ShapeOptionsT>(new ShapeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = out_type(); _o->out_type = _e; }
-}
-
-inline flatbuffers::Offset<ShapeOptions> ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateShapeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ShapeOptions> CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _out_type = _o->out_type;
-  return tflite::CreateShapeOptions(
-      _fbb,
-      _out_type);
-}
-
-inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<RankOptionsT>(new RankOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<RankOptions> RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateRankOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<RankOptions> CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateRankOptions(
-      _fbb);
-}
-
-inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<PowOptionsT>(new PowOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<PowOptions> PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreatePowOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<PowOptions> CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreatePowOptions(
-      _fbb);
-}
-
-inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<FakeQuantOptionsT>(new FakeQuantOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = min(); _o->min = _e; }
-  { auto _e = max(); _o->max = _e; }
-  { auto _e = num_bits(); _o->num_bits = _e; }
-  { auto _e = narrow_range(); _o->narrow_range = _e; }
-}
-
-inline flatbuffers::Offset<FakeQuantOptions> FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateFakeQuantOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _min = _o->min;
-  auto _max = _o->max;
-  auto _num_bits = _o->num_bits;
-  auto _narrow_range = _o->narrow_range;
-  return tflite::CreateFakeQuantOptions(
-      _fbb,
-      _min,
-      _max,
-      _num_bits,
-      _narrow_range);
-}
-
-inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<PackOptionsT>(new PackOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = values_count(); _o->values_count = _e; }
-  { auto _e = axis(); _o->axis = _e; }
-}
-
-inline flatbuffers::Offset<PackOptions> PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreatePackOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<PackOptions> CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _values_count = _o->values_count;
-  auto _axis = _o->axis;
-  return tflite::CreatePackOptions(
-      _fbb,
-      _values_count,
-      _axis);
-}
-
-inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LogicalOrOptionsT>(new LogicalOrOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<LogicalOrOptions> LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLogicalOrOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateLogicalOrOptions(
-      _fbb);
-}
-
-inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<OneHotOptionsT>(new OneHotOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = axis(); _o->axis = _e; }
-}
-
-inline flatbuffers::Offset<OneHotOptions> OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateOneHotOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<OneHotOptions> CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _axis = _o->axis;
-  return tflite::CreateOneHotOptions(
-      _fbb,
-      _axis);
-}
-
-inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<AbsOptionsT>(new AbsOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<AbsOptions> AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateAbsOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<AbsOptions> CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateAbsOptions(
-      _fbb);
-}
-
-inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<HardSwishOptionsT>(new HardSwishOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<HardSwishOptions> HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateHardSwishOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<HardSwishOptions> CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateHardSwishOptions(
-      _fbb);
-}
-
-inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LogicalAndOptionsT>(new LogicalAndOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<LogicalAndOptions> LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLogicalAndOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LogicalAndOptions> CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateLogicalAndOptions(
-      _fbb);
-}
-
-inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LogicalNotOptionsT>(new LogicalNotOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<LogicalNotOptions> LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLogicalNotOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LogicalNotOptions> CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateLogicalNotOptions(
-      _fbb);
-}
-
-inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UnpackOptionsT>(new UnpackOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = num(); _o->num = _e; }
-  { auto _e = axis(); _o->axis = _e; }
-}
-
-inline flatbuffers::Offset<UnpackOptions> UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUnpackOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UnpackOptions> CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _num = _o->num;
-  auto _axis = _o->axis;
-  return tflite::CreateUnpackOptions(
-      _fbb,
-      _num,
-      _axis);
-}
-
-inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<FloorDivOptionsT>(new FloorDivOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<FloorDivOptions> FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateFloorDivOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<FloorDivOptions> CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateFloorDivOptions(
-      _fbb);
-}
-
-inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SquareOptionsT>(new SquareOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SquareOptions> SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSquareOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SquareOptions> CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSquareOptions(
-      _fbb);
-}
-
-inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ZerosLikeOptionsT>(new ZerosLikeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ZerosLikeOptions> ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateZerosLikeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ZerosLikeOptions> CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateZerosLikeOptions(
-      _fbb);
-}
-
-inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<FillOptionsT>(new FillOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<FillOptions> FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateFillOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<FillOptions> CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateFillOptions(
-      _fbb);
-}
-
-inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<FloorModOptionsT>(new FloorModOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<FloorModOptions> FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateFloorModOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<FloorModOptions> CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateFloorModOptions(
-      _fbb);
-}
-
-inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<RangeOptionsT>(new RangeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<RangeOptions> RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateRangeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<RangeOptions> CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateRangeOptions(
-      _fbb);
-}
-
-inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<LeakyReluOptionsT>(new LeakyReluOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = alpha(); _o->alpha = _e; }
-}
-
-inline flatbuffers::Offset<LeakyReluOptions> LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateLeakyReluOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<LeakyReluOptions> CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _alpha = _o->alpha;
-  return tflite::CreateLeakyReluOptions(
-      _fbb,
-      _alpha);
-}
-
-inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SquaredDifferenceOptionsT>(new SquaredDifferenceOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SquaredDifferenceOptions> SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SquaredDifferenceOptions> CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSquaredDifferenceOptions(
-      _fbb);
-}
-
-inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<MirrorPadOptionsT>(new MirrorPadOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = mode(); _o->mode = _e; }
-}
-
-inline flatbuffers::Offset<MirrorPadOptions> MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateMirrorPadOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<MirrorPadOptions> CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _mode = _o->mode;
-  return tflite::CreateMirrorPadOptions(
-      _fbb,
-      _mode);
-}
-
-inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UniqueOptionsT>(new UniqueOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = idx_out_type(); _o->idx_out_type = _e; }
-}
-
-inline flatbuffers::Offset<UniqueOptions> UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUniqueOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UniqueOptions> CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _idx_out_type = _o->idx_out_type;
-  return tflite::CreateUniqueOptions(
-      _fbb,
-      _idx_out_type);
-}
-
-inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ReverseV2OptionsT>(new ReverseV2OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ReverseV2Options> ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateReverseV2Options(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ReverseV2Options> CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateReverseV2Options(
-      _fbb);
-}
-
-inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<AddNOptionsT>(new AddNOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<AddNOptions> AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateAddNOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<AddNOptions> CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateAddNOptions(
-      _fbb);
-}
-
-inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<GatherNdOptionsT>(new GatherNdOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<GatherNdOptions> GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateGatherNdOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<GatherNdOptions> CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateGatherNdOptions(
-      _fbb);
-}
-
-inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<WhereOptionsT>(new WhereOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<WhereOptions> WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateWhereOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<WhereOptions> CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateWhereOptions(
-      _fbb);
-}
-
-inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ReverseSequenceOptionsT>(new ReverseSequenceOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = seq_dim(); _o->seq_dim = _e; }
-  { auto _e = batch_dim(); _o->batch_dim = _e; }
-}
-
-inline flatbuffers::Offset<ReverseSequenceOptions> ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateReverseSequenceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ReverseSequenceOptions> CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _seq_dim = _o->seq_dim;
-  auto _batch_dim = _o->batch_dim;
-  return tflite::CreateReverseSequenceOptions(
-      _fbb,
-      _seq_dim,
-      _batch_dim);
-}
-
-inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<MatrixDiagOptionsT>(new MatrixDiagOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<MatrixDiagOptions> MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateMatrixDiagOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<MatrixDiagOptions> CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateMatrixDiagOptions(
-      _fbb);
-}
-
-inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<QuantizeOptionsT>(new QuantizeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateQuantizeOptions( - _fbb); -} - -inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new MatrixSetDiagOptionsT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMatrixSetDiagOptions( - _fbb); -} - -inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new IfOptionsT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; } - { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; } -} - -inline flatbuffers::Offset IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateIfOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _then_subgraph_index = _o->then_subgraph_index; - auto _else_subgraph_index = _o->else_subgraph_index; - return tflite::CreateIfOptions( - _fbb, - _then_subgraph_index, - _else_subgraph_index); -} - -inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = std::unique_ptr(new CallOnceOptionsT()); - UnPackTo(_o.get(), _resolver); - return _o.release(); -} - -inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; } -} - -inline flatbuffers::Offset 
-inline flatbuffers::Offset<CallOnceOptions> CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateCallOnceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _init_subgraph_index = _o->init_subgraph_index;
-  return tflite::CreateCallOnceOptions(
-      _fbb,
-      _init_subgraph_index);
-}
-
-inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<WhileOptionsT>(new WhileOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; }
-  { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; }
-}
-
-inline flatbuffers::Offset<WhileOptions> WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateWhileOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<WhileOptions> CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _cond_subgraph_index = _o->cond_subgraph_index;
-  auto _body_subgraph_index = _o->body_subgraph_index;
-  return tflite::CreateWhileOptions(
-      _fbb,
-      _cond_subgraph_index,
-      _body_subgraph_index);
-}
-
-inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<NonMaxSuppressionV4OptionsT>(new NonMaxSuppressionV4OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<NonMaxSuppressionV4Options> NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<NonMaxSuppressionV4Options> CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateNonMaxSuppressionV4Options(
-      _fbb);
-}
-
-inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<NonMaxSuppressionV5OptionsT>(new NonMaxSuppressionV5OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<NonMaxSuppressionV5Options> NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<NonMaxSuppressionV5Options> CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateNonMaxSuppressionV5Options(
-      _fbb);
-}
-
-inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ScatterNdOptionsT>(new ScatterNdOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ScatterNdOptions> ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateScatterNdOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ScatterNdOptions> CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateScatterNdOptions(
-      _fbb);
-}
-
-inline SelectV2OptionsT *SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SelectV2OptionsT>(new SelectV2OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SelectV2Options> SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSelectV2Options(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SelectV2Options> CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSelectV2Options(
-      _fbb);
-}
-
-inline DensifyOptionsT *DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DensifyOptionsT>(new DensifyOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<DensifyOptions> DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDensifyOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DensifyOptions> CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateDensifyOptions(
-      _fbb);
-}
-
-inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SegmentSumOptionsT>(new SegmentSumOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SegmentSumOptions> SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSegmentSumOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SegmentSumOptions> CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSegmentSumOptions(
-      _fbb);
-}
-
-inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BatchMatMulOptionsT>(new BatchMatMulOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = adj_x(); _o->adj_x = _e; }
-  { auto _e = adj_y(); _o->adj_y = _e; }
-  { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; }
-}
-
-inline flatbuffers::Offset<BatchMatMulOptions> BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBatchMatMulOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _adj_x = _o->adj_x;
-  auto _adj_y = _o->adj_y;
-  auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs;
-  return tflite::CreateBatchMatMulOptions(
-      _fbb,
-      _adj_x,
-      _adj_y,
-      _asymmetric_quantize_inputs);
-}
-
-inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<CumsumOptionsT>(new CumsumOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = exclusive(); _o->exclusive = _e; }
-  { auto _e = reverse(); _o->reverse = _e; }
-}
-
-inline flatbuffers::Offset<CumsumOptions> CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateCumsumOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _exclusive = _o->exclusive;
-  auto _reverse = _o->reverse;
-  return tflite::CreateCumsumOptions(
-      _fbb,
-      _exclusive,
-      _reverse);
-}
-
-inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BroadcastToOptionsT>(new BroadcastToOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<BroadcastToOptions> BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBroadcastToOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BroadcastToOptions> CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateBroadcastToOptions(
-      _fbb);
-}
-
-inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<Rfft2dOptionsT>(new Rfft2dOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<Rfft2dOptions> Rfft2dOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateRfft2dOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Rfft2dOptions> CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateRfft2dOptions(
-      _fbb);
-}
-
-inline HashtableOptionsT *HashtableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<HashtableOptionsT>(new HashtableOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = table_id(); _o->table_id = _e; }
-  { auto _e = key_dtype(); _o->key_dtype = _e; }
-  { auto _e = value_dtype(); _o->value_dtype = _e; }
-}
-
-inline flatbuffers::Offset<HashtableOptions> HashtableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateHashtableOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<HashtableOptions> CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _table_id = _o->table_id;
-  auto _key_dtype = _o->key_dtype;
-  auto _value_dtype = _o->value_dtype;
-  return tflite::CreateHashtableOptions(
-      _fbb,
-      _table_id,
-      _key_dtype,
-      _value_dtype);
-}
-
-inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<HashtableFindOptionsT>(new HashtableFindOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<HashtableFindOptions> HashtableFindOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateHashtableFindOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<HashtableFindOptions> CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateHashtableFindOptions(
-      _fbb);
-}
-
-inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<HashtableImportOptionsT>(new HashtableImportOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<HashtableImportOptions> HashtableImportOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateHashtableImportOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<HashtableImportOptions> CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateHashtableImportOptions(
-      _fbb);
-}
-
-inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<HashtableSizeOptionsT>(new HashtableSizeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<HashtableSizeOptions> HashtableSizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateHashtableSizeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<HashtableSizeOptions> CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateHashtableSizeOptions(
-      _fbb);
-}
-
-inline VarHandleOptionsT *VarHandleOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<VarHandleOptionsT>(new VarHandleOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void VarHandleOptions::UnPackTo(VarHandleOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = container(); if (_e) _o->container = _e->str(); }
-  { auto _e = shared_name(); if (_e) _o->shared_name = _e->str(); }
-}
-
-inline flatbuffers::Offset<VarHandleOptions> VarHandleOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateVarHandleOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<VarHandleOptions> CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const VarHandleOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _container = _o->container.empty() ? 0 : _fbb.CreateString(_o->container);
-  auto _shared_name = _o->shared_name.empty() ? 0 : _fbb.CreateString(_o->shared_name);
-  return tflite::CreateVarHandleOptions(
-      _fbb,
-      _container,
-      _shared_name);
-}
-
-inline ReadVariableOptionsT *ReadVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ReadVariableOptionsT>(new ReadVariableOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ReadVariableOptions::UnPackTo(ReadVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ReadVariableOptions> ReadVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateReadVariableOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ReadVariableOptions> CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReadVariableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateReadVariableOptions(
-      _fbb);
-}
-
-inline AssignVariableOptionsT *AssignVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<AssignVariableOptionsT>(new AssignVariableOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void AssignVariableOptions::UnPackTo(AssignVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<AssignVariableOptions> AssignVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateAssignVariableOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<AssignVariableOptions> CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AssignVariableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateAssignVariableOptions(
-      _fbb);
-}
-
-inline RandomOptionsT *RandomOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<RandomOptionsT>(new RandomOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void RandomOptions::UnPackTo(RandomOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = seed(); _o->seed = _e; }
-  { auto _e = seed2(); _o->seed2 = _e; }
-}
-
-inline flatbuffers::Offset<RandomOptions> RandomOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateRandomOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<RandomOptions> CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RandomOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _seed = _o->seed;
-  auto _seed2 = _o->seed2;
-  return tflite::CreateRandomOptions(
-      _fbb,
-      _seed,
-      _seed2);
-}
-
-inline BucketizeOptionsT *BucketizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BucketizeOptionsT>(new BucketizeOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void BucketizeOptions::UnPackTo(BucketizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = boundaries(); if (_e) { _o->boundaries.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->boundaries[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<BucketizeOptions> BucketizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBucketizeOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BucketizeOptions> CreateBucketizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BucketizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _boundaries = _o->boundaries.size() ? _fbb.CreateVector(_o->boundaries) : 0;
-  return tflite::CreateBucketizeOptions(
-      _fbb,
-      _boundaries);
-}
-
-inline GeluOptionsT *GeluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<GeluOptionsT>(new GeluOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void GeluOptions::UnPackTo(GeluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = approximate(); _o->approximate = _e; }
-}
-
-inline flatbuffers::Offset<GeluOptions> GeluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateGeluOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<GeluOptions> CreateGeluOptions(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GeluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _approximate = _o->approximate;
-  return tflite::CreateGeluOptions(
-      _fbb,
-      _approximate);
-}
-
-inline DynamicUpdateSliceOptionsT *DynamicUpdateSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<DynamicUpdateSliceOptionsT>(new DynamicUpdateSliceOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void DynamicUpdateSliceOptions::UnPackTo(DynamicUpdateSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<DynamicUpdateSliceOptions> DynamicUpdateSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateDynamicUpdateSliceOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<DynamicUpdateSliceOptions> CreateDynamicUpdateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DynamicUpdateSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateDynamicUpdateSliceOptions(
-      _fbb);
-}
-
-inline UnsortedSegmentProdOptionsT *UnsortedSegmentProdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UnsortedSegmentProdOptionsT>(new UnsortedSegmentProdOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UnsortedSegmentProdOptions::UnPackTo(UnsortedSegmentProdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<UnsortedSegmentProdOptions> UnsortedSegmentProdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUnsortedSegmentProdOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UnsortedSegmentProdOptions> CreateUnsortedSegmentProdOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentProdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateUnsortedSegmentProdOptions(
-      _fbb);
-}
-
-inline UnsortedSegmentMaxOptionsT *UnsortedSegmentMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UnsortedSegmentMaxOptionsT>(new UnsortedSegmentMaxOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UnsortedSegmentMaxOptions::UnPackTo(UnsortedSegmentMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<UnsortedSegmentMaxOptions> UnsortedSegmentMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUnsortedSegmentMaxOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UnsortedSegmentMaxOptions> CreateUnsortedSegmentMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateUnsortedSegmentMaxOptions(
-      _fbb);
-}
-
-inline UnsortedSegmentSumOptionsT *UnsortedSegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UnsortedSegmentSumOptionsT>(new UnsortedSegmentSumOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UnsortedSegmentSumOptions::UnPackTo(UnsortedSegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<UnsortedSegmentSumOptions> UnsortedSegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUnsortedSegmentSumOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UnsortedSegmentSumOptions> CreateUnsortedSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateUnsortedSegmentSumOptions(
-      _fbb);
-}
-
-inline ATan2OptionsT *ATan2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ATan2OptionsT>(new ATan2OptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void ATan2Options::UnPackTo(ATan2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<ATan2Options> ATan2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateATan2Options(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<ATan2Options> CreateATan2Options(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ATan2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateATan2Options(
-      _fbb);
-}
-
-inline UnsortedSegmentMinOptionsT *UnsortedSegmentMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<UnsortedSegmentMinOptionsT>(new UnsortedSegmentMinOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void UnsortedSegmentMinOptions::UnPackTo(UnsortedSegmentMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<UnsortedSegmentMinOptions> UnsortedSegmentMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateUnsortedSegmentMinOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<UnsortedSegmentMinOptions> CreateUnsortedSegmentMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateUnsortedSegmentMinOptions(
-      _fbb);
-}
-
-inline SignOptionsT *SignOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SignOptionsT>(new SignOptionsT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SignOptions::UnPackTo(SignOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-}
-
-inline flatbuffers::Offset<SignOptions> SignOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSignOptions(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SignOptions> CreateSignOptions(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  return tflite::CreateSignOptions(
-      _fbb);
-}
-
-inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<OperatorCodeT>(new OperatorCodeT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; }
-  { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); }
-  { auto _e = version(); _o->version = _e; }
-  { auto _e = builtin_code(); _o->builtin_code = _e; }
-}
-
-inline flatbuffers::Offset<OperatorCode> OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateOperatorCode(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _deprecated_builtin_code = _o->deprecated_builtin_code;
-  auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code);
-  auto _version = _o->version;
-  auto _builtin_code = _o->builtin_code;
-  return tflite::CreateOperatorCode(
-      _fbb,
-      _deprecated_builtin_code,
-      _custom_code,
-      _version,
-      _builtin_code);
-}
-
-inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<OperatorT>(new OperatorT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = opcode_index(); _o->opcode_index = _e; }
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }
-  { auto _e = builtin_options_type(); _o->builtin_options.type = _e; }
-  { auto _e = builtin_options(); if (_e) _o->builtin_options.value = tflite::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); }
-  { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_options.begin()); } }
-  { auto _e = custom_options_format(); _o->custom_options_format = _e; }
-  { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } }
-  { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } }
-}
-
-inline flatbuffers::Offset<Operator> Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateOperator(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Operator> CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _opcode_index = _o->opcode_index;
-  auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
-  auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
-  auto _builtin_options_type = _o->builtin_options.type;
-  auto _builtin_options = _o->builtin_options.Pack(_fbb);
-  auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0;
-  auto _custom_options_format = _o->custom_options_format;
-  auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0;
-  auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0;
-  return tflite::CreateOperator(
-      _fbb,
-      _opcode_index,
-      _inputs,
-      _outputs,
-      _builtin_options_type,
-      _builtin_options,
-      _custom_options,
-      _custom_options_format,
-      _mutating_variable_inputs,
-      _intermediates);
-}
-
-inline SubGraphT::SubGraphT(const SubGraphT &o)
-      : inputs(o.inputs),
-        outputs(o.outputs),
-        name(o.name) {
-  tensors.reserve(o.tensors.size());
-  for (const auto &tensors_ : o.tensors) { tensors.emplace_back((tensors_) ? new tflite::TensorT(*tensors_) : nullptr); }
-  operators.reserve(o.operators.size());
-  for (const auto &operators_ : o.operators) { operators.emplace_back((operators_) ? new tflite::OperatorT(*operators_) : nullptr); }
-}
-
-inline SubGraphT &SubGraphT::operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT {
-  std::swap(tensors, o.tensors);
-  std::swap(inputs, o.inputs);
-  std::swap(outputs, o.outputs);
-  std::swap(operators, o.operators);
-  std::swap(name, o.name);
-  return *this;
-}
-
-inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SubGraphT>(new SubGraphT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->tensors[_i]) { _e->Get(_i)->UnPackTo(_o->tensors[_i].get(), _resolver); } else { _o->tensors[_i] = std::unique_ptr<tflite::TensorT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } }
-  { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operators[_i]) { _e->Get(_i)->UnPackTo(_o->operators[_i].get(), _resolver); } else { _o->operators[_i] = std::unique_ptr<tflite::OperatorT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = name(); if (_e) _o->name = _e->str(); }
-}
-
-inline flatbuffers::Offset<SubGraph> SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSubGraph(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SubGraph> CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _tensors = _o->tensors.size() ? _fbb.CreateVector<flatbuffers::Offset<Tensor>> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0;
-  auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0;
-  auto _operators = _o->operators.size() ? _fbb.CreateVector<flatbuffers::Offset<Operator>> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
-  return tflite::CreateSubGraph(
-      _fbb,
-      _tensors,
-      _inputs,
-      _outputs,
-      _operators,
-      _name);
-}
-
-inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<BufferT>(new BufferT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
-}
-
-inline flatbuffers::Offset<Buffer> Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBuffer(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16);
-  auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
-  return tflite::CreateBuffer(
-      _fbb,
-      _data);
-}
-
-inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<MetadataT>(new MetadataT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = name(); if (_e) _o->name = _e->str(); }
-  { auto _e = buffer(); _o->buffer = _e; }
-}
-
-inline flatbuffers::Offset<Metadata> Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateMetadata(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Metadata> CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
-  auto _buffer = _o->buffer;
-  return tflite::CreateMetadata(
-      _fbb,
-      _name,
-      _buffer);
-}
-
-inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<TensorMapT>(new TensorMapT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void TensorMap::UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = name(); if (_e) _o->name = _e->str(); }
-  { auto _e = tensor_index(); _o->tensor_index = _e; }
-}
-
-inline flatbuffers::Offset<TensorMap> TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTensorMap(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<TensorMap> CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
-  auto _tensor_index = _o->tensor_index;
-  return tflite::CreateTensorMap(
-      _fbb,
-      _name,
-      _tensor_index);
-}
-
-inline SignatureDefT::SignatureDefT(const SignatureDefT &o)
-      : signature_key(o.signature_key),
-        subgraph_index(o.subgraph_index) {
-  inputs.reserve(o.inputs.size());
-  for (const auto &inputs_ : o.inputs) { inputs.emplace_back((inputs_) ? new tflite::TensorMapT(*inputs_) : nullptr); }
-  outputs.reserve(o.outputs.size());
-  for (const auto &outputs_ : o.outputs) { outputs.emplace_back((outputs_) ? new tflite::TensorMapT(*outputs_) : nullptr); }
-}
-
-inline SignatureDefT &SignatureDefT::operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT {
-  std::swap(inputs, o.inputs);
-  std::swap(outputs, o.outputs);
-  std::swap(signature_key, o.signature_key);
-  std::swap(subgraph_index, o.subgraph_index);
-  return *this;
-}
-
-inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<SignatureDefT>(new SignatureDefT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void SignatureDef::UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inputs[_i]) { _e->Get(_i)->UnPackTo(_o->inputs[_i].get(), _resolver); } else { _o->inputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = signature_key(); if (_e) _o->signature_key = _e->str(); }
-  { auto _e = subgraph_index(); _o->subgraph_index = _e; }
-}
-
-inline flatbuffers::Offset<SignatureDef> SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSignatureDef(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SignatureDef> CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _inputs = _o->inputs.size() ? _fbb.CreateVector<flatbuffers::Offset<TensorMap>> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _outputs = _o->outputs.size() ? _fbb.CreateVector<flatbuffers::Offset<TensorMap>> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _signature_key = _o->signature_key.empty() ? 0 : _fbb.CreateString(_o->signature_key);
-  auto _subgraph_index = _o->subgraph_index;
-  return tflite::CreateSignatureDef(
-      _fbb,
-      _inputs,
-      _outputs,
-      _signature_key,
-      _subgraph_index);
-}
-
-inline ModelT::ModelT(const ModelT &o)
-      : version(o.version),
-        description(o.description),
-        metadata_buffer(o.metadata_buffer) {
-  operator_codes.reserve(o.operator_codes.size());
-  for (const auto &operator_codes_ : o.operator_codes) { operator_codes.emplace_back((operator_codes_) ? new tflite::OperatorCodeT(*operator_codes_) : nullptr); }
-  subgraphs.reserve(o.subgraphs.size());
-  for (const auto &subgraphs_ : o.subgraphs) { subgraphs.emplace_back((subgraphs_) ? new tflite::SubGraphT(*subgraphs_) : nullptr); }
-  buffers.reserve(o.buffers.size());
-  for (const auto &buffers_ : o.buffers) { buffers.emplace_back((buffers_) ? new tflite::BufferT(*buffers_) : nullptr); }
-  metadata.reserve(o.metadata.size());
-  for (const auto &metadata_ : o.metadata) { metadata.emplace_back((metadata_) ? new tflite::MetadataT(*metadata_) : nullptr); }
-  signature_defs.reserve(o.signature_defs.size());
-  for (const auto &signature_defs_ : o.signature_defs) { signature_defs.emplace_back((signature_defs_) ? new tflite::SignatureDefT(*signature_defs_) : nullptr); }
-}
-
-inline ModelT &ModelT::operator=(ModelT o) FLATBUFFERS_NOEXCEPT {
-  std::swap(version, o.version);
-  std::swap(operator_codes, o.operator_codes);
-  std::swap(subgraphs, o.subgraphs);
-  std::swap(description, o.description);
-  std::swap(buffers, o.buffers);
-  std::swap(metadata_buffer, o.metadata_buffer);
-  std::swap(metadata, o.metadata);
-  std::swap(signature_defs, o.signature_defs);
-  return *this;
-}
-
-inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = std::unique_ptr<ModelT>(new ModelT());
-  UnPackTo(_o.get(), _resolver);
-  return _o.release();
-}
-
-inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = version(); _o->version = _e; }
-  { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operator_codes[_i]) { _e->Get(_i)->UnPackTo(_o->operator_codes[_i].get(), _resolver); } else { _o->operator_codes[_i] = std::unique_ptr<tflite::OperatorCodeT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::SubGraphT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = description(); if (_e) _o->description = _e->str(); }
-  { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->buffers[_i]) { _e->Get(_i)->UnPackTo(_o->buffers[_i].get(), _resolver); } else { _o->buffers[_i] = std::unique_ptr<tflite::BufferT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } }
-  { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metadata[_i]) { _e->Get(_i)->UnPackTo(_o->metadata[_i].get(), _resolver); } else { _o->metadata[_i] = std::unique_ptr<tflite::MetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-  { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->signature_defs[_i]) { _e->Get(_i)->UnPackTo(_o->signature_defs[_i].get(), _resolver); } else { _o->signature_defs[_i] = std::unique_ptr<tflite::SignatureDefT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
-}
-
-inline flatbuffers::Offset<Model> Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateModel(_fbb, _o, _rehasher);
-}
-
_fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); - auto _buffers = _o->buffers.size() ? _fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; - auto _metadata = _o->metadata.size() ? _fbb.CreateVector> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _signature_defs = _o->signature_defs.size() ? _fbb.CreateVector> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0; - return tflite::CreateModel( - _fbb, - _version, - _operator_codes, - _subgraphs, - _description, - _buffers, - _metadata_buffer, - _metadata, - _signature_defs); -} - -inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { - switch (type) { - case QuantizationDetails_NONE: { - return true; - } - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} - -inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyQuantizationDetails( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} - -inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) { - (void)resolver; - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} - -inline flatbuffers::Offset QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - (void)_rehasher; - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} - -inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) : type(u.type), value(nullptr) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - value = new tflite::CustomQuantizationT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} - -inline void QuantizationDetailsUnion::Reset() { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = QuantizationDetails_NONE; -} - -inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) { - switch (type) { - case SparseIndexVector_NONE: { - return true; - } - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); - return 
-    case SparseIndexVector_Uint16Vector: {
-      auto ptr = reinterpret_cast<const tflite::Uint16Vector *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case SparseIndexVector_Uint8Vector: {
-      auto ptr = reinterpret_cast<const tflite::Uint8Vector *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    default: return true;
-  }
-}
-
-inline bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
-  if (!values || !types) return !values && !types;
-  if (values->size() != types->size()) return false;
-  for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
-    if (!VerifySparseIndexVector(
-        verifier, values->Get(i), types->GetEnum<SparseIndexVector>(i))) {
-      return false;
-    }
-  }
-  return true;
-}
-
-inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver) {
-  (void)resolver;
-  switch (type) {
-    case SparseIndexVector_Int32Vector: {
-      auto ptr = reinterpret_cast<const tflite::Int32Vector *>(obj);
-      return ptr->UnPack(resolver);
-    }
-    case SparseIndexVector_Uint16Vector: {
-      auto ptr = reinterpret_cast<const tflite::Uint16Vector *>(obj);
-      return ptr->UnPack(resolver);
-    }
-    case SparseIndexVector_Uint8Vector: {
-      auto ptr = reinterpret_cast<const tflite::Uint8Vector *>(obj);
-      return ptr->UnPack(resolver);
-    }
-    default: return nullptr;
-  }
-}
-
-inline flatbuffers::Offset<void> SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const {
-  (void)_rehasher;
-  switch (type) {
-    case SparseIndexVector_Int32Vector: {
-      auto ptr = reinterpret_cast<const tflite::Int32VectorT *>(value);
-      return CreateInt32Vector(_fbb, ptr, _rehasher).Union();
-    }
-    case SparseIndexVector_Uint16Vector: {
-      auto ptr = reinterpret_cast<const tflite::Uint16VectorT *>(value);
-      return CreateUint16Vector(_fbb, ptr, _rehasher).Union();
-    }
-    case SparseIndexVector_Uint8Vector: {
-      auto ptr = reinterpret_cast<const tflite::Uint8VectorT *>(value);
-      return CreateUint8Vector(_fbb, ptr, _rehasher).Union();
-    }
-    default: return 0;
-  }
-}
-
-inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u) : type(u.type), value(nullptr) {
-  switch (type) {
-    case SparseIndexVector_Int32Vector: {
-      value = new tflite::Int32VectorT(*reinterpret_cast<tflite::Int32VectorT *>(u.value));
-      break;
-    }
-    case SparseIndexVector_Uint16Vector: {
-      value = new tflite::Uint16VectorT(*reinterpret_cast<tflite::Uint16VectorT *>(u.value));
-      break;
-    }
-    case SparseIndexVector_Uint8Vector: {
-      value = new tflite::Uint8VectorT(*reinterpret_cast<tflite::Uint8VectorT *>(u.value));
-      break;
-    }
-    default:
-      break;
-  }
-}
-
-inline void SparseIndexVectorUnion::Reset() {
-  switch (type) {
-    case SparseIndexVector_Int32Vector: {
-      auto ptr = reinterpret_cast<tflite::Int32VectorT *>(value);
-      delete ptr;
-      break;
-    }
-    case SparseIndexVector_Uint16Vector: {
-      auto ptr = reinterpret_cast<tflite::Uint16VectorT *>(value);
-      delete ptr;
-      break;
-    }
-    case SparseIndexVector_Uint8Vector: {
-      auto ptr = reinterpret_cast<tflite::Uint8VectorT *>(value);
-      delete ptr;
-      break;
-    }
-    default: break;
-  }
-  value = nullptr;
-  type = SparseIndexVector_NONE;
-}
-
-inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) {
-  switch (type) {
-    case BuiltinOptions_NONE: {
-      return true;
-    }
-    case BuiltinOptions_Conv2DOptions: {
-      auto ptr = reinterpret_cast<const tflite::Conv2DOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_DepthwiseConv2DOptions: {
-      auto ptr = reinterpret_cast<const tflite::DepthwiseConv2DOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_ConcatEmbeddingsOptions: {
-      auto ptr = reinterpret_cast<const tflite::ConcatEmbeddingsOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_LSHProjectionOptions: {
-      auto ptr = reinterpret_cast<const tflite::LSHProjectionOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_Pool2DOptions: {
-      auto ptr = reinterpret_cast<const tflite::Pool2DOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SVDFOptions: {
-      auto ptr = reinterpret_cast<const tflite::SVDFOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_RNNOptions: {
-      auto ptr = reinterpret_cast<const tflite::RNNOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_FullyConnectedOptions: {
-      auto ptr = reinterpret_cast<const tflite::FullyConnectedOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SoftmaxOptions: {
-      auto ptr = reinterpret_cast<const tflite::SoftmaxOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_ConcatenationOptions: {
-      auto ptr = reinterpret_cast<const tflite::ConcatenationOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_AddOptions: {
-      auto ptr = reinterpret_cast<const tflite::AddOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_L2NormOptions: {
-      auto ptr = reinterpret_cast<const tflite::L2NormOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_LocalResponseNormalizationOptions: {
-      auto ptr = reinterpret_cast<const tflite::LocalResponseNormalizationOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_LSTMOptions: {
-      auto ptr = reinterpret_cast<const tflite::LSTMOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_ResizeBilinearOptions: {
-      auto ptr = reinterpret_cast<const tflite::ResizeBilinearOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_CallOptions: {
-      auto ptr = reinterpret_cast<const tflite::CallOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_ReshapeOptions: {
-      auto ptr = reinterpret_cast<const tflite::ReshapeOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SkipGramOptions: {
-      auto ptr = reinterpret_cast<const tflite::SkipGramOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SpaceToDepthOptions: {
-      auto ptr = reinterpret_cast<const tflite::SpaceToDepthOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_EmbeddingLookupSparseOptions: {
-      auto ptr = reinterpret_cast<const tflite::EmbeddingLookupSparseOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_MulOptions: {
-      auto ptr = reinterpret_cast<const tflite::MulOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_PadOptions: {
-      auto ptr = reinterpret_cast<const tflite::PadOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_GatherOptions: {
-      auto ptr = reinterpret_cast<const tflite::GatherOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_BatchToSpaceNDOptions: {
-      auto ptr = reinterpret_cast<const tflite::BatchToSpaceNDOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SpaceToBatchNDOptions: {
-      auto ptr = reinterpret_cast<const tflite::SpaceToBatchNDOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_TransposeOptions: {
-      auto ptr = reinterpret_cast<const tflite::TransposeOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_ReducerOptions: {
-      auto ptr = reinterpret_cast<const tflite::ReducerOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SubOptions: {
-      auto ptr = reinterpret_cast<const tflite::SubOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_DivOptions: {
-      auto ptr = reinterpret_cast<const tflite::DivOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SqueezeOptions: {
-      auto ptr = reinterpret_cast<const tflite::SqueezeOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_SequenceRNNOptions: {
-      auto ptr = reinterpret_cast<const tflite::SequenceRNNOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_StridedSliceOptions: {
-      auto ptr = reinterpret_cast<const tflite::StridedSliceOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_ExpOptions: {
-      auto ptr = reinterpret_cast<const tflite::ExpOptions *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
-    case BuiltinOptions_TopKV2Options: {
-      auto ptr = reinterpret_cast<const tflite::TopKV2Options *>(obj);
-      return verifier.VerifyTable(ptr);
-    }
- case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case 
BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_VarHandleOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReadVariableOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AssignVariableOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RandomOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BucketizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GeluOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DynamicUpdateSliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnsortedSegmentProdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnsortedSegmentMaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnsortedSegmentMinOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnsortedSegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ATan2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SignOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} - -inline bool 
VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyBuiltinOptions( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} - -inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { - (void)resolver; - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return 
ptr->UnPack(resolver); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr 
= reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = 
reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_VarHandleOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReadVariableOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AssignVariableOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RandomOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BucketizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GeluOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DynamicUpdateSliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnsortedSegmentProdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnsortedSegmentMaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case 
BuiltinOptions_UnsortedSegmentMinOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnsortedSegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ATan2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SignOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} - -inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - (void)_rehasher; - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - 
auto ptr = reinterpret_cast(value); - return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - return CreateMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - return CreatePadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - return CreateSubOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - return CreateDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - return CreateCastOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - return CreateNegOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); - } - 
case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - return CreateTileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - return CreatePowOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - return CreatePackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - return CreateFillOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - 
return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(value); - return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - return CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); - return CreateCosOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); - return CreateRankOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); - return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); - return CreateIfOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_WhileOptions: { - auto ptr = 
reinterpret_cast(value); - return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); - return CreateSelectV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); - return CreateDensifyOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); - return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); - return CreateCumsumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(value); - return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); - return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv3DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableFindOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableImportOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableSizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_VarHandleOptions: { - auto ptr = reinterpret_cast(value); - return CreateVarHandleOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReadVariableOptions: { - auto ptr = reinterpret_cast(value); - return CreateReadVariableOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AssignVariableOptions: { - auto ptr = reinterpret_cast(value); - return CreateAssignVariableOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RandomOptions: { - auto ptr = reinterpret_cast(value); - return CreateRandomOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BucketizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateBucketizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GeluOptions: { - auto ptr = reinterpret_cast(value); - return CreateGeluOptions(_fbb, ptr, _rehasher).Union(); - } - case 
BuiltinOptions_DynamicUpdateSliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateDynamicUpdateSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnsortedSegmentProdOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnsortedSegmentProdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnsortedSegmentMaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnsortedSegmentMaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnsortedSegmentMinOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnsortedSegmentMinOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnsortedSegmentSumOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnsortedSegmentSumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ATan2Options: { - auto ptr = reinterpret_cast(value); - return CreateATan2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SignOptions: { - auto ptr = reinterpret_cast(value); - return CreateSignOptions(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} - -inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) : type(u.type), value(nullptr) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - value = new tflite::Conv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - value = new tflite::DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - value = new tflite::ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSHProjectionOptions: { - value = new tflite::LSHProjectionOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Pool2DOptions: { - value = new tflite::Pool2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SVDFOptions: { - value = new tflite::SVDFOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RNNOptions: { - value = new tflite::RNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FullyConnectedOptions: { - value = new tflite::FullyConnectedOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SoftmaxOptions: { - value = new tflite::SoftmaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatenationOptions: { - value = new tflite::ConcatenationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AddOptions: { - value = new tflite::AddOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_L2NormOptions: { - value = new tflite::L2NormOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - value = new tflite::LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSTMOptions: { - value = new tflite::LSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - value = new tflite::ResizeBilinearOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CallOptions: { - value = new tflite::CallOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReshapeOptions: { - value = new tflite::ReshapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SkipGramOptions: { - value = new tflite::SkipGramOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - value 
= new tflite::SpaceToDepthOptionsT(*reinterpret_cast<const tflite::SpaceToDepthOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_EmbeddingLookupSparseOptions: {
-      value = new tflite::EmbeddingLookupSparseOptionsT(*reinterpret_cast<const tflite::EmbeddingLookupSparseOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_MulOptions: {
-      value = new tflite::MulOptionsT(*reinterpret_cast<const tflite::MulOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_PadOptions: {
-      value = new tflite::PadOptionsT(*reinterpret_cast<const tflite::PadOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_GatherOptions: {
-      value = new tflite::GatherOptionsT(*reinterpret_cast<const tflite::GatherOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_BatchToSpaceNDOptions: {
-      value = new tflite::BatchToSpaceNDOptionsT(*reinterpret_cast<const tflite::BatchToSpaceNDOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SpaceToBatchNDOptions: {
-      value = new tflite::SpaceToBatchNDOptionsT(*reinterpret_cast<const tflite::SpaceToBatchNDOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_TransposeOptions: {
-      value = new tflite::TransposeOptionsT(*reinterpret_cast<const tflite::TransposeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ReducerOptions: {
-      value = new tflite::ReducerOptionsT(*reinterpret_cast<const tflite::ReducerOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SubOptions: {
-      value = new tflite::SubOptionsT(*reinterpret_cast<const tflite::SubOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_DivOptions: {
-      value = new tflite::DivOptionsT(*reinterpret_cast<const tflite::DivOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SqueezeOptions: {
-      value = new tflite::SqueezeOptionsT(*reinterpret_cast<const tflite::SqueezeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SequenceRNNOptions: {
-      value = new tflite::SequenceRNNOptionsT(*reinterpret_cast<const tflite::SequenceRNNOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_StridedSliceOptions: {
-      value = new tflite::StridedSliceOptionsT(*reinterpret_cast<const tflite::StridedSliceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ExpOptions: {
-      value = new tflite::ExpOptionsT(*reinterpret_cast<const tflite::ExpOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_TopKV2Options: {
-      value = new tflite::TopKV2OptionsT(*reinterpret_cast<const tflite::TopKV2OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SplitOptions: {
-      value = new tflite::SplitOptionsT(*reinterpret_cast<const tflite::SplitOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LogSoftmaxOptions: {
-      value = new tflite::LogSoftmaxOptionsT(*reinterpret_cast<const tflite::LogSoftmaxOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_CastOptions: {
-      value = new tflite::CastOptionsT(*reinterpret_cast<const tflite::CastOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_DequantizeOptions: {
-      value = new tflite::DequantizeOptionsT(*reinterpret_cast<const tflite::DequantizeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_MaximumMinimumOptions: {
-      value = new tflite::MaximumMinimumOptionsT(*reinterpret_cast<const tflite::MaximumMinimumOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ArgMaxOptions: {
-      value = new tflite::ArgMaxOptionsT(*reinterpret_cast<const tflite::ArgMaxOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LessOptions: {
-      value = new tflite::LessOptionsT(*reinterpret_cast<const tflite::LessOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_NegOptions: {
-      value = new tflite::NegOptionsT(*reinterpret_cast<const tflite::NegOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_PadV2Options: {
-      value = new tflite::PadV2OptionsT(*reinterpret_cast<const tflite::PadV2OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_GreaterOptions: {
-      value = new tflite::GreaterOptionsT(*reinterpret_cast<const tflite::GreaterOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_GreaterEqualOptions: {
-      value = new tflite::GreaterEqualOptionsT(*reinterpret_cast<const tflite::GreaterEqualOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LessEqualOptions: {
-      value = new tflite::LessEqualOptionsT(*reinterpret_cast<const tflite::LessEqualOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SelectOptions: {
-      value = new tflite::SelectOptionsT(*reinterpret_cast<const tflite::SelectOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SliceOptions: {
-      value = new tflite::SliceOptionsT(*reinterpret_cast<const tflite::SliceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_TransposeConvOptions: {
-      value = new tflite::TransposeConvOptionsT(*reinterpret_cast<const tflite::TransposeConvOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SparseToDenseOptions: {
-      value = new tflite::SparseToDenseOptionsT(*reinterpret_cast<const tflite::SparseToDenseOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_TileOptions: {
-      value = new tflite::TileOptionsT(*reinterpret_cast<const tflite::TileOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ExpandDimsOptions: {
-      value = new tflite::ExpandDimsOptionsT(*reinterpret_cast<const tflite::ExpandDimsOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_EqualOptions: {
-      value = new tflite::EqualOptionsT(*reinterpret_cast<const tflite::EqualOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_NotEqualOptions: {
-      value = new tflite::NotEqualOptionsT(*reinterpret_cast<const tflite::NotEqualOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ShapeOptions: {
-      value = new tflite::ShapeOptionsT(*reinterpret_cast<const tflite::ShapeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_PowOptions: {
-      value = new tflite::PowOptionsT(*reinterpret_cast<const tflite::PowOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ArgMinOptions: {
-      value = new tflite::ArgMinOptionsT(*reinterpret_cast<const tflite::ArgMinOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_FakeQuantOptions: {
-      value = new tflite::FakeQuantOptionsT(*reinterpret_cast<const tflite::FakeQuantOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_PackOptions: {
-      value = new tflite::PackOptionsT(*reinterpret_cast<const tflite::PackOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LogicalOrOptions: {
-      value = new tflite::LogicalOrOptionsT(*reinterpret_cast<const tflite::LogicalOrOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_OneHotOptions: {
-      value = new tflite::OneHotOptionsT(*reinterpret_cast<const tflite::OneHotOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LogicalAndOptions: {
-      value = new tflite::LogicalAndOptionsT(*reinterpret_cast<const tflite::LogicalAndOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LogicalNotOptions: {
-      value = new tflite::LogicalNotOptionsT(*reinterpret_cast<const tflite::LogicalNotOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UnpackOptions: {
-      value = new tflite::UnpackOptionsT(*reinterpret_cast<const tflite::UnpackOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_FloorDivOptions: {
-      value = new tflite::FloorDivOptionsT(*reinterpret_cast<const tflite::FloorDivOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SquareOptions: {
-      value = new tflite::SquareOptionsT(*reinterpret_cast<const tflite::SquareOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ZerosLikeOptions: {
-      value = new tflite::ZerosLikeOptionsT(*reinterpret_cast<const tflite::ZerosLikeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_FillOptions: {
-      value = new tflite::FillOptionsT(*reinterpret_cast<const tflite::FillOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_BidirectionalSequenceLSTMOptions: {
-      value = new tflite::BidirectionalSequenceLSTMOptionsT(*reinterpret_cast<const tflite::BidirectionalSequenceLSTMOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_BidirectionalSequenceRNNOptions: {
-      value = new tflite::BidirectionalSequenceRNNOptionsT(*reinterpret_cast<const tflite::BidirectionalSequenceRNNOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UnidirectionalSequenceLSTMOptions: {
-      value = new tflite::UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast<const tflite::UnidirectionalSequenceLSTMOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_FloorModOptions: {
-      value = new tflite::FloorModOptionsT(*reinterpret_cast<const tflite::FloorModOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_RangeOptions: {
-      value = new tflite::RangeOptionsT(*reinterpret_cast<const tflite::RangeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ResizeNearestNeighborOptions: {
-      value = new tflite::ResizeNearestNeighborOptionsT(*reinterpret_cast<const tflite::ResizeNearestNeighborOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_LeakyReluOptions: {
-      value = new tflite::LeakyReluOptionsT(*reinterpret_cast<const tflite::LeakyReluOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SquaredDifferenceOptions: {
-      value = new tflite::SquaredDifferenceOptionsT(*reinterpret_cast<const tflite::SquaredDifferenceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_MirrorPadOptions: {
-      value = new tflite::MirrorPadOptionsT(*reinterpret_cast<const tflite::MirrorPadOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_AbsOptions: {
-      value = new tflite::AbsOptionsT(*reinterpret_cast<const tflite::AbsOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SplitVOptions: {
-      value = new tflite::SplitVOptionsT(*reinterpret_cast<const tflite::SplitVOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UniqueOptions: {
-      value = new tflite::UniqueOptionsT(*reinterpret_cast<const tflite::UniqueOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ReverseV2Options: {
-      value = new tflite::ReverseV2OptionsT(*reinterpret_cast<const tflite::ReverseV2OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_AddNOptions: {
-      value = new tflite::AddNOptionsT(*reinterpret_cast<const tflite::AddNOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_GatherNdOptions: {
-      value = new tflite::GatherNdOptionsT(*reinterpret_cast<const tflite::GatherNdOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_CosOptions: {
-      value = new tflite::CosOptionsT(*reinterpret_cast<const tflite::CosOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_WhereOptions: {
-      value = new tflite::WhereOptionsT(*reinterpret_cast<const tflite::WhereOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_RankOptions: {
-      value = new tflite::RankOptionsT(*reinterpret_cast<const tflite::RankOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ReverseSequenceOptions: {
-      value = new tflite::ReverseSequenceOptionsT(*reinterpret_cast<const tflite::ReverseSequenceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_MatrixDiagOptions: {
-      value = new tflite::MatrixDiagOptionsT(*reinterpret_cast<const tflite::MatrixDiagOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_QuantizeOptions: {
-      value = new tflite::QuantizeOptionsT(*reinterpret_cast<const tflite::QuantizeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_MatrixSetDiagOptions: {
-      value = new tflite::MatrixSetDiagOptionsT(*reinterpret_cast<const tflite::MatrixSetDiagOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_HardSwishOptions: {
-      value = new tflite::HardSwishOptionsT(*reinterpret_cast<const tflite::HardSwishOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_IfOptions: {
-      value = new tflite::IfOptionsT(*reinterpret_cast<const tflite::IfOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_WhileOptions: {
-      value = new tflite::WhileOptionsT(*reinterpret_cast<const tflite::WhileOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_DepthToSpaceOptions: {
-      value = new tflite::DepthToSpaceOptionsT(*reinterpret_cast<const tflite::DepthToSpaceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_NonMaxSuppressionV4Options: {
-      value = new tflite::NonMaxSuppressionV4OptionsT(*reinterpret_cast<const tflite::NonMaxSuppressionV4OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_NonMaxSuppressionV5Options: {
-      value = new tflite::NonMaxSuppressionV5OptionsT(*reinterpret_cast<const tflite::NonMaxSuppressionV5OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ScatterNdOptions: {
-      value = new tflite::ScatterNdOptionsT(*reinterpret_cast<const tflite::ScatterNdOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SelectV2Options: {
-      value = new tflite::SelectV2OptionsT(*reinterpret_cast<const tflite::SelectV2OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_DensifyOptions: {
-      value = new tflite::DensifyOptionsT(*reinterpret_cast<const tflite::DensifyOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SegmentSumOptions: {
-      value = new tflite::SegmentSumOptionsT(*reinterpret_cast<const tflite::SegmentSumOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_BatchMatMulOptions: {
-      value = new tflite::BatchMatMulOptionsT(*reinterpret_cast<const tflite::BatchMatMulOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_CumsumOptions: {
-      value = new tflite::CumsumOptionsT(*reinterpret_cast<const tflite::CumsumOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_CallOnceOptions: {
-      value = new tflite::CallOnceOptionsT(*reinterpret_cast<const tflite::CallOnceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_BroadcastToOptions: {
-      value = new tflite::BroadcastToOptionsT(*reinterpret_cast<const tflite::BroadcastToOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_Rfft2dOptions: {
-      value = new tflite::Rfft2dOptionsT(*reinterpret_cast<const tflite::Rfft2dOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_Conv3DOptions: {
-      value = new tflite::Conv3DOptionsT(*reinterpret_cast<const tflite::Conv3DOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_HashtableOptions: {
-      value = new tflite::HashtableOptionsT(*reinterpret_cast<const tflite::HashtableOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_HashtableFindOptions: {
-      value = new tflite::HashtableFindOptionsT(*reinterpret_cast<const tflite::HashtableFindOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_HashtableImportOptions: {
-      value = new tflite::HashtableImportOptionsT(*reinterpret_cast<const tflite::HashtableImportOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_HashtableSizeOptions: {
-      value = new tflite::HashtableSizeOptionsT(*reinterpret_cast<const tflite::HashtableSizeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_VarHandleOptions: {
-      value = new tflite::VarHandleOptionsT(*reinterpret_cast<const tflite::VarHandleOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ReadVariableOptions: {
-      value = new tflite::ReadVariableOptionsT(*reinterpret_cast<const tflite::ReadVariableOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_AssignVariableOptions: {
-      value = new tflite::AssignVariableOptionsT(*reinterpret_cast<const tflite::AssignVariableOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_RandomOptions: {
-      value = new tflite::RandomOptionsT(*reinterpret_cast<const tflite::RandomOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_BucketizeOptions: {
-      value = new tflite::BucketizeOptionsT(*reinterpret_cast<const tflite::BucketizeOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_GeluOptions: {
-      value = new tflite::GeluOptionsT(*reinterpret_cast<const tflite::GeluOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_DynamicUpdateSliceOptions: {
-      value = new tflite::DynamicUpdateSliceOptionsT(*reinterpret_cast<const tflite::DynamicUpdateSliceOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentProdOptions: {
-      value = new tflite::UnsortedSegmentProdOptionsT(*reinterpret_cast<const tflite::UnsortedSegmentProdOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentMaxOptions: {
-      value = new tflite::UnsortedSegmentMaxOptionsT(*reinterpret_cast<const tflite::UnsortedSegmentMaxOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentMinOptions: {
-      value = new tflite::UnsortedSegmentMinOptionsT(*reinterpret_cast<const tflite::UnsortedSegmentMinOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentSumOptions: {
-      value = new tflite::UnsortedSegmentSumOptionsT(*reinterpret_cast<const tflite::UnsortedSegmentSumOptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_ATan2Options: {
-      value = new tflite::ATan2OptionsT(*reinterpret_cast<const tflite::ATan2OptionsT *>(u.value));
-      break;
-    }
-    case BuiltinOptions_SignOptions: {
-      value = new tflite::SignOptionsT(*reinterpret_cast<const tflite::SignOptionsT *>(u.value));
-      break;
-    }
-    default:
-      break;
-  }
-}
-
-inline void BuiltinOptionsUnion::Reset() {
-  switch (type) {
-    case BuiltinOptions_Conv2DOptions: {
-      auto ptr = reinterpret_cast<tflite::Conv2DOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_DepthwiseConv2DOptions: {
-      auto ptr = reinterpret_cast<tflite::DepthwiseConv2DOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ConcatEmbeddingsOptions: {
-      auto ptr = reinterpret_cast<tflite::ConcatEmbeddingsOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LSHProjectionOptions: {
-      auto ptr = reinterpret_cast<tflite::LSHProjectionOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_Pool2DOptions: {
-      auto ptr = reinterpret_cast<tflite::Pool2DOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SVDFOptions: {
-      auto ptr = reinterpret_cast<tflite::SVDFOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_RNNOptions: {
-      auto ptr = reinterpret_cast<tflite::RNNOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_FullyConnectedOptions: {
-      auto ptr = reinterpret_cast<tflite::FullyConnectedOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SoftmaxOptions: {
-      auto ptr = reinterpret_cast<tflite::SoftmaxOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ConcatenationOptions: {
-      auto ptr = reinterpret_cast<tflite::ConcatenationOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_AddOptions: {
-      auto ptr = reinterpret_cast<tflite::AddOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_L2NormOptions: {
-      auto ptr = reinterpret_cast<tflite::L2NormOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LocalResponseNormalizationOptions: {
-      auto ptr = reinterpret_cast<tflite::LocalResponseNormalizationOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LSTMOptions: {
-      auto ptr = reinterpret_cast<tflite::LSTMOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ResizeBilinearOptions: {
-      auto ptr = reinterpret_cast<tflite::ResizeBilinearOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_CallOptions: {
-      auto ptr = reinterpret_cast<tflite::CallOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ReshapeOptions: {
-      auto ptr = reinterpret_cast<tflite::ReshapeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SkipGramOptions: {
-      auto ptr = reinterpret_cast<tflite::SkipGramOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SpaceToDepthOptions: {
-      auto ptr = reinterpret_cast<tflite::SpaceToDepthOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_EmbeddingLookupSparseOptions: {
-      auto ptr = reinterpret_cast<tflite::EmbeddingLookupSparseOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_MulOptions: {
-      auto ptr = reinterpret_cast<tflite::MulOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_PadOptions: {
-      auto ptr = reinterpret_cast<tflite::PadOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_GatherOptions: {
-      auto ptr = reinterpret_cast<tflite::GatherOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_BatchToSpaceNDOptions: {
-      auto ptr = reinterpret_cast<tflite::BatchToSpaceNDOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SpaceToBatchNDOptions: {
-      auto ptr = reinterpret_cast<tflite::SpaceToBatchNDOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_TransposeOptions: {
-      auto ptr = reinterpret_cast<tflite::TransposeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ReducerOptions: {
-      auto ptr = reinterpret_cast<tflite::ReducerOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SubOptions: {
-      auto ptr = reinterpret_cast<tflite::SubOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_DivOptions: {
-      auto ptr = reinterpret_cast<tflite::DivOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SqueezeOptions: {
-      auto ptr = reinterpret_cast<tflite::SqueezeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SequenceRNNOptions: {
-      auto ptr = reinterpret_cast<tflite::SequenceRNNOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_StridedSliceOptions: {
-      auto ptr = reinterpret_cast<tflite::StridedSliceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ExpOptions: {
-      auto ptr = reinterpret_cast<tflite::ExpOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_TopKV2Options: {
-      auto ptr = reinterpret_cast<tflite::TopKV2OptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SplitOptions: {
-      auto ptr = reinterpret_cast<tflite::SplitOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LogSoftmaxOptions: {
-      auto ptr = reinterpret_cast<tflite::LogSoftmaxOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_CastOptions: {
-      auto ptr = reinterpret_cast<tflite::CastOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_DequantizeOptions: {
-      auto ptr = reinterpret_cast<tflite::DequantizeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_MaximumMinimumOptions: {
-      auto ptr = reinterpret_cast<tflite::MaximumMinimumOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ArgMaxOptions: {
-      auto ptr = reinterpret_cast<tflite::ArgMaxOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LessOptions: {
-      auto ptr = reinterpret_cast<tflite::LessOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_NegOptions: {
-      auto ptr = reinterpret_cast<tflite::NegOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_PadV2Options: {
-      auto ptr = reinterpret_cast<tflite::PadV2OptionsT *>(value);
-      delete ptr;
-      break;
-    }
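// [Editor's note] A minimal sketch (not part of the diff) of the tagged-union
// ownership idiom that every Reset() case above and below instantiates: the
// union stores a type-erased `void *value` plus a `type` tag, so it must cast
// back to the concrete *OptionsT type before calling `delete`, otherwise the
// right destructor would not run. `AnyOptionsSketch` is a hypothetical name.
struct AnyOptionsSketch {
  tflite::BuiltinOptions type = tflite::BuiltinOptions_NONE;
  void *value = nullptr;
  ~AnyOptionsSketch() {
    switch (type) {
      case tflite::BuiltinOptions_MulOptions:
        // The cast restores the static type so the correct destructor runs.
        delete reinterpret_cast<tflite::MulOptionsT *>(value);
        break;
      default:
        break;  // BuiltinOptions_NONE owns nothing.
    }
  }
};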
-    case BuiltinOptions_GreaterOptions: {
-      auto ptr = reinterpret_cast<tflite::GreaterOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_GreaterEqualOptions: {
-      auto ptr = reinterpret_cast<tflite::GreaterEqualOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LessEqualOptions: {
-      auto ptr = reinterpret_cast<tflite::LessEqualOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SelectOptions: {
-      auto ptr = reinterpret_cast<tflite::SelectOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SliceOptions: {
-      auto ptr = reinterpret_cast<tflite::SliceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_TransposeConvOptions: {
-      auto ptr = reinterpret_cast<tflite::TransposeConvOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SparseToDenseOptions: {
-      auto ptr = reinterpret_cast<tflite::SparseToDenseOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_TileOptions: {
-      auto ptr = reinterpret_cast<tflite::TileOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ExpandDimsOptions: {
-      auto ptr = reinterpret_cast<tflite::ExpandDimsOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_EqualOptions: {
-      auto ptr = reinterpret_cast<tflite::EqualOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_NotEqualOptions: {
-      auto ptr = reinterpret_cast<tflite::NotEqualOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ShapeOptions: {
-      auto ptr = reinterpret_cast<tflite::ShapeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_PowOptions: {
-      auto ptr = reinterpret_cast<tflite::PowOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ArgMinOptions: {
-      auto ptr = reinterpret_cast<tflite::ArgMinOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_FakeQuantOptions: {
-      auto ptr = reinterpret_cast<tflite::FakeQuantOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_PackOptions: {
-      auto ptr = reinterpret_cast<tflite::PackOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LogicalOrOptions: {
-      auto ptr = reinterpret_cast<tflite::LogicalOrOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_OneHotOptions: {
-      auto ptr = reinterpret_cast<tflite::OneHotOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LogicalAndOptions: {
-      auto ptr = reinterpret_cast<tflite::LogicalAndOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LogicalNotOptions: {
-      auto ptr = reinterpret_cast<tflite::LogicalNotOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UnpackOptions: {
-      auto ptr = reinterpret_cast<tflite::UnpackOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_FloorDivOptions: {
-      auto ptr = reinterpret_cast<tflite::FloorDivOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SquareOptions: {
-      auto ptr = reinterpret_cast<tflite::SquareOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ZerosLikeOptions: {
-      auto ptr = reinterpret_cast<tflite::ZerosLikeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_FillOptions: {
-      auto ptr = reinterpret_cast<tflite::FillOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_BidirectionalSequenceLSTMOptions: {
-      auto ptr = reinterpret_cast<tflite::BidirectionalSequenceLSTMOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_BidirectionalSequenceRNNOptions: {
-      auto ptr = reinterpret_cast<tflite::BidirectionalSequenceRNNOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UnidirectionalSequenceLSTMOptions: {
-      auto ptr = reinterpret_cast<tflite::UnidirectionalSequenceLSTMOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_FloorModOptions: {
-      auto ptr = reinterpret_cast<tflite::FloorModOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_RangeOptions: {
-      auto ptr = reinterpret_cast<tflite::RangeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ResizeNearestNeighborOptions: {
-      auto ptr = reinterpret_cast<tflite::ResizeNearestNeighborOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_LeakyReluOptions: {
-      auto ptr = reinterpret_cast<tflite::LeakyReluOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SquaredDifferenceOptions: {
-      auto ptr = reinterpret_cast<tflite::SquaredDifferenceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_MirrorPadOptions: {
-      auto ptr = reinterpret_cast<tflite::MirrorPadOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_AbsOptions: {
-      auto ptr = reinterpret_cast<tflite::AbsOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SplitVOptions: {
-      auto ptr = reinterpret_cast<tflite::SplitVOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UniqueOptions: {
-      auto ptr = reinterpret_cast<tflite::UniqueOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ReverseV2Options: {
-      auto ptr = reinterpret_cast<tflite::ReverseV2OptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_AddNOptions: {
-      auto ptr = reinterpret_cast<tflite::AddNOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_GatherNdOptions: {
-      auto ptr = reinterpret_cast<tflite::GatherNdOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_CosOptions: {
-      auto ptr = reinterpret_cast<tflite::CosOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_WhereOptions: {
-      auto ptr = reinterpret_cast<tflite::WhereOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_RankOptions: {
-      auto ptr = reinterpret_cast<tflite::RankOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ReverseSequenceOptions: {
-      auto ptr = reinterpret_cast<tflite::ReverseSequenceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_MatrixDiagOptions: {
-      auto ptr = reinterpret_cast<tflite::MatrixDiagOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_QuantizeOptions: {
-      auto ptr = reinterpret_cast<tflite::QuantizeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_MatrixSetDiagOptions: {
-      auto ptr = reinterpret_cast<tflite::MatrixSetDiagOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_HardSwishOptions: {
-      auto ptr = reinterpret_cast<tflite::HardSwishOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_IfOptions: {
-      auto ptr = reinterpret_cast<tflite::IfOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_WhileOptions: {
-      auto ptr = reinterpret_cast<tflite::WhileOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_DepthToSpaceOptions: {
-      auto ptr = reinterpret_cast<tflite::DepthToSpaceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_NonMaxSuppressionV4Options: {
-      auto ptr = reinterpret_cast<tflite::NonMaxSuppressionV4OptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_NonMaxSuppressionV5Options: {
-      auto ptr = reinterpret_cast<tflite::NonMaxSuppressionV5OptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ScatterNdOptions: {
-      auto ptr = reinterpret_cast<tflite::ScatterNdOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SelectV2Options: {
-      auto ptr = reinterpret_cast<tflite::SelectV2OptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_DensifyOptions: {
-      auto ptr = reinterpret_cast<tflite::DensifyOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SegmentSumOptions: {
-      auto ptr = reinterpret_cast<tflite::SegmentSumOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_BatchMatMulOptions: {
-      auto ptr = reinterpret_cast<tflite::BatchMatMulOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_CumsumOptions: {
-      auto ptr = reinterpret_cast<tflite::CumsumOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_CallOnceOptions: {
-      auto ptr = reinterpret_cast<tflite::CallOnceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_BroadcastToOptions: {
-      auto ptr = reinterpret_cast<tflite::BroadcastToOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_Rfft2dOptions: {
-      auto ptr = reinterpret_cast<tflite::Rfft2dOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_Conv3DOptions: {
-      auto ptr = reinterpret_cast<tflite::Conv3DOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_HashtableOptions: {
-      auto ptr = reinterpret_cast<tflite::HashtableOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_HashtableFindOptions: {
-      auto ptr = reinterpret_cast<tflite::HashtableFindOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_HashtableImportOptions: {
-      auto ptr = reinterpret_cast<tflite::HashtableImportOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_HashtableSizeOptions: {
-      auto ptr = reinterpret_cast<tflite::HashtableSizeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_VarHandleOptions: {
-      auto ptr = reinterpret_cast<tflite::VarHandleOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ReadVariableOptions: {
-      auto ptr = reinterpret_cast<tflite::ReadVariableOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_AssignVariableOptions: {
-      auto ptr = reinterpret_cast<tflite::AssignVariableOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_RandomOptions: {
-      auto ptr = reinterpret_cast<tflite::RandomOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_BucketizeOptions: {
-      auto ptr = reinterpret_cast<tflite::BucketizeOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_GeluOptions: {
-      auto ptr = reinterpret_cast<tflite::GeluOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_DynamicUpdateSliceOptions: {
-      auto ptr = reinterpret_cast<tflite::DynamicUpdateSliceOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentProdOptions: {
-      auto ptr = reinterpret_cast<tflite::UnsortedSegmentProdOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentMaxOptions: {
-      auto ptr = reinterpret_cast<tflite::UnsortedSegmentMaxOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentMinOptions: {
-      auto ptr = reinterpret_cast<tflite::UnsortedSegmentMinOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_UnsortedSegmentSumOptions: {
-      auto ptr = reinterpret_cast<tflite::UnsortedSegmentSumOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_ATan2Options: {
-      auto ptr = reinterpret_cast<tflite::ATan2OptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    case BuiltinOptions_SignOptions: {
-      auto ptr = reinterpret_cast<tflite::SignOptionsT *>(value);
-      delete ptr;
-      break;
-    }
-    default: break;
-  }
-  value = nullptr;
-  type = BuiltinOptions_NONE;
-}
-
-inline const tflite::Model *GetModel(const void *buf) {
-  return flatbuffers::GetRoot<tflite::Model>(buf);
-}
-
-inline const tflite::Model *GetSizePrefixedModel(const void *buf) {
-  return flatbuffers::GetSizePrefixedRoot<tflite::Model>(buf);
-}
-
-inline const char *ModelIdentifier() {
-  return "TFL3";
-}
-
-inline bool ModelBufferHasIdentifier(const void *buf) {
-  return flatbuffers::BufferHasIdentifier(
-      buf, ModelIdentifier());
-}
-
-inline bool SizePrefixedModelBufferHasIdentifier(const void *buf) {
-  return flatbuffers::BufferHasIdentifier(
-      buf, ModelIdentifier(), true);
-}
-
-inline bool VerifyModelBuffer(
-    flatbuffers::Verifier &verifier) {
-  return verifier.VerifyBuffer<tflite::Model>(ModelIdentifier());
-}
-
-inline bool VerifySizePrefixedModelBuffer(
-    flatbuffers::Verifier &verifier) {
-  return verifier.VerifySizePrefixedBuffer<tflite::Model>(ModelIdentifier());
-}
-
-inline const char *ModelExtension() {
-  return "tflite";
-}
-
-inline void FinishModelBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<tflite::Model> root) {
-  fbb.Finish(root, ModelIdentifier());
-}
-
-inline void FinishSizePrefixedModelBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<tflite::Model> root) {
-  fbb.FinishSizePrefixed(root, ModelIdentifier());
-}
-
-inline std::unique_ptr<tflite::ModelT> UnPackModel(
-    const void *buf,
-    const flatbuffers::resolver_function_t *res = nullptr) {
-  return std::unique_ptr<tflite::ModelT>(GetModel(buf)->UnPack(res));
-}
-
-inline std::unique_ptr<tflite::ModelT> UnPackSizePrefixedModel(
-    const void *buf,
-    const flatbuffers::resolver_function_t *res = nullptr) {
-  return std::unique_ptr<tflite::ModelT>(GetSizePrefixedModel(buf)->UnPack(res));
-}
-
-} // namespace tflite
-
-#endif // FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_
diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/base.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/base.h
deleted file mode 100644
index 5f2158e0..00000000
--- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/base.h
+++ /dev/null
@@ -1,496 +0,0 @@
-#ifndef FLATBUFFERS_BASE_H_
-#define FLATBUFFERS_BASE_H_
-
-// For TFLM, we always want FLATBUFFERS_LOCALE_INDEPENDENT to be defined as 0.
-// We could achieve this by adding -DFLATBUFFERS_LOCALE_INDEPENDENT=0 to the
-// TFLM Makefile. However, for (at least) the Arduino, adding additional build
-// flags during the compilation can be a bit awkward. As such, we have instead
-// made a decision to change the default to be FLATBUFFERS_LOCALE_INDEPENDENT=0
-// for TFLM to make it easier for external IDE integration.
-#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
-#define FLATBUFFERS_LOCALE_INDEPENDENT 0
-#endif
-
-// clang-format off
-
-// If activate should be declared and included first.
-#if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \
-    defined(_MSC_VER) && defined(_DEBUG)
-  // The _CRTDBG_MAP_ALLOC inside <crtdbg.h> will replace
-  // calloc/free (etc) to its debug version using #define directives.
-  #define _CRTDBG_MAP_ALLOC
-  #include <stdlib.h>
-  #include <crtdbg.h>
-  // Replace operator new by trace-enabled version.
-  #define DEBUG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__)
-  #define new DEBUG_NEW
-#endif
-
-#if !defined(FLATBUFFERS_ASSERT)
-#include <assert.h>
-#define FLATBUFFERS_ASSERT assert
-#elif defined(FLATBUFFERS_ASSERT_INCLUDE)
-// Include file with forward declaration
-#include FLATBUFFERS_ASSERT_INCLUDE
-#endif
-
-#ifndef ARDUINO
-#include <cstdint>
-#endif
-
-#include <cstddef>
-#include <cstdlib>
-#include <cstring>
-
-#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H)
-  #include <utility.h>
-#else
-  #include <utility>
-#endif
-
-#include <string>
-#include <type_traits>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-#include <memory>
-
-#if defined(__unix__) && !defined(FLATBUFFERS_LOCALE_INDEPENDENT)
-  #include <unistd.h>
-#endif
-
-#ifdef __ANDROID__
-  #include <android/api-level.h>
-#endif
-
-#if defined(__ICCARM__)
-#include <intrinsics.h>
-#endif
-
-// Note the __clang__ check is needed, because clang presents itself
-// as an older GNUC compiler (4.2).
-// Clang 3.3 and later implement all of the ISO C++ 2011 standard.
-// Clang 3.4 and later implement all of the ISO C++ 2014 standard.
-// http://clang.llvm.org/cxx_status.html
-
-// Note the MSVC value '__cplusplus' may be incorrect:
-// The '__cplusplus' predefined macro in the MSVC stuck at the value 199711L,
-// indicating (erroneously!) that the compiler conformed to the C++98 Standard.
-// This value should be correct starting from MSVC2017-15.7-Preview-3.
-// The '__cplusplus' will be valid only if MSVC2017-15.7-P3 and the `/Zc:__cplusplus` switch is set.
-// Workaround (for details see MSDN):
-// Use the _MSC_VER and _MSVC_LANG definition instead of the __cplusplus for compatibility.
-// The _MSVC_LANG macro reports the Standard version regardless of the '/Zc:__cplusplus' switch.
-
-#if defined(__GNUC__) && !defined(__clang__)
-  #define FLATBUFFERS_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#else
-  #define FLATBUFFERS_GCC 0
-#endif
-
-#if defined(__clang__)
-  #define FLATBUFFERS_CLANG (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
-#else
-  #define FLATBUFFERS_CLANG 0
-#endif
-
-/// @cond FLATBUFFERS_INTERNAL
-#if __cplusplus <= 199711L && \
-    (!defined(_MSC_VER) || _MSC_VER < 1600) && \
-    (!defined(__GNUC__) || \
-      (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400))
-  #error A C++11 compatible compiler with support for the auto typing is \
-         required for FlatBuffers.
-  #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__
-#endif
-
-#if !defined(__clang__) && \
-    defined(__GNUC__) && \
-    (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40600)
-  // Backwards compatibility for g++ 4.4, and 4.5 which don't have the nullptr
-  // and constexpr keywords. Note the __clang__ check is needed, because clang
-  // presents itself as an older GNUC compiler.
-  #ifndef nullptr_t
-    const class nullptr_t {
-    public:
-      template<class T> inline operator T*() const { return 0; }
-    private:
-      void operator&() const;
-    } nullptr = {};
-  #endif
-  #ifndef constexpr
-    #define constexpr const
-  #endif
-#endif
-
-// The wire format uses a little endian encoding (since that's efficient for
-// the common platforms).
-#if defined(__s390x__)
-  #define FLATBUFFERS_LITTLEENDIAN 0
-#endif // __s390x__
-#if !defined(FLATBUFFERS_LITTLEENDIAN)
-  #if defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
-    #if (defined(__BIG_ENDIAN__) || \
-         (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
-      #define FLATBUFFERS_LITTLEENDIAN 0
-    #else
-      #define FLATBUFFERS_LITTLEENDIAN 1
-    #endif // __BIG_ENDIAN__
-  #elif defined(_MSC_VER)
-    #if defined(_M_PPC)
-      #define FLATBUFFERS_LITTLEENDIAN 0
-    #else
-      #define FLATBUFFERS_LITTLEENDIAN 1
-    #endif
-  #else
-    #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN.
-  #endif
-#endif // !defined(FLATBUFFERS_LITTLEENDIAN)
-
-#define FLATBUFFERS_VERSION_MAJOR 2
-#define FLATBUFFERS_VERSION_MINOR 0
-#define FLATBUFFERS_VERSION_REVISION 6
-#define FLATBUFFERS_STRING_EXPAND(X) #X
-#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X)
-namespace flatbuffers {
-  // Returns version as string "MAJOR.MINOR.REVISION".
-  const char* FLATBUFFERS_VERSION();
-}
-
-#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \
-    (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407)) || \
-    defined(__clang__)
-  #define FLATBUFFERS_FINAL_CLASS final
-  #define FLATBUFFERS_OVERRIDE override
-  #define FLATBUFFERS_EXPLICIT_CPP11 explicit
-  #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t
-#else
-  #define FLATBUFFERS_FINAL_CLASS
-  #define FLATBUFFERS_OVERRIDE
-  #define FLATBUFFERS_EXPLICIT_CPP11
-  #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE
-#endif
-
-#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
-    (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
-    (defined(__cpp_constexpr) && __cpp_constexpr >= 200704)
-  #define FLATBUFFERS_CONSTEXPR constexpr
-  #define FLATBUFFERS_CONSTEXPR_CPP11 constexpr
-  #define FLATBUFFERS_CONSTEXPR_DEFINED
-#else
-  #define FLATBUFFERS_CONSTEXPR const
-  #define FLATBUFFERS_CONSTEXPR_CPP11
-#endif
-
-#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
-    (defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
-  #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR_CPP11
-#else
-  #define FLATBUFFERS_CONSTEXPR_CPP14
-#endif
-
-#if (defined(__GXX_EXPERIMENTAL_CXX0X__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
-    (defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023026)) || \
-    defined(__clang__)
-  #define FLATBUFFERS_NOEXCEPT noexcept
-#else
-  #define FLATBUFFERS_NOEXCEPT
-#endif
-
-// NOTE: the FLATBUFFERS_DELETE_FUNC macro may change the access mode to
-// private, so be sure to put it at the end or reset access mode explicitly.
-#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \
-    (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \
-    defined(__clang__)
-  #define FLATBUFFERS_DELETE_FUNC(func) func = delete
-#else
-  #define FLATBUFFERS_DELETE_FUNC(func) private: func
-#endif
-
-#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \
-    (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
-    defined(__clang__)
-  #define FLATBUFFERS_DEFAULT_DECLARATION
-#endif
-
-// Check if we can use template aliases
-// Not possible if Microsoft Compiler before 2012
-// Possible is the language feature __cpp_alias_templates is defined well
-// Or possible if the C++ std is C+11 or newer
-#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
-    || (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
-    || (defined(__cplusplus) && __cplusplus >= 201103L)
-  #define FLATBUFFERS_TEMPLATES_ALIASES
-#endif
-
-#ifndef FLATBUFFERS_HAS_STRING_VIEW
-  // Only provide flatbuffers::string_view if __has_include can be used
-  // to detect a header that provides an implementation
-  #if defined(__has_include)
-    // Check for std::string_view (in c++17)
-    #if __has_include(<string_view>) && (__cplusplus >= 201606 || (defined(_HAS_CXX17) && _HAS_CXX17))
-      #include <string_view>
-      namespace flatbuffers {
-        typedef std::string_view string_view;
-      }
-      #define FLATBUFFERS_HAS_STRING_VIEW 1
-    // Check for std::experimental::string_view (in c++14, compiler-dependent)
-    #elif __has_include(<experimental/string_view>) && (__cplusplus >= 201411)
-      #include <experimental/string_view>
-      namespace flatbuffers {
-        typedef std::experimental::string_view string_view;
-      }
-      #define FLATBUFFERS_HAS_STRING_VIEW 1
-    // Check for absl::string_view
-    #elif __has_include("absl/strings/string_view.h")
-      #include "absl/strings/string_view.h"
-      namespace flatbuffers {
-        typedef absl::string_view string_view;
-      }
-      #define FLATBUFFERS_HAS_STRING_VIEW 1
-    #endif
-  #endif // __has_include
-#endif // !FLATBUFFERS_HAS_STRING_VIEW
-
-#ifndef FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
-  // Allow heap allocations to be used
-  #define FLATBUFFERS_GENERAL_HEAP_ALLOC_OK 1
-#endif // !FLATBUFFERS_GENERAL_HEAP_ALLOC_OK
-
-#ifndef FLATBUFFERS_HAS_NEW_STRTOD
-  // Modern (C++11) strtod and strtof functions are available for use.
-  // 1) nan/inf strings as argument of strtod;
-  // 2) hex-float as argument of strtod/strtof.
-  #if (defined(_MSC_VER) && _MSC_VER >= 1900) || \
-      (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \
-      (defined(__clang__))
-    #define FLATBUFFERS_HAS_NEW_STRTOD 1
-  #endif
-#endif // !FLATBUFFERS_HAS_NEW_STRTOD
-
-#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
-  // Enable locale independent functions {strtof_l, strtod_l,strtoll_l,
-  // strtoull_l}.
-  #if (defined(_MSC_VER) && _MSC_VER >= 1800) || \
-      (defined(__ANDROID_API__) && __ANDROID_API__>= 21) || \
-      (defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)) && \
-      (!defined(__Fuchsia__) && !defined(__ANDROID_API__))
-    #define FLATBUFFERS_LOCALE_INDEPENDENT 1
-  #else
-    #define FLATBUFFERS_LOCALE_INDEPENDENT 0
-  #endif
-#endif // !FLATBUFFERS_LOCALE_INDEPENDENT
-
-// Suppress Undefined Behavior Sanitizer (recoverable only). Usage:
-// - __supress_ubsan__("undefined")
-// - __supress_ubsan__("signed-integer-overflow")
-#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7))
-  #define __supress_ubsan__(type) __attribute__((no_sanitize(type)))
-#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)
-  #define __supress_ubsan__(type) __attribute__((no_sanitize_undefined))
-#else
-  #define __supress_ubsan__(type)
-#endif
-
-// This is constexpr function used for checking compile-time constants.
-// Avoid `#pragma warning(disable: 4127) // C4127: expression is constant`.
-template<typename T> FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) {
-  return !!t;
-}
-
-// Enable C++ attribute [[]] if std:c++17 or higher.
-#if ((__cplusplus >= 201703L) \
-    || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
-  // All attributes unknown to an implementation are ignored without causing an error.
-  #define FLATBUFFERS_ATTRIBUTE(attr) attr
-
-  #define FLATBUFFERS_FALLTHROUGH() [[fallthrough]]
-#else
-  #define FLATBUFFERS_ATTRIBUTE(attr)
-
-  #if FLATBUFFERS_CLANG >= 30800
-    #define FLATBUFFERS_FALLTHROUGH() [[clang::fallthrough]]
-  #elif FLATBUFFERS_GCC >= 70300
-    #define FLATBUFFERS_FALLTHROUGH() [[gnu::fallthrough]]
-  #else
-    #define FLATBUFFERS_FALLTHROUGH()
-  #endif
-#endif
-
-/// @endcond
-
-/// @file
-namespace flatbuffers {
-
-/// @cond FLATBUFFERS_INTERNAL
-// Our default offset / size type, 32bit on purpose on 64bit systems.
-// Also, using a consistent offset type maintains compatibility of serialized
-// offset values between 32bit and 64bit systems.
-typedef uint32_t uoffset_t;
-
-// Signed offsets for references that can go in both directions.
-typedef int32_t soffset_t;
-
-// Offset/index used in v-tables, can be changed to uint8_t in
-// format forks to save a bit of space if desired.
-typedef uint16_t voffset_t;
-
-typedef uintmax_t largest_scalar_t;
-
-// In 32bits, this evaluates to 2GB - 1
-#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1)
-
-// The minimum size buffer that can be a valid flatbuffer.
-// Includes the offset to the root table (uoffset_t), the offset to the vtable
-// of the root table (soffset_t), the size of the vtable (uint16_t), and the
-// size of the referring table (uint16_t).
-#define FLATBUFFERS_MIN_BUFFER_SIZE sizeof(uoffset_t) + sizeof(soffset_t) + \
-  sizeof(uint16_t) + sizeof(uint16_t)
-
-// We support aligning the contents of buffers up to this size.
-#ifndef FLATBUFFERS_MAX_ALIGNMENT
-  #define FLATBUFFERS_MAX_ALIGNMENT 32
-#endif
-
-/// @brief The length of a FlatBuffer file header.
-static const size_t kFileIdentifierLength = 4;
-
-inline bool VerifyAlignmentRequirements(size_t align, size_t min_align = 1) {
-  return (min_align <= align) && (align <= (FLATBUFFERS_MAX_ALIGNMENT)) &&
-         (align & (align - 1)) == 0;  // must be power of 2
-}
-
-#if defined(_MSC_VER)
-  #pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized
-  #pragma warning(push)
-  #pragma warning(disable: 4127) // C4127: conditional expression is constant
-#endif
-
-template<typename T> T EndianSwap(T t) {
-  #if defined(_MSC_VER)
-    #define FLATBUFFERS_BYTESWAP16 _byteswap_ushort
-    #define FLATBUFFERS_BYTESWAP32 _byteswap_ulong
-    #define FLATBUFFERS_BYTESWAP64 _byteswap_uint64
-  #elif defined(__ICCARM__)
-    #define FLATBUFFERS_BYTESWAP16 __REV16
-    #define FLATBUFFERS_BYTESWAP32 __REV
-    #define FLATBUFFERS_BYTESWAP64(x) \
-        ((__REV(static_cast<uint32_t>(x >> 32U))) | (static_cast<uint64_t>(__REV(static_cast<uint32_t>(x)))) << 32U)
-  #else
-    #if defined(__GNUC__) && __GNUC__ * 100 + __GNUC_MINOR__ < 408 && !defined(__clang__)
-      // __builtin_bswap16 was missing prior to GCC 4.8.
-      #define FLATBUFFERS_BYTESWAP16(x) \
-        static_cast<uint16_t>(__builtin_bswap32(static_cast<uint32_t>(x) << 16))
-    #else
-      #define FLATBUFFERS_BYTESWAP16 __builtin_bswap16
-    #endif
-    #define FLATBUFFERS_BYTESWAP32 __builtin_bswap32
-    #define FLATBUFFERS_BYTESWAP64 __builtin_bswap64
-  #endif
-  if (sizeof(T) == 1) {   // Compile-time if-then's.
-    return t;
-  } else if (sizeof(T) == 2) {
-    union { T t; uint16_t i; } u = { t };
-    u.i = FLATBUFFERS_BYTESWAP16(u.i);
-    return u.t;
-  } else if (sizeof(T) == 4) {
-    union { T t; uint32_t i; } u = { t };
-    u.i = FLATBUFFERS_BYTESWAP32(u.i);
-    return u.t;
-  } else if (sizeof(T) == 8) {
-    union { T t; uint64_t i; } u = { t };
-    u.i = FLATBUFFERS_BYTESWAP64(u.i);
-    return u.t;
-  } else {
-    FLATBUFFERS_ASSERT(0);
-    return t;
-  }
-}
-
-#if defined(_MSC_VER)
-  #pragma warning(pop)
-#endif
-
-
-template<typename T> T EndianScalar(T t) {
-  #if FLATBUFFERS_LITTLEENDIAN
-    return t;
-  #else
-    return EndianSwap(t);
-  #endif
-}
-
-template<typename T>
-// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
-__supress_ubsan__("alignment")
-T ReadScalar(const void *p) {
-  return EndianScalar(*reinterpret_cast<const T *>(p));
-}
-
-// See https://github.com/google/flatbuffers/issues/5950
-
-#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
-  #pragma GCC diagnostic push
-  #pragma GCC diagnostic ignored "-Wstringop-overflow"
-#endif
-
-template<typename T>
-// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
-__supress_ubsan__("alignment")
-void WriteScalar(void *p, T t) {
-  *reinterpret_cast<T *>(p) = EndianScalar(t);
-}
-
-template<typename T> struct Offset;
-template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Offset<T> t) {
-  *reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
-}
-
-#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
-  #pragma GCC diagnostic pop
-#endif
-
-// Computes how many bytes you'd have to pad to be able to write an
-// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
-// memory).
-__supress_ubsan__("unsigned-integer-overflow")
-inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
-  return ((~buf_size) + 1) & (scalar_size - 1);
-}
-
-// Generic 'operator==' with conditional specialisations.
-// T e - new value of a scalar field.
-// T def - default of scalar (is known at compile-time).
-template<typename T> inline bool IsTheSameAs(T e, T def) { return e == def; }
-
-#if defined(FLATBUFFERS_NAN_DEFAULTS) && \
-    defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
-// Like `operator==(e, def)` with weak NaN if T=(float|double).
-template<typename T> inline bool IsFloatTheSameAs(T e, T def) {
-  return (e == def) || ((def != def) && (e != e));
-}
-template<> inline bool IsTheSameAs<float>(float e, float def) {
-  return IsFloatTheSameAs(e, def);
-}
-template<> inline bool IsTheSameAs<double>(double e, double def) {
-  return IsFloatTheSameAs(e, def);
-}
-#endif
-
-// Check 'v' is out of closed range [low; high].
-// Workaround for GCC warning [-Werror=type-limits]:
-// comparison is always true due to limited range of data type.
-template<typename T>
-inline bool IsOutRange(const T &v, const T &low, const T &high) {
-  return (v < low) || (high < v);
-}
-
-// Check 'v' is in closed range [low; high].
-template<typename T>
-inline bool IsInRange(const T &v, const T &low, const T &high) {
-  return !IsOutRange(v, low, high);
-}
-
-} // namespace flatbuffers
-#endif // FLATBUFFERS_BASE_H_
diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h
deleted file mode 100644
index aa02f50d..00000000
--- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flatbuffer_builder.h
+++ /dev/null
@@ -1,1214 +0,0 @@
-/*
- * Copyright 2021 Google Inc. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLATBUFFERS_FLATBUFFER_BUILDER_H_
-#define FLATBUFFERS_FLATBUFFER_BUILDER_H_
-
-#include <functional>
-#include <initializer_list>
-
-#include "flatbuffers/allocator.h"
-#include "flatbuffers/array.h"
-#include "flatbuffers/base.h"
-#include "flatbuffers/buffer_ref.h"
-#include "flatbuffers/default_allocator.h"
-#include "flatbuffers/detached_buffer.h"
-#include "flatbuffers/stl_emulation.h"
-#include "flatbuffers/string.h"
-#include "flatbuffers/struct.h"
-#include "flatbuffers/table.h"
-#include "flatbuffers/vector.h"
-#include "flatbuffers/vector_downward.h"
-#include "flatbuffers/verifier.h"
-
-namespace flatbuffers {
-
-// Converts a Field ID to a virtual table offset.
-inline voffset_t FieldIndexToOffset(voffset_t field_id) {
-  // Should correspond to what EndTable() below builds up.
-  const int fixed_fields = 2;  // Vtable size and Object Size.
-  return static_cast<voffset_t>((field_id + fixed_fields) * sizeof(voffset_t));
-}
-
-template<typename T, typename Alloc = std::allocator<T>>
-const T *data(const std::vector<T, Alloc> &v) {
-  // Eventually the returned pointer gets passed down to memcpy, so
-  // we need it to be non-null to avoid undefined behavior.
-  static uint8_t t;
-  return v.empty() ? reinterpret_cast<const T *>(&t) : &v.front();
-}
-template<typename T, typename Alloc = std::allocator<T>>
-T *data(std::vector<T, Alloc> &v) {
-  // Eventually the returned pointer gets passed down to memcpy, so
-  // we need it to be non-null to avoid undefined behavior.
-  static uint8_t t;
-  return v.empty() ? reinterpret_cast<T *>(&t) : &v.front();
-}
-
-/// @addtogroup flatbuffers_cpp_api
-/// @{
-/// @class FlatBufferBuilder
-/// @brief Helper class to hold data needed in creation of a FlatBuffer.
-/// To serialize data, you typically call one of the `Create*()` functions in
-/// the generated code, which in turn call a sequence of `StartTable`/
-/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/
-/// `CreateVector` functions. Do this is depth-first order to build up a tree to
-/// the root. `Finish()` wraps up the buffer ready for transport.
-class FlatBufferBuilder {
- public:
-  /// @brief Default constructor for FlatBufferBuilder.
-  /// @param[in] initial_size The initial size of the buffer, in bytes. Defaults
-  /// to `1024`.
-  /// @param[in] allocator An `Allocator` to use. If null will use
-  /// `DefaultAllocator`.
-  /// @param[in] own_allocator Whether the builder/vector should own the
-  /// allocator. Defaults to / `false`.
-  /// @param[in] buffer_minalign Force the buffer to be aligned to the given
-  /// minimum alignment upon reallocation. Only needed if you intend to store
-  /// types with custom alignment AND you wish to read the buffer in-place
-  /// directly after creation.
-  explicit FlatBufferBuilder(
-      size_t initial_size = 1024, Allocator *allocator = nullptr,
-      bool own_allocator = false,
-      size_t buffer_minalign = AlignOf<largest_scalar_t>())
-      : buf_(initial_size, allocator, own_allocator, buffer_minalign),
-        num_field_loc(0),
-        max_voffset_(0),
-        nested(false),
-        finished(false),
-        minalign_(1),
-        force_defaults_(false),
-        dedup_vtables_(true),
-        string_pool(nullptr) {
-    EndianCheck();
-  }
-
-  /// @brief Move constructor for FlatBufferBuilder.
-  FlatBufferBuilder(FlatBufferBuilder &&other)
-      : buf_(1024, nullptr, false, AlignOf<largest_scalar_t>()),
-        num_field_loc(0),
-        max_voffset_(0),
-        nested(false),
-        finished(false),
-        minalign_(1),
-        force_defaults_(false),
-        dedup_vtables_(true),
-        string_pool(nullptr) {
-    EndianCheck();
-    // Default construct and swap idiom.
-    // Lack of delegating constructors in vs2010 makes it more verbose than
-    // needed.
-    Swap(other);
-  }
-
-  /// @brief Move assignment operator for FlatBufferBuilder.
-  FlatBufferBuilder &operator=(FlatBufferBuilder &&other) {
-    // Move construct a temporary and swap idiom
-    FlatBufferBuilder temp(std::move(other));
-    Swap(temp);
-    return *this;
-  }
-
-  void Swap(FlatBufferBuilder &other) {
-    using std::swap;
-    buf_.swap(other.buf_);
-    swap(num_field_loc, other.num_field_loc);
-    swap(max_voffset_, other.max_voffset_);
-    swap(nested, other.nested);
-    swap(finished, other.finished);
-    swap(minalign_, other.minalign_);
-    swap(force_defaults_, other.force_defaults_);
-    swap(dedup_vtables_, other.dedup_vtables_);
-    swap(string_pool, other.string_pool);
-  }
-
-  ~FlatBufferBuilder() {
-    if (string_pool) delete string_pool;
-  }
-
-  void Reset() {
-    Clear();       // clear builder state
-    buf_.reset();  // deallocate buffer
-  }
-
-  /// @brief Reset all the state in this FlatBufferBuilder so it can be reused
-  /// to construct another buffer.
-  void Clear() {
-    ClearOffsets();
-    buf_.clear();
-    nested = false;
-    finished = false;
-    minalign_ = 1;
-    if (string_pool) string_pool->clear();
-  }
-
-  /// @brief The current size of the serialized buffer, counting from the end.
-  /// @return Returns an `uoffset_t` with the current size of the buffer.
-  uoffset_t GetSize() const { return buf_.size(); }
-
-  /// @brief Get the serialized buffer (after you call `Finish()`).
-  /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the
-  /// buffer.
-  uint8_t *GetBufferPointer() const {
-    Finished();
-    return buf_.data();
-  }
-
-  /// @brief Get the serialized buffer (after you call `Finish()`) as a span.
-  /// @return Returns a constructed flatbuffers::span that is a view over the
-  /// FlatBuffer data inside the buffer.
-  flatbuffers::span<uint8_t> GetBufferSpan() const {
-    Finished();
-    return flatbuffers::span<uint8_t>(buf_.data(), buf_.size());
-  }
-
-  /// @brief Get a pointer to an unfinished buffer.
-  /// @return Returns a `uint8_t` pointer to the unfinished buffer.
-  uint8_t *GetCurrentBufferPointer() const { return buf_.data(); }
-
-  /// @brief Get the released pointer to the serialized buffer.
-  /// @warning Do NOT attempt to use this FlatBufferBuilder afterwards!
-  /// @return A `FlatBuffer` that owns the buffer and its allocator and
-  /// behaves similar to a `unique_ptr` with a deleter.
-  FLATBUFFERS_ATTRIBUTE([[deprecated("use Release() instead")]])
-  DetachedBuffer ReleaseBufferPointer() {
-    Finished();
-    return buf_.release();
-  }
-
-  /// @brief Get the released DetachedBuffer.
-  /// @return A `DetachedBuffer` that owns the buffer and its allocator.
-  DetachedBuffer Release() {
-    Finished();
-    return buf_.release();
-  }
-
-  /// @brief Get the released pointer to the serialized buffer.
-  /// @param size The size of the memory block containing
-  /// the serialized `FlatBuffer`.
-  /// @param offset The offset from the released pointer where the finished
-  /// `FlatBuffer` starts.
-  /// @return A raw pointer to the start of the memory block containing
-  /// the serialized `FlatBuffer`.
-  /// @remark If the allocator is owned, it gets deleted when the destructor is
-  /// called..
-  uint8_t *ReleaseRaw(size_t &size, size_t &offset) {
-    Finished();
-    return buf_.release_raw(size, offset);
-  }
-
-  /// @brief get the minimum alignment this buffer needs to be accessed
-  /// properly. This is only known once all elements have been written (after
-  /// you call Finish()). You can use this information if you need to embed
-  /// a FlatBuffer in some other buffer, such that you can later read it
-  /// without first having to copy it into its own buffer.
-  size_t GetBufferMinAlignment() const {
-    Finished();
-    return minalign_;
-  }
-
-  /// @cond FLATBUFFERS_INTERNAL
-  void Finished() const {
-    // If you get this assert, you're attempting to get access a buffer
-    // which hasn't been finished yet. Be sure to call
-    // FlatBufferBuilder::Finish with your root table.
-    // If you really need to access an unfinished buffer, call
-    // GetCurrentBufferPointer instead.
-    FLATBUFFERS_ASSERT(finished);
-  }
-  /// @endcond
-
-  /// @brief In order to save space, fields that are set to their default value
-  /// don't get serialized into the buffer.
-  /// @param[in] fd When set to `true`, always serializes default values that
-  /// are set. Optional fields which are not set explicitly, will still not be
-  /// serialized.
-  void ForceDefaults(bool fd) { force_defaults_ = fd; }
-
-  /// @brief By default vtables are deduped in order to save space.
-  /// @param[in] dedup When set to `true`, dedup vtables.
-  void DedupVtables(bool dedup) { dedup_vtables_ = dedup; }
-
-  /// @cond FLATBUFFERS_INTERNAL
-  void Pad(size_t num_bytes) { buf_.fill(num_bytes); }
-
-  void TrackMinAlign(size_t elem_size) {
-    if (elem_size > minalign_) minalign_ = elem_size;
-  }
-
-  void Align(size_t elem_size) {
-    TrackMinAlign(elem_size);
-    buf_.fill(PaddingBytes(buf_.size(), elem_size));
-  }
-
-  void PushFlatBuffer(const uint8_t *bytes, size_t size) {
-    PushBytes(bytes, size);
-    finished = true;
-  }
-
-  void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); }
-
-  void PopBytes(size_t amount) { buf_.pop(amount); }
-
-  template<typename T> void AssertScalarT() {
-    // The code assumes power of 2 sizes and endian-swap-ability.
-    static_assert(flatbuffers::is_scalar<T>::value, "T must be a scalar type");
-  }
-
-  // Write a single aligned scalar to the buffer
-  template<typename T> uoffset_t PushElement(T element) {
-    AssertScalarT<T>();
-    Align(sizeof(T));
-    buf_.push_small(EndianScalar(element));
-    return GetSize();
-  }
-
-  template<typename T> uoffset_t PushElement(Offset<T> off) {
-    // Special case for offsets: see ReferTo below.
-    return PushElement(ReferTo(off.o));
-  }
-
-  // When writing fields, we track where they are, so we can create correct
-  // vtables later.
-  void TrackField(voffset_t field, uoffset_t off) {
-    FieldLoc fl = { off, field };
-    buf_.scratch_push_small(fl);
-    num_field_loc++;
-    if (field > max_voffset_) { max_voffset_ = field; }
-  }
-
-  // Like PushElement, but additionally tracks the field this represents.
-  template<typename T> void AddElement(voffset_t field, T e, T def) {
-    // We don't serialize values equal to the default.
-    if (IsTheSameAs(e, def) && !force_defaults_) return;
-    TrackField(field, PushElement(e));
-  }
-
-  template<typename T> void AddElement(voffset_t field, T e) {
-    TrackField(field, PushElement(e));
-  }
-
-  template<typename T> void AddOffset(voffset_t field, Offset<T> off) {
-    if (off.IsNull()) return;  // Don't store.
-    AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
-  }
-
-  template<typename T> void AddStruct(voffset_t field, const T *structptr) {
-    if (!structptr) return;  // Default, don't store.
-    Align(AlignOf<T>());
-    buf_.push_small(*structptr);
-    TrackField(field, GetSize());
-  }
-
-  void AddStructOffset(voffset_t field, uoffset_t off) {
-    TrackField(field, off);
-  }
-
-  // Offsets initially are relative to the end of the buffer (downwards).
-  // This function converts them to be relative to the current location
-  // in the buffer (when stored here), pointing upwards.
-  uoffset_t ReferTo(uoffset_t off) {
-    // Align to ensure GetSize() below is correct.
-    Align(sizeof(uoffset_t));
-    // Offset must refer to something already in buffer.
-    const uoffset_t size = GetSize();
-    FLATBUFFERS_ASSERT(off && off <= size);
-    return size - off + static_cast<uoffset_t>(sizeof(uoffset_t));
-  }
-
-  void NotNested() {
-    // If you hit this, you're trying to construct a Table/Vector/String
-    // during the construction of its parent table (between the MyTableBuilder
-    // and table.Finish().
-    // Move the creation of these sub-objects to above the MyTableBuilder to
-    // not get this assert.
-    // Ignoring this assert may appear to work in simple cases, but the reason
-    // it is here is that storing objects in-line may cause vtable offsets
-    // to not fit anymore. It also leads to vtable duplication.
-    FLATBUFFERS_ASSERT(!nested);
-    // If you hit this, fields were added outside the scope of a table.
-    FLATBUFFERS_ASSERT(!num_field_loc);
-  }
-
-  // From generated code (or from the parser), we call StartTable/EndTable
-  // with a sequence of AddElement calls in between.
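// [Editor's note] A minimal sketch (not part of the diff) of the call
// sequence described in the comment above, as generated Create*() helpers
// would emit it. The two-field table layout is illustrative only; voffsets 4
// and 6 follow the FieldIndexToOffset() scheme for field ids 0 and 1.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   auto start = fbb.StartTable();
//   fbb.AddElement<int32_t>(/*field=*/4, /*e=*/42, /*def=*/0);  // serialized
//   fbb.AddElement<int32_t>(/*field=*/6, /*e=*/0, /*def=*/0);   // equals its
//       // default, so it is skipped unless ForceDefaults(true) was set
//   auto table = fbb.EndTable(start);  // writes (or dedups) the vtable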
- uoffset_t StartTable() { - NotNested(); - nested = true; - return GetSize(); - } - - // This finishes one serialized object by generating the vtable if it's a - // table, comparing it against existing vtables, and writing the - // resulting vtable offset. - uoffset_t EndTable(uoffset_t start) { - // If you get this assert, a corresponding StartTable wasn't called. - FLATBUFFERS_ASSERT(nested); - // Write the vtable offset, which is the start of any Table. - // We fill it's value later. - auto vtableoffsetloc = PushElement(0); - // Write a vtable, which consists entirely of voffset_t elements. - // It starts with the number of offsets, followed by a type id, followed - // by the offsets themselves. In reverse: - // Include space for the last offset and ensure empty tables have a - // minimum size. - max_voffset_ = - (std::max)(static_cast(max_voffset_ + sizeof(voffset_t)), - FieldIndexToOffset(0)); - buf_.fill_big(max_voffset_); - auto table_object_size = vtableoffsetloc - start; - // Vtable use 16bit offsets. - FLATBUFFERS_ASSERT(table_object_size < 0x10000); - WriteScalar(buf_.data() + sizeof(voffset_t), - static_cast(table_object_size)); - WriteScalar(buf_.data(), max_voffset_); - // Write the offsets into the table - for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc); - it < buf_.scratch_end(); it += sizeof(FieldLoc)) { - auto field_location = reinterpret_cast(it); - auto pos = static_cast(vtableoffsetloc - field_location->off); - // If this asserts, it means you've set a field twice. - FLATBUFFERS_ASSERT( - !ReadScalar(buf_.data() + field_location->id)); - WriteScalar(buf_.data() + field_location->id, pos); - } - ClearOffsets(); - auto vt1 = reinterpret_cast(buf_.data()); - auto vt1_size = ReadScalar(vt1); - auto vt_use = GetSize(); - // See if we already have generated a vtable with this exact same - // layout before. If so, make it point to the old one, remove this one. - if (dedup_vtables_) { - for (auto it = buf_.scratch_data(); it < buf_.scratch_end(); - it += sizeof(uoffset_t)) { - auto vt_offset_ptr = reinterpret_cast(it); - auto vt2 = reinterpret_cast(buf_.data_at(*vt_offset_ptr)); - auto vt2_size = ReadScalar(vt2); - if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue; - vt_use = *vt_offset_ptr; - buf_.pop(GetSize() - vtableoffsetloc); - break; - } - } - // If this is a new vtable, remember it. - if (vt_use == GetSize()) { buf_.scratch_push_small(vt_use); } - // Fill the vtable offset we created above. - // The offset points from the beginning of the object to where the - // vtable is stored. - // Offsets default direction is downward in memory for future format - // flexibility (storing all vtables at the start of the file). - WriteScalar(buf_.data_at(vtableoffsetloc), - static_cast(vt_use) - - static_cast(vtableoffsetloc)); - - nested = false; - return vtableoffsetloc; - } - - FLATBUFFERS_ATTRIBUTE([[deprecated("call the version above instead")]]) - uoffset_t EndTable(uoffset_t start, voffset_t /*numfields*/) { - return EndTable(start); - } - - // This checks a required field has been set in a given table that has - // just been constructed. 
- template void Required(Offset table, voffset_t field); - - uoffset_t StartStruct(size_t alignment) { - Align(alignment); - return GetSize(); - } - - uoffset_t EndStruct() { return GetSize(); } - - void ClearOffsets() { - buf_.scratch_pop(num_field_loc * sizeof(FieldLoc)); - num_field_loc = 0; - max_voffset_ = 0; - } - - // Aligns such that when "len" bytes are written, an object can be written - // after it with "alignment" without padding. - void PreAlign(size_t len, size_t alignment) { - if (len == 0) return; - TrackMinAlign(alignment); - buf_.fill(PaddingBytes(GetSize() + len, alignment)); - } - template void PreAlign(size_t len) { - AssertScalarT(); - PreAlign(len, sizeof(T)); - } - /// @endcond - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const char pointer to the data to be stored as a string. - /// @param[in] len The number of bytes that should be stored from `str`. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(const char *str, size_t len) { - NotNested(); - PreAlign(len + 1); // Always 0-terminated. - buf_.fill(1); - PushBytes(reinterpret_cast(str), len); - PushElement(static_cast(len)); - return Offset(GetSize()); - } - - /// @brief Store a string in the buffer, which is null-terminated. - /// @param[in] str A const char pointer to a C-string to add to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(const char *str) { - return CreateString(str, strlen(str)); - } - - /// @brief Store a string in the buffer, which is null-terminated. - /// @param[in] str A char pointer to a C-string to add to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(char *str) { - return CreateString(str, strlen(str)); - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const reference to a std::string to store in the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(const std::string &str) { - return CreateString(str.c_str(), str.length()); - } - - // clang-format off - #ifdef FLATBUFFERS_HAS_STRING_VIEW - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const string_view to copy in to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(flatbuffers::string_view str) { - return CreateString(str.data(), str.size()); - } - #endif // FLATBUFFERS_HAS_STRING_VIEW - // clang-format on - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const pointer to a `String` struct to add to the buffer. - /// @return Returns the offset in the buffer where the string starts - Offset CreateString(const String *str) { - return str ? CreateString(str->c_str(), str->size()) : 0; - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const reference to a std::string like type with support - /// of T::c_str() and T::length() to store in the buffer. - /// @return Returns the offset in the buffer where the string starts. - template Offset CreateString(const T &str) { - return CreateString(str.c_str(), str.length()); - } - - /// @brief Store a string in the buffer, which can contain any binary data. 
-  /// If a string with this exact contents has already been serialized before,
-  /// instead simply returns the offset of the existing string. This uses a map
-  /// stored on the heap, but only stores the numerical offsets.
-  /// @param[in] str A const char pointer to the data to be stored as a string.
-  /// @param[in] len The number of bytes that should be stored from `str`.
-  /// @return Returns the offset in the buffer where the string starts.
-  Offset<String> CreateSharedString(const char *str, size_t len) {
-    FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
-    if (!string_pool)
-      string_pool = new StringOffsetMap(StringOffsetCompare(buf_));
-    auto size_before_string = buf_.size();
-    // Must first serialize the string, since the set is all offsets into
-    // buffer.
-    auto off = CreateString(str, len);
-    auto it = string_pool->find(off);
-    // If it exists we reuse existing serialized data!
-    if (it != string_pool->end()) {
-      // We can remove the string we serialized.
-      buf_.pop(buf_.size() - size_before_string);
-      return *it;
-    }
-    // Record this string for future use.
-    string_pool->insert(off);
-    return off;
-  }
-
-#ifdef FLATBUFFERS_HAS_STRING_VIEW
-  /// @brief Store a string in the buffer, which can contain any binary data.
-  /// If a string with this exact contents has already been serialized before,
-  /// instead simply returns the offset of the existing string. This uses a map
-  /// stored on the heap, but only stores the numerical offsets.
-  /// @param[in] str A const std::string_view to store in the buffer.
-  /// @return Returns the offset in the buffer where the string starts.
-  Offset<String> CreateSharedString(const flatbuffers::string_view str) {
-    return CreateSharedString(str.data(), str.size());
-  }
-#else
-  /// @brief Store a string in the buffer, which is null-terminated.
-  /// If a string with this exact contents has already been serialized before,
-  /// instead simply returns the offset of the existing string. This uses a map
-  /// stored on the heap, but only stores the numerical offsets.
-  /// @param[in] str A const char pointer to a C-string to add to the buffer.
-  /// @return Returns the offset in the buffer where the string starts.
-  Offset<String> CreateSharedString(const char *str) {
-    return CreateSharedString(str, strlen(str));
-  }
-
-  /// @brief Store a string in the buffer, which can contain any binary data.
-  /// If a string with this exact contents has already been serialized before,
-  /// instead simply returns the offset of the existing string. This uses a map
-  /// stored on the heap, but only stores the numerical offsets.
-  /// @param[in] str A const reference to a std::string to store in the buffer.
-  /// @return Returns the offset in the buffer where the string starts.
-  Offset<String> CreateSharedString(const std::string &str) {
-    return CreateSharedString(str.c_str(), str.length());
-  }
-#endif
-
-  /// @brief Store a string in the buffer, which can contain any binary data.
-  /// If a string with this exact contents has already been serialized before,
-  /// instead simply returns the offset of the existing string. This uses a map
-  /// stored on the heap, but only stores the numerical offsets.
-  /// @param[in] str A const pointer to a `String` struct to add to the buffer.
-  /// @return Returns the offset in the buffer where the string starts.
-  Offset<String> CreateSharedString(const String *str) {
-    return CreateSharedString(str->c_str(), str->size());
-  }
-
-  /// @cond FLATBUFFERS_INTERNAL
-  uoffset_t EndVector(size_t len) {
-    FLATBUFFERS_ASSERT(nested);  // Hit if no corresponding StartVector.
-    nested = false;
-    return PushElement(static_cast<uoffset_t>(len));
-  }
-
-  void StartVector(size_t len, size_t elemsize) {
-    NotNested();
-    nested = true;
-    PreAlign<uoffset_t>(len * elemsize);
-    PreAlign(len * elemsize, elemsize);  // Just in case elemsize > uoffset_t.
-  }
-
-  // Call this right before StartVector/CreateVector if you want to force the
-  // alignment to be something different than what the element size would
-  // normally dictate.
-  // This is useful when storing a nested_flatbuffer in a vector of bytes,
-  // or when storing SIMD floats, etc.
-  void ForceVectorAlignment(size_t len, size_t elemsize, size_t alignment) {
-    if (len == 0) return;
-    FLATBUFFERS_ASSERT(VerifyAlignmentRequirements(alignment));
-    PreAlign(len * elemsize, alignment);
-  }
-
-  // Similar to ForceVectorAlignment but for String fields.
-  void ForceStringAlignment(size_t len, size_t alignment) {
-    if (len == 0) return;
-    FLATBUFFERS_ASSERT(VerifyAlignmentRequirements(alignment));
-    PreAlign((len + 1) * sizeof(char), alignment);
-  }
-
-  /// @endcond
-
-  /// @brief Serialize an array into a FlatBuffer `vector`.
-  /// @tparam T The data type of the array elements.
-  /// @param[in] v A pointer to the array of type `T` to serialize into the
-  /// buffer as a `vector`.
-  /// @param[in] len The number of elements to serialize.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T> Offset<Vector<T>> CreateVector(const T *v, size_t len) {
-    // If this assert hits, you're specifying a template argument that is
-    // causing the wrong overload to be selected, remove it.
-    AssertScalarT<T>();
-    StartVector(len, sizeof(T));
-    if (len == 0) { return Offset<Vector<T>>(EndVector(len)); }
-    // clang-format off
-    #if FLATBUFFERS_LITTLEENDIAN
-      PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
-    #else
-      if (sizeof(T) == 1) {
-        PushBytes(reinterpret_cast<const uint8_t *>(v), len);
-      } else {
-        for (auto i = len; i > 0; ) {
-          PushElement(v[--i]);
-        }
-      }
-    #endif
-    // clang-format on
-    return Offset<Vector<T>>(EndVector(len));
-  }
-
-  /// @brief Serialize an array like object into a FlatBuffer `vector`.
-  /// @tparam T The data type of the array elements.
-  /// @tparam C The type of the array.
-  /// @param[in] array A reference to an array like object of type `T` to
-  /// serialize into the buffer as a `vector`.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T, typename C> Offset<Vector<T>> CreateVector(const C &array) {
-    return CreateVector(array.data(), array.size());
-  }
-
-  /// @brief Serialize an initializer list into a FlatBuffer `vector`.
-  /// @tparam T The data type of the initializer list elements.
-  /// @param[in] v The value of the initializer list.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T>
-  Offset<Vector<T>> CreateVector(std::initializer_list<T> v) {
-    return CreateVector(v.begin(), v.size());
-  }
-
-  template<typename T>
-  Offset<Vector<Offset<T>>> CreateVector(const Offset<T> *v, size_t len) {
-    StartVector(len, sizeof(Offset<T>));
-    for (auto i = len; i > 0;) { PushElement(v[--i]); }
-    return Offset<Vector<Offset<T>>>(EndVector(len));
-  }
-
-  /// @brief Serialize a `std::vector` into a FlatBuffer `vector`.
-  /// @tparam T The data type of the `std::vector` elements.
-  /// @param v A const reference to the `std::vector` to serialize into the
-  /// buffer as a `vector`.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T, typename Alloc = std::allocator<T>>
-  Offset<Vector<T>> CreateVector(const std::vector<T, Alloc> &v) {
-    return CreateVector(data(v), v.size());
-  }
-
-  // vector<bool> may be implemented using a bit-set, so we can't access it as
-  // an array. Instead, read elements manually.
-  // Background: https://isocpp.org/blog/2012/11/on-vectorbool
-  Offset<Vector<uint8_t>> CreateVector(const std::vector<bool> &v) {
-    StartVector(v.size(), sizeof(uint8_t));
-    for (auto i = v.size(); i > 0;) {
-      PushElement(static_cast<uint8_t>(v[--i]));
-    }
-    return Offset<Vector<uint8_t>>(EndVector(v.size()));
-  }
-
-  /// @brief Serialize values returned by a function into a FlatBuffer `vector`.
-  /// This is a convenience function that takes care of iteration for you.
-  /// @tparam T The data type of the `std::vector` elements.
-  /// @param f A function that takes the current iteration 0..vector_size-1 and
-  /// returns any type that you can construct a FlatBuffers vector out of.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T>
-  Offset<Vector<T>> CreateVector(size_t vector_size,
-                                 const std::function<T(size_t i)> &f) {
-    FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
-    std::vector<T> elems(vector_size);
-    for (size_t i = 0; i < vector_size; i++) elems[i] = f(i);
-    return CreateVector(elems);
-  }
-
-  /// @brief Serialize values returned by a function into a FlatBuffer `vector`.
-  /// This is a convenience function that takes care of iteration for you. This
-  /// uses a vector stored on the heap to store the intermediate results of the
-  /// iteration.
-  /// @tparam T The data type of the `std::vector` elements.
-  /// @param f A function that takes the current iteration 0..vector_size-1,
-  /// and the state parameter returning any type that you can construct a
-  /// FlatBuffers vector out of.
-  /// @param state State passed to f.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T, typename F, typename S>
-  Offset<Vector<T>> CreateVector(size_t vector_size, F f, S *state) {
-    FLATBUFFERS_ASSERT(FLATBUFFERS_GENERAL_HEAP_ALLOC_OK);
-    std::vector<T> elems(vector_size);
-    for (size_t i = 0; i < vector_size; i++) elems[i] = f(i, state);
-    return CreateVector(elems);
-  }
-
-  /// @brief Serialize a `std::vector<StringType>` into a FlatBuffer `vector`,
-  /// whereas StringType is any type that is accepted by the CreateString()
-  /// overloads.
-  /// This is a convenience function for a common case.
-  /// @param v A const reference to the `std::vector` to serialize into the
-  /// buffer as a `vector`.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T, typename Alloc = std::allocator<T>>
-  Offset<Vector<Offset<String>>> CreateVectorOfStrings(
-      const std::vector<T, Alloc> &v) {
-    return CreateVectorOfStrings(v.cbegin(), v.cend());
-  }
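// A minimal usage sketch for the std::vector overload above; the contents of
// `names` are made-up sample data:
//
//   std::vector<std::string> names = { "water", "gas", "power" };
//   auto vec = fbb.CreateVectorOfStrings(names);
//   // Each element goes through CreateString(); the offsets are parked in
//   // the builder's scratch space until the enclosing vector is written.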
-  /// @brief Serialize a collection of Strings into a FlatBuffer `vector`.
-  /// This is a convenience function for a common case.
-  /// @param begin The beginning iterator of the collection
-  /// @param end The ending iterator of the collection
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename It>
-  Offset<Vector<Offset<String>>> CreateVectorOfStrings(It begin, It end) {
-    auto size = std::distance(begin, end);
-    auto scratch_buffer_usage = size * sizeof(Offset<String>);
-    // If there is not enough space to store the offsets, there definitely
-    // won't be enough space to store all the strings. So ensuring space for
-    // the scratch region is OK, for if it fails, it would have failed later.
-    buf_.ensure_space(scratch_buffer_usage);
-    for (auto it = begin; it != end; ++it) {
-      buf_.scratch_push_small(CreateString(*it));
-    }
-    StartVector(size, sizeof(Offset<String>));
-    for (auto i = 1; i <= size; i++) {
-      // Note we re-evaluate the buf location each iteration to account for any
-      // underlying buffer resizing that may occur.
-      PushElement(*reinterpret_cast<Offset<String> *>(
-          buf_.scratch_end() - i * sizeof(Offset<String>)));
-    }
-    buf_.scratch_pop(scratch_buffer_usage);
-    return Offset<Vector<Offset<String>>>(EndVector(size));
-  }
-
-  /// @brief Serialize an array of structs into a FlatBuffer `vector`.
-  /// @tparam T The data type of the struct array elements.
-  /// @param[in] v A pointer to the array of type `T` to serialize into the
-  /// buffer as a `vector`.
-  /// @param[in] len The number of elements to serialize.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T>
-  Offset<Vector<const T *>> CreateVectorOfStructs(const T *v, size_t len) {
-    StartVector(len * sizeof(T) / AlignOf<T>(), AlignOf<T>());
-    if (len > 0) {
-      PushBytes(reinterpret_cast<const uint8_t *>(v), sizeof(T) * len);
-    }
-    return Offset<Vector<const T *>>(EndVector(len));
-  }
-
-  /// @brief Serialize an array of native structs into a FlatBuffer `vector`.
-  /// @tparam T The data type of the struct array elements.
-  /// @tparam S The data type of the native struct array elements.
-  /// @param[in] v A pointer to the array of type `S` to serialize into the
-  /// buffer as a `vector`.
-  /// @param[in] len The number of elements to serialize.
-  /// @param[in] pack_func Pointer to a function to convert the native struct
-  /// to the FlatBuffer struct.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T, typename S>
-  Offset<Vector<const T *>> CreateVectorOfNativeStructs(
-      const S *v, size_t len, T (*const pack_func)(const S &)) {
-    FLATBUFFERS_ASSERT(pack_func);
-    auto structs = StartVectorOfStructs<T>(len);
-    for (size_t i = 0; i < len; i++) { structs[i] = pack_func(v[i]); }
-    return EndVectorOfStructs<T>(len);
-  }
-
-  /// @brief Serialize an array of native structs into a FlatBuffer `vector`.
-  /// @tparam T The data type of the struct array elements.
-  /// @tparam S The data type of the native struct array elements.
-  /// @param[in] v A pointer to the array of type `S` to serialize into the
-  /// buffer as a `vector`.
-  /// @param[in] len The number of elements to serialize.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  template<typename T, typename S>
-  Offset<Vector<const T *>> CreateVectorOfNativeStructs(const S *v,
-                                                        size_t len) {
-    extern T Pack(const S &);
-    return CreateVectorOfNativeStructs(v, len, Pack);
-  }
-
-  /// @brief Serialize an array of structs into a FlatBuffer `vector`.
-  /// @tparam T The data type of the struct array elements.
-  /// @param[in] filler A function that takes the current iteration
-  /// 0..vector_size-1 and a pointer to the struct that must be filled.
-  /// @return Returns a typed `Offset` into the serialized data indicating
-  /// where the vector is stored.
-  /// This is mostly useful when flatbuffers are generated with mutation
-  /// accessors.
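// A minimal sketch of the callback-based overload documented above and
// implemented just below. `Vec3` stands in for any schema-generated struct;
// T cannot be deduced from the lambda, so it is spelled out explicitly:
//
//   auto vec = fbb.CreateVectorOfStructs<Vec3>(
//       16, [](size_t i, Vec3 *out) { *out = Vec3(float(i), 0.0f, 0.0f); });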
- template - Offset> CreateVectorOfStructs( - size_t vector_size, const std::function &filler) { - T *structs = StartVectorOfStructs(vector_size); - for (size_t i = 0; i < vector_size; i++) { - filler(i, structs); - structs++; - } - return EndVectorOfStructs(vector_size); - } - - /// @brief Serialize an array of structs into a FlatBuffer `vector`. - /// @tparam T The data type of the struct array elements. - /// @param[in] f A function that takes the current iteration 0..vector_size-1, - /// a pointer to the struct that must be filled and the state argument. - /// @param[in] state Arbitrary state to pass to f. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - /// This is mostly useful when flatbuffers are generated with mutation - /// accessors. - template - Offset> CreateVectorOfStructs(size_t vector_size, F f, - S *state) { - T *structs = StartVectorOfStructs(vector_size); - for (size_t i = 0; i < vector_size; i++) { - f(i, structs, state); - structs++; - } - return EndVectorOfStructs(vector_size); - } - - /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`. - /// @tparam T The data type of the `std::vector` struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template> - Offset> CreateVectorOfStructs( - const std::vector &v) { - return CreateVectorOfStructs(data(v), v.size()); - } - - /// @brief Serialize a `std::vector` of native structs into a FlatBuffer - /// `vector`. - /// @tparam T The data type of the `std::vector` struct elements. - /// @tparam S The data type of the `std::vector` native struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @param[in] pack_func Pointer to a function to convert the native struct - /// to the FlatBuffer struct. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template> - Offset> CreateVectorOfNativeStructs( - const std::vector &v, T (*const pack_func)(const S &)) { - return CreateVectorOfNativeStructs(data(v), v.size(), pack_func); - } - - /// @brief Serialize a `std::vector` of native structs into a FlatBuffer - /// `vector`. - /// @tparam T The data type of the `std::vector` struct elements. - /// @tparam S The data type of the `std::vector` native struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template> - Offset> CreateVectorOfNativeStructs( - const std::vector &v) { - return CreateVectorOfNativeStructs(data(v), v.size()); - } - - /// @cond FLATBUFFERS_INTERNAL - template struct StructKeyComparator { - bool operator()(const T &a, const T &b) const { - return a.KeyCompareLessThan(&b); - } - }; - /// @endcond - - /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector` - /// in sorted order. - /// @tparam T The data type of the `std::vector` struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. 
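// A minimal sketch for the sorted-struct helper declared above. Sorting uses
// the generated KeyCompareLessThan(), which exists only when the schema marks
// a field with the `key` attribute; `Reading` is a made-up example:
//
//   // schema:  struct Reading { ts:ulong (key); value:float; }
//   std::vector<Reading> readings = AcquireReadings();
//   auto vec = fbb.CreateVectorOfSortedStructs(&readings);
//   // The vector is sorted in place first, so readers can use the generated
//   // LookupByKey() binary search on it.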
- template> - Offset> CreateVectorOfSortedStructs( - std::vector *v) { - return CreateVectorOfSortedStructs(data(*v), v->size()); - } - - /// @brief Serialize a `std::vector` of native structs into a FlatBuffer - /// `vector` in sorted order. - /// @tparam T The data type of the `std::vector` struct elements. - /// @tparam S The data type of the `std::vector` native struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template> - Offset> CreateVectorOfSortedNativeStructs( - std::vector *v) { - return CreateVectorOfSortedNativeStructs(data(*v), v->size()); - } - - /// @brief Serialize an array of structs into a FlatBuffer `vector` in sorted - /// order. - /// @tparam T The data type of the struct array elements. - /// @param[in] v A pointer to the array of type `T` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfSortedStructs(T *v, size_t len) { - std::stable_sort(v, v + len, StructKeyComparator()); - return CreateVectorOfStructs(v, len); - } - - /// @brief Serialize an array of native structs into a FlatBuffer `vector` in - /// sorted order. - /// @tparam T The data type of the struct array elements. - /// @tparam S The data type of the native struct array elements. - /// @param[in] v A pointer to the array of type `S` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfSortedNativeStructs(S *v, - size_t len) { - extern T Pack(const S &); - auto structs = StartVectorOfStructs(len); - for (size_t i = 0; i < len; i++) { structs[i] = Pack(v[i]); } - std::stable_sort(structs, structs + len, StructKeyComparator()); - return EndVectorOfStructs(len); - } - - /// @cond FLATBUFFERS_INTERNAL - template struct TableKeyComparator { - TableKeyComparator(vector_downward &buf) : buf_(buf) {} - TableKeyComparator(const TableKeyComparator &other) : buf_(other.buf_) {} - bool operator()(const Offset &a, const Offset &b) const { - auto table_a = reinterpret_cast(buf_.data_at(a.o)); - auto table_b = reinterpret_cast(buf_.data_at(b.o)); - return table_a->KeyCompareLessThan(table_b); - } - vector_downward &buf_; - - private: - FLATBUFFERS_DELETE_FUNC( - TableKeyComparator &operator=(const TableKeyComparator &other)); - }; - /// @endcond - - /// @brief Serialize an array of `table` offsets as a `vector` in the buffer - /// in sorted order. - /// @tparam T The data type that the offset refers to. - /// @param[in] v An array of type `Offset` that contains the `table` - /// offsets to store in the buffer in sorted order. - /// @param[in] len The number of elements to store in the `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset>> CreateVectorOfSortedTables(Offset *v, - size_t len) { - std::stable_sort(v, v + len, TableKeyComparator(buf_)); - return CreateVector(v, len); - } - - /// @brief Serialize an array of `table` offsets as a `vector` in the buffer - /// in sorted order. - /// @tparam T The data type that the offset refers to. 
- /// @param[in] v An array of type `Offset` that contains the `table` - /// offsets to store in the buffer in sorted order. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template> - Offset>> CreateVectorOfSortedTables( - std::vector, Alloc> *v) { - return CreateVectorOfSortedTables(data(*v), v->size()); - } - - /// @brief Specialized version of `CreateVector` for non-copying use cases. - /// Write the data any time later to the returned buffer pointer `buf`. - /// @param[in] len The number of elements to store in the `vector`. - /// @param[in] elemsize The size of each element in the `vector`. - /// @param[out] buf A pointer to a `uint8_t` pointer that can be - /// written to at a later time to serialize the data into a `vector` - /// in the buffer. - uoffset_t CreateUninitializedVector(size_t len, size_t elemsize, - uint8_t **buf) { - NotNested(); - StartVector(len, elemsize); - buf_.make_space(len * elemsize); - auto vec_start = GetSize(); - auto vec_end = EndVector(len); - *buf = buf_.data_at(vec_start); - return vec_end; - } - - /// @brief Specialized version of `CreateVector` for non-copying use cases. - /// Write the data any time later to the returned buffer pointer `buf`. - /// @tparam T The data type of the data that will be stored in the buffer - /// as a `vector`. - /// @param[in] len The number of elements to store in the `vector`. - /// @param[out] buf A pointer to a pointer of type `T` that can be - /// written to at a later time to serialize the data into a `vector` - /// in the buffer. - template - Offset> CreateUninitializedVector(size_t len, T **buf) { - AssertScalarT(); - return CreateUninitializedVector(len, sizeof(T), - reinterpret_cast(buf)); - } - - template - Offset> CreateUninitializedVectorOfStructs(size_t len, - T **buf) { - return CreateUninitializedVector(len, sizeof(T), - reinterpret_cast(buf)); - } - - // @brief Create a vector of scalar type T given as input a vector of scalar - // type U, useful with e.g. pre "enum class" enums, or any existing scalar - // data of the wrong type. - template - Offset> CreateVectorScalarCast(const U *v, size_t len) { - AssertScalarT(); - AssertScalarT(); - StartVector(len, sizeof(T)); - for (auto i = len; i > 0;) { PushElement(static_cast(v[--i])); } - return Offset>(EndVector(len)); - } - - /// @brief Write a struct by itself, typically to be part of a union. - template Offset CreateStruct(const T &structobj) { - NotNested(); - Align(AlignOf()); - buf_.push_small(structobj); - return Offset(GetSize()); - } - - /// @brief Finish serializing a buffer by writing the root offset. - /// @param[in] file_identifier If a `file_identifier` is given, the buffer - /// will be prefixed with a standard FlatBuffers file header. - template - void Finish(Offset root, const char *file_identifier = nullptr) { - Finish(root.o, file_identifier, false); - } - - /// @brief Finish a buffer with a 32 bit size field pre-fixed (size of the - /// buffer following the size field). These buffers are NOT compatible - /// with standard buffers created by Finish, i.e. you can't call GetRoot - /// on them, you have to use GetSizePrefixedRoot instead. - /// All >32 bit quantities in this buffer will be aligned when the whole - /// size pre-fixed buffer is aligned. - /// These kinds of buffers are useful for creating a stream of FlatBuffers. 
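// A minimal sketch of the size-prefixed flow described above; `Monster` and
// CreateMonster() stand in for any generated table and its Create helper:
//
//   flatbuffers::FlatBufferBuilder fbb;
//   fbb.FinishSizePrefixed(CreateMonster(fbb));
//   // The first 4 bytes hold the size of the remainder, so consecutive
//   // buffers can be concatenated into one stream:
//   auto size = flatbuffers::GetPrefixedSize(fbb.GetBufferPointer());
//   // Readers must use GetSizePrefixedRoot<Monster>(), not GetRoot<Monster>().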
- template - void FinishSizePrefixed(Offset root, - const char *file_identifier = nullptr) { - Finish(root.o, file_identifier, true); - } - - void SwapBufAllocator(FlatBufferBuilder &other) { - buf_.swap_allocator(other.buf_); - } - - /// @brief The length of a FlatBuffer file header. - static const size_t kFileIdentifierLength = - ::flatbuffers::kFileIdentifierLength; - - protected: - // You shouldn't really be copying instances of this class. - FlatBufferBuilder(const FlatBufferBuilder &); - FlatBufferBuilder &operator=(const FlatBufferBuilder &); - - void Finish(uoffset_t root, const char *file_identifier, bool size_prefix) { - NotNested(); - buf_.clear_scratch(); - // This will cause the whole buffer to be aligned. - PreAlign((size_prefix ? sizeof(uoffset_t) : 0) + sizeof(uoffset_t) + - (file_identifier ? kFileIdentifierLength : 0), - minalign_); - if (file_identifier) { - FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength); - PushBytes(reinterpret_cast(file_identifier), - kFileIdentifierLength); - } - PushElement(ReferTo(root)); // Location of root. - if (size_prefix) { PushElement(GetSize()); } - finished = true; - } - - struct FieldLoc { - uoffset_t off; - voffset_t id; - }; - - vector_downward buf_; - - // Accumulating offsets of table members while it is being built. - // We store these in the scratch pad of buf_, after the vtable offsets. - uoffset_t num_field_loc; - // Track how much of the vtable is in use, so we can output the most compact - // possible vtable. - voffset_t max_voffset_; - - // Ensure objects are not nested. - bool nested; - - // Ensure the buffer is finished before it is being accessed. - bool finished; - - size_t minalign_; - - bool force_defaults_; // Serialize values equal to their defaults anyway. - - bool dedup_vtables_; - - struct StringOffsetCompare { - StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {} - bool operator()(const Offset &a, const Offset &b) const { - auto stra = reinterpret_cast(buf_->data_at(a.o)); - auto strb = reinterpret_cast(buf_->data_at(b.o)); - return StringLessThan(stra->data(), stra->size(), strb->data(), - strb->size()); - } - const vector_downward *buf_; - }; - - // For use with CreateSharedString. Instantiated on first use only. - typedef std::set, StringOffsetCompare> StringOffsetMap; - StringOffsetMap *string_pool; - - private: - // Allocates space for a vector of structures. - // Must be completed with EndVectorOfStructs(). - template T *StartVectorOfStructs(size_t vector_size) { - StartVector(vector_size * sizeof(T) / AlignOf(), AlignOf()); - return reinterpret_cast(buf_.make_space(vector_size * sizeof(T))); - } - - // End the vector of structures in the flatbuffers. - // Vector should have previously be started with StartVectorOfStructs(). - template - Offset> EndVectorOfStructs(size_t vector_size) { - return Offset>(EndVector(vector_size)); - } -}; -/// @} - -/// Helpers to get a typed pointer to objects that are currently being built. -/// @warning Creating new objects will lead to reallocations and invalidates -/// the pointer! 
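// A minimal usage sketch for the two helpers defined just below: inspecting
// an object while the buffer is still under construction.
//
//   auto off = fbb.CreateString("pending");
//   const flatbuffers::String *s = flatbuffers::GetTemporaryPointer(fbb, off);
//   // `s` is readable now, but any further Create*() call may grow the
//   // buffer, reallocate, and invalidate it.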
-template -T *GetMutableTemporaryPointer(FlatBufferBuilder &fbb, Offset offset) { - return reinterpret_cast(fbb.GetCurrentBufferPointer() + fbb.GetSize() - - offset.o); -} - -template -const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset offset) { - return GetMutableTemporaryPointer(fbb, offset); -} - -template -void FlatBufferBuilder::Required(Offset table, voffset_t field) { - auto table_ptr = reinterpret_cast(buf_.data_at(table.o)); - bool ok = table_ptr->GetOptionalFieldOffset(field) != 0; - // If this fails, the caller will show what field needs to be set. - FLATBUFFERS_ASSERT(ok); - (void)ok; -} - -} // namespace flatbuffers - -#endif // FLATBUFFERS_VECTOR_DOWNWARD_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flatbuffers.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flatbuffers.h deleted file mode 100644 index 64217889..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flatbuffers.h +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright 2014 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_H_ -#define FLATBUFFERS_H_ - -// TODO: These includes are for mitigating the pains of users editing their -// source because they relied on flatbuffers.h to include everything for them. -#include "flatbuffers/array.h" -#include "flatbuffers/base.h" -#include "flatbuffers/buffer.h" -#include "flatbuffers/buffer_ref.h" -#include "flatbuffers/detached_buffer.h" -#include "flatbuffers/flatbuffer_builder.h" -#include "flatbuffers/stl_emulation.h" -#include "flatbuffers/string.h" -#include "flatbuffers/struct.h" -#include "flatbuffers/table.h" -#include "flatbuffers/vector.h" -#include "flatbuffers/vector_downward.h" -#include "flatbuffers/verifier.h" - -namespace flatbuffers { - -/// @brief This can compute the start of a FlatBuffer from a root pointer, i.e. -/// it is the opposite transformation of GetRoot(). -/// This may be useful if you want to pass on a root and have the recipient -/// delete the buffer afterwards. -inline const uint8_t *GetBufferStartFromRootPointer(const void *root) { - auto table = reinterpret_cast(root); - auto vtable = table->GetVTable(); - // Either the vtable is before the root or after the root. - auto start = (std::min)(vtable, reinterpret_cast(root)); - // Align to at least sizeof(uoffset_t). - start = reinterpret_cast(reinterpret_cast(start) & - ~(sizeof(uoffset_t) - 1)); - // Additionally, there may be a file_identifier in the buffer, and the root - // offset. The buffer may have been aligned to any size between - // sizeof(uoffset_t) and FLATBUFFERS_MAX_ALIGNMENT (see "force_align"). - // Sadly, the exact alignment is only known when constructing the buffer, - // since it depends on the presence of values with said alignment properties. - // So instead, we simply look at the next uoffset_t values (root, - // file_identifier, and alignment padding) to see which points to the root. 
-  // None of the other values can "impersonate" the root since they will
-  // either be 0 or four ASCII characters.
-  static_assert(flatbuffers::kFileIdentifierLength == sizeof(uoffset_t),
-                "file_identifier is assumed to be the same size as uoffset_t");
-  for (auto possible_roots = FLATBUFFERS_MAX_ALIGNMENT / sizeof(uoffset_t) + 1;
-       possible_roots; possible_roots--) {
-    start -= sizeof(uoffset_t);
-    if (ReadScalar<uoffset_t>(start) + start ==
-        reinterpret_cast<const uint8_t *>(root))
-      return start;
-  }
-  // We didn't find the root, either the "root" passed isn't really a root,
-  // or the buffer is corrupt.
-  // Assert, because calling this function with bad data may cause reads
-  // outside of buffer boundaries.
-  FLATBUFFERS_ASSERT(false);
-  return nullptr;
-}
-
-/// @brief This returns the prefixed size of a FlatBuffer.
-inline uoffset_t GetPrefixedSize(const uint8_t *buf) {
-  return ReadScalar<uoffset_t>(buf);
-}
-
-// Base class for native objects (FlatBuffer data de-serialized into native
-// C++ data structures).
-// Contains no functionality, purely documentative.
-struct NativeTable {};
-
-/// @brief Function types to be used with resolving hashes into objects and
-/// back again. The resolver gets a pointer to a field inside an object API
-/// object that is of the type specified in the schema using the attribute
-/// `cpp_type` (it is thus important whatever you write to this address
-/// matches that type). The value of this field is initially null, so you
-/// may choose to implement a delayed binding lookup using this function
-/// if you wish. The rehasher does the opposite lookup, for when the object
-/// is being serialized again.
-typedef uint64_t hash_value_t;
-typedef std::function<void(void **pointer_adr, hash_value_t hash)>
-    resolver_function_t;
-typedef std::function<hash_value_t(void *pointer)> rehasher_function_t;
-
-// Helper function to test if a field is present, using any of the field
-// enums in the generated code.
-// `table` must be a generated table type. Since this is a template parameter,
-// this is not typechecked to be a subclass of Table, so beware!
-// Note: this function will return false for fields equal to the default
-// value, since they're not stored in the buffer (unless force_defaults was
-// used).
-template<typename T>
-bool IsFieldPresent(const T *table, typename T::FlatBuffersVTableOffset field) {
-  // Cast, since Table is a private baseclass of any table types.
-  return reinterpret_cast<const Table *>(table)->CheckField(
-      static_cast<voffset_t>(field));
-}
-
-// Utility function for reverse lookups on the EnumNames*() functions
-// (in the generated C++ code)
-// names must be NULL terminated.
-inline int LookupEnum(const char **names, const char *name) {
-  for (const char **p = names; *p; p++)
-    if (!strcmp(*p, name)) return static_cast<int>(p - names);
-  return -1;
-}
-
-// These macros allow us to lay out a struct with a guarantee that it'll end
-// up looking the same on different compilers and platforms.
-// They do this by preventing the compiler from inserting any padding, and
-// instead add explicit padding fields that keep every element aligned to its
-// own size.
-// Additionally, they manually set the alignment of the struct as a whole,
-// which is typically its largest element, or a custom size set in the schema
-// by the force_align attribute.
-// These are used in the generated code only.
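// A minimal sketch of what flatc emits with the macros defined just below,
// for a hypothetical struct `Probe` holding a uint8 followed by a uint32
// (the padding fields keep every member aligned to its own size):
//
//   FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Probe {
//     uint8_t status_;
//     int8_t padding0__; int16_t padding1__;  // pad to the next 4 bytes
//     uint32_t value_;
//   };
//   FLATBUFFERS_STRUCT_END(Probe, 8);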
- -// clang-format off -#if defined(_MSC_VER) - #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \ - __pragma(pack(1)) \ - struct __declspec(align(alignment)) - #define FLATBUFFERS_STRUCT_END(name, size) \ - __pragma(pack()) \ - static_assert(sizeof(name) == size, "compiler breaks packing rules") -#elif defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__) - #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \ - _Pragma("pack(1)") \ - struct __attribute__((aligned(alignment))) - #define FLATBUFFERS_STRUCT_END(name, size) \ - _Pragma("pack()") \ - static_assert(sizeof(name) == size, "compiler breaks packing rules") -#else - #error Unknown compiler, please define structure alignment macros -#endif -// clang-format on - -// Minimal reflection via code generation. -// Besides full-fat reflection (see reflection.h) and parsing/printing by -// loading schemas (see idl.h), we can also have code generation for minimal -// reflection data which allows pretty-printing and other uses without needing -// a schema or a parser. -// Generate code with --reflect-types (types only) or --reflect-names (names -// also) to enable. -// See minireflect.h for utilities using this functionality. - -// These types are organized slightly differently as the ones in idl.h. -enum SequenceType { ST_TABLE, ST_STRUCT, ST_UNION, ST_ENUM }; - -// Scalars have the same order as in idl.h -// clang-format off -#define FLATBUFFERS_GEN_ELEMENTARY_TYPES(ET) \ - ET(ET_UTYPE) \ - ET(ET_BOOL) \ - ET(ET_CHAR) \ - ET(ET_UCHAR) \ - ET(ET_SHORT) \ - ET(ET_USHORT) \ - ET(ET_INT) \ - ET(ET_UINT) \ - ET(ET_LONG) \ - ET(ET_ULONG) \ - ET(ET_FLOAT) \ - ET(ET_DOUBLE) \ - ET(ET_STRING) \ - ET(ET_SEQUENCE) // See SequenceType. - -enum ElementaryType { - #define FLATBUFFERS_ET(E) E, - FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET) - #undef FLATBUFFERS_ET -}; - -inline const char * const *ElementaryTypeNames() { - static const char * const names[] = { - #define FLATBUFFERS_ET(E) #E, - FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET) - #undef FLATBUFFERS_ET - }; - return names; -} -// clang-format on - -// Basic type info cost just 16bits per field! -// We're explicitly defining the signedness since the signedness of integer -// bitfields is otherwise implementation-defined and causes warnings on older -// GCC compilers. -struct TypeCode { - // ElementaryType - unsigned short base_type : 4; - // Either vector (in table) or array (in struct) - unsigned short is_repeating : 1; - // Index into type_refs below, or -1 for none. - signed short sequence_ref : 11; -}; - -static_assert(sizeof(TypeCode) == 2, "TypeCode"); - -struct TypeTable; - -// Signature of the static method present in each type. -typedef const TypeTable *(*TypeFunction)(); - -struct TypeTable { - SequenceType st; - size_t num_elems; // of type_codes, values, names (but not type_refs). - const TypeCode *type_codes; // num_elems count - const TypeFunction *type_refs; // less than num_elems entries (see TypeCode). - const int16_t *array_sizes; // less than num_elems entries (see TypeCode). - const int64_t *values; // Only set for non-consecutive enum/union or structs. - const char *const *names; // Only set if compiled with --reflect-names. -}; - -// String which identifies the current version of FlatBuffers. -inline const char *flatbuffers_version_string() { - return "FlatBuffers " FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "." - FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "." 
- FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION); -} - -// clang-format off -#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\ - inline E operator | (E lhs, E rhs){\ - return E(T(lhs) | T(rhs));\ - }\ - inline E operator & (E lhs, E rhs){\ - return E(T(lhs) & T(rhs));\ - }\ - inline E operator ^ (E lhs, E rhs){\ - return E(T(lhs) ^ T(rhs));\ - }\ - inline E operator ~ (E lhs){\ - return E(~T(lhs));\ - }\ - inline E operator |= (E &lhs, E rhs){\ - lhs = lhs | rhs;\ - return lhs;\ - }\ - inline E operator &= (E &lhs, E rhs){\ - lhs = lhs & rhs;\ - return lhs;\ - }\ - inline E operator ^= (E &lhs, E rhs){\ - lhs = lhs ^ rhs;\ - return lhs;\ - }\ - inline bool operator !(E rhs) \ - {\ - return !bool(T(rhs)); \ - } -/// @endcond -} // namespace flatbuffers - -// clang-format on - -#endif // FLATBUFFERS_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flexbuffers.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flexbuffers.h deleted file mode 100644 index 1a109bbd..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/flexbuffers.h +++ /dev/null @@ -1,1903 +0,0 @@ -/* - * Copyright 2017 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_FLEXBUFFERS_H_ -#define FLATBUFFERS_FLEXBUFFERS_H_ - -#include -// Used to select STL variant. -#include "flatbuffers/base.h" -// We use the basic binary writing functions from the regular FlatBuffers. -#include "flatbuffers/util.h" - -#ifdef _MSC_VER -# include -#endif - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4127) // C4127: conditional expression is constant -#endif - -namespace flexbuffers { - -class Reference; -class Map; - -// These are used in the lower 2 bits of a type field to determine the size of -// the elements (and or size field) of the item pointed to (e.g. vector). -enum BitWidth { - BIT_WIDTH_8 = 0, - BIT_WIDTH_16 = 1, - BIT_WIDTH_32 = 2, - BIT_WIDTH_64 = 3, -}; - -// These are used as the upper 6 bits of a type field to indicate the actual -// type. -enum Type { - FBT_NULL = 0, - FBT_INT = 1, - FBT_UINT = 2, - FBT_FLOAT = 3, - // Types above stored inline, types below (except FBT_BOOL) store an offset. - FBT_KEY = 4, - FBT_STRING = 5, - FBT_INDIRECT_INT = 6, - FBT_INDIRECT_UINT = 7, - FBT_INDIRECT_FLOAT = 8, - FBT_MAP = 9, - FBT_VECTOR = 10, // Untyped. - FBT_VECTOR_INT = 11, // Typed any size (stores no type table). - FBT_VECTOR_UINT = 12, - FBT_VECTOR_FLOAT = 13, - FBT_VECTOR_KEY = 14, - // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead. - // Read test.cpp/FlexBuffersDeprecatedTest() for details on why. - FBT_VECTOR_STRING_DEPRECATED = 15, - FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field). - FBT_VECTOR_UINT2 = 17, - FBT_VECTOR_FLOAT2 = 18, - FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field). 
- FBT_VECTOR_UINT3 = 20, - FBT_VECTOR_FLOAT3 = 21, - FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field). - FBT_VECTOR_UINT4 = 23, - FBT_VECTOR_FLOAT4 = 24, - FBT_BLOB = 25, - FBT_BOOL = 26, - FBT_VECTOR_BOOL = - 36, // To Allow the same type of conversion of type to vector type - - FBT_MAX_TYPE = 37 -}; - -inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; } - -inline bool IsTypedVectorElementType(Type t) { - return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL; -} - -inline bool IsTypedVector(Type t) { - return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) || - t == FBT_VECTOR_BOOL; -} - -inline bool IsFixedTypedVector(Type t) { - return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; -} - -inline Type ToTypedVector(Type t, size_t fixed_len = 0) { - FLATBUFFERS_ASSERT(IsTypedVectorElementType(t)); - switch (fixed_len) { - case 0: return static_cast(t - FBT_INT + FBT_VECTOR_INT); - case 2: return static_cast(t - FBT_INT + FBT_VECTOR_INT2); - case 3: return static_cast(t - FBT_INT + FBT_VECTOR_INT3); - case 4: return static_cast(t - FBT_INT + FBT_VECTOR_INT4); - default: FLATBUFFERS_ASSERT(0); return FBT_NULL; - } -} - -inline Type ToTypedVectorElementType(Type t) { - FLATBUFFERS_ASSERT(IsTypedVector(t)); - return static_cast(t - FBT_VECTOR_INT + FBT_INT); -} - -inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) { - FLATBUFFERS_ASSERT(IsFixedTypedVector(t)); - auto fixed_type = t - FBT_VECTOR_INT2; - *len = static_cast(fixed_type / 3 + - 2); // 3 types each, starting from length 2. - return static_cast(fixed_type % 3 + FBT_INT); -} - -// TODO: implement proper support for 8/16bit floats, or decide not to -// support them. -typedef int16_t half; -typedef int8_t quarter; - -// TODO: can we do this without conditionals using intrinsics or inline asm -// on some platforms? Given branch prediction the method below should be -// decently quick, but it is the most frequently executed function. -// We could do an (unaligned) 64-bit read if we ifdef out the platforms for -// which that doesn't work (or where we'd read into un-owned memory). -template -R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) { - return byte_width < 4 - ? (byte_width < 2 - ? static_cast(flatbuffers::ReadScalar(data)) - : static_cast(flatbuffers::ReadScalar(data))) - : (byte_width < 8 - ? static_cast(flatbuffers::ReadScalar(data)) - : static_cast(flatbuffers::ReadScalar(data))); -} - -inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) { - return ReadSizedScalar( - data, byte_width); -} - -inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) { - // This is the "hottest" function (all offset lookups use this), so worth - // optimizing if possible. - // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a - // constant, which here it isn't. Test if memcpy is still faster than - // the conditionals in ReadSizedScalar. Can also use inline asm. - - // clang-format off - #if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC) - // This is 64-bit Windows only, __movsb does not work on 32-bit Windows. 
- uint64_t u = 0; - __movsb(reinterpret_cast(&u), - reinterpret_cast(data), byte_width); - return flatbuffers::EndianScalar(u); - #else - return ReadSizedScalar( - data, byte_width); - #endif - // clang-format on -} - -inline double ReadDouble(const uint8_t *data, uint8_t byte_width) { - return ReadSizedScalar(data, - byte_width); -} - -inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) { - return offset - ReadUInt64(offset, byte_width); -} - -template const uint8_t *Indirect(const uint8_t *offset) { - return offset - flatbuffers::ReadScalar(offset); -} - -inline BitWidth WidthU(uint64_t u) { -#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \ - { \ - if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \ - } - FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8); - FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16); - FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32); -#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH - return BIT_WIDTH_64; -} - -inline BitWidth WidthI(int64_t i) { - auto u = static_cast(i) << 1; - return WidthU(i >= 0 ? u : ~u); -} - -inline BitWidth WidthF(double f) { - return static_cast(static_cast(f)) == f ? BIT_WIDTH_32 - : BIT_WIDTH_64; -} - -// Base class of all types below. -// Points into the data buffer and allows access to one type. -class Object { - public: - Object(const uint8_t *data, uint8_t byte_width) - : data_(data), byte_width_(byte_width) {} - - protected: - const uint8_t *data_; - uint8_t byte_width_; -}; - -// Object that has a size, obtained either from size prefix, or elsewhere. -class Sized : public Object { - public: - // Size prefix. - Sized(const uint8_t *data, uint8_t byte_width) - : Object(data, byte_width), size_(read_size()) {} - // Manual size. - Sized(const uint8_t *data, uint8_t byte_width, size_t sz) - : Object(data, byte_width), size_(sz) {} - size_t size() const { return size_; } - // Access size stored in `byte_width_` bytes before data_ pointer. - size_t read_size() const { - return static_cast(ReadUInt64(data_ - byte_width_, byte_width_)); - } - - protected: - size_t size_; -}; - -class String : public Sized { - public: - // Size prefix. - String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} - // Manual size. 
- String(const uint8_t *data, uint8_t byte_width, size_t sz) - : Sized(data, byte_width, sz) {} - - size_t length() const { return size(); } - const char *c_str() const { return reinterpret_cast(data_); } - std::string str() const { return std::string(c_str(), size()); } - - static String EmptyString() { - static const char *empty_string = ""; - return String(reinterpret_cast(empty_string), 1, 0); - } - bool IsTheEmptyString() const { return data_ == EmptyString().data_; } -}; - -class Blob : public Sized { - public: - Blob(const uint8_t *data_buf, uint8_t byte_width) - : Sized(data_buf, byte_width) {} - - static Blob EmptyBlob() { - static const uint8_t empty_blob[] = { 0 /*len*/ }; - return Blob(empty_blob + 1, 1); - } - bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; } - const uint8_t *data() const { return data_; } -}; - -class Vector : public Sized { - public: - Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} - - Reference operator[](size_t i) const; - - static Vector EmptyVector() { - static const uint8_t empty_vector[] = { 0 /*len*/ }; - return Vector(empty_vector + 1, 1); - } - bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; } -}; - -class TypedVector : public Sized { - public: - TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type) - : Sized(data, byte_width), type_(element_type) {} - - Reference operator[](size_t i) const; - - static TypedVector EmptyTypedVector() { - static const uint8_t empty_typed_vector[] = { 0 /*len*/ }; - return TypedVector(empty_typed_vector + 1, 1, FBT_INT); - } - bool IsTheEmptyVector() const { - return data_ == TypedVector::EmptyTypedVector().data_; - } - - Type ElementType() { return type_; } - - friend Reference; - - private: - Type type_; - - friend Map; -}; - -class FixedTypedVector : public Object { - public: - FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, - uint8_t len) - : Object(data, byte_width), type_(element_type), len_(len) {} - - Reference operator[](size_t i) const; - - static FixedTypedVector EmptyFixedTypedVector() { - static const uint8_t fixed_empty_vector[] = { 0 /* unused */ }; - return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0); - } - bool IsTheEmptyFixedTypedVector() const { - return data_ == FixedTypedVector::EmptyFixedTypedVector().data_; - } - - Type ElementType() const { return type_; } - uint8_t size() const { return len_; } - - private: - Type type_; - uint8_t len_; -}; - -class Map : public Vector { - public: - Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {} - - Reference operator[](const char *key) const; - Reference operator[](const std::string &key) const; - - Vector Values() const { return Vector(data_, byte_width_); } - - TypedVector Keys() const { - const size_t num_prefixed_fields = 3; - auto keys_offset = data_ - byte_width_ * num_prefixed_fields; - return TypedVector(Indirect(keys_offset, byte_width_), - static_cast( - ReadUInt64(keys_offset + byte_width_, byte_width_)), - FBT_KEY); - } - - static Map EmptyMap() { - static const uint8_t empty_map[] = { - 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/ - }; - return Map(empty_map + 4, 1); - } - - bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; } -}; - -template -void AppendToString(std::string &s, T &&v, bool keys_quoted) { - s += "[ "; - for (size_t i = 0; i < v.size(); i++) { - if (i) s += ", "; - v[i].ToString(true, keys_quoted, s); - } - s += " ]"; -} - -class Reference { - public: - 
Reference() - : data_(nullptr), parent_width_(0), byte_width_(0), type_(FBT_NULL) {} - - Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, - Type type) - : data_(data), - parent_width_(parent_width), - byte_width_(byte_width), - type_(type) {} - - Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type) - : data_(data), parent_width_(parent_width) { - byte_width_ = 1U << static_cast(packed_type & 3); - type_ = static_cast(packed_type >> 2); - } - - Type GetType() const { return type_; } - - bool IsNull() const { return type_ == FBT_NULL; } - bool IsBool() const { return type_ == FBT_BOOL; } - bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; } - bool IsUInt() const { - return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; - } - bool IsIntOrUint() const { return IsInt() || IsUInt(); } - bool IsFloat() const { - return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; - } - bool IsNumeric() const { return IsIntOrUint() || IsFloat(); } - bool IsString() const { return type_ == FBT_STRING; } - bool IsKey() const { return type_ == FBT_KEY; } - bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; } - bool IsUntypedVector() const { return type_ == FBT_VECTOR; } - bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); } - bool IsFixedTypedVector() const { - return flexbuffers::IsFixedTypedVector(type_); - } - bool IsAnyVector() const { - return (IsTypedVector() || IsFixedTypedVector() || IsVector()); - } - bool IsMap() const { return type_ == FBT_MAP; } - bool IsBlob() const { return type_ == FBT_BLOB; } - bool AsBool() const { - return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) - : AsUInt64()) != 0; - } - - // Reads any type as a int64_t. Never fails, does most sensible conversion. - // Truncates floats, strings are attempted to be parsed for a number, - // vectors/maps return their size. Returns 0 if all else fails. - int64_t AsInt64() const { - if (type_ == FBT_INT) { - // A fast path for the common case. - return ReadInt64(data_, parent_width_); - } else - switch (type_) { - case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); - case FBT_UINT: return ReadUInt64(data_, parent_width_); - case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); - case FBT_FLOAT: - return static_cast(ReadDouble(data_, parent_width_)); - case FBT_INDIRECT_FLOAT: - return static_cast(ReadDouble(Indirect(), byte_width_)); - case FBT_NULL: return 0; - case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str()); - case FBT_VECTOR: return static_cast(AsVector().size()); - case FBT_BOOL: return ReadInt64(data_, parent_width_); - default: - // Convert other things to int. - return 0; - } - } - - // TODO: could specialize these to not use AsInt64() if that saves - // extension ops in generated code, and use a faster op than ReadInt64. - int32_t AsInt32() const { return static_cast(AsInt64()); } - int16_t AsInt16() const { return static_cast(AsInt64()); } - int8_t AsInt8() const { return static_cast(AsInt64()); } - - uint64_t AsUInt64() const { - if (type_ == FBT_UINT) { - // A fast path for the common case. 
- return ReadUInt64(data_, parent_width_); - } else - switch (type_) { - case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); - case FBT_INT: return ReadInt64(data_, parent_width_); - case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); - case FBT_FLOAT: - return static_cast(ReadDouble(data_, parent_width_)); - case FBT_INDIRECT_FLOAT: - return static_cast(ReadDouble(Indirect(), byte_width_)); - case FBT_NULL: return 0; - case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str()); - case FBT_VECTOR: return static_cast(AsVector().size()); - case FBT_BOOL: return ReadUInt64(data_, parent_width_); - default: - // Convert other things to uint. - return 0; - } - } - - uint32_t AsUInt32() const { return static_cast(AsUInt64()); } - uint16_t AsUInt16() const { return static_cast(AsUInt64()); } - uint8_t AsUInt8() const { return static_cast(AsUInt64()); } - - double AsDouble() const { - if (type_ == FBT_FLOAT) { - // A fast path for the common case. - return ReadDouble(data_, parent_width_); - } else - switch (type_) { - case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_); - case FBT_INT: - return static_cast(ReadInt64(data_, parent_width_)); - case FBT_UINT: - return static_cast(ReadUInt64(data_, parent_width_)); - case FBT_INDIRECT_INT: - return static_cast(ReadInt64(Indirect(), byte_width_)); - case FBT_INDIRECT_UINT: - return static_cast(ReadUInt64(Indirect(), byte_width_)); - case FBT_NULL: return 0.0; - case FBT_STRING: { -#if 1 -#if !defined( _MSC_VER) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wnull-dereference" -#endif - // See b/173239141 for additional context. Patched via - // micro/tools/make/flexbuffers_download.sh - // Introduce a segfault for an unsupported code path for TFLM. - return *(static_cast(nullptr)); -#if !defined( _MSC_VER) -#pragma GCC diagnostic pop -#endif -#else - // This is the original code - double d; - flatbuffers::StringToNumber(AsString().c_str(), &d); - return d; -#endif - } - case FBT_VECTOR: return static_cast(AsVector().size()); - case FBT_BOOL: - return static_cast(ReadUInt64(data_, parent_width_)); - default: - // Convert strings and other things to float. - return 0; - } - } - - float AsFloat() const { return static_cast(AsDouble()); } - - const char *AsKey() const { - if (type_ == FBT_KEY || type_ == FBT_STRING) { - return reinterpret_cast(Indirect()); - } else { - return ""; - } - } - - // This function returns the empty string if you try to read something that - // is not a string or key. - String AsString() const { - if (type_ == FBT_STRING) { - return String(Indirect(), byte_width_); - } else if (type_ == FBT_KEY) { - auto key = Indirect(); - return String(key, byte_width_, - strlen(reinterpret_cast(key))); - } else { - return String::EmptyString(); - } - } - - // Unlike AsString(), this will convert any type to a std::string. - std::string ToString() const { - std::string s; - ToString(false, false, s); - return s; - } - - // Convert any type to a JSON-like string. strings_quoted determines if - // string values at the top level receive "" quotes (inside other values - // they always do). keys_quoted determines if keys are quoted, at any level. - // TODO(wvo): add further options to have indentation/newlines. 
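// A minimal sketch of ToString() (implemented just below), assuming the
// flexbuffers::Builder API defined later in this header:
//
//   flexbuffers::Builder fbb;
//   fbb.Map([&]() { fbb.Int("value", 42); });
//   fbb.Finish();
//   std::string s = flexbuffers::GetRoot(fbb.GetBuffer()).ToString();
//   // s == "{ value: 42 }" -- the key stays unquoted because it looks like
//   // an identifier (see the keys_quoted logic below).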
- void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const { - if (type_ == FBT_STRING) { - String str(Indirect(), byte_width_); - if (strings_quoted) { - flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false); - } else { - s.append(str.c_str(), str.length()); - } - } else if (IsKey()) { - auto str = AsKey(); - if (keys_quoted) { - flatbuffers::EscapeString(str, strlen(str), &s, true, false); - } else { - s += str; - } - } else if (IsInt()) { - s += flatbuffers::NumToString(AsInt64()); - } else if (IsUInt()) { - s += flatbuffers::NumToString(AsUInt64()); - } else if (IsFloat()) { - s += flatbuffers::NumToString(AsDouble()); - } else if (IsNull()) { - s += "null"; - } else if (IsBool()) { - s += AsBool() ? "true" : "false"; - } else if (IsMap()) { - s += "{ "; - auto m = AsMap(); - auto keys = m.Keys(); - auto vals = m.Values(); - for (size_t i = 0; i < keys.size(); i++) { - bool kq = keys_quoted; - if (!kq) { - // FlexBuffers keys may contain arbitrary characters, only allow - // unquoted if it looks like an "identifier": - const char *p = keys[i].AsKey(); - if (!flatbuffers::is_alpha(*p) && *p != '_') { - kq = true; - } else { - while (*++p) { - if (!flatbuffers::is_alnum(*p) && *p != '_') { - kq = true; - break; - } - } - } - } - keys[i].ToString(true, kq, s); - s += ": "; - vals[i].ToString(true, keys_quoted, s); - if (i < keys.size() - 1) s += ", "; - } - s += " }"; - } else if (IsVector()) { - AppendToString(s, AsVector(), keys_quoted); - } else if (IsTypedVector()) { - AppendToString(s, AsTypedVector(), keys_quoted); - } else if (IsFixedTypedVector()) { - AppendToString(s, AsFixedTypedVector(), keys_quoted); - } else if (IsBlob()) { - auto blob = AsBlob(); - flatbuffers::EscapeString(reinterpret_cast(blob.data()), - blob.size(), &s, true, false); - } else { - s += "(?)"; - } - } - - // This function returns the empty blob if you try to read a not-blob. - // Strings can be viewed as blobs too. - Blob AsBlob() const { - if (type_ == FBT_BLOB || type_ == FBT_STRING) { - return Blob(Indirect(), byte_width_); - } else { - return Blob::EmptyBlob(); - } - } - - // This function returns the empty vector if you try to read a not-vector. - // Maps can be viewed as vectors too. - Vector AsVector() const { - if (type_ == FBT_VECTOR || type_ == FBT_MAP) { - return Vector(Indirect(), byte_width_); - } else { - return Vector::EmptyVector(); - } - } - - TypedVector AsTypedVector() const { - if (IsTypedVector()) { - auto tv = - TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_)); - if (tv.type_ == FBT_STRING) { - // These can't be accessed as strings, since we don't know the bit-width - // of the size field, see the declaration of - // FBT_VECTOR_STRING_DEPRECATED above for details. - // We change the type here to be keys, which are a subtype of strings, - // and will ignore the size field. This will truncate strings with - // embedded nulls. - tv.type_ = FBT_KEY; - } - return tv; - } else { - return TypedVector::EmptyTypedVector(); - } - } - - FixedTypedVector AsFixedTypedVector() const { - if (IsFixedTypedVector()) { - uint8_t len = 0; - auto vtype = ToFixedTypedVectorElementType(type_, &len); - return FixedTypedVector(Indirect(), byte_width_, vtype, len); - } else { - return FixedTypedVector::EmptyFixedTypedVector(); - } - } - - Map AsMap() const { - if (type_ == FBT_MAP) { - return Map(Indirect(), byte_width_); - } else { - return Map::EmptyMap(); - } - } - - template T As() const; - - // Experimental: Mutation functions. 
- // These allow scalars in an already created buffer to be updated in-place. - // Since by default scalars are stored in the smallest possible space, - // the new value may not fit, in which case these functions return false. - // To avoid this, you can construct the values you intend to mutate using - // Builder::ForceMinimumBitWidth. - bool MutateInt(int64_t i) { - if (type_ == FBT_INT) { - return Mutate(data_, i, parent_width_, WidthI(i)); - } else if (type_ == FBT_INDIRECT_INT) { - return Mutate(Indirect(), i, byte_width_, WidthI(i)); - } else if (type_ == FBT_UINT) { - auto u = static_cast(i); - return Mutate(data_, u, parent_width_, WidthU(u)); - } else if (type_ == FBT_INDIRECT_UINT) { - auto u = static_cast(i); - return Mutate(Indirect(), u, byte_width_, WidthU(u)); - } else { - return false; - } - } - - bool MutateBool(bool b) { - return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8); - } - - bool MutateUInt(uint64_t u) { - if (type_ == FBT_UINT) { - return Mutate(data_, u, parent_width_, WidthU(u)); - } else if (type_ == FBT_INDIRECT_UINT) { - return Mutate(Indirect(), u, byte_width_, WidthU(u)); - } else if (type_ == FBT_INT) { - auto i = static_cast(u); - return Mutate(data_, i, parent_width_, WidthI(i)); - } else if (type_ == FBT_INDIRECT_INT) { - auto i = static_cast(u); - return Mutate(Indirect(), i, byte_width_, WidthI(i)); - } else { - return false; - } - } - - bool MutateFloat(float f) { - if (type_ == FBT_FLOAT) { - return MutateF(data_, f, parent_width_, BIT_WIDTH_32); - } else if (type_ == FBT_INDIRECT_FLOAT) { - return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32); - } else { - return false; - } - } - - bool MutateFloat(double d) { - if (type_ == FBT_FLOAT) { - return MutateF(data_, d, parent_width_, WidthF(d)); - } else if (type_ == FBT_INDIRECT_FLOAT) { - return MutateF(Indirect(), d, byte_width_, WidthF(d)); - } else { - return false; - } - } - - bool MutateString(const char *str, size_t len) { - auto s = AsString(); - if (s.IsTheEmptyString()) return false; - // This is very strict, could allow shorter strings, but that creates - // garbage. - if (s.length() != len) return false; - memcpy(const_cast(s.c_str()), str, len); - return true; - } - bool MutateString(const char *str) { return MutateString(str, strlen(str)); } - bool MutateString(const std::string &str) { - return MutateString(str.data(), str.length()); - } - - private: - const uint8_t *Indirect() const { - return flexbuffers::Indirect(data_, parent_width_); - } - - template - bool Mutate(const uint8_t *dest, T t, size_t byte_width, - BitWidth value_width) { - auto fits = static_cast(static_cast(1U) << value_width) <= - byte_width; - if (fits) { - t = flatbuffers::EndianScalar(t); - memcpy(const_cast(dest), &t, byte_width); - } - return fits; - } - - template - bool MutateF(const uint8_t *dest, T t, size_t byte_width, - BitWidth value_width) { - if (byte_width == sizeof(double)) - return Mutate(dest, static_cast(t), byte_width, value_width); - if (byte_width == sizeof(float)) - return Mutate(dest, static_cast(t), byte_width, value_width); - FLATBUFFERS_ASSERT(false); - return false; - } - - friend class Verifier; - - const uint8_t *data_; - uint8_t parent_width_; - uint8_t byte_width_; - Type type_; -}; - -// Template specialization for As(). 
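The trade-off described above is easy to trip over: values are stored in the smallest width that fits at build time, so a later mutation to a bigger value fails unless the slots were widened up front. A minimal sketch using the names from this header; the As<T>() specializations that follow then route the templated accessor onto the typed getters above:

```cpp
#include <cstdint>
#include <vector>
#include "flatbuffers/flexbuffers.h"

int main() {
  flexbuffers::Builder fbb;
  // Reserve 64-bit slots so in-place updates cannot run out of room.
  fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_64);
  fbb.Vector([&]() {
    fbb.Int(1);
    fbb.Int(2);
  });
  fbb.Finish();

  // Copy the buffer so we own mutable storage.
  std::vector<uint8_t> buf = fbb.GetBuffer();
  auto vec = flexbuffers::GetRoot(buf).AsVector();
  // Fits because every slot was forced to 64 bits above.
  bool ok = vec[0].MutateInt(int64_t(1) << 40);
  return ok ? 0 : 1;
}
```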
-template<> inline bool Reference::As() const { return AsBool(); } - -template<> inline int8_t Reference::As() const { return AsInt8(); } -template<> inline int16_t Reference::As() const { return AsInt16(); } -template<> inline int32_t Reference::As() const { return AsInt32(); } -template<> inline int64_t Reference::As() const { return AsInt64(); } - -template<> inline uint8_t Reference::As() const { return AsUInt8(); } -template<> inline uint16_t Reference::As() const { - return AsUInt16(); -} -template<> inline uint32_t Reference::As() const { - return AsUInt32(); -} -template<> inline uint64_t Reference::As() const { - return AsUInt64(); -} - -template<> inline double Reference::As() const { return AsDouble(); } -template<> inline float Reference::As() const { return AsFloat(); } - -template<> inline String Reference::As() const { return AsString(); } -template<> inline std::string Reference::As() const { - return AsString().str(); -} - -template<> inline Blob Reference::As() const { return AsBlob(); } -template<> inline Vector Reference::As() const { return AsVector(); } -template<> inline TypedVector Reference::As() const { - return AsTypedVector(); -} -template<> inline FixedTypedVector Reference::As() const { - return AsFixedTypedVector(); -} -template<> inline Map Reference::As() const { return AsMap(); } - -inline uint8_t PackedType(BitWidth bit_width, Type type) { - return static_cast(bit_width | (type << 2)); -} - -inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); } - -// Vector accessors. -// Note: if you try to access outside of bounds, you get a Null value back -// instead. Normally this would be an assert, but since this is "dynamically -// typed" data, you may not want that (someone sends you a 2d vector and you -// wanted 3d). -// The Null converts seamlessly into a default value for any other type. -// TODO(wvo): Could introduce an #ifdef that makes this into an assert? -inline Reference Vector::operator[](size_t i) const { - auto len = size(); - if (i >= len) return Reference(nullptr, 1, NullPackedType()); - auto packed_type = (data_ + len * byte_width_)[i]; - auto elem = data_ + i * byte_width_; - return Reference(elem, byte_width_, packed_type); -} - -inline Reference TypedVector::operator[](size_t i) const { - auto len = size(); - if (i >= len) return Reference(nullptr, 1, NullPackedType()); - auto elem = data_ + i * byte_width_; - return Reference(elem, byte_width_, 1, type_); -} - -inline Reference FixedTypedVector::operator[](size_t i) const { - if (i >= len_) return Reference(nullptr, 1, NullPackedType()); - auto elem = data_ + i * byte_width_; - return Reference(elem, byte_width_, 1, type_); -} - -template int KeyCompare(const void *key, const void *elem) { - auto str_elem = reinterpret_cast( - Indirect(reinterpret_cast(elem))); - auto skey = reinterpret_cast(key); - return strcmp(skey, str_elem); -} - -inline Reference Map::operator[](const char *key) const { - auto keys = Keys(); - // We can't pass keys.byte_width_ to the comparison function, so we have - // to pick the right one ahead of time. 
- int (*comp)(const void *, const void *) = nullptr; - switch (keys.byte_width_) { - case 1: comp = KeyCompare; break; - case 2: comp = KeyCompare; break; - case 4: comp = KeyCompare; break; - case 8: comp = KeyCompare; break; - default: FLATBUFFERS_ASSERT(false); return Reference(); - } - auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp); - if (!res) return Reference(nullptr, 1, NullPackedType()); - auto i = (reinterpret_cast(res) - keys.data_) / keys.byte_width_; - return (*static_cast(this))[i]; -} - -inline Reference Map::operator[](const std::string &key) const { - return (*this)[key.c_str()]; -} - -inline Reference GetRoot(const uint8_t *buffer, size_t size) { - // See Finish() below for the serialization counterpart of this. - // The root starts at the end of the buffer, so we parse backwards from there. - auto end = buffer + size; - auto byte_width = *--end; - auto packed_type = *--end; - end -= byte_width; // The root data item. - return Reference(end, byte_width, packed_type); -} - -inline Reference GetRoot(const std::vector &buffer) { - return GetRoot(buffer.data(), buffer.size()); -} - -// Flags that configure how the Builder behaves. -// The "Share" flags determine if the Builder automatically tries to pool -// this type. Pooling can reduce the size of serialized data if there are -// multiple maps of the same kind, at the expense of slightly slower -// serialization (the cost of lookups) and more memory use (std::set). -// By default this is on for keys, but off for strings. -// Turn keys off if you have e.g. only one map. -// Turn strings on if you expect many non-unique string values. -// Additionally, sharing key vectors can save space if you have maps with -// identical field populations. -enum BuilderFlag { - BUILDER_FLAG_NONE = 0, - BUILDER_FLAG_SHARE_KEYS = 1, - BUILDER_FLAG_SHARE_STRINGS = 2, - BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3, - BUILDER_FLAG_SHARE_KEY_VECTORS = 4, - BUILDER_FLAG_SHARE_ALL = 7, -}; - -class Builder FLATBUFFERS_FINAL_CLASS { - public: - Builder(size_t initial_size = 256, - BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS) - : buf_(initial_size), - finished_(false), - has_duplicate_keys_(false), - flags_(flags), - force_min_bit_width_(BIT_WIDTH_8), - key_pool(KeyOffsetCompare(buf_)), - string_pool(StringOffsetCompare(buf_)) { - buf_.clear(); - } - -#ifdef FLATBUFFERS_DEFAULT_DECLARATION - Builder(Builder &&) = default; - Builder &operator=(Builder &&) = default; -#endif - - /// @brief Get the serialized buffer (after you call `Finish()`). - /// @return Returns a vector owned by this class. - const std::vector &GetBuffer() const { - Finished(); - return buf_; - } - - // Size of the buffer. Does not include unfinished values. - size_t GetSize() const { return buf_.size(); } - - // Reset all state so we can re-use the buffer. - void Clear() { - buf_.clear(); - stack_.clear(); - finished_ = false; - // flags_ remains as-is; - force_min_bit_width_ = BIT_WIDTH_8; - key_pool.clear(); - string_pool.clear(); - } - - // All value constructing functions below have two versions: one that - // takes a key (for placement inside a map) and one that doesn't (for inside - // vectors and elsewhere). 
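For example, a map with one keyed scalar and one keyed vector whose elements use the unkeyed forms (a sketch against the Builder API in this header):

```cpp
#include "flatbuffers/flexbuffers.h"

int main() {
  flexbuffers::Builder fbb;  // BUILDER_FLAG_SHARE_KEYS is the default
  fbb.Map([&]() {
    fbb.Int("answer", 42);      // keyed form: for members of a map
    fbb.Vector("primes", [&]() {
      fbb.Int(2);               // unkeyed form: for elements of a vector
      fbb.Int(3);
      fbb.Int(5);
    });
  });
  fbb.Finish();
  return 0;
}
```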
- - void Null() { stack_.push_back(Value()); } - void Null(const char *key) { - Key(key); - Null(); - } - - void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); } - void Int(const char *key, int64_t i) { - Key(key); - Int(i); - } - - void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); } - void UInt(const char *key, uint64_t u) { - Key(key); - UInt(u); - } - - void Float(float f) { stack_.push_back(Value(f)); } - void Float(const char *key, float f) { - Key(key); - Float(f); - } - - void Double(double f) { stack_.push_back(Value(f)); } - void Double(const char *key, double d) { - Key(key); - Double(d); - } - - void Bool(bool b) { stack_.push_back(Value(b)); } - void Bool(const char *key, bool b) { - Key(key); - Bool(b); - } - - void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); } - void IndirectInt(const char *key, int64_t i) { - Key(key); - IndirectInt(i); - } - - void IndirectUInt(uint64_t u) { - PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); - } - void IndirectUInt(const char *key, uint64_t u) { - Key(key); - IndirectUInt(u); - } - - void IndirectFloat(float f) { - PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); - } - void IndirectFloat(const char *key, float f) { - Key(key); - IndirectFloat(f); - } - - void IndirectDouble(double f) { - PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); - } - void IndirectDouble(const char *key, double d) { - Key(key); - IndirectDouble(d); - } - - size_t Key(const char *str, size_t len) { - auto sloc = buf_.size(); - WriteBytes(str, len + 1); - if (flags_ & BUILDER_FLAG_SHARE_KEYS) { - auto it = key_pool.find(sloc); - if (it != key_pool.end()) { - // Already in the buffer. Remove key we just serialized, and use - // existing offset instead. - buf_.resize(sloc); - sloc = *it; - } else { - key_pool.insert(sloc); - } - } - stack_.push_back(Value(static_cast(sloc), FBT_KEY, BIT_WIDTH_8)); - return sloc; - } - - size_t Key(const char *str) { return Key(str, strlen(str)); } - size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); } - - size_t String(const char *str, size_t len) { - auto reset_to = buf_.size(); - auto sloc = CreateBlob(str, len, 1, FBT_STRING); - if (flags_ & BUILDER_FLAG_SHARE_STRINGS) { - StringOffset so(sloc, len); - auto it = string_pool.find(so); - if (it != string_pool.end()) { - // Already in the buffer. Remove string we just serialized, and use - // existing offset instead. 
- buf_.resize(reset_to); - sloc = it->first; - stack_.back().u_ = sloc; - } else { - string_pool.insert(so); - } - } - return sloc; - } - size_t String(const char *str) { return String(str, strlen(str)); } - size_t String(const std::string &str) { - return String(str.c_str(), str.size()); - } - void String(const flexbuffers::String &str) { - String(str.c_str(), str.length()); - } - - void String(const char *key, const char *str) { - Key(key); - String(str); - } - void String(const char *key, const std::string &str) { - Key(key); - String(str); - } - void String(const char *key, const flexbuffers::String &str) { - Key(key); - String(str); - } - - size_t Blob(const void *data, size_t len) { - return CreateBlob(data, len, 0, FBT_BLOB); - } - size_t Blob(const std::vector &v) { - return CreateBlob(v.data(), v.size(), 0, FBT_BLOB); - } - - void Blob(const char *key, const void *data, size_t len) { - Key(key); - Blob(data, len); - } - void Blob(const char *key, const std::vector &v) { - Key(key); - Blob(v); - } - - // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String), - // e.g. Vector etc. Also in overloaded versions. - // Also some FlatBuffers types? - - size_t StartVector() { return stack_.size(); } - size_t StartVector(const char *key) { - Key(key); - return stack_.size(); - } - size_t StartMap() { return stack_.size(); } - size_t StartMap(const char *key) { - Key(key); - return stack_.size(); - } - - // TODO(wvo): allow this to specify an alignment greater than the natural - // alignment. - size_t EndVector(size_t start, bool typed, bool fixed) { - auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed); - // Remove temp elements and return vector. - stack_.resize(start); - stack_.push_back(vec); - return static_cast(vec.u_); - } - - size_t EndMap(size_t start) { - // We should have interleaved keys and values on the stack. - // Make sure it is an even number: - auto len = stack_.size() - start; - FLATBUFFERS_ASSERT(!(len & 1)); - len /= 2; - // Make sure keys are all strings: - for (auto key = start; key < stack_.size(); key += 2) { - FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY); - } - // Now sort values, so later we can do a binary search lookup. - // We want to sort 2 array elements at a time. - struct TwoValue { - Value key; - Value val; - }; - // TODO(wvo): strict aliasing? - // TODO(wvo): allow the caller to indicate the data is already sorted - // for maximum efficiency? With an assert to check sortedness to make sure - // we're not breaking binary search. - // Or, we can track if the map is sorted as keys are added which would be - // be quite cheap (cheaper than checking it here), so we can skip this - // step automatically when appliccable, and encourage people to write in - // sorted fashion. - // std::sort is typically already a lot faster on sorted data though. - auto dict = reinterpret_cast(stack_.data() + start); - std::sort( - dict, dict + len, [&](const TwoValue &a, const TwoValue &b) -> bool { - auto as = reinterpret_cast(buf_.data() + a.key.u_); - auto bs = reinterpret_cast(buf_.data() + b.key.u_); - auto comp = strcmp(as, bs); - // We want to disallow duplicate keys, since this results in a - // map where values cannot be found. - // But we can't assert here (since we don't want to fail on - // random JSON input) or have an error mechanism. - // Instead, we set has_duplicate_keys_ in the builder to - // signal this. 
- // TODO: Have to check for pointer equality, as some sort - // implementation apparently call this function with the same - // element?? Why? - if (!comp && &a != &b) has_duplicate_keys_ = true; - return comp < 0; - }); - // First create a vector out of all keys. - // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share - // the first vector. - auto keys = CreateVector(start, len, 2, true, false); - auto vec = CreateVector(start + 1, len, 2, false, false, &keys); - // Remove temp elements and return map. - stack_.resize(start); - stack_.push_back(vec); - return static_cast(vec.u_); - } - - // Call this after EndMap to see if the map had any duplicate keys. - // Any map with such keys won't be able to retrieve all values. - bool HasDuplicateKeys() const { return has_duplicate_keys_; } - - template size_t Vector(F f) { - auto start = StartVector(); - f(); - return EndVector(start, false, false); - } - template size_t Vector(F f, T &state) { - auto start = StartVector(); - f(state); - return EndVector(start, false, false); - } - template size_t Vector(const char *key, F f) { - auto start = StartVector(key); - f(); - return EndVector(start, false, false); - } - template - size_t Vector(const char *key, F f, T &state) { - auto start = StartVector(key); - f(state); - return EndVector(start, false, false); - } - - template void Vector(const T *elems, size_t len) { - if (flatbuffers::is_scalar::value) { - // This path should be a lot quicker and use less space. - ScalarVector(elems, len, false); - } else { - auto start = StartVector(); - for (size_t i = 0; i < len; i++) Add(elems[i]); - EndVector(start, false, false); - } - } - template - void Vector(const char *key, const T *elems, size_t len) { - Key(key); - Vector(elems, len); - } - template void Vector(const std::vector &vec) { - Vector(vec.data(), vec.size()); - } - - template size_t TypedVector(F f) { - auto start = StartVector(); - f(); - return EndVector(start, true, false); - } - template size_t TypedVector(F f, T &state) { - auto start = StartVector(); - f(state); - return EndVector(start, true, false); - } - template size_t TypedVector(const char *key, F f) { - auto start = StartVector(key); - f(); - return EndVector(start, true, false); - } - template - size_t TypedVector(const char *key, F f, T &state) { - auto start = StartVector(key); - f(state); - return EndVector(start, true, false); - } - - template size_t FixedTypedVector(const T *elems, size_t len) { - // We only support a few fixed vector lengths. Anything bigger use a - // regular typed vector. - FLATBUFFERS_ASSERT(len >= 2 && len <= 4); - // And only scalar values. 
- static_assert(flatbuffers::is_scalar::value, "Unrelated types"); - return ScalarVector(elems, len, true); - } - - template - size_t FixedTypedVector(const char *key, const T *elems, size_t len) { - Key(key); - return FixedTypedVector(elems, len); - } - - template size_t Map(F f) { - auto start = StartMap(); - f(); - return EndMap(start); - } - template size_t Map(F f, T &state) { - auto start = StartMap(); - f(state); - return EndMap(start); - } - template size_t Map(const char *key, F f) { - auto start = StartMap(key); - f(); - return EndMap(start); - } - template size_t Map(const char *key, F f, T &state) { - auto start = StartMap(key); - f(state); - return EndMap(start); - } - template void Map(const std::map &map) { - auto start = StartMap(); - for (auto it = map.begin(); it != map.end(); ++it) - Add(it->first.c_str(), it->second); - EndMap(start); - } - - // If you wish to share a value explicitly (a value not shared automatically - // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these - // functions. Or if you wish to turn those flags off for performance reasons - // and still do some explicit sharing. For example: - // builder.IndirectDouble(M_PI); - // auto id = builder.LastValue(); // Remember where we stored it. - // .. more code goes here .. - // builder.ReuseValue(id); // Refers to same double by offset. - // LastValue works regardless of whether the value has a key or not. - // Works on any data type. - struct Value; - Value LastValue() { return stack_.back(); } - void ReuseValue(Value v) { stack_.push_back(v); } - void ReuseValue(const char *key, Value v) { - Key(key); - ReuseValue(v); - } - - // Overloaded Add that tries to call the correct function above. - void Add(int8_t i) { Int(i); } - void Add(int16_t i) { Int(i); } - void Add(int32_t i) { Int(i); } - void Add(int64_t i) { Int(i); } - void Add(uint8_t u) { UInt(u); } - void Add(uint16_t u) { UInt(u); } - void Add(uint32_t u) { UInt(u); } - void Add(uint64_t u) { UInt(u); } - void Add(float f) { Float(f); } - void Add(double d) { Double(d); } - void Add(bool b) { Bool(b); } - void Add(const char *str) { String(str); } - void Add(const std::string &str) { String(str); } - void Add(const flexbuffers::String &str) { String(str); } - - template void Add(const std::vector &vec) { Vector(vec); } - - template void Add(const char *key, const T &t) { - Key(key); - Add(t); - } - - template void Add(const std::map &map) { - Map(map); - } - - template void operator+=(const T &t) { Add(t); } - - // This function is useful in combination with the Mutate* functions above. - // It forces elements of vectors and maps to have a minimum size, such that - // they can later be updated without failing. - // Call with no arguments to reset. - void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { - force_min_bit_width_ = bw; - } - - void Finish() { - // If you hit this assert, you likely have objects that were never included - // in a parent. You need to have exactly one root to finish a buffer. - // Check your Start/End calls are matched, and all objects are inside - // some other object. - FLATBUFFERS_ASSERT(stack_.size() == 1); - - // Write root value. - auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0)); - WriteAny(stack_[0], byte_width); - // Write root type. - Write(stack_[0].StoredPackedType(), 1); - // Write root size. 
Normally determined by parent, but root has no parent :) - Write(byte_width, 1); - - finished_ = true; - } - - private: - void Finished() const { - // If you get this assert, you're attempting to get access a buffer - // which hasn't been finished yet. Be sure to call - // Builder::Finish with your root object. - FLATBUFFERS_ASSERT(finished_); - } - - // Align to prepare for writing a scalar with a certain size. - uint8_t Align(BitWidth alignment) { - auto byte_width = 1U << alignment; - buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), - 0); - return static_cast(byte_width); - } - - void WriteBytes(const void *val, size_t size) { - buf_.insert(buf_.end(), reinterpret_cast(val), - reinterpret_cast(val) + size); - } - - template void Write(T val, size_t byte_width) { - FLATBUFFERS_ASSERT(sizeof(T) >= byte_width); - val = flatbuffers::EndianScalar(val); - WriteBytes(&val, byte_width); - } - - void WriteDouble(double f, uint8_t byte_width) { - switch (byte_width) { - case 8: Write(f, byte_width); break; - case 4: Write(static_cast(f), byte_width); break; - // case 2: Write(static_cast(f), byte_width); break; - // case 1: Write(static_cast(f), byte_width); break; - default: FLATBUFFERS_ASSERT(0); - } - } - - void WriteOffset(uint64_t o, uint8_t byte_width) { - auto reloff = buf_.size() - o; - FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8)); - Write(reloff, byte_width); - } - - template void PushIndirect(T val, Type type, BitWidth bit_width) { - auto byte_width = Align(bit_width); - auto iloc = buf_.size(); - Write(val, byte_width); - stack_.push_back(Value(static_cast(iloc), type, bit_width)); - } - - static BitWidth WidthB(size_t byte_width) { - switch (byte_width) { - case 1: return BIT_WIDTH_8; - case 2: return BIT_WIDTH_16; - case 4: return BIT_WIDTH_32; - case 8: return BIT_WIDTH_64; - default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64; - } - } - - template static Type GetScalarType() { - static_assert(flatbuffers::is_scalar::value, "Unrelated types"); - return flatbuffers::is_floating_point::value - ? FBT_FLOAT - : flatbuffers::is_same::value - ? FBT_BOOL - : (flatbuffers::is_unsigned::value ? FBT_UINT - : FBT_INT); - } - - public: - // This was really intended to be private, except for LastValue/ReuseValue. - struct Value { - union { - int64_t i_; - uint64_t u_; - double f_; - }; - - Type type_; - - // For scalars: of itself, for vector: of its elements, for string: length. - BitWidth min_bit_width_; - - Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {} - - Value(bool b) - : u_(static_cast(b)), - type_(FBT_BOOL), - min_bit_width_(BIT_WIDTH_8) {} - - Value(int64_t i, Type t, BitWidth bw) - : i_(i), type_(t), min_bit_width_(bw) {} - Value(uint64_t u, Type t, BitWidth bw) - : u_(u), type_(t), min_bit_width_(bw) {} - - Value(float f) - : f_(static_cast(f)), - type_(FBT_FLOAT), - min_bit_width_(BIT_WIDTH_32) {} - Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {} - - uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { - return PackedType(StoredWidth(parent_bit_width_), type_); - } - - BitWidth ElemWidth(size_t buf_size, size_t elem_index) const { - if (IsInline(type_)) { - return min_bit_width_; - } else { - // We have an absolute offset, but want to store a relative offset - // elem_index elements beyond the current buffer end. 
Since whether - // the relative offset fits in a certain byte_width depends on - // the size of the elements before it (and their alignment), we have - // to test for each size in turn. - for (size_t byte_width = 1; - byte_width <= sizeof(flatbuffers::largest_scalar_t); - byte_width *= 2) { - // Where are we going to write this offset? - auto offset_loc = buf_size + - flatbuffers::PaddingBytes(buf_size, byte_width) + - elem_index * byte_width; - // Compute relative offset. - auto offset = offset_loc - u_; - // Does it fit? - auto bit_width = WidthU(offset); - if (static_cast(static_cast(1U) << bit_width) == - byte_width) - return bit_width; - } - FLATBUFFERS_ASSERT(false); // Must match one of the sizes above. - return BIT_WIDTH_64; - } - } - - BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { - if (IsInline(type_)) { - return (std::max)(min_bit_width_, parent_bit_width_); - } else { - return min_bit_width_; - } - } - }; - - private: - void WriteAny(const Value &val, uint8_t byte_width) { - switch (val.type_) { - case FBT_NULL: - case FBT_INT: Write(val.i_, byte_width); break; - case FBT_BOOL: - case FBT_UINT: Write(val.u_, byte_width); break; - case FBT_FLOAT: WriteDouble(val.f_, byte_width); break; - default: WriteOffset(val.u_, byte_width); break; - } - } - - size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) { - auto bit_width = WidthU(len); - auto byte_width = Align(bit_width); - Write(len, byte_width); - auto sloc = buf_.size(); - WriteBytes(data, len + trailing); - stack_.push_back(Value(static_cast(sloc), type, bit_width)); - return sloc; - } - - template - size_t ScalarVector(const T *elems, size_t len, bool fixed) { - auto vector_type = GetScalarType(); - auto byte_width = sizeof(T); - auto bit_width = WidthB(byte_width); - // If you get this assert, you're trying to write a vector with a size - // field that is bigger than the scalars you're trying to write (e.g. a - // byte vector > 255 elements). For such types, write a "blob" instead. - // TODO: instead of asserting, could write vector with larger elements - // instead, though that would be wasteful. - FLATBUFFERS_ASSERT(WidthU(len) <= bit_width); - Align(bit_width); - if (!fixed) Write(len, byte_width); - auto vloc = buf_.size(); - for (size_t i = 0; i < len; i++) Write(elems[i], byte_width); - stack_.push_back(Value(static_cast(vloc), - ToTypedVector(vector_type, fixed ? len : 0), - bit_width)); - return vloc; - } - - Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, - bool fixed, const Value *keys = nullptr) { - FLATBUFFERS_ASSERT( - !fixed || - typed); // typed=false, fixed=true combination is not supported. - // Figure out smallest bit width we can store this vector with. - auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len)); - auto prefix_elems = 1; - if (keys) { - // If this vector is part of a map, we will pre-fix an offset to the keys - // to this vector. - bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0)); - prefix_elems += 2; - } - Type vector_type = FBT_KEY; - // Check bit widths and types for all elements. - for (size_t i = start; i < stack_.size(); i += step) { - auto elem_width = - stack_[i].ElemWidth(buf_.size(), i - start + prefix_elems); - bit_width = (std::max)(bit_width, elem_width); - if (typed) { - if (i == start) { - vector_type = stack_[i].type_; - } else { - // If you get this assert, you are writing a typed vector with - // elements that are not all the same type. 
- FLATBUFFERS_ASSERT(vector_type == stack_[i].type_); - } - } - } - // If you get this assert, your typed types are not one of: - // Int / UInt / Float / Key. - FLATBUFFERS_ASSERT(!typed || IsTypedVectorElementType(vector_type)); - auto byte_width = Align(bit_width); - // Write vector. First the keys width/offset if available, and size. - if (keys) { - WriteOffset(keys->u_, byte_width); - Write(1ULL << keys->min_bit_width_, byte_width); - } - if (!fixed) Write(vec_len, byte_width); - // Then the actual data. - auto vloc = buf_.size(); - for (size_t i = start; i < stack_.size(); i += step) { - WriteAny(stack_[i], byte_width); - } - // Then the types. - if (!typed) { - for (size_t i = start; i < stack_.size(); i += step) { - buf_.push_back(stack_[i].StoredPackedType(bit_width)); - } - } - return Value(static_cast(vloc), - keys ? FBT_MAP - : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) - : FBT_VECTOR), - bit_width); - } - - // You shouldn't really be copying instances of this class. - Builder(const Builder &); - Builder &operator=(const Builder &); - - std::vector buf_; - std::vector stack_; - - bool finished_; - bool has_duplicate_keys_; - - BuilderFlag flags_; - - BitWidth force_min_bit_width_; - - struct KeyOffsetCompare { - explicit KeyOffsetCompare(const std::vector &buf) : buf_(&buf) {} - bool operator()(size_t a, size_t b) const { - auto stra = reinterpret_cast(buf_->data() + a); - auto strb = reinterpret_cast(buf_->data() + b); - return strcmp(stra, strb) < 0; - } - const std::vector *buf_; - }; - - typedef std::pair StringOffset; - struct StringOffsetCompare { - explicit StringOffsetCompare(const std::vector &buf) - : buf_(&buf) {} - bool operator()(const StringOffset &a, const StringOffset &b) const { - auto stra = buf_->data() + a.first; - auto strb = buf_->data() + b.first; - auto cr = memcmp(stra, strb, (std::min)(a.second, b.second) + 1); - return cr < 0 || (cr == 0 && a.second < b.second); - } - const std::vector *buf_; - }; - - typedef std::set KeyOffsetMap; - typedef std::set StringOffsetMap; - - KeyOffsetMap key_pool; - StringOffsetMap string_pool; - - friend class Verifier; -}; - -// Helper class to verify the integrity of a FlexBuffer -class Verifier FLATBUFFERS_FINAL_CLASS { - public: - Verifier(const uint8_t *buf, size_t buf_len, - // Supplying this vector likely results in faster verification - // of larger buffers with many shared keys/strings, but - // comes at the cost of using additional memory the same size of - // the buffer being verified, so it is by default off. - std::vector *reuse_tracker = nullptr, - bool _check_alignment = true, size_t max_depth = 64) - : buf_(buf), - size_(buf_len), - depth_(0), - max_depth_(max_depth), - num_vectors_(0), - max_vectors_(buf_len), - check_alignment_(_check_alignment), - reuse_tracker_(reuse_tracker) { - FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE); - if (reuse_tracker_) { - reuse_tracker_->clear(); - reuse_tracker_->resize(size_, PackedType(BIT_WIDTH_8, FBT_NULL)); - } - } - - private: - // Central location where any verification failures register. - bool Check(bool ok) const { - // clang-format off - #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE - FLATBUFFERS_ASSERT(ok); - #endif - // clang-format on - return ok; - } - - // Verify any range within the buffer. 
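As a driving sketch for this verifier (the range-check helpers follow below): the optional reuse_tracker is plain scratch memory, one byte per buffer byte, that lets offsets to shared keys/strings be verified only once.

```cpp
#include <cstdint>
#include <vector>
#include "flatbuffers/flexbuffers.h"

// Returns true if `data` is a structurally valid FlexBuffer.
bool CheckIncoming(const uint8_t *data, size_t len) {
  std::vector<uint8_t> reuse_tracker;  // resized by the Verifier itself
  flexbuffers::Verifier verifier(data, len, &reuse_tracker);
  return verifier.VerifyBuffer();
}
```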
- bool VerifyFrom(size_t elem, size_t elem_len) const { - return Check(elem_len < size_ && elem <= size_ - elem_len); - } - bool VerifyBefore(size_t elem, size_t elem_len) const { - return Check(elem_len <= elem); - } - - bool VerifyFromPointer(const uint8_t *p, size_t len) { - auto o = static_cast(p - buf_); - return VerifyFrom(o, len); - } - bool VerifyBeforePointer(const uint8_t *p, size_t len) { - auto o = static_cast(p - buf_); - return VerifyBefore(o, len); - } - - bool VerifyByteWidth(size_t width) { - return Check(width == 1 || width == 2 || width == 4 || width == 8); - } - - bool VerifyType(int type) { return Check(type >= 0 && type < FBT_MAX_TYPE); } - - bool VerifyOffset(uint64_t off, const uint8_t *p) { - return Check(off <= static_cast(size_)) && - off <= static_cast(p - buf_); - } - - bool VerifyAlignment(const uint8_t *p, size_t size) const { - auto o = static_cast(p - buf_); - return Check((o & (size - 1)) == 0 || !check_alignment_); - } - -// Macro, since we want to escape from parent function & use lazy args. -#define FLEX_CHECK_VERIFIED(P, PACKED_TYPE) \ - if (reuse_tracker_) { \ - auto packed_type = PACKED_TYPE; \ - auto existing = (*reuse_tracker_)[P - buf_]; \ - if (existing == packed_type) return true; \ - /* Fail verification if already set with different type! */ \ - if (!Check(existing == 0)) return false; \ - (*reuse_tracker_)[P - buf_] = packed_type; \ - } - - bool VerifyVector(Reference r, const uint8_t *p, Type elem_type) { - // Any kind of nesting goes thru this function, so guard against that - // here, both with simple nesting checks, and the reuse tracker if on. - depth_++; - num_vectors_++; - if (!Check(depth_ <= max_depth_ && num_vectors_ <= max_vectors_)) - return false; - auto size_byte_width = r.byte_width_; - if (!VerifyBeforePointer(p, size_byte_width)) return false; - FLEX_CHECK_VERIFIED(p - size_byte_width, - PackedType(Builder::WidthB(size_byte_width), r.type_)); - auto sized = Sized(p, size_byte_width); - auto num_elems = sized.size(); - auto elem_byte_width = r.type_ == FBT_STRING || r.type_ == FBT_BLOB - ? uint8_t(1) - : r.byte_width_; - auto max_elems = SIZE_MAX / elem_byte_width; - if (!Check(num_elems < max_elems)) - return false; // Protect against byte_size overflowing. - auto byte_size = num_elems * elem_byte_width; - if (!VerifyFromPointer(p, byte_size)) return false; - if (elem_type == FBT_NULL) { - // Verify type bytes after the vector. - if (!VerifyFromPointer(p + byte_size, num_elems)) return false; - auto v = Vector(p, size_byte_width); - for (size_t i = 0; i < num_elems; i++) - if (!VerifyRef(v[i])) return false; - } else if (elem_type == FBT_KEY) { - auto v = TypedVector(p, elem_byte_width, FBT_KEY); - for (size_t i = 0; i < num_elems; i++) - if (!VerifyRef(v[i])) return false; - } else { - FLATBUFFERS_ASSERT(IsInline(elem_type)); - } - depth_--; - return true; - } - - bool VerifyKeys(const uint8_t *p, uint8_t byte_width) { - // The vector part of the map has already been verified. 
- const size_t num_prefixed_fields = 3; - if (!VerifyBeforePointer(p, byte_width * num_prefixed_fields)) return false; - p -= byte_width * num_prefixed_fields; - auto off = ReadUInt64(p, byte_width); - if (!VerifyOffset(off, p)) return false; - auto key_byte_with = - static_cast(ReadUInt64(p + byte_width, byte_width)); - if (!VerifyByteWidth(key_byte_with)) return false; - return VerifyVector(Reference(p, byte_width, key_byte_with, FBT_VECTOR_KEY), - p - off, FBT_KEY); - } - - bool VerifyKey(const uint8_t *p) { - FLEX_CHECK_VERIFIED(p, PackedType(BIT_WIDTH_8, FBT_KEY)); - while (p < buf_ + size_) - if (*p++) return true; - return false; - } - -#undef FLEX_CHECK_VERIFIED - - bool VerifyTerminator(const String &s) { - return VerifyFromPointer(reinterpret_cast(s.c_str()), - s.size() + 1); - } - - bool VerifyRef(Reference r) { - // r.parent_width_ and r.data_ already verified. - if (!VerifyByteWidth(r.byte_width_) || !VerifyType(r.type_)) { - return false; - } - if (IsInline(r.type_)) { - // Inline scalars, don't require further verification. - return true; - } - // All remaining types are an offset. - auto off = ReadUInt64(r.data_, r.parent_width_); - if (!VerifyOffset(off, r.data_)) return false; - auto p = r.Indirect(); - if (!VerifyAlignment(p, r.byte_width_)) return false; - switch (r.type_) { - case FBT_INDIRECT_INT: - case FBT_INDIRECT_UINT: - case FBT_INDIRECT_FLOAT: return VerifyFromPointer(p, r.byte_width_); - case FBT_KEY: return VerifyKey(p); - case FBT_MAP: - return VerifyVector(r, p, FBT_NULL) && VerifyKeys(p, r.byte_width_); - case FBT_VECTOR: return VerifyVector(r, p, FBT_NULL); - case FBT_VECTOR_INT: return VerifyVector(r, p, FBT_INT); - case FBT_VECTOR_BOOL: - case FBT_VECTOR_UINT: return VerifyVector(r, p, FBT_UINT); - case FBT_VECTOR_FLOAT: return VerifyVector(r, p, FBT_FLOAT); - case FBT_VECTOR_KEY: return VerifyVector(r, p, FBT_KEY); - case FBT_VECTOR_STRING_DEPRECATED: - // Use of FBT_KEY here intentional, see elsewhere. - return VerifyVector(r, p, FBT_KEY); - case FBT_BLOB: return VerifyVector(r, p, FBT_UINT); - case FBT_STRING: - return VerifyVector(r, p, FBT_UINT) && - VerifyTerminator(String(p, r.byte_width_)); - case FBT_VECTOR_INT2: - case FBT_VECTOR_UINT2: - case FBT_VECTOR_FLOAT2: - case FBT_VECTOR_INT3: - case FBT_VECTOR_UINT3: - case FBT_VECTOR_FLOAT3: - case FBT_VECTOR_INT4: - case FBT_VECTOR_UINT4: - case FBT_VECTOR_FLOAT4: { - uint8_t len = 0; - auto vtype = ToFixedTypedVectorElementType(r.type_, &len); - if (!VerifyType(vtype)) return false; - return VerifyFromPointer(p, r.byte_width_ * len); - } - default: return false; - } - } - - public: - bool VerifyBuffer() { - if (!Check(size_ >= 3)) return false; - auto end = buf_ + size_; - auto byte_width = *--end; - auto packed_type = *--end; - return VerifyByteWidth(byte_width) && Check(end - buf_ >= byte_width) && - VerifyRef(Reference(end - byte_width, byte_width, packed_type)); - } - - private: - const uint8_t *buf_; - size_t size_; - size_t depth_; - const size_t max_depth_; - size_t num_vectors_; - const size_t max_vectors_; - bool check_alignment_; - std::vector *reuse_tracker_; -}; - -// Utility function that contructs the Verifier for you, see above for -// parameters. 
-inline bool VerifyBuffer(const uint8_t *buf, size_t buf_len, - std::vector *reuse_tracker = nullptr) { - Verifier verifier(buf, buf_len, reuse_tracker); - return verifier.VerifyBuffer(); -} - -} // namespace flexbuffers - -#if defined(_MSC_VER) -# pragma warning(pop) -#endif - -#endif // FLATBUFFERS_FLEXBUFFERS_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/stl_emulation.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/stl_emulation.h deleted file mode 100644 index 97652b17..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/stl_emulation.h +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright 2017 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_STL_EMULATION_H_ -#define FLATBUFFERS_STL_EMULATION_H_ - -// clang-format off -#include "flatbuffers/base.h" - -#include -#include -#include -#include -#include - -#ifndef FLATBUFFERS_USE_STD_OPTIONAL - // Detect C++17 compatible compiler. - // __cplusplus >= 201703L - a compiler has support of 'static inline' variables. - #if (defined(__cplusplus) && __cplusplus >= 201703L) \ - || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) - #define FLATBUFFERS_USE_STD_OPTIONAL 1 - #else - #define FLATBUFFERS_USE_STD_OPTIONAL 0 - #endif // (defined(__cplusplus) && __cplusplus >= 201703L) ... -#endif // FLATBUFFERS_USE_STD_OPTIONAL - -#if FLATBUFFERS_USE_STD_OPTIONAL - #include -#endif - -// The __cpp_lib_span is the predefined feature macro. -#if defined(FLATBUFFERS_USE_STD_SPAN) - #include -#elif defined(__cpp_lib_span) && defined(__has_include) - #if __has_include() - #include - #define FLATBUFFERS_USE_STD_SPAN - #endif -#else - // Disable non-trivial ctors if FLATBUFFERS_SPAN_MINIMAL defined. - #if !defined(FLATBUFFERS_TEMPLATES_ALIASES) - #define FLATBUFFERS_SPAN_MINIMAL - #else - // Enable implicit construction of a span from a std::array. - #include - #endif -#endif // defined(FLATBUFFERS_USE_STD_SPAN) - -// This header provides backwards compatibility for older versions of the STL. -namespace flatbuffers { - -#if defined(FLATBUFFERS_TEMPLATES_ALIASES) - template - using numeric_limits = std::numeric_limits; -#else - template class numeric_limits : - public std::numeric_limits {}; -#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) - -#if defined(FLATBUFFERS_TEMPLATES_ALIASES) - template using is_scalar = std::is_scalar; - template using is_same = std::is_same; - template using is_floating_point = std::is_floating_point; - template using is_unsigned = std::is_unsigned; - template using is_enum = std::is_enum; - template using make_unsigned = std::make_unsigned; - template - using conditional = std::conditional; - template - using integral_constant = std::integral_constant; - template - using bool_constant = integral_constant; - using true_type = std::true_type; - using false_type = std::false_type; -#else - // MSVC 2010 doesn't support C++11 aliases. 
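The C++17 detection idiom used above generalizes beyond this header; a standalone sketch (macro name hypothetical), ahead of the per-compiler trait adapters that follow:

```cpp
// MSVC reports the language level in _MSVC_LANG rather than __cplusplus
// unless /Zc:__cplusplus is set, hence the second test.
#if (defined(__cplusplus) && __cplusplus >= 201703L) || \
    (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
  #define MYLIB_HAS_CPP17 1  // hypothetical macro name
#else
  #define MYLIB_HAS_CPP17 0
#endif

#if MYLIB_HAS_CPP17
  #include <optional>
  using maybe_int = std::optional<int>;
#endif
```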
- template struct is_scalar : public std::is_scalar {}; - template struct is_same : public std::is_same {}; - template struct is_floating_point : - public std::is_floating_point {}; - template struct is_unsigned : public std::is_unsigned {}; - template struct is_enum : public std::is_enum {}; - template struct make_unsigned : public std::make_unsigned {}; - template - struct conditional : public std::conditional {}; - template - struct integral_constant : public std::integral_constant {}; - template - struct bool_constant : public integral_constant {}; - typedef bool_constant true_type; - typedef bool_constant false_type; -#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) - -#if defined(FLATBUFFERS_TEMPLATES_ALIASES) - template using unique_ptr = std::unique_ptr; -#else - // MSVC 2010 doesn't support C++11 aliases. - // We're manually "aliasing" the class here as we want to bring unique_ptr - // into the flatbuffers namespace. We have unique_ptr in the flatbuffers - // namespace we have a completely independent implementation (see below) - // for C++98 STL implementations. - template class unique_ptr : public std::unique_ptr { - public: - unique_ptr() {} - explicit unique_ptr(T* p) : std::unique_ptr(p) {} - unique_ptr(std::unique_ptr&& u) { *this = std::move(u); } - unique_ptr(unique_ptr&& u) { *this = std::move(u); } - unique_ptr& operator=(std::unique_ptr&& u) { - std::unique_ptr::reset(u.release()); - return *this; - } - unique_ptr& operator=(unique_ptr&& u) { - std::unique_ptr::reset(u.release()); - return *this; - } - unique_ptr& operator=(T* p) { - return std::unique_ptr::operator=(p); - } - }; -#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) - -#if FLATBUFFERS_USE_STD_OPTIONAL -template -using Optional = std::optional; -using nullopt_t = std::nullopt_t; -inline constexpr nullopt_t nullopt = std::nullopt; - -#else -// Limited implementation of Optional type for a scalar T. -// This implementation limited by trivial types compatible with -// std::is_arithmetic or std::is_enum type traits. - -// A tag to indicate an empty flatbuffers::optional. -struct nullopt_t { - explicit FLATBUFFERS_CONSTEXPR_CPP11 nullopt_t(int) {} -}; - -#if defined(FLATBUFFERS_CONSTEXPR_DEFINED) - namespace internal { - template struct nullopt_holder { - static constexpr nullopt_t instance_ = nullopt_t(0); - }; - template - constexpr nullopt_t nullopt_holder::instance_; - } - static constexpr const nullopt_t &nullopt = internal::nullopt_holder::instance_; - -#else - namespace internal { - template struct nullopt_holder { - static const nullopt_t instance_; - }; - template - const nullopt_t nullopt_holder::instance_ = nullopt_t(0); - } - static const nullopt_t &nullopt = internal::nullopt_holder::instance_; - -#endif - -template -class Optional FLATBUFFERS_FINAL_CLASS { - // Non-scalar 'T' would extremely complicated Optional. - // Use is_scalar checking because flatbuffers flatbuffers::is_arithmetic - // isn't implemented. 
- static_assert(flatbuffers::is_scalar::value, "unexpected type T"); - - public: - ~Optional() {} - - FLATBUFFERS_CONSTEXPR_CPP11 Optional() FLATBUFFERS_NOEXCEPT - : value_(), has_value_(false) {} - - FLATBUFFERS_CONSTEXPR_CPP11 Optional(nullopt_t) FLATBUFFERS_NOEXCEPT - : value_(), has_value_(false) {} - - FLATBUFFERS_CONSTEXPR_CPP11 Optional(T val) FLATBUFFERS_NOEXCEPT - : value_(val), has_value_(true) {} - - FLATBUFFERS_CONSTEXPR_CPP11 Optional(const Optional &other) FLATBUFFERS_NOEXCEPT - : value_(other.value_), has_value_(other.has_value_) {} - - FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(const Optional &other) FLATBUFFERS_NOEXCEPT { - value_ = other.value_; - has_value_ = other.has_value_; - return *this; - } - - FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(nullopt_t) FLATBUFFERS_NOEXCEPT { - value_ = T(); - has_value_ = false; - return *this; - } - - FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(T val) FLATBUFFERS_NOEXCEPT { - value_ = val; - has_value_ = true; - return *this; - } - - void reset() FLATBUFFERS_NOEXCEPT { - *this = nullopt; - } - - void swap(Optional &other) FLATBUFFERS_NOEXCEPT { - std::swap(value_, other.value_); - std::swap(has_value_, other.has_value_); - } - - FLATBUFFERS_CONSTEXPR_CPP11 FLATBUFFERS_EXPLICIT_CPP11 operator bool() const FLATBUFFERS_NOEXCEPT { - return has_value_; - } - - FLATBUFFERS_CONSTEXPR_CPP11 bool has_value() const FLATBUFFERS_NOEXCEPT { - return has_value_; - } - - FLATBUFFERS_CONSTEXPR_CPP11 const T& operator*() const FLATBUFFERS_NOEXCEPT { - return value_; - } - - const T& value() const { - FLATBUFFERS_ASSERT(has_value()); - return value_; - } - - T value_or(T default_value) const FLATBUFFERS_NOEXCEPT { - return has_value() ? value_ : default_value; - } - - private: - T value_; - bool has_value_; -}; - -template -FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional& opt, nullopt_t) FLATBUFFERS_NOEXCEPT { - return !opt; -} -template -FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(nullopt_t, const Optional& opt) FLATBUFFERS_NOEXCEPT { - return !opt; -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional& lhs, const U& rhs) FLATBUFFERS_NOEXCEPT { - return static_cast(lhs) && (*lhs == rhs); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const T& lhs, const Optional& rhs) FLATBUFFERS_NOEXCEPT { - return static_cast(rhs) && (lhs == *rhs); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional& lhs, const Optional& rhs) FLATBUFFERS_NOEXCEPT { - return static_cast(lhs) != static_cast(rhs) - ? false - : !static_cast(lhs) ? false : (*lhs == *rhs); -} -#endif // FLATBUFFERS_USE_STD_OPTIONAL - - -// Very limited and naive partial implementation of C++20 std::span. -#if defined(FLATBUFFERS_USE_STD_SPAN) - inline constexpr std::size_t dynamic_extent = std::dynamic_extent; - template - using span = std::span; - -#else // !defined(FLATBUFFERS_USE_STD_SPAN) -FLATBUFFERS_CONSTEXPR std::size_t dynamic_extent = static_cast(-1); - -// Exclude this code if MSVC2010 or non-STL Android is active. -// The non-STL Android doesn't have `std::is_convertible` required for SFINAE. -#if !defined(FLATBUFFERS_SPAN_MINIMAL) -namespace internal { - // This is SFINAE helper class for checking of a common condition: - // > This overload only participates in overload resolution - // > Check whether a pointer to an array of From can be converted - // > to a pointer to an array of To. - // This helper is used for checking of 'From -> const From'. 
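A short usage sketch of this Optional emulation (behavior matches std::optional for scalar T, and on C++17 builds it simply aliases std::optional); the span helpers continue below:

```cpp
#include "flatbuffers/stl_emulation.h"

int main() {
  flatbuffers::Optional<int> opt;      // empty: has_value() == false
  int a = opt.value_or(-1);            // -1
  opt = 42;
  int b = opt.has_value() ? *opt : 0;  // 42
  opt = flatbuffers::nullopt;          // back to empty
  return (a + b == 41) ? 0 : 1;
}
```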
- template - struct is_span_convertable { - using type = - typename std::conditional::value - && (Extent == dynamic_extent || N == Extent), - int, void>::type; - }; - - template - struct SpanIterator { - // TODO: upgrade to std::random_access_iterator_tag. - using iterator_category = std::forward_iterator_tag; - using difference_type = std::ptrdiff_t; - using value_type = typename std::remove_cv::type; - using reference = T&; - using pointer = T*; - - // Convince MSVC compiler that this iterator is trusted (it is verified). - #ifdef _MSC_VER - using _Unchecked_type = pointer; - #endif // _MSC_VER - - SpanIterator(pointer ptr) : ptr_(ptr) {} - reference operator*() const { return *ptr_; } - pointer operator->() { return ptr_; } - SpanIterator& operator++() { ptr_++; return *this; } - SpanIterator operator++(int) { auto tmp = *this; ++(*this); return tmp; } - - friend bool operator== (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ == rhs.ptr_; } - friend bool operator!= (const SpanIterator& lhs, const SpanIterator& rhs) { return lhs.ptr_ != rhs.ptr_; } - - private: - pointer ptr_; - }; -} // namespace internal -#endif // !defined(FLATBUFFERS_SPAN_MINIMAL) - -// T - element type; must be a complete type that is not an abstract -// class type. -// Extent - the number of elements in the sequence, or dynamic. -template -class span FLATBUFFERS_FINAL_CLASS { - public: - typedef T element_type; - typedef T& reference; - typedef const T& const_reference; - typedef T* pointer; - typedef const T* const_pointer; - typedef std::size_t size_type; - - static FLATBUFFERS_CONSTEXPR size_type extent = Extent; - - // Returns the number of elements in the span. - FLATBUFFERS_CONSTEXPR_CPP11 size_type size() const FLATBUFFERS_NOEXCEPT { - return count_; - } - - // Returns the size of the sequence in bytes. - FLATBUFFERS_CONSTEXPR_CPP11 - size_type size_bytes() const FLATBUFFERS_NOEXCEPT { - return size() * sizeof(element_type); - } - - // Checks if the span is empty. - FLATBUFFERS_CONSTEXPR_CPP11 bool empty() const FLATBUFFERS_NOEXCEPT { - return size() == 0; - } - - // Returns a pointer to the beginning of the sequence. - FLATBUFFERS_CONSTEXPR_CPP11 pointer data() const FLATBUFFERS_NOEXCEPT { - return data_; - } - - #if !defined(FLATBUFFERS_SPAN_MINIMAL) - using Iterator = internal::SpanIterator; - - Iterator begin() const { return Iterator(data()); } - Iterator end() const { return Iterator(data() + size()); } - #endif - - // Returns a reference to the idx-th element of the sequence. - // The behavior is undefined if the idx is greater than or equal to size(). - FLATBUFFERS_CONSTEXPR_CPP11 reference operator[](size_type idx) const { - return data()[idx]; - } - - FLATBUFFERS_CONSTEXPR_CPP11 span(const span &other) FLATBUFFERS_NOEXCEPT - : data_(other.data_), count_(other.count_) {} - - FLATBUFFERS_CONSTEXPR_CPP14 span &operator=(const span &other) - FLATBUFFERS_NOEXCEPT { - data_ = other.data_; - count_ = other.count_; - } - - // Limited implementation of - // `template constexpr std::span(It first, size_type count);`. - // - // Constructs a span that is a view over the range [first, first + count); - // the resulting span has: data() == first and size() == count. - // The behavior is undefined if [first, first + count) is not a valid range, - // or if (extent != flatbuffers::dynamic_extent && count != extent). - FLATBUFFERS_CONSTEXPR_CPP11 - explicit span(pointer first, size_type count) FLATBUFFERS_NOEXCEPT - : data_ (Extent == dynamic_extent ? first : (Extent == count ? 
first : nullptr)), - count_(Extent == dynamic_extent ? count : (Extent == count ? Extent : 0)) { - // Make span empty if the count argument is incompatible with span. - } - - // Exclude this code if MSVC2010 is active. The MSVC2010 isn't C++11 - // compliant, it doesn't support default template arguments for functions. - #if defined(FLATBUFFERS_SPAN_MINIMAL) - FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr), - count_(0) { - static_assert(extent == 0 || extent == dynamic_extent, "invalid span"); - } - - #else - // Constructs an empty span whose data() == nullptr and size() == 0. - // This overload only participates in overload resolution if - // extent == 0 || extent == flatbuffers::dynamic_extent. - // A dummy template argument N is need dependency for SFINAE. - template::type = 0> - FLATBUFFERS_CONSTEXPR_CPP11 span() FLATBUFFERS_NOEXCEPT : data_(nullptr), - count_(0) { - static_assert(extent == 0 || extent == dynamic_extent, "invalid span"); - } - - // Constructs a span that is a view over the array arr; the resulting span - // has size() == N and data() == std::data(arr). These overloads only - // participate in overload resolution if - // extent == std::dynamic_extent || N == extent is true and - // std::remove_pointer_t(*)[] - // is convertible to element_type (*)[]. - template::type = 0> - FLATBUFFERS_CONSTEXPR_CPP11 span(element_type (&arr)[N]) FLATBUFFERS_NOEXCEPT - : data_(arr), count_(N) {} - - template::type = 0> - FLATBUFFERS_CONSTEXPR_CPP11 span(std::array &arr) FLATBUFFERS_NOEXCEPT - : data_(arr.data()), count_(N) {} - - //template - //FLATBUFFERS_CONSTEXPR_CPP11 span(std::array &arr) FLATBUFFERS_NOEXCEPT - // : data_(arr.data()), count_(N) {} - - template::type = 0> - FLATBUFFERS_CONSTEXPR_CPP11 span(const std::array &arr) FLATBUFFERS_NOEXCEPT - : data_(arr.data()), count_(N) {} - - // Converting constructor from another span s; - // the resulting span has size() == s.size() and data() == s.data(). - // This overload only participates in overload resolution - // if extent == std::dynamic_extent || N == extent is true and U (*)[] - // is convertible to element_type (*)[]. - template::type = 0> - FLATBUFFERS_CONSTEXPR_CPP11 span(const flatbuffers::span &s) FLATBUFFERS_NOEXCEPT - : span(s.data(), s.size()) { - } - - #endif // !defined(FLATBUFFERS_SPAN_MINIMAL) - - private: - // This is a naive implementation with 'count_' member even if (Extent != dynamic_extent). 
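A sketch of these constructors (assuming the full emulation, i.e. FLATBUFFERS_SPAN_MINIMAL not defined, or C++20 std::span): fixed extents are checked against the array type at compile time, dynamic extents take the count at run time.

```cpp
#include <array>
#include <cstdint>
#include "flatbuffers/stl_emulation.h"

int main() {
  uint8_t raw[4] = {1, 2, 3, 4};
  std::array<uint8_t, 4> arr = {{5, 6, 7, 8}};

  // Fixed extent deduced and checked via the SFINAE helper above.
  flatbuffers::span<uint8_t, 4> s1(raw);
  // Dynamic extent: explicit pointer + count constructor.
  flatbuffers::span<uint8_t> s2(arr.data(), arr.size());

  return (s1.size_bytes() == s2.size_bytes()) ? 0 : 1;
}
```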
- pointer const data_; - size_type count_; -}; -#endif // defined(FLATBUFFERS_USE_STD_SPAN) - -#if !defined(FLATBUFFERS_SPAN_MINIMAL) -template -FLATBUFFERS_CONSTEXPR_CPP11 -flatbuffers::span make_span(ElementType(&arr)[Extent]) FLATBUFFERS_NOEXCEPT { - return span(arr); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 -flatbuffers::span make_span(const ElementType(&arr)[Extent]) FLATBUFFERS_NOEXCEPT { - return span(arr); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 -flatbuffers::span make_span(std::array &arr) FLATBUFFERS_NOEXCEPT { - return span(arr); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 -flatbuffers::span make_span(const std::array &arr) FLATBUFFERS_NOEXCEPT { - return span(arr); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 -flatbuffers::span make_span(ElementType *first, std::size_t count) FLATBUFFERS_NOEXCEPT { - return span(first, count); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 -flatbuffers::span make_span(const ElementType *first, std::size_t count) FLATBUFFERS_NOEXCEPT { - return span(first, count); -} -#endif // !defined(FLATBUFFERS_SPAN_MINIMAL) - -} // namespace flatbuffers - -#endif // FLATBUFFERS_STL_EMULATION_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/table.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/table.h deleted file mode 100644 index 11b29247..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/table.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright 2021 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_TABLE_H_ -#define FLATBUFFERS_TABLE_H_ - -#include "flatbuffers/base.h" -#include "flatbuffers/verifier.h" - -namespace flatbuffers { - -// "tables" use an offset table (possibly shared) that allows fields to be -// omitted and added at will, but uses an extra indirection to read. -class Table { - public: - const uint8_t *GetVTable() const { - return data_ - ReadScalar(data_); - } - - // This gets the field offset for any of the functions below it, or 0 - // if the field was not present. - voffset_t GetOptionalFieldOffset(voffset_t field) const { - // The vtable offset is always at the start. - auto vtable = GetVTable(); - // The first element is the size of the vtable (fields + type id + itself). - auto vtsize = ReadScalar(vtable); - // If the field we're accessing is outside the vtable, we're reading older - // data, so it's the same as if the offset was 0 (not present). - return field < vtsize ? ReadScalar(vtable + field) : 0; - } - - template T GetField(voffset_t field, T defaultval) const { - auto field_offset = GetOptionalFieldOffset(field); - return field_offset ? ReadScalar(data_ + field_offset) : defaultval; - } - - template P GetPointer(voffset_t field) { - auto field_offset = GetOptionalFieldOffset(field); - auto p = data_ + field_offset; - return field_offset ? reinterpret_cast
<P>(p + ReadScalar<uoffset_t>(p)) - : nullptr; - } - template<typename P> P GetPointer(voffset_t field) const { - return const_cast<Table *>(this)->GetPointer<P>
(field); - } - - template<typename P> P GetStruct(voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - auto p = const_cast<uint8_t *>(data_ + field_offset); - return field_offset ? reinterpret_cast<P>
(p) : nullptr; - } - - template - flatbuffers::Optional GetOptional(voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - auto p = data_ + field_offset; - return field_offset ? Optional(static_cast(ReadScalar(p))) - : Optional(); - } - - template bool SetField(voffset_t field, T val, T def) { - auto field_offset = GetOptionalFieldOffset(field); - if (!field_offset) return IsTheSameAs(val, def); - WriteScalar(data_ + field_offset, val); - return true; - } - template bool SetField(voffset_t field, T val) { - auto field_offset = GetOptionalFieldOffset(field); - if (!field_offset) return false; - WriteScalar(data_ + field_offset, val); - return true; - } - - bool SetPointer(voffset_t field, const uint8_t *val) { - auto field_offset = GetOptionalFieldOffset(field); - if (!field_offset) return false; - WriteScalar(data_ + field_offset, - static_cast(val - (data_ + field_offset))); - return true; - } - - uint8_t *GetAddressOf(voffset_t field) { - auto field_offset = GetOptionalFieldOffset(field); - return field_offset ? data_ + field_offset : nullptr; - } - const uint8_t *GetAddressOf(voffset_t field) const { - return const_cast
<Table *>
(this)->GetAddressOf(field); - } - - bool CheckField(voffset_t field) const { - return GetOptionalFieldOffset(field) != 0; - } - - // Verify the vtable of this table. - // Call this once per table, followed by VerifyField once per field. - bool VerifyTableStart(Verifier &verifier) const { - return verifier.VerifyTableStart(data_); - } - - // Verify a particular field. - template - bool VerifyField(const Verifier &verifier, voffset_t field, - size_t align) const { - // Calling GetOptionalFieldOffset should be safe now thanks to - // VerifyTable(). - auto field_offset = GetOptionalFieldOffset(field); - // Check the actual field. - return !field_offset || verifier.VerifyField(data_, field_offset, align); - } - - // VerifyField for required fields. - template - bool VerifyFieldRequired(const Verifier &verifier, voffset_t field, - size_t align) const { - auto field_offset = GetOptionalFieldOffset(field); - return verifier.Check(field_offset != 0) && - verifier.VerifyField(data_, field_offset, align); - } - - // Versions for offsets. - bool VerifyOffset(const Verifier &verifier, voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - return !field_offset || verifier.VerifyOffset(data_, field_offset); - } - - bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - return verifier.Check(field_offset != 0) && - verifier.VerifyOffset(data_, field_offset); - } - - private: - // private constructor & copy constructor: you obtain instances of this - // class by pointing to existing data only - Table(); - Table(const Table &other); - Table &operator=(const Table &); - - uint8_t data_[1]; -}; - -// This specialization allows avoiding warnings like: -// MSVC C4800: type: forcing value to bool 'true' or 'false'. -template<> -inline flatbuffers::Optional Table::GetOptional( - voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - auto p = data_ + field_offset; - return field_offset ? Optional(ReadScalar(p) != 0) - : Optional(); -} - -} // namespace flatbuffers - -#endif // FLATBUFFERS_TABLE_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/util.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/util.h deleted file mode 100644 index 5a4bfe52..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/util.h +++ /dev/null @@ -1,725 +0,0 @@ -/* - * Copyright 2014 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_UTIL_H_ -#define FLATBUFFERS_UTIL_H_ - -#include -#include - -#include "flatbuffers/base.h" -#include "flatbuffers/stl_emulation.h" - -// For TFLM we always want to use FLATBUFFERS_PREFER_PRINTF=1. See -// http://b/211811553 for more context. 
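The vtable indirection in the Table class deleted above deserves one concrete trace before util.h: GetOptionalFieldOffset returns 0 for any field the writer omitted, so GetField falls back to the schema default, which is what keeps added and removed fields backward compatible. A minimal sketch against a hypothetical flatc-generated accessor (Monster and its hp default of 100 come from the FlatBuffers tutorial schema, not from this repo):

#include <cstdint>
#include "monster_generated.h"  // hypothetical flatc output

int16_t ReadHp(const uint8_t *buf) {
  auto monster = flatbuffers::GetRoot<Monster>(buf);
  // The generated hp() accessor reduces to
  //   GetField<int16_t>(VT_HP, /*defaultval=*/100);
  // a buffer written before hp existed has no vtable slot for it, so
  // GetOptionalFieldOffset returns 0 and the default 100 comes back.
  return monster->hp();
}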
-#ifndef FLATBUFFERS_PREFER_PRINTF -#define FLATBUFFERS_PREFER_PRINTF 1 -#endif - -#ifndef FLATBUFFERS_PREFER_PRINTF -# include -# include -#else // FLATBUFFERS_PREFER_PRINTF -# include -# include -#endif // FLATBUFFERS_PREFER_PRINTF - -#include - -namespace flatbuffers { - -// @locale-independent functions for ASCII characters set. - -// Fast checking that character lies in closed range: [a <= x <= b] -// using one compare (conditional branch) operator. -inline bool check_ascii_range(char x, char a, char b) { - FLATBUFFERS_ASSERT(a <= b); - // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`. - // The x, a, b will be promoted to int and subtracted without overflow. - return static_cast(x - a) <= static_cast(b - a); -} - -// Case-insensitive isalpha -inline bool is_alpha(char c) { - // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). - return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF); -} - -// Check for uppercase alpha -inline bool is_alpha_upper(char c) { return check_ascii_range(c, 'A', 'Z'); } - -// Check (case-insensitive) that `c` is equal to alpha. -inline bool is_alpha_char(char c, char alpha) { - FLATBUFFERS_ASSERT(is_alpha(alpha)); - // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). - return ((c & 0xDF) == (alpha & 0xDF)); -} - -// https://en.cppreference.com/w/cpp/string/byte/isxdigit -// isdigit and isxdigit are the only standard narrow character classification -// functions that are not affected by the currently installed C locale. although -// some implementations (e.g. Microsoft in 1252 codepage) may classify -// additional single-byte characters as digits. -inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); } - -inline bool is_xdigit(char c) { - // Replace by look-up table. - return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF); -} - -// Case-insensitive isalnum -inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); } - -inline char CharToUpper(char c) { - return static_cast(::toupper(static_cast(c))); -} - -inline char CharToLower(char c) { - return static_cast(::tolower(static_cast(c))); -} - -// @end-locale-independent functions for ASCII character set - -#ifdef FLATBUFFERS_PREFER_PRINTF -template size_t IntToDigitCount(T t) { - size_t digit_count = 0; - // Count the sign for negative numbers - if (t < 0) digit_count++; - // Count a single 0 left of the dot for fractional numbers - if (-1 < t && t < 1) digit_count++; - // Count digits until fractional part - T eps = std::numeric_limits::epsilon(); - while (t <= (-1 + eps) || (1 - eps) <= t) { - t /= 10; - digit_count++; - } - return digit_count; -} - -template size_t NumToStringWidth(T t, int precision = 0) { - size_t string_width = IntToDigitCount(t); - // Count the dot for floating point numbers - if (precision) string_width += (precision + 1); - return string_width; -} - -template -std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0) { - size_t string_width = NumToStringWidth(t, precision); - std::string s(string_width, 0x00); - // Allow snprintf to use std::string trailing null to detect buffer overflow - snprintf(const_cast(s.data()), (s.size() + 1), fmt, string_width, t); - return s; -} -#endif // FLATBUFFERS_PREFER_PRINTF - -// Convert an integer or floating point value to a string. -// In contrast to std::stringstream, "char" values are -// converted to a string of digits, and we don't use scientific notation. 
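The check_ascii_range helper near the top of this header leans on unsigned wraparound: a <= x <= b collapses to a single compare because x - a underflows to a huge unsigned value whenever x < a. A standalone rendering of the same trick, runnable on its own:

#include <cassert>

// Same single-compare trick as flatbuffers::check_ascii_range.
inline bool in_range(char x, char a, char b) {
  return static_cast<unsigned int>(x - a) <= static_cast<unsigned int>(b - a);
}

int main() {
  assert(in_range('5', '0', '9'));   // '5' - '0' == 5, and 5 <= 9
  assert(!in_range('/', '0', '9'));  // '/' - '0' == -1, wraps to 0xFFFFFFFF
  assert(!in_range(':', '0', '9'));  // ':' - '0' == 10, and 10 > 9
}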
-template std::string NumToString(T t) { - // clang-format off - - #ifndef FLATBUFFERS_PREFER_PRINTF - std::stringstream ss; - ss << t; - return ss.str(); - #else // FLATBUFFERS_PREFER_PRINTF - auto v = static_cast(t); - return NumToStringImplWrapper(v, "%.*lld"); - #endif // FLATBUFFERS_PREFER_PRINTF - // clang-format on -} -// Avoid char types used as character data. -template<> inline std::string NumToString(signed char t) { - return NumToString(static_cast(t)); -} -template<> inline std::string NumToString(unsigned char t) { - return NumToString(static_cast(t)); -} -template<> inline std::string NumToString(char t) { - return NumToString(static_cast(t)); -} - -// Special versions for floats/doubles. -template std::string FloatToString(T t, int precision) { - // clang-format off - - #ifndef FLATBUFFERS_PREFER_PRINTF - // to_string() prints different numbers of digits for floats depending on - // platform and isn't available on Android, so we use stringstream - std::stringstream ss; - // Use std::fixed to suppress scientific notation. - ss << std::fixed; - // Default precision is 6, we want that to be higher for doubles. - ss << std::setprecision(precision); - ss << t; - auto s = ss.str(); - #else // FLATBUFFERS_PREFER_PRINTF - auto v = static_cast(t); - auto s = NumToStringImplWrapper(v, "%0.*f", precision); - #endif // FLATBUFFERS_PREFER_PRINTF - // clang-format on - // Sadly, std::fixed turns "1" into "1.00000", so here we undo that. - auto p = s.find_last_not_of('0'); - if (p != std::string::npos) { - // Strip trailing zeroes. If it is a whole number, keep one zero. - s.resize(p + (s[p] == '.' ? 2 : 1)); - } - return s; -} - -template<> inline std::string NumToString(double t) { - return FloatToString(t, 12); -} -template<> inline std::string NumToString(float t) { - return FloatToString(t, 6); -} - -// Convert an integer value to a hexadecimal string. -// The returned string length is always xdigits long, prefixed by 0 digits. -// For example, IntToStringHex(0x23, 8) returns the string "00000023". -inline std::string IntToStringHex(int i, int xdigits) { - FLATBUFFERS_ASSERT(i >= 0); - // clang-format off - - #ifndef FLATBUFFERS_PREFER_PRINTF - std::stringstream ss; - ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase - << i; - return ss.str(); - #else // FLATBUFFERS_PREFER_PRINTF - return NumToStringImplWrapper(i, "%.*X", xdigits); - #endif // FLATBUFFERS_PREFER_PRINTF - // clang-format on -} - -// clang-format off -// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}. 
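One subtlety in FloatToString above: both the stringstream and printf paths emit fixed-point padding ("1.000000"), and the find_last_not_of('0') pass trims it while keeping one digit after the dot for whole numbers. The post-processing isolated, as a sketch:

#include <cstdio>
#include <string>

// Mirrors FloatToString's trailing-zero strip for fixed-point strings.
std::string StripTrailingZeroes(std::string s) {
  auto p = s.find_last_not_of('0');
  if (p != std::string::npos)
    s.resize(p + (s[p] == '.' ? 2 : 1));  // keep "1.0", not "1."
  return s;
}

int main() {
  std::printf("%s\n", StripTrailingZeroes("1.000000").c_str());  // 1.0
  std::printf("%s\n", StripTrailingZeroes("2.540000").c_str());  // 2.54
}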
-#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0) - class ClassicLocale { - #ifdef _MSC_VER - typedef _locale_t locale_type; - #else - typedef locale_t locale_type; // POSIX.1-2008 locale_t type - #endif - ClassicLocale(); - ~ClassicLocale(); - locale_type locale_; - static ClassicLocale instance_; - public: - static locale_type Get() { return instance_.locale_; } - }; - - #ifdef _MSC_VER - #define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get()) - #define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get()) - #define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get()) - #define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get()) - #else - #define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get()) - #define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get()) - #define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get()) - #define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get()) - #endif -#else - #define __strtod_impl(s, pe) strtod(s, pe) - #define __strtof_impl(s, pe) static_cast(strtod(s, pe)) - #ifdef _MSC_VER - #define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b) - #define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b) - #else - #define __strtoull_impl(s, pe, b) strtoull(s, pe, b) - #define __strtoll_impl(s, pe, b) strtoll(s, pe, b) - #endif -#endif - -inline void strtoval_impl(int64_t *val, const char *str, char **endptr, - int base) { - *val = __strtoll_impl(str, endptr, base); -} - -inline void strtoval_impl(uint64_t *val, const char *str, char **endptr, - int base) { - *val = __strtoull_impl(str, endptr, base); -} - -inline void strtoval_impl(double *val, const char *str, char **endptr) { - *val = __strtod_impl(str, endptr); -} - -// UBSAN: double to float is safe if numeric_limits::is_iec559 is true. -__supress_ubsan__("float-cast-overflow") -inline void strtoval_impl(float *val, const char *str, char **endptr) { - *val = __strtof_impl(str, endptr); -} -#undef __strtoull_impl -#undef __strtoll_impl -#undef __strtod_impl -#undef __strtof_impl -// clang-format on - -// Adaptor for strtoull()/strtoll(). -// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9), -// while strtoll with base=0 interprets first leading zero as octal prefix. -// In future, it is possible to add prefixed 0b0101. -// 1) Checks errno code for overflow condition (out of range). -// 2) If base <= 0, function try to detect base of number by prefix. -// -// Return value (like strtoull and strtoll, but reject partial result): -// - If successful, an integer value corresponding to the str is returned. -// - If full string conversion can't be performed, 0 is returned. -// - If the converted value falls out of range of corresponding return type, a -// range error occurs. In this case value MAX(T)/MIN(T) is returned. 
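The contract documented above (whole-string match or zero, saturation at the type bounds, leading zeros read as decimal) is what the public StringToNumber wrappers further down expose. A usage sketch, assuming this header is on the include path:

#include <cstdint>
#include <cstdio>
#include "flatbuffers/util.h"

int main() {
  int32_t a = 0, b = 0;
  bool ok  = flatbuffers::StringToNumber("-009", &a);   // true,  a == -9 (decimal, not octal)
  bool rej = flatbuffers::StringToNumber("12abc", &b);  // false, b == 0 (partial parse rejected)
  uint8_t c = 0;
  bool sat = flatbuffers::StringToNumber("300", &c);    // false, c == 255 (saturated at type max)
  std::printf("%d %d %d | %d %d %d\n", ok, rej, sat, a, b, c);
}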
-template -inline bool StringToIntegerImpl(T *val, const char *const str, - const int base = 0, - const bool check_errno = true) { - // T is int64_t or uint64_T - FLATBUFFERS_ASSERT(str); - if (base <= 0) { - auto s = str; - while (*s && !is_digit(*s)) s++; - if (s[0] == '0' && is_alpha_char(s[1], 'X')) - return StringToIntegerImpl(val, str, 16, check_errno); - // if a prefix not match, try base=10 - return StringToIntegerImpl(val, str, 10, check_errno); - } else { - if (check_errno) errno = 0; // clear thread-local errno - auto endptr = str; - strtoval_impl(val, str, const_cast(&endptr), base); - if ((*endptr != '\0') || (endptr == str)) { - *val = 0; // erase partial result - return false; // invalid string - } - // errno is out-of-range, return MAX/MIN - if (check_errno && errno) return false; - return true; - } -} - -template -inline bool StringToFloatImpl(T *val, const char *const str) { - // Type T must be either float or double. - FLATBUFFERS_ASSERT(str && val); - auto end = str; - strtoval_impl(val, str, const_cast(&end)); - auto done = (end != str) && (*end == '\0'); - if (!done) *val = 0; // erase partial result - return done; -} - -// Convert a string to an instance of T. -// Return value (matched with StringToInteger64Impl and strtod): -// - If successful, a numeric value corresponding to the str is returned. -// - If full string conversion can't be performed, 0 is returned. -// - If the converted value falls out of range of corresponding return type, a -// range error occurs. In this case value MAX(T)/MIN(T) is returned. -template inline bool StringToNumber(const char *s, T *val) { - // Assert on `unsigned long` and `signed long` on LP64. - // If it is necessary, it could be solved with flatbuffers::enable_if. - static_assert(sizeof(T) < sizeof(int64_t), "unexpected type T"); - FLATBUFFERS_ASSERT(s && val); - int64_t i64; - // The errno check isn't needed, will return MAX/MIN on overflow. - if (StringToIntegerImpl(&i64, s, 0, false)) { - const int64_t max = (flatbuffers::numeric_limits::max)(); - const int64_t min = flatbuffers::numeric_limits::lowest(); - if (i64 > max) { - *val = static_cast(max); - return false; - } - if (i64 < min) { - // For unsigned types return max to distinguish from - // "no conversion can be performed" when 0 is returned. - *val = static_cast(flatbuffers::is_unsigned::value ? max : min); - return false; - } - *val = static_cast(i64); - return true; - } - *val = 0; - return false; -} - -template<> inline bool StringToNumber(const char *str, int64_t *val) { - return StringToIntegerImpl(val, str); -} - -template<> -inline bool StringToNumber(const char *str, uint64_t *val) { - if (!StringToIntegerImpl(val, str)) return false; - // The strtoull accepts negative numbers: - // If the minus sign was part of the input sequence, the numeric value - // calculated from the sequence of digits is negated as if by unary minus - // in the result type, which applies unsigned integer wraparound rules. - // Fix this behaviour (except -0). - if (*val) { - auto s = str; - while (*s && !is_digit(*s)) s++; - s = (s > str) ? (s - 1) : s; // step back to one symbol - if (*s == '-') { - // For unsigned types return the max to distinguish from - // "no conversion can be performed". 
- *val = (flatbuffers::numeric_limits::max)(); - return false; - } - } - return true; -} - -template<> inline bool StringToNumber(const char *s, float *val) { - return StringToFloatImpl(val, s); -} - -template<> inline bool StringToNumber(const char *s, double *val) { - return StringToFloatImpl(val, s); -} - -inline int64_t StringToInt(const char *s, int base = 10) { - int64_t val; - return StringToIntegerImpl(&val, s, base) ? val : 0; -} - -inline uint64_t StringToUInt(const char *s, int base = 10) { - uint64_t val; - return StringToIntegerImpl(&val, s, base) ? val : 0; -} - -typedef bool (*LoadFileFunction)(const char *filename, bool binary, - std::string *dest); -typedef bool (*FileExistsFunction)(const char *filename); - -LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function); - -FileExistsFunction SetFileExistsFunction( - FileExistsFunction file_exists_function); - -// Check if file "name" exists. -bool FileExists(const char *name); - -// Check if "name" exists and it is also a directory. -bool DirExists(const char *name); - -// Load file "name" into "buf" returning true if successful -// false otherwise. If "binary" is false data is read -// using ifstream's text mode, otherwise data is read with -// no transcoding. -bool LoadFile(const char *name, bool binary, std::string *buf); - -// Save data "buf" of length "len" bytes into a file -// "name" returning true if successful, false otherwise. -// If "binary" is false data is written using ifstream's -// text mode, otherwise data is written with no -// transcoding. -bool SaveFile(const char *name, const char *buf, size_t len, bool binary); - -// Save data "buf" into file "name" returning true if -// successful, false otherwise. If "binary" is false -// data is written using ifstream's text mode, otherwise -// data is written with no transcoding. -inline bool SaveFile(const char *name, const std::string &buf, bool binary) { - return SaveFile(name, buf.c_str(), buf.size(), binary); -} - -// Functionality for minimalistic portable path handling. - -// The functions below behave correctly regardless of whether posix ('/') or -// Windows ('/' or '\\') separators are used. - -// Any new separators inserted are always posix. -FLATBUFFERS_CONSTEXPR char kPathSeparator = '/'; - -// Returns the path with the extension, if any, removed. -std::string StripExtension(const std::string &filepath); - -// Returns the extension, if any. -std::string GetExtension(const std::string &filepath); - -// Return the last component of the path, after the last separator. -std::string StripPath(const std::string &filepath); - -// Strip the last component of the path + separator. -std::string StripFileName(const std::string &filepath); - -std::string StripPrefix(const std::string &filepath, - const std::string &prefix_to_remove); - -// Concatenates a path with a filename, regardless of whether the path -// ends in a separator or not. -std::string ConCatPathFileName(const std::string &path, - const std::string &filename); - -// Replaces any '\\' separators with '/' -std::string PosixPath(const char *path); -std::string PosixPath(const std::string &path); - -// This function ensure a directory exists, by recursively -// creating dirs for any parts of the path that don't exist yet. -void EnsureDirExists(const std::string &filepath); - -// Obtains the absolute path from any other path. -// Returns the input path if the absolute path couldn't be resolved. 
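The path helpers declared in this stretch are defined in flatbuffers' util.cpp rather than inline, but their documented behavior composes predictably. A sketch (file names illustrative):

#include <string>
#include "flatbuffers/util.h"

void PathDemo() {
  // ConCatPathFileName inserts exactly one posix separator.
  std::string p = flatbuffers::ConCatPathFileName("models", "schema.fbs");  // "models/schema.fbs"
  std::string ext  = flatbuffers::GetExtension(p);   // "fbs"
  std::string file = flatbuffers::StripPath(p);      // "schema.fbs"
  std::string dir  = flatbuffers::StripFileName(p);  // "models"
  (void)ext; (void)file; (void)dir;
}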
-std::string AbsolutePath(const std::string &filepath); - -// Returns files relative to the --project_root path, prefixed with `//`. -std::string RelativeToRootPath(const std::string &project, - const std::string &filepath); - -// To and from UTF-8 unicode conversion functions - -// Convert a unicode code point into a UTF-8 representation by appending it -// to a string. Returns the number of bytes generated. -inline int ToUTF8(uint32_t ucc, std::string *out) { - FLATBUFFERS_ASSERT(!(ucc & 0x80000000)); // Top bit can't be set. - // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8 - for (int i = 0; i < 6; i++) { - // Max bits this encoding can represent. - uint32_t max_bits = 6 + i * 5 + static_cast(!i); - if (ucc < (1u << max_bits)) { // does it fit? - // Remaining bits not encoded in the first byte, store 6 bits each - uint32_t remain_bits = i * 6; - // Store first byte: - (*out) += static_cast((0xFE << (max_bits - remain_bits)) | - (ucc >> remain_bits)); - // Store remaining bytes: - for (int j = i - 1; j >= 0; j--) { - (*out) += static_cast(((ucc >> (j * 6)) & 0x3F) | 0x80); - } - return i + 1; // Return the number of bytes added. - } - } - FLATBUFFERS_ASSERT(0); // Impossible to arrive here. - return -1; -} - -// Converts whatever prefix of the incoming string corresponds to a valid -// UTF-8 sequence into a unicode code. The incoming pointer will have been -// advanced past all bytes parsed. -// returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in -// this case). -inline int FromUTF8(const char **in) { - int len = 0; - // Count leading 1 bits. - for (int mask = 0x80; mask >= 0x04; mask >>= 1) { - if (**in & mask) { - len++; - } else { - break; - } - } - if ((static_cast(**in) << len) & 0x80) - return -1; // Bit after leading 1's must be 0. - if (!len) return *(*in)++; - // UTF-8 encoded values with a length are between 2 and 4 bytes. - if (len < 2 || len > 4) { return -1; } - // Grab initial bits of the code. - int ucc = *(*in)++ & ((1 << (7 - len)) - 1); - for (int i = 0; i < len - 1; i++) { - if ((**in & 0xC0) != 0x80) return -1; // Upper bits must 1 0. - ucc <<= 6; - ucc |= *(*in)++ & 0x3F; // Grab 6 more bits of the code. - } - // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for - // UTF-16 surrogate pairs). - if (ucc >= 0xD800 && ucc <= 0xDFFF) { return -1; } - // UTF-8 must represent code points in their shortest possible encoding. - switch (len) { - case 2: - // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF. - if (ucc < 0x0080 || ucc > 0x07FF) { return -1; } - break; - case 3: - // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF. - if (ucc < 0x0800 || ucc > 0xFFFF) { return -1; } - break; - case 4: - // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF. - if (ucc < 0x10000 || ucc > 0x10FFFF) { return -1; } - break; - } - return ucc; -} - -#ifndef FLATBUFFERS_PREFER_PRINTF -// Wraps a string to a maximum length, inserting new lines where necessary. Any -// existing whitespace will be collapsed down to a single space. A prefix or -// suffix can be provided, which will be inserted before or after a wrapped -// line, respectively. 
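ToUTF8 and FromUTF8 above are inverses on valid input, and FromUTF8 signals surrogates and overlong encodings with -1 exactly as its comments promise. A round-trip check, runnable against upstream flatbuffers:

#include <cassert>
#include <string>
#include "flatbuffers/util.h"

int main() {
  std::string out;
  int n = flatbuffers::ToUTF8(0x20AC, &out);  // U+20AC, the euro sign
  assert(n == 3);                             // encodes as E2 82 AC

  const char *p = out.c_str();
  int cp = flatbuffers::FromUTF8(&p);         // advances p past the sequence
  assert(cp == 0x20AC && p == out.c_str() + 3);

  const char *bad = "\xC0\xAF";               // overlong '/', must be rejected
  assert(flatbuffers::FromUTF8(&bad) == -1);
}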
-inline std::string WordWrap(const std::string in, size_t max_length, - const std::string wrapped_line_prefix, - const std::string wrapped_line_suffix) { - std::istringstream in_stream(in); - std::string wrapped, line, word; - - in_stream >> word; - line = word; - - while (in_stream >> word) { - if ((line.length() + 1 + word.length() + wrapped_line_suffix.length()) < - max_length) { - line += " " + word; - } else { - wrapped += line + wrapped_line_suffix + "\n"; - line = wrapped_line_prefix + word; - } - } - wrapped += line; - - return wrapped; -} -#endif // !FLATBUFFERS_PREFER_PRINTF - -inline bool EscapeString(const char *s, size_t length, std::string *_text, - bool allow_non_utf8, bool natural_utf8) { - std::string &text = *_text; - text += "\""; - for (uoffset_t i = 0; i < length; i++) { - char c = s[i]; - switch (c) { - case '\n': text += "\\n"; break; - case '\t': text += "\\t"; break; - case '\r': text += "\\r"; break; - case '\b': text += "\\b"; break; - case '\f': text += "\\f"; break; - case '\"': text += "\\\""; break; - case '\\': text += "\\\\"; break; - default: - if (c >= ' ' && c <= '~') { - text += c; - } else { - // Not printable ASCII data. Let's see if it's valid UTF-8 first: - const char *utf8 = s + i; - int ucc = FromUTF8(&utf8); - if (ucc < 0) { - if (allow_non_utf8) { - text += "\\x"; - text += IntToStringHex(static_cast(c), 2); - } else { - // There are two cases here: - // - // 1) We reached here by parsing an IDL file. In that case, - // we previously checked for non-UTF-8, so we shouldn't reach - // here. - // - // 2) We reached here by someone calling GenerateText() - // on a previously-serialized flatbuffer. The data might have - // non-UTF-8 Strings, or might be corrupt. - // - // In both cases, we have to give up and inform the caller - // they have no JSON. - return false; - } - } else { - if (natural_utf8) { - // utf8 points to past all utf-8 bytes parsed - text.append(s + i, static_cast(utf8 - s - i)); - } else if (ucc <= 0xFFFF) { - // Parses as Unicode within JSON's \uXXXX range, so use that. - text += "\\u"; - text += IntToStringHex(ucc, 4); - } else if (ucc <= 0x10FFFF) { - // Encode Unicode SMP values to a surrogate pair using two \u - // escapes. - uint32_t base = ucc - 0x10000; - auto high_surrogate = (base >> 10) + 0xD800; - auto low_surrogate = (base & 0x03FF) + 0xDC00; - text += "\\u"; - text += IntToStringHex(high_surrogate, 4); - text += "\\u"; - text += IntToStringHex(low_surrogate, 4); - } - // Skip past characters recognized. - i = static_cast(utf8 - s - 1); - } - } - break; - } - } - text += "\""; - return true; -} - -inline std::string BufferToHexText(const void *buffer, size_t buffer_size, - size_t max_length, - const std::string &wrapped_line_prefix, - const std::string &wrapped_line_suffix) { - std::string text = wrapped_line_prefix; - size_t start_offset = 0; - const char *s = reinterpret_cast(buffer); - for (size_t i = 0; s && i < buffer_size; i++) { - // Last iteration or do we have more? - bool have_more = i + 1 < buffer_size; - text += "0x"; - text += IntToStringHex(static_cast(s[i]), 2); - if (have_more) { text += ','; } - // If we have more to process and we reached max_length - if (have_more && - text.size() + wrapped_line_suffix.size() >= start_offset + max_length) { - text += wrapped_line_suffix; - text += '\n'; - start_offset = text.size(); - text += wrapped_line_prefix; - } - } - text += wrapped_line_suffix; - return text; -} - -// Remove paired quotes in a string: "text"|'text' -> text. 
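The supplementary-plane branch of EscapeString below is plain arithmetic: for U+1F600, base = 0x1F600 - 0x10000 = 0xF600, so the high surrogate is 0xD800 + (0xF600 >> 10) = 0xD83D and the low is 0xDC00 + (0xF600 & 0x3FF) = 0xDE00, producing \uD83D\uDE00 in the JSON output. The same computation in isolation:

#include <cassert>
#include <cstdint>

// Split a supplementary-plane code point into a UTF-16 surrogate pair,
// mirroring EscapeString's SMP branch.
void ToSurrogates(uint32_t ucc, uint32_t *hi, uint32_t *lo) {
  const uint32_t base = ucc - 0x10000;
  *hi = 0xD800 + (base >> 10);
  *lo = 0xDC00 + (base & 0x03FF);
}

int main() {
  uint32_t hi = 0, lo = 0;
  ToSurrogates(0x1F600, &hi, &lo);
  assert(hi == 0xD83D && lo == 0xDE00);
}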
-std::string RemoveStringQuotes(const std::string &s); - -// Change th global C-locale to locale with name . -// Returns an actual locale name in <_value>, useful if locale_name is "" or -// null. -bool SetGlobalTestLocale(const char *locale_name, - std::string *_value = nullptr); - -// Read (or test) a value of environment variable. -bool ReadEnvironmentVariable(const char *var_name, - std::string *_value = nullptr); - -// MSVC specific: Send all assert reports to STDOUT to prevent CI hangs. -void SetupDefaultCRTReportMode(); - -enum class Case { - kUnknown = 0, - // TheQuickBrownFox - kUpperCamel = 1, - // theQuickBrownFox - kLowerCamel = 2, - // the_quick_brown_fox - kSnake = 3, - // THE_QUICK_BROWN_FOX - kScreamingSnake = 4, - // THEQUICKBROWNFOX - kAllUpper = 5, - // thequickbrownfox - kAllLower = 6, - // the-quick-brown-fox - kDasher = 7, - // THEQuiCKBr_ownFox (or whatever you want, we won't change it) - kKeep = 8, - // the_quick_brown_fox123 (as opposed to the_quick_brown_fox_123) - kSnake2 = 9, -}; - -// Convert the `input` string of case `input_case` to the specified `output_case`. -std::string ConvertCase(const std::string &input, Case output_case, - Case input_case = Case::kSnake); - -} // namespace flatbuffers - -#endif // FLATBUFFERS_UTIL_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/vector.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/vector.h deleted file mode 100644 index 6bcdfe26..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/vector.h +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Copyright 2021 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_VECTOR_H_ -#define FLATBUFFERS_VECTOR_H_ - -#include "flatbuffers/base.h" -#include "flatbuffers/buffer.h" -#include "flatbuffers/stl_emulation.h" - -namespace flatbuffers { - -struct String; - -// An STL compatible iterator implementation for Vector below, effectively -// calling Get() for every element. 
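ConvertCase at the end of the deleted util.h maps between the Case variants the enum lists, with input_case defaulting to kSnake, so typical call sites name only the target case. A short sketch (behavior inferred from the enum's own examples):

#include <cstdio>
#include "flatbuffers/util.h"

int main() {
  using flatbuffers::Case;
  auto a = flatbuffers::ConvertCase("the_quick_brown_fox", Case::kUpperCamel);      // TheQuickBrownFox
  auto b = flatbuffers::ConvertCase("the_quick_brown_fox", Case::kScreamingSnake);  // THE_QUICK_BROWN_FOX
  std::printf("%s %s\n", a.c_str(), b.c_str());
}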
-template struct VectorIterator { - typedef std::random_access_iterator_tag iterator_category; - typedef IT value_type; - typedef ptrdiff_t difference_type; - typedef IT *pointer; - typedef IT &reference; - - VectorIterator(const uint8_t *data, uoffset_t i) - : data_(data + IndirectHelper::element_stride * i) {} - VectorIterator(const VectorIterator &other) : data_(other.data_) {} - VectorIterator() : data_(nullptr) {} - - VectorIterator &operator=(const VectorIterator &other) { - data_ = other.data_; - return *this; - } - - VectorIterator &operator=(VectorIterator &&other) { - data_ = other.data_; - return *this; - } - - bool operator==(const VectorIterator &other) const { - return data_ == other.data_; - } - - bool operator<(const VectorIterator &other) const { - return data_ < other.data_; - } - - bool operator!=(const VectorIterator &other) const { - return data_ != other.data_; - } - - difference_type operator-(const VectorIterator &other) const { - return (data_ - other.data_) / IndirectHelper::element_stride; - } - - // Note: return type is incompatible with the standard - // `reference operator*()`. - IT operator*() const { return IndirectHelper::Read(data_, 0); } - - // Note: return type is incompatible with the standard - // `pointer operator->()`. - IT operator->() const { return IndirectHelper::Read(data_, 0); } - - VectorIterator &operator++() { - data_ += IndirectHelper::element_stride; - return *this; - } - - VectorIterator operator++(int) { - VectorIterator temp(data_, 0); - data_ += IndirectHelper::element_stride; - return temp; - } - - VectorIterator operator+(const uoffset_t &offset) const { - return VectorIterator(data_ + offset * IndirectHelper::element_stride, - 0); - } - - VectorIterator &operator+=(const uoffset_t &offset) { - data_ += offset * IndirectHelper::element_stride; - return *this; - } - - VectorIterator &operator--() { - data_ -= IndirectHelper::element_stride; - return *this; - } - - VectorIterator operator--(int) { - VectorIterator temp(data_, 0); - data_ -= IndirectHelper::element_stride; - return temp; - } - - VectorIterator operator-(const uoffset_t &offset) const { - return VectorIterator(data_ - offset * IndirectHelper::element_stride, - 0); - } - - VectorIterator &operator-=(const uoffset_t &offset) { - data_ -= offset * IndirectHelper::element_stride; - return *this; - } - - private: - const uint8_t *data_; -}; - -template -struct VectorReverseIterator : public std::reverse_iterator { - explicit VectorReverseIterator(Iterator iter) - : std::reverse_iterator(iter) {} - - // Note: return type is incompatible with the standard - // `reference operator*()`. - typename Iterator::value_type operator*() const { - auto tmp = std::reverse_iterator::current; - return *--tmp; - } - - // Note: return type is incompatible with the standard - // `pointer operator->()`. - typename Iterator::value_type operator->() const { - auto tmp = std::reverse_iterator::current; - return *--tmp; - } -}; - -// This is used as a helper type for accessing vectors. -// Vector::data() assumes the vector elements start after the length field. 
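Because VectorIterator above implements the full random-access interface, and the Vector class introduced next builds begin()/end() on top of it, generated vector fields work with range-for and the <algorithm> header out of the box. A sketch against an arbitrary byte-vector field:

#include <cstdint>
#include "flatbuffers/vector.h"

// Any flatbuffers::Vector<uint8_t> field works here; the null check
// covers the field-not-set case.
uint32_t SumBytes(const flatbuffers::Vector<uint8_t> *v) {
  uint32_t total = 0;
  if (v == nullptr) return total;
  for (uint8_t b : *v) total += b;  // VectorIterator drives the loop
  return total;
}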
-template class Vector { - public: - typedef VectorIterator::mutable_return_type> - iterator; - typedef VectorIterator::return_type> - const_iterator; - typedef VectorReverseIterator reverse_iterator; - typedef VectorReverseIterator const_reverse_iterator; - - typedef typename flatbuffers::bool_constant::value> - scalar_tag; - - static FLATBUFFERS_CONSTEXPR bool is_span_observable = - scalar_tag::value && (FLATBUFFERS_LITTLEENDIAN || sizeof(T) == 1); - - uoffset_t size() const { return EndianScalar(length_); } - - // Deprecated: use size(). Here for backwards compatibility. - FLATBUFFERS_ATTRIBUTE([[deprecated("use size() instead")]]) - uoffset_t Length() const { return size(); } - - typedef typename IndirectHelper::return_type return_type; - typedef typename IndirectHelper::mutable_return_type mutable_return_type; - typedef return_type value_type; - - return_type Get(uoffset_t i) const { - FLATBUFFERS_ASSERT(i < size()); - return IndirectHelper::Read(Data(), i); - } - - return_type operator[](uoffset_t i) const { return Get(i); } - - // If this is a Vector of enums, T will be its storage type, not the enum - // type. This function makes it convenient to retrieve value with enum - // type E. - template E GetEnum(uoffset_t i) const { - return static_cast(Get(i)); - } - - // If this a vector of unions, this does the cast for you. There's no check - // to make sure this is the right type! - template const U *GetAs(uoffset_t i) const { - return reinterpret_cast(Get(i)); - } - - // If this a vector of unions, this does the cast for you. There's no check - // to make sure this is actually a string! - const String *GetAsString(uoffset_t i) const { - return reinterpret_cast(Get(i)); - } - - const void *GetStructFromOffset(size_t o) const { - return reinterpret_cast(Data() + o); - } - - iterator begin() { return iterator(Data(), 0); } - const_iterator begin() const { return const_iterator(Data(), 0); } - - iterator end() { return iterator(Data(), size()); } - const_iterator end() const { return const_iterator(Data(), size()); } - - reverse_iterator rbegin() { return reverse_iterator(end()); } - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - - reverse_iterator rend() { return reverse_iterator(begin()); } - const_reverse_iterator rend() const { - return const_reverse_iterator(begin()); - } - - const_iterator cbegin() const { return begin(); } - - const_iterator cend() const { return end(); } - - const_reverse_iterator crbegin() const { return rbegin(); } - - const_reverse_iterator crend() const { return rend(); } - - // Change elements if you have a non-const pointer to this object. - // Scalars only. See reflection.h, and the documentation. - void Mutate(uoffset_t i, const T &val) { - FLATBUFFERS_ASSERT(i < size()); - WriteScalar(data() + i, val); - } - - // Change an element of a vector of tables (or strings). - // "val" points to the new table/string, as you can obtain from - // e.g. reflection::AddFlatBuffer(). - void MutateOffset(uoffset_t i, const uint8_t *val) { - FLATBUFFERS_ASSERT(i < size()); - static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types"); - WriteScalar(data() + i, - static_cast(val - (Data() + i * sizeof(uoffset_t)))); - } - - // Get a mutable pointer to tables/strings inside this vector. - mutable_return_type GetMutableObject(uoffset_t i) const { - FLATBUFFERS_ASSERT(i < size()); - return const_cast(IndirectHelper::Read(Data(), i)); - } - - // The raw data in little endian format. Use with care. 
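Mutate above is the sanctioned way to edit scalars in place (MutateOffset being the offset-slot analogue); neither can resize anything, since a FlatBuffer's layout is fixed at write time. A minimal sketch:

#include <cstdint>
#include "flatbuffers/vector.h"

// Zero one element in place; bounds-checked, scalars only.
void ZeroEntry(flatbuffers::Vector<uint8_t> *v, flatbuffers::uoffset_t i) {
  if (v != nullptr && i < v->size()) v->Mutate(i, 0);
}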
- const uint8_t *Data() const { - return reinterpret_cast(&length_ + 1); - } - - uint8_t *Data() { return reinterpret_cast(&length_ + 1); } - - // Similarly, but typed, much like std::vector::data - const T *data() const { return reinterpret_cast(Data()); } - T *data() { return reinterpret_cast(Data()); } - - template return_type LookupByKey(K key) const { - void *search_result = std::bsearch( - &key, Data(), size(), IndirectHelper::element_stride, KeyCompare); - - if (!search_result) { - return nullptr; // Key not found. - } - - const uint8_t *element = reinterpret_cast(search_result); - - return IndirectHelper::Read(element, 0); - } - - template mutable_return_type MutableLookupByKey(K key) { - return const_cast(LookupByKey(key)); - } - - protected: - // This class is only used to access pre-existing data. Don't ever - // try to construct these manually. - Vector(); - - uoffset_t length_; - - private: - // This class is a pointer. Copying will therefore create an invalid object. - // Private and unimplemented copy constructor. - Vector(const Vector &); - Vector &operator=(const Vector &); - - template static int KeyCompare(const void *ap, const void *bp) { - const K *key = reinterpret_cast(ap); - const uint8_t *data = reinterpret_cast(bp); - auto table = IndirectHelper::Read(data, 0); - - // std::bsearch compares with the operands transposed, so we negate the - // result here. - return -table->KeyCompareWithValue(*key); - } -}; - -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span(Vector &vec) - FLATBUFFERS_NOEXCEPT { - static_assert(Vector::is_span_observable, - "wrong type U, only LE-scalar, or byte types are allowed"); - return span(vec.data(), vec.size()); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span( - const Vector &vec) FLATBUFFERS_NOEXCEPT { - static_assert(Vector::is_span_observable, - "wrong type U, only LE-scalar, or byte types are allowed"); - return span(vec.data(), vec.size()); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_bytes_span( - Vector &vec) FLATBUFFERS_NOEXCEPT { - static_assert(Vector::scalar_tag::value, - "wrong type U, only LE-scalar, or byte types are allowed"); - return span(vec.Data(), vec.size() * sizeof(U)); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_bytes_span( - const Vector &vec) FLATBUFFERS_NOEXCEPT { - static_assert(Vector::scalar_tag::value, - "wrong type U, only LE-scalar, or byte types are allowed"); - return span(vec.Data(), vec.size() * sizeof(U)); -} - -// Convenient helper functions to get a span of any vector, regardless -// of whether it is null or not (the field is not set). -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span(Vector *ptr) - FLATBUFFERS_NOEXCEPT { - static_assert(Vector::is_span_observable, - "wrong type U, only LE-scalar, or byte types are allowed"); - return ptr ? make_span(*ptr) : span(); -} - -template -FLATBUFFERS_CONSTEXPR_CPP11 flatbuffers::span make_span( - const Vector *ptr) FLATBUFFERS_NOEXCEPT { - static_assert(Vector::is_span_observable, - "wrong type U, only LE-scalar, or byte types are allowed"); - return ptr ? make_span(*ptr) : span(); -} - -// Represent a vector much like the template above, but in this case we -// don't know what the element types are (used with reflection.h). 
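The pointer overloads of make_span above exist so call sites need not branch on unset fields: a null vector becomes an empty span. For example:

#include <cstddef>
#include <cstdint>
#include "flatbuffers/vector.h"

size_t PayloadBytes(const flatbuffers::Vector<uint8_t> *maybe_null) {
  auto s = flatbuffers::make_span(maybe_null);  // empty span when the field is absent
  return s.size();
}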
-class VectorOfAny { - public: - uoffset_t size() const { return EndianScalar(length_); } - - const uint8_t *Data() const { - return reinterpret_cast(&length_ + 1); - } - uint8_t *Data() { return reinterpret_cast(&length_ + 1); } - - protected: - VectorOfAny(); - - uoffset_t length_; - - private: - VectorOfAny(const VectorOfAny &); - VectorOfAny &operator=(const VectorOfAny &); -}; - -template -Vector> *VectorCast(Vector> *ptr) { - static_assert(std::is_base_of::value, "Unrelated types"); - return reinterpret_cast> *>(ptr); -} - -template -const Vector> *VectorCast(const Vector> *ptr) { - static_assert(std::is_base_of::value, "Unrelated types"); - return reinterpret_cast> *>(ptr); -} - -// Convenient helper function to get the length of any vector, regardless -// of whether it is null or not (the field is not set). -template static inline size_t VectorLength(const Vector *v) { - return v ? v->size() : 0; -} - -} // namespace flatbuffers - -#endif // FLATBUFFERS_VERIFIER_H_ diff --git a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/verifier.h b/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/verifier.h deleted file mode 100644 index 0241223e..00000000 --- a/code/components/tflite-lib/third_party/flatbuffers/include/flatbuffers/verifier.h +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Copyright 2021 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_VERIFIER_H_ -#define FLATBUFFERS_VERIFIER_H_ - -#include "flatbuffers/base.h" -#include "flatbuffers/vector.h" - -namespace flatbuffers { - -// Helper class to verify the integrity of a FlatBuffer -class Verifier FLATBUFFERS_FINAL_CLASS { - public: - Verifier(const uint8_t *const buf, const size_t buf_len, - const uoffset_t _max_depth = 64, - const uoffset_t _max_tables = 1000000, - const bool _check_alignment = true) - : buf_(buf), - size_(buf_len), - max_depth_(_max_depth), - max_tables_(_max_tables), - check_alignment_(_check_alignment), - upper_bound_(0), - depth_(0), - num_tables_(0), - flex_reuse_tracker_(nullptr) { - FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE); - } - - // Central location where any verification failures register. - bool Check(const bool ok) const { - // clang-format off - #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE - FLATBUFFERS_ASSERT(ok); - #endif - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - if (!ok) - upper_bound_ = 0; - #endif - // clang-format on - return ok; - } - - // Verify any range within the buffer. 
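The constructor shown above is the entire configuration surface of this Verifier: a depth budget, a table budget, and optional alignment checking, defaulting to 64, one million, and on. Tightening the limits for small embedded buffers is just extra arguments (values illustrative):

#include <cstddef>
#include <cstdint>
#include "flatbuffers/verifier.h"

flatbuffers::Verifier MakeStrictVerifier(const uint8_t *buf, size_t len) {
  // Depth 8 and 128 tables instead of the 64 / 1'000'000 defaults.
  return flatbuffers::Verifier(buf, len, 8, 128, true);
}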
- bool Verify(const size_t elem, const size_t elem_len) const { - // clang-format off - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - auto upper_bound = elem + elem_len; - if (upper_bound_ < upper_bound) - upper_bound_ = upper_bound; - #endif - // clang-format on - return Check(elem_len < size_ && elem <= size_ - elem_len); - } - - bool VerifyAlignment(const size_t elem, const size_t align) const { - return Check((elem & (align - 1)) == 0 || !check_alignment_); - } - - // Verify a range indicated by sizeof(T). - template bool Verify(const size_t elem) const { - return VerifyAlignment(elem, sizeof(T)) && Verify(elem, sizeof(T)); - } - - bool VerifyFromPointer(const uint8_t *const p, const size_t len) { - return Verify(static_cast(p - buf_), len); - } - - // Verify relative to a known-good base pointer. - bool VerifyFieldStruct(const uint8_t *const base, const voffset_t elem_off, - const size_t elem_len, const size_t align) const { - const auto f = static_cast(base - buf_) + elem_off; - return VerifyAlignment(f, align) && Verify(f, elem_len); - } - - template - bool VerifyField(const uint8_t *const base, const voffset_t elem_off, - const size_t align) const { - const auto f = static_cast(base - buf_) + elem_off; - return VerifyAlignment(f, align) && Verify(f, sizeof(T)); - } - - // Verify a pointer (may be NULL) of a table type. - template bool VerifyTable(const T *const table) { - return !table || table->Verify(*this); - } - - // Verify a pointer (may be NULL) of any vector type. - template bool VerifyVector(const Vector *const vec) const { - return !vec || VerifyVectorOrString(reinterpret_cast(vec), - sizeof(T)); - } - - // Verify a pointer (may be NULL) of a vector to struct. - template - bool VerifyVector(const Vector *const vec) const { - return VerifyVector(reinterpret_cast *>(vec)); - } - - // Verify a pointer (may be NULL) to string. - bool VerifyString(const String *const str) const { - size_t end; - return !str || (VerifyVectorOrString(reinterpret_cast(str), - 1, &end) && - Verify(end, 1) && // Must have terminator - Check(buf_[end] == '\0')); // Terminating byte must be 0. - } - - // Common code between vectors and strings. - bool VerifyVectorOrString(const uint8_t *const vec, const size_t elem_size, - size_t *const end = nullptr) const { - const auto veco = static_cast(vec - buf_); - // Check we can read the size field. - if (!Verify(veco)) return false; - // Check the whole array. If this is a string, the byte past the array - // must be 0. - const auto size = ReadScalar(vec); - const auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size; - if (!Check(size < max_elems)) - return false; // Protect against byte_size overflowing. - const auto byte_size = sizeof(size) + elem_size * size; - if (end) *end = veco + byte_size; - return Verify(veco, byte_size); - } - - // Special case for string contents, after the above has been called. - bool VerifyVectorOfStrings(const Vector> *const vec) const { - if (vec) { - for (uoffset_t i = 0; i < vec->size(); i++) { - if (!VerifyString(vec->Get(i))) return false; - } - } - return true; - } - - // Special case for table contents, after the above has been called. - template - bool VerifyVectorOfTables(const Vector> *const vec) { - if (vec) { - for (uoffset_t i = 0; i < vec->size(); i++) { - if (!vec->Get(i)->Verify(*this)) return false; - } - } - return true; - } - - __supress_ubsan__("unsigned-integer-overflow") bool VerifyTableStart( - const uint8_t *const table) { - // Check the vtable offset. 
- const auto tableo = static_cast(table - buf_); - if (!Verify(tableo)) return false; - // This offset may be signed, but doing the subtraction unsigned always - // gives the result we want. - const auto vtableo = - tableo - static_cast(ReadScalar(table)); - // Check the vtable size field, then check vtable fits in its entirety. - if (!(VerifyComplexity() && Verify(vtableo) && - VerifyAlignment(ReadScalar(buf_ + vtableo), - sizeof(voffset_t)))) - return false; - const auto vsize = ReadScalar(buf_ + vtableo); - return Check((vsize & 1) == 0) && Verify(vtableo, vsize); - } - - template - bool VerifyBufferFromStart(const char *const identifier, const size_t start) { - // Buffers have to be of some size to be valid. The reason it is a runtime - // check instead of static_assert, is that nested flatbuffers go through - // this call and their size is determined at runtime. - if (!Check(size_ >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false; - - // If an identifier is provided, check that we have a buffer - if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) && - BufferHasIdentifier(buf_ + start, identifier)))) { - return false; - } - - // Call T::Verify, which must be in the generated code for this type. - const auto o = VerifyOffset(start); - return Check(o != 0) && - reinterpret_cast(buf_ + start + o)->Verify(*this) - // clang-format off - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - && GetComputedSize() - #endif - ; - // clang-format on - } - - template - bool VerifyNestedFlatBuffer(const Vector *const buf, - const char *const identifier) { - // An empty buffer is OK as it indicates not present. - if (!buf) return true; - - // If there is a nested buffer, it must be greater than the min size. - if(!Check(buf->size() >= FLATBUFFERS_MIN_BUFFER_SIZE)) return false; - - Verifier nested_verifier(buf->data(), buf->size()); - return nested_verifier.VerifyBuffer(identifier); - } - - // Verify this whole buffer, starting with root type T. - template bool VerifyBuffer() { return VerifyBuffer(nullptr); } - - template bool VerifyBuffer(const char *const identifier) { - return VerifyBufferFromStart(identifier, 0); - } - - template - bool VerifySizePrefixedBuffer(const char *const identifier) { - return Verify(0U) && - Check(ReadScalar(buf_) == size_ - sizeof(uoffset_t)) && - VerifyBufferFromStart(identifier, sizeof(uoffset_t)); - } - - uoffset_t VerifyOffset(const size_t start) const { - if (!Verify(start)) return 0; - const auto o = ReadScalar(buf_ + start); - // May not point to itself. - if (!Check(o != 0)) return 0; - // Can't wrap around / buffers are max 2GB. - if (!Check(static_cast(o) >= 0)) return 0; - // Must be inside the buffer to create a pointer from it (pointer outside - // buffer is UB). - if (!Verify(start + o, 1)) return 0; - return o; - } - - uoffset_t VerifyOffset(const uint8_t *const base, - const voffset_t start) const { - return VerifyOffset(static_cast(base - buf_) + start); - } - - // Called at the start of a table to increase counters measuring data - // structure depth and amount, and possibly bails out with false if - // limits set by the constructor have been hit. Needs to be balanced - // with EndTable(). - bool VerifyComplexity() { - depth_++; - num_tables_++; - return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_); - } - - // Called at the end of a table to pop the depth count. 
- bool EndTable() { - depth_--; - return true; - } - - // Returns the message size in bytes - size_t GetComputedSize() const { - // clang-format off - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - uintptr_t size = upper_bound_; - // Align the size to uoffset_t - size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1); - return (size > size_) ? 0 : size; - #else - // Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work. - (void)upper_bound_; - FLATBUFFERS_ASSERT(false); - return 0; - #endif - // clang-format on - } - - std::vector *GetFlexReuseTracker() { return flex_reuse_tracker_; } - - void SetFlexReuseTracker(std::vector *const rt) { - flex_reuse_tracker_ = rt; - } - - private: - const uint8_t *buf_; - const size_t size_; - const uoffset_t max_depth_; - const uoffset_t max_tables_; - const bool check_alignment_; - - mutable size_t upper_bound_; - - uoffset_t depth_; - uoffset_t num_tables_; - std::vector *flex_reuse_tracker_; -}; - -} // namespace flatbuffers - -#endif // FLATBUFFERS_VERIFIER_H_
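Taken together, the deleted verifier amounts to two lines at every untrusted-input boundary: construct, then VerifyBuffer for the root type before dereferencing anything. A sketch with a hypothetical flatc-generated root type MyModel (the generated header name and type are illustrative):

#include <cstddef>
#include <cstdint>
#include "my_model_generated.h"  // hypothetical flatc output defining MyModel

bool LoadModelSafely(const uint8_t *buf, size_t len) {
  flatbuffers::Verifier verifier(buf, len);
  // Walks the root vtable and every reachable offset within [buf, buf + len).
  if (!verifier.VerifyBuffer<MyModel>(nullptr)) return false;
  auto *model = flatbuffers::GetRoot<MyModel>(buf);
  (void)model;  // safe to read from here on
  return true;
}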