Mirror of https://github.com/jomjol/AI-on-the-edge-device.git, synced 2025-12-07 12:06:58 +03:00
Compare commits
46 Commits
| SHA1 |
|---|
| 3e6713d16c |
| c292ecd54b |
| 7bfdfd3c38 |
| 3f154e3a53 |
| 06ba8372d0 |
| eae9b66eed |
| 4caca9b06a |
| de772d7ddd |
| 707472ba27 |
| 46265debc3 |
| 14d221bf9c |
| f4f871002b |
| bb92d4aa54 |
| acc7253ca1 |
| f1002b5f9d |
| 84cea8e3d6 |
| 05a0f6fa62 |
| 2ab2f070b4 |
| 103de2011b |
| 3cec93e2f1 |
| a23d7ee6e2 |
| 42afbcf655 |
| c61167bdfa |
| 642cefb84f |
| 1223aa7c70 |
| 0d90977917 |
| 7e57e85e75 |
| 8f518954aa |
| 26144815d2 |
| 21d07be7df |
| 04f69f0853 |
| 0678c81959 |
| 70a88088f2 |
| f8e8c756ab |
| 6e26fa6e3c |
| b54d6e785d |
| 5d2e22cd86 |
| ccd1d3f460 |
| 964486a819 |
| 9080f1d2f0 |
| aab8dfcde5 |
| 1633b74ab2 |
| bafd67be36 |
| 8f1d7d081d |
| 66bfcd1d45 |
| d89438a15f |
README.md (40 changed lines)
@@ -19,7 +19,7 @@ A 3d-printable housing can be found here: https://www.thingiverse.com/thing:4571

### Known Issues

* spontaneous reboot, especially in case of intensive web server access (improved since v2.1.0)
* Reboot on extensive web access due to the limits of the internal web server

------

@@ -27,21 +27,48 @@ A 3d-printable housing can be found here: https://www.thingiverse.com/thing:4571

##### Rolling - (2020-09-27)
##### Rolling - (2020-11-15)

* based on v2.2.0 (2020-09-27)
* based on v4.0.0 (2020-11-15)

##### 4.0.0 Tflite Core - (2020-11-15)

* Implementation of rolling log-files

##### 2.2.0 Version Controll (2020-09-27)

* Update Tflite-Core to master@20201108 (v2.4)
* Bug-fixing for reducing reboots

##### 3.1.0 MQTT-Client - (2020-10-26)

* Update digital CNN to v6.5.0 and HTML (Info to hostname, IP, ssid)
* New implementation of "checkDigitConsistency" also for digits
* MQTT-Adapter: user and password for sign in MQTT-Broker

##### 3.0.0 MQTT-Client (2020-10-14)

* Implementation of MQTT Client
* Improved Version Control
* bug-fixing

##### 2.2.1 Version Control (2020-09-27)

* Bug-Fixing (hostname in wlan.ini and error handling inside flow)

##### 2.2.0 Version Control (2020-09-27)

* Integrated automated versioning system (menu: SYSTEM --> INFO)
* Update Build-System to PlatformIO - Espressif 32 v2.0.0 (ESP-IDF 4.1)

##### 2.1.0 Decimal Shift, Chrome & Edge (2020-09-25)

* Implementation of Decimal Shift

@@ -58,7 +85,6 @@ A 3d-printable housing can be found here: https://www.thingiverse.com/thing:4571

* Bug fixing, code corrections

##### 2.0.0 Layout update (2020-09-12)
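The 3.x and 4.0.0 entries above introduce the MQTT client and broker credentials. As a rough orientation, the `[MQTT]` paragraph read by `ClassFlowMQTT::ReadParameter` further down in this compare accepts the keys `Uri`, `Topic`, `ClientID`, `user` and `password`; a minimal sketch of such a paragraph is shown below, assuming a `config.ini`-style flow configuration. The broker address and all values are placeholders and are not taken from this repository.

```
[MQTT]
; illustrative placeholder values - adjust to your broker
Uri = mqtt://192.168.1.2:1883
Topic = watermeter/value
ClientID = watermeter
user = mqttuser
password = mqttpassword
```

If `Uri` and `Topic` are both set, the parser calls `MQTTInit()` once, and `ClassFlowMQTT::doFlow` later publishes the post-processed readout to the configured topic.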
BIN code/.DS_Store (vendored, new file)
Binary file not shown.
@@ -1,39 +0,0 @@

This directory is intended for project header files.

A header file is a file containing C declarations and macro definitions
to be shared between several project source files. You request the use of a
header file in your project source file (C, C++, etc) located in `src` folder
by including it, with the C preprocessing directive `#include'.

```src/main.c

#include "header.h"

int main (void)
{
  ...
}
```

Including a header file produces the same results as copying the header file
into each source file that needs it. Such copying would be time-consuming
and error-prone. With a header file, the related declarations appear
in only one place. If they need to be changed, they can be changed in one
place, and programs that include the header file will automatically use the
new version when next recompiled. The header file eliminates the labor of
finding and changing all the copies as well as the risk that a failure to
find one copy will result in inconsistencies within a program.

In C, the usual convention is to give header files names that end with `.h'.
It is most portable to use only letters, digits, dashes, and underscores in
header file names, and at most one dot.

Read more about using header files in official GCC documentation:

* Include Syntax
* Include Operation
* Once-Only Headers
* Computed Includes

https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html
@@ -1,22 +0,0 @@
#pragma once
#include <string>
#include <fstream>

using namespace std;

std::string FormatFileName(std::string input);
void FindReplace(std::string& line, std::string& oldString, std::string& newString);

void CopyFile(string input, string output);

size_t findDelimiterPos(string input, string delimiter);
//string trim(string istring);
string trim(string istring, string adddelimiter = "");
bool ctype_space(const char c, string adddelimiter);

string getFileType(string filename);

string toUpper(string in);
@@ -1,157 +0,0 @@
|
||||
//#pragma warning(disable : 4996)
|
||||
|
||||
#include "Helper.h"
|
||||
|
||||
//#define ISWINDOWS_TRUE
|
||||
|
||||
using namespace std;
|
||||
|
||||
std::string FormatFileName(std::string input)
|
||||
{
|
||||
#ifdef ISWINDOWS_TRUE
|
||||
input.erase(0, 1);
|
||||
std::string os = "/";
|
||||
std::string ns = "\\";
|
||||
FindReplace(input, os, ns);
|
||||
#endif
|
||||
return input;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
void FindReplace(std::string& line, std::string& oldString, std::string& newString) {
|
||||
const size_t oldSize = oldString.length();
|
||||
|
||||
// do nothing if line is shorter than the string to find
|
||||
if (oldSize > line.length()) return;
|
||||
|
||||
const size_t newSize = newString.length();
|
||||
for (size_t pos = 0; ; pos += newSize) {
|
||||
// Locate the substring to replace
|
||||
pos = line.find(oldString, pos);
|
||||
if (pos == std::string::npos) return;
|
||||
if (oldSize == newSize) {
|
||||
// if they're same size, use std::string::replace
|
||||
line.replace(pos, oldSize, newString);
|
||||
}
|
||||
else {
|
||||
// if not same size, replace by erasing and inserting
|
||||
line.erase(pos, oldSize);
|
||||
line.insert(pos, newString);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
bool ctype_space(const char c, string adddelimiter)
|
||||
{
|
||||
if (c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == 11)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
if (adddelimiter.find(c) != string::npos)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
string trim(string istring, string adddelimiter)
|
||||
{
|
||||
bool trimmed = false;
|
||||
|
||||
if (ctype_space(istring[istring.length() - 1], adddelimiter))
|
||||
{
|
||||
istring.erase(istring.length() - 1);
|
||||
trimmed = true;
|
||||
}
|
||||
|
||||
if (ctype_space(istring[0], adddelimiter))
|
||||
{
|
||||
istring.erase(0, 1);
|
||||
trimmed = true;
|
||||
}
|
||||
|
||||
if ((trimmed == false) || (istring.size() == 0))
|
||||
{
|
||||
return istring;
|
||||
}
|
||||
else
|
||||
{
|
||||
return trim(istring, adddelimiter);
|
||||
}
|
||||
}
|
||||
|
||||
size_t findDelimiterPos(string input, string delimiter)
|
||||
{
|
||||
size_t pos = std::string::npos;
|
||||
size_t zw;
|
||||
string akt_del;
|
||||
|
||||
for (int anz = 0; anz < delimiter.length(); ++anz)
|
||||
{
|
||||
akt_del = delimiter[anz];
|
||||
if ((zw = input.find(akt_del)) != std::string::npos)
|
||||
{
|
||||
if (pos != std::string::npos)
|
||||
{
|
||||
if (zw < pos)
|
||||
pos = zw;
|
||||
}
|
||||
else
|
||||
pos = zw;
|
||||
}
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
|
||||
void CopyFile(string input, string output)
|
||||
{
|
||||
input = FormatFileName(input);
|
||||
output = FormatFileName(output);
|
||||
|
||||
char cTemp;
|
||||
FILE* fpSourceFile = fopen(input.c_str(), "rb");
|
||||
FILE* fpTargetFile = fopen(output.c_str(), "wb");
|
||||
|
||||
// Code Section
|
||||
|
||||
// Read From The Source File - "Copy"
|
||||
while (fread(&cTemp, 1, 1, fpSourceFile) == 1)
|
||||
{
|
||||
// Write To The Target File - "Paste"
|
||||
fwrite(&cTemp, 1, 1, fpTargetFile);
|
||||
}
|
||||
|
||||
// Close The Files
|
||||
fclose(fpSourceFile);
|
||||
fclose(fpTargetFile);
|
||||
}
|
||||
|
||||
|
||||
string getFileType(string filename)
|
||||
{
|
||||
int lastpos = filename.find(".", 0);
|
||||
int neu_pos;
|
||||
while ((neu_pos = filename.find(".", lastpos + 1)) > -1)
|
||||
{
|
||||
lastpos = neu_pos;
|
||||
}
|
||||
|
||||
string zw = filename.substr(lastpos + 1, filename.size() - lastpos);
|
||||
|
||||
return zw;
|
||||
}
|
||||
|
||||
|
||||
string toUpper(string in)
|
||||
{
|
||||
for (int i = 0; i < in.length(); ++i)
|
||||
in[i] = toupper(in[i]);
|
||||
|
||||
return in;
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ static const char *MAIN_TAG = "connect_wlan";
|
||||
std::string ssid;
|
||||
std::string passphrase;
|
||||
std::string hostname;
|
||||
std::string ipaddress;
|
||||
|
||||
std::string std_hostname = "watermeter";
|
||||
|
||||
@@ -123,6 +124,7 @@ void initialise_wifi(std::string _ssid, std::string _passphrase, std::string _ho
|
||||
xEventGroupWaitBits(wifi_event_group,CONNECTED_BIT,true,true,portMAX_DELAY);
|
||||
tcpip_adapter_ip_info_t ip_info;
|
||||
ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info));
|
||||
ipaddress = std::string(ip4addr_ntoa(&ip_info.ip));
|
||||
printf("IPv4 : %s\n", ip4addr_ntoa(&ip_info.ip));
|
||||
printf("HostName : %s\n", hostname.c_str());
|
||||
}
|
||||
@@ -143,7 +145,6 @@ void LoadWlanFromFile(std::string fn, std::string &_ssid, std::string &_passphra
|
||||
|
||||
char zw[1024];
|
||||
fgets(zw, 1024, pFile);
|
||||
// printf("%s", zw);
|
||||
line = std::string(zw);
|
||||
|
||||
while ((line.size() > 0) || !(feof(pFile)))
|
||||
@@ -151,21 +152,16 @@ void LoadWlanFromFile(std::string fn, std::string &_ssid, std::string &_passphra
|
||||
// printf("%s", line.c_str());
|
||||
zerlegt = ZerlegeZeile(line, "=");
|
||||
zerlegt[0] = trim(zerlegt[0], " ");
|
||||
zerlegt[1] = trim(zerlegt[1], " ");
|
||||
|
||||
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
|
||||
_hostname = zerlegt[1];
|
||||
_hostname = trim(zerlegt[1]);
|
||||
if ((_hostname[0] == '"') && (_hostname[_hostname.length()-1] == '"')){
|
||||
_hostname = _hostname.substr(1, _hostname.length()-2);
|
||||
}
|
||||
// Check if Hostname was empty in .ini if yes set to std_hostname
|
||||
if(_hostname.length() <= 0){
|
||||
_hostname = std_hostname;
|
||||
}
|
||||
}
|
||||
|
||||
if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){
|
||||
_ssid = zerlegt[1];
|
||||
_ssid = trim(zerlegt[1]);
|
||||
if ((_ssid[0] == '"') && (_ssid[_ssid.length()-1] == '"')){
|
||||
_ssid = _ssid.substr(1, _ssid.length()-2);
|
||||
}
|
||||
@@ -189,6 +185,22 @@ void LoadWlanFromFile(std::string fn, std::string &_ssid, std::string &_passphra
|
||||
}
|
||||
|
||||
fclose(pFile);
|
||||
|
||||
// Check if Hostname was empty in .ini if yes set to std_hostname
|
||||
if(_hostname.length() <= 0){
|
||||
_hostname = std_hostname;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
std::string getHostname(){
|
||||
return hostname;
|
||||
}
|
||||
|
||||
std::string getIPAddress(){
|
||||
return ipaddress;
|
||||
}
|
||||
|
||||
std::string getSSID(){
|
||||
return ssid;
|
||||
}
|
||||
|
||||
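`LoadWlanFromFile` above reads `key = value` pairs from wlan.ini, strips optional surrounding quotes, and falls back to the default hostname `watermeter` when the hostname entry is empty or missing. A minimal sketch of such a file follows; only the `SSID` and `HOSTNAME` keys and the quoting are visible in the hunks above, so the password key name and all values are placeholders.

```
; illustrative placeholder values
ssid = "my-wlan"
password = "my-wlan-passphrase"
hostname = "watermeter"
```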
@@ -10,4 +10,8 @@ void initialise_wifi(std::string _ssid, std::string _passphrase, std::string _ho
|
||||
|
||||
void LoadWlanFromFile(std::string fn, std::string &_ssid, std::string &_passphrase, std::string &_hostname);
|
||||
|
||||
std::string getHostname();
|
||||
std::string getIPAddress();
|
||||
std::string getSSID();
|
||||
|
||||
#endif
|
||||
@@ -10,6 +10,8 @@
|
||||
|
||||
#include "camera_define.h"
|
||||
|
||||
#include "driver/ledc.h"
|
||||
|
||||
CCamera Camera;
|
||||
|
||||
|
||||
@@ -20,6 +22,42 @@ typedef struct {
|
||||
size_t len;
|
||||
} jpg_chunking_t;
|
||||
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
#define LEDC_LS_CH2_GPIO (4)
|
||||
#define LEDC_LS_CH2_CHANNEL LEDC_CHANNEL_2
|
||||
#define LEDC_LS_TIMER LEDC_TIMER_1
|
||||
#define LEDC_LS_MODE LEDC_LOW_SPEED_MODE
|
||||
#define LEDC_TEST_DUTY (4000)
|
||||
|
||||
void test(){
|
||||
ledc_channel_config_t ledc_channel = { };
|
||||
|
||||
ledc_channel.channel = LEDC_LS_CH2_CHANNEL;
|
||||
ledc_channel.duty = 0;
|
||||
ledc_channel.gpio_num = FLASH_GPIO;
|
||||
ledc_channel.speed_mode = LEDC_LS_MODE;
|
||||
ledc_channel.hpoint = 0;
|
||||
ledc_channel.timer_sel = LEDC_LS_TIMER;
|
||||
|
||||
ledc_channel_config(&ledc_channel);
|
||||
|
||||
ledc_set_duty(ledc_channel.speed_mode, ledc_channel.channel, LEDC_TEST_DUTY);
|
||||
ledc_update_duty(ledc_channel.speed_mode, ledc_channel.channel);
|
||||
vTaskDelay(1000 / portTICK_PERIOD_MS);
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
static size_t jpg_encode_stream(void * arg, size_t index, const void* data, size_t len){
|
||||
jpg_chunking_t *j = (jpg_chunking_t *)arg;
|
||||
if(!index){
|
||||
|
||||
@@ -84,7 +84,7 @@ static esp_err_t favicon_get_handler(httpd_req_t *req)
|
||||
* a list of all files and folders under the requested path.
|
||||
* In case of SPIFFS this returns empty list when path is any
|
||||
* string other than '/', since SPIFFS doesn't support directories */
|
||||
static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath)
|
||||
static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const char* uripath, bool readonly)
|
||||
{
|
||||
char entrypath[FILE_PATH_MAX];
|
||||
char entrysize[16];
|
||||
@@ -120,24 +120,24 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath)
|
||||
httpd_resp_sendstr_chunk(req, "<!DOCTYPE html><html><body>");
|
||||
|
||||
/////////////////////////////////////////////////
|
||||
|
||||
FILE *fd = fopen("/sdcard/html/upload_script.html", "r");
|
||||
char *chunk = ((struct file_server_data *)req->user_ctx)->scratch;
|
||||
size_t chunksize;
|
||||
do {
|
||||
chunksize = fread(chunk, 1, SCRATCH_BUFSIZE, fd);
|
||||
// printf("Chunksize %d\n", chunksize);
|
||||
if (chunksize > 0){
|
||||
if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
|
||||
fclose(fd);
|
||||
ESP_LOGE(TAG, "File sending failed!");
|
||||
return ESP_FAIL;
|
||||
if (!readonly) {
|
||||
FILE *fd = fopen("/sdcard/html/upload_script.html", "r");
|
||||
char *chunk = ((struct file_server_data *)req->user_ctx)->scratch;
|
||||
size_t chunksize;
|
||||
do {
|
||||
chunksize = fread(chunk, 1, SCRATCH_BUFSIZE, fd);
|
||||
// printf("Chunksize %d\n", chunksize);
|
||||
if (chunksize > 0){
|
||||
if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
|
||||
fclose(fd);
|
||||
ESP_LOGE(TAG, "File sending failed!");
|
||||
return ESP_FAIL;
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (chunksize != 0);
|
||||
fclose(fd);
|
||||
// ESP_LOGI(TAG, "File sending complete");
|
||||
|
||||
} while (chunksize != 0);
|
||||
fclose(fd);
|
||||
// ESP_LOGI(TAG, "File sending complete");
|
||||
}
|
||||
///////////////////////////////
|
||||
|
||||
std::string _zw = std::string(dirpath);
|
||||
@@ -149,12 +149,16 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath)
|
||||
httpd_resp_sendstr_chunk(req,
|
||||
"<table class=\"fixed\" border=\"1\">"
|
||||
"<col width=\"800px\" /><col width=\"300px\" /><col width=\"300px\" /><col width=\"100px\" />"
|
||||
"<thead><tr><th>Name</th><th>Type</th><th>Size (Bytes)</th><th>Delete<br>"
|
||||
"<form method=\"post\" action=\"");
|
||||
httpd_resp_sendstr_chunk(req, _zw.c_str());
|
||||
httpd_resp_sendstr_chunk(req,
|
||||
"\"><button type=\"submit\">DELETE ALL!</button></form>"
|
||||
"</th></tr></thead><tbody>\n");
|
||||
"<thead><tr><th>Name</th><th>Type</th><th>Size (Bytes)</th>");
|
||||
if (!readonly) {
|
||||
httpd_resp_sendstr_chunk(req, "<th>Delete<br>"
|
||||
"<form method=\"post\" action=\"");
|
||||
httpd_resp_sendstr_chunk(req, _zw.c_str());
|
||||
httpd_resp_sendstr_chunk(req,
|
||||
"\"><button type=\"submit\">DELETE ALL!</button></form>"
|
||||
"</th></tr>");
|
||||
}
|
||||
httpd_resp_sendstr_chunk(req, "</thead><tbody>\n");
|
||||
|
||||
/* Iterate over all files / folders and fetch their names and sizes */
|
||||
while ((entry = readdir(dir)) != NULL) {
|
||||
@@ -173,7 +177,8 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath)
|
||||
|
||||
/* Send chunk of HTML file containing table entries with file name and size */
|
||||
httpd_resp_sendstr_chunk(req, "<tr><td><a href=\"");
|
||||
httpd_resp_sendstr_chunk(req, req->uri);
|
||||
httpd_resp_sendstr_chunk(req, "/fileserver");
|
||||
httpd_resp_sendstr_chunk(req, uripath);
|
||||
httpd_resp_sendstr_chunk(req, entry->d_name);
|
||||
if (entry->d_type == DT_DIR) {
|
||||
httpd_resp_sendstr_chunk(req, "/");
|
||||
@@ -184,11 +189,13 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath)
|
||||
httpd_resp_sendstr_chunk(req, entrytype);
|
||||
httpd_resp_sendstr_chunk(req, "</td><td>");
|
||||
httpd_resp_sendstr_chunk(req, entrysize);
|
||||
httpd_resp_sendstr_chunk(req, "</td><td>");
|
||||
httpd_resp_sendstr_chunk(req, "<form method=\"post\" action=\"/delete");
|
||||
httpd_resp_sendstr_chunk(req, req->uri + strlen("/fileserver"));
|
||||
httpd_resp_sendstr_chunk(req, entry->d_name);
|
||||
httpd_resp_sendstr_chunk(req, "\"><button type=\"submit\">Delete</button></form>");
|
||||
if (!readonly) {
|
||||
httpd_resp_sendstr_chunk(req, "</td><td>");
|
||||
httpd_resp_sendstr_chunk(req, "<form method=\"post\" action=\"/delete");
|
||||
httpd_resp_sendstr_chunk(req, uripath);
|
||||
httpd_resp_sendstr_chunk(req, entry->d_name);
|
||||
httpd_resp_sendstr_chunk(req, "\"><button type=\"submit\">Delete</button></form>");
|
||||
}
|
||||
httpd_resp_sendstr_chunk(req, "</td></tr>\n");
|
||||
}
|
||||
}
|
||||
@@ -226,6 +233,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
|
||||
// filename = get_path_from_uri(filepath, ((struct file_server_data *)req->user_ctx)->base_path,
|
||||
// req->uri, sizeof(filepath));
|
||||
|
||||
|
||||
if (!filename) {
|
||||
ESP_LOGE(TAG, "Filename is too long");
|
||||
/* Respond with 500 Internal Server Error */
|
||||
@@ -235,7 +243,22 @@ static esp_err_t download_get_handler(httpd_req_t *req)
|
||||
|
||||
/* If name has trailing '/', respond with directory contents */
|
||||
if (filename[strlen(filename) - 1] == '/') {
|
||||
return http_resp_dir_html(req, filepath);
|
||||
bool readonly = false;
|
||||
size_t buf_len = httpd_req_get_url_query_len(req) + 1;
|
||||
if (buf_len > 1) {
|
||||
char buf[buf_len];
|
||||
if (httpd_req_get_url_query_str(req, buf, buf_len) == ESP_OK) {
|
||||
ESP_LOGI(TAG, "Found URL query => %s", buf);
|
||||
char param[32];
|
||||
/* Get value of expected key from query string */
|
||||
if (httpd_query_key_value(buf, "readonly", param, sizeof(param)) == ESP_OK) {
|
||||
ESP_LOGI(TAG, "Found URL query parameter => readonly=%s", param);
|
||||
readonly = param && strcmp(param,"true")==0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return http_resp_dir_html(req, filepath, filename, readonly);
|
||||
}
|
||||
|
||||
std::string testwlan = toUpper(std::string(filename));
|
||||
|
||||
@@ -404,6 +404,9 @@ void doReboot(){
|
||||
LogFile.WriteToFile("Reboot - now");
|
||||
KillTFliteTasks();
|
||||
xTaskCreate(&task_reboot, "reboot", configMINIMAL_STACK_SIZE * 64, NULL, 10, NULL);
|
||||
vTaskDelay(5000 / portTICK_PERIOD_MS);
|
||||
esp_restart();
|
||||
hard_restart();
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -9,6 +9,10 @@
|
||||
|
||||
using namespace std;
|
||||
|
||||
#define LOGFILE_TIME_FORMAT "%Y%m%d-%H%M%S"
|
||||
#define LOGFILE_TIME_FORMAT_DATE_EXTR substr(0, 8)
|
||||
#define LOGFILE_TIME_FORMAT_HOUR_EXTR substr(9, 2)
|
||||
|
||||
struct HTMLInfo
|
||||
{
|
||||
float val;
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
#include <math.h>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <sys/types.h>
|
||||
|
||||
// #define OHNETFLITE
|
||||
|
||||
@@ -12,23 +12,23 @@
|
||||
|
||||
#include "ClassLogFile.h"
|
||||
|
||||
ClassFlowAnalog::ClassFlowAnalog()
|
||||
static const char* TAG = "flow_analog";
|
||||
|
||||
bool debugdetailanalog = false;
|
||||
|
||||
ClassFlowAnalog::ClassFlowAnalog() : ClassFlowImage(TAG)
|
||||
{
|
||||
isLogImage = false;
|
||||
string cnnmodelfile = "";
|
||||
modelxsize = 1;
|
||||
modelysize = 1;
|
||||
ListFlowControll = NULL;
|
||||
}
|
||||
|
||||
ClassFlowAnalog::ClassFlowAnalog(std::vector<ClassFlow*>* lfc)
|
||||
ClassFlowAnalog::ClassFlowAnalog(std::vector<ClassFlow*>* lfc) : ClassFlowImage(lfc, TAG)
|
||||
{
|
||||
isLogImage = false;
|
||||
string cnnmodelfile = "";
|
||||
modelxsize = 1;
|
||||
modelysize = 1;
|
||||
ListFlowControll = NULL;
|
||||
ListFlowControll = lfc;
|
||||
}
|
||||
|
||||
|
||||
@@ -88,8 +88,12 @@ bool ClassFlowAnalog::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
zerlegt = this->ZerlegeZeile(aktparamgraph);
|
||||
if ((zerlegt[0] == "LogImageLocation") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->LogImageLocation = "/sdcard" + zerlegt[1];
|
||||
this->isLogImage = true;
|
||||
this->LogImageLocation = zerlegt[1];
|
||||
}
|
||||
if ((toUpper(zerlegt[0]) == "LOGFILERETENTIONINDAYS") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->logfileRetentionInDays = std::stoi(zerlegt[1]);
|
||||
}
|
||||
if ((zerlegt[0] == "Model") && (zerlegt.size() > 1))
|
||||
{
|
||||
@@ -108,6 +112,7 @@ bool ClassFlowAnalog::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
neuroi->posy = std::stoi(zerlegt[2]);
|
||||
neuroi->deltax = std::stoi(zerlegt[3]);
|
||||
neuroi->deltay = std::stoi(zerlegt[4]);
|
||||
neuroi->result = -1;
|
||||
ROI.push_back(neuroi);
|
||||
}
|
||||
}
|
||||
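For orientation, the `LogImageLocation` and the new `LogfileRetentionInDays` keys parsed above belong to the `[Analog]` paragraph of the flow configuration (the configured path is combined with `/sdcard` in code). A sketch with placeholder values follows; only the key names are taken from the parser above, and the model path is purely illustrative.

```
[Analog]
; illustrative placeholder values
LogImageLocation = /log/analog
LogfileRetentionInDays = 3
Model = /config/analog-model.tflite
```

With a retention value set, `RemoveOldLogs()` (added to `doFlow` in the next hunk) prunes image-log folders older than the configured number of days.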
@@ -146,8 +151,12 @@ bool ClassFlowAnalog::doFlow(string time)
|
||||
return false;
|
||||
};
|
||||
|
||||
if (debugdetailanalog) LogFile.WriteToFile("ClassFlowAnalog::doFlow nach Alignment");
|
||||
|
||||
doNeuralNetwork(time);
|
||||
|
||||
RemoveOldLogs();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -166,7 +175,7 @@ bool ClassFlowAnalog::doAlignAndCut(string time)
|
||||
CAlignAndCutImage *caic = new CAlignAndCutImage(input);
|
||||
|
||||
if (!caic->ImageOkay()){
|
||||
LogFile.WriteToFile("ClassFlowAnalog::doAlignAndCut not okay!");
|
||||
if (debugdetailanalog) LogFile.WriteToFile("ClassFlowAnalog::doAlignAndCut not okay!");
|
||||
delete caic;
|
||||
return false;
|
||||
}
|
||||
@@ -174,7 +183,8 @@ bool ClassFlowAnalog::doAlignAndCut(string time)
|
||||
if (input_roi.length() > 0){
|
||||
img_roi = new CImageBasis(input_roi);
|
||||
if (!img_roi->ImageOkay()){
|
||||
LogFile.WriteToFile("ClassFlowAnalog::doAlignAndCut ImageRoi not okay!");
|
||||
if (debugdetailanalog) LogFile.WriteToFile("ClassFlowAnalog::doAlignAndCut ImageRoi not okay!");
|
||||
delete caic;
|
||||
delete img_roi;
|
||||
return false;
|
||||
}
|
||||
@@ -188,6 +198,13 @@ bool ClassFlowAnalog::doAlignAndCut(string time)
|
||||
caic->CutAndSave(output, ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay);
|
||||
|
||||
rs = new CResizeImage(output);
|
||||
if (!rs->ImageOkay()){
|
||||
if (debugdetailanalog) LogFile.WriteToFile("ClassFlowAnalog::doAlignAndCut CResizeImage(output);!");
|
||||
delete caic;
|
||||
delete rs;
|
||||
return false;
|
||||
}
|
||||
|
||||
rs->Resize(modelxsize, modelysize);
|
||||
ioresize = "/sdcard/img_tmp/ra" + std::to_string(i) + ".bmp";
|
||||
ioresize = FormatFileName(ioresize);
|
||||
@@ -220,10 +237,11 @@ bool ClassFlowAnalog::doAlignAndCut(string time)
|
||||
|
||||
bool ClassFlowAnalog::doNeuralNetwork(string time)
|
||||
{
|
||||
string logPath = CreateLogFolder(time);
|
||||
|
||||
string input = "/sdcard/img_tmp/alg.jpg";
|
||||
string ioresize = "/sdcard/img_tmp/resize.bmp";
|
||||
string output;
|
||||
string nm;
|
||||
input = FormatFileName(input);
|
||||
|
||||
#ifndef OHNETFLITE
|
||||
@@ -246,8 +264,11 @@ bool ClassFlowAnalog::doNeuralNetwork(string time)
|
||||
f1 = 0; f2 = 0;
|
||||
|
||||
#ifndef OHNETFLITE
|
||||
// LogFile.WriteToFile("ClassFlowAnalog::doNeuralNetwork vor CNN tflite->LoadInputImage(ioresize)");
|
||||
tflite->LoadInputImage(ioresize);
|
||||
tflite->Invoke();
|
||||
if (debugdetailanalog) LogFile.WriteToFile("Nach Invoke");
|
||||
|
||||
|
||||
f1 = tflite->GetOutputValue(0);
|
||||
f2 = tflite->GetOutputValue(1);
|
||||
@@ -259,19 +280,7 @@ bool ClassFlowAnalog::doNeuralNetwork(string time)
|
||||
|
||||
printf("Result Analog%i: %f\n", i, ROI[i]->result);
|
||||
|
||||
if (isLogImage)
|
||||
{
|
||||
std::stringstream stream;
|
||||
stream << std::fixed << std::setprecision(1) << ROI[i]->result;
|
||||
std::string s = stream.str();
|
||||
// std::snprintf(&s[0], s.size(), "%.2f", pi);
|
||||
nm = "/sdcard" + LogImageLocation + "/" + s + "_" + ROI[i]->name + "_" + time + ".jpg";
|
||||
nm = FormatFileName(nm);
|
||||
output = "/sdcard/img_tmp/" + ROI[i]->name + ".jpg";
|
||||
output = FormatFileName(output);
|
||||
printf("Analog - save to file: %s\n", nm.c_str());
|
||||
CopyFile(output, nm);
|
||||
}
|
||||
LogImage(logPath, ROI[i]->name, &ROI[i]->result, NULL, time);
|
||||
}
|
||||
#ifndef OHNETFLITE
|
||||
delete tflite;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#pragma once
|
||||
#include "ClassFlow.h"
|
||||
#include "ClassFlowImage.h"
|
||||
// #include "CTfLiteClass.h"
|
||||
|
||||
struct roianalog {
|
||||
@@ -10,11 +10,9 @@ struct roianalog {
|
||||
|
||||
|
||||
class ClassFlowAnalog :
|
||||
public ClassFlow
|
||||
public ClassFlowImage
|
||||
{
|
||||
protected:
|
||||
string LogImageLocation;
|
||||
bool isLogImage;
|
||||
std::vector<roianalog*> ROI;
|
||||
string cnnmodelfile;
|
||||
int modelxsize, modelysize;
|
||||
|
||||
@@ -1,294 +0,0 @@
|
||||
#include "ClassFlowControll.h"
|
||||
|
||||
#include "ClassLogFile.h"
|
||||
#include "time_sntp.h"
|
||||
#include "Helper.h"
|
||||
#include "server_ota.h"
|
||||
|
||||
std::string ClassFlowControll::doSingleStep(std::string _stepname, std::string _host){
|
||||
bool found = false;
|
||||
std::string _classname = "";
|
||||
std::string result = "";
|
||||
if (_stepname.compare("[MakeImage]") == 0){
|
||||
_classname = "ClassFlowMakeImage";
|
||||
}
|
||||
if (_stepname.compare("[Alignment]") == 0){
|
||||
_classname = "ClassFlowAlignment";
|
||||
}
|
||||
if (_stepname.compare("[Digits]") == 0){
|
||||
_classname = "ClassFlowDigit";
|
||||
}
|
||||
if (_stepname.compare("[Analog]") == 0){
|
||||
_classname = "ClassFlowAnalog";
|
||||
}
|
||||
// std::string zw = "Classname: " + _classname + "\n";
|
||||
// printf(zw.c_str());
|
||||
|
||||
for (int i = 0; i < FlowControll.size(); ++i)
|
||||
if (FlowControll[i]->name().compare(_classname) == 0){
|
||||
// printf(FlowControll[i]->name().c_str()); printf("\n");
|
||||
FlowControll[i]->doFlow("");
|
||||
result = FlowControll[i]->getHTMLSingleStep(_host);
|
||||
found = true;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::vector<HTMLInfo*> ClassFlowControll::GetAllDigital()
|
||||
{
|
||||
for (int i = 0; i < FlowControll.size(); ++i)
|
||||
if (FlowControll[i]->name().compare("ClassFlowDigit") == 0)
|
||||
return ((ClassFlowDigit*) (FlowControll[i]))->GetHTMLInfo();
|
||||
|
||||
std::vector<HTMLInfo*> empty;
|
||||
return empty;
|
||||
}
|
||||
|
||||
std::vector<HTMLInfo*> ClassFlowControll::GetAllAnalog()
|
||||
{
|
||||
for (int i = 0; i < FlowControll.size(); ++i)
|
||||
if (FlowControll[i]->name().compare("ClassFlowAnalog") == 0)
|
||||
return ((ClassFlowAnalog*) (FlowControll[i]))->GetHTMLInfo();
|
||||
|
||||
std::vector<HTMLInfo*> empty;
|
||||
return empty;
|
||||
}
|
||||
|
||||
|
||||
void ClassFlowControll::SetInitialParameter(void)
|
||||
{
|
||||
AutoStart = false;
|
||||
AutoIntervall = 10;
|
||||
}
|
||||
|
||||
bool ClassFlowControll::isAutoStart(long &_intervall)
|
||||
{
|
||||
_intervall = AutoIntervall * 60 * 1000; // AutoIntervall: Minuten -> ms
|
||||
return AutoStart;
|
||||
}
|
||||
|
||||
ClassFlow* ClassFlowControll::CreateClassFlow(std::string _type)
|
||||
{
|
||||
ClassFlow* cfc = NULL;
|
||||
|
||||
_type = trim(_type);
|
||||
|
||||
if (_type.compare("[MakeImage]") == 0)
|
||||
cfc = new ClassFlowMakeImage(&FlowControll);
|
||||
if (_type.compare("[Alignment]") == 0)
|
||||
cfc = new ClassFlowAlignment(&FlowControll);
|
||||
if (_type.compare("[Analog]") == 0)
|
||||
cfc = new ClassFlowAnalog(&FlowControll);
|
||||
if (_type.compare("[Digits]") == 0)
|
||||
cfc = new ClassFlowDigit(&FlowControll);
|
||||
if (_type.compare("[PostProcessing]") == 0)
|
||||
{
|
||||
cfc = new ClassFlowPostProcessing(&FlowControll);
|
||||
flowpostprocessing = (ClassFlowPostProcessing*) cfc;
|
||||
}
|
||||
|
||||
if (cfc) // Wird nur angehangen, falls es nicht [AutoTimer] ist, denn dieses ist für FlowControll
|
||||
FlowControll.push_back(cfc);
|
||||
|
||||
if (_type.compare("[AutoTimer]") == 0)
|
||||
cfc = this;
|
||||
|
||||
if (_type.compare("[Debug]") == 0)
|
||||
cfc = this;
|
||||
|
||||
return cfc;
|
||||
}
|
||||
|
||||
void ClassFlowControll::InitFlow(std::string config)
|
||||
{
|
||||
string line;
|
||||
|
||||
flowpostprocessing = NULL;
|
||||
|
||||
ClassFlow* cfc;
|
||||
FILE* pFile;
|
||||
config = FormatFileName(config);
|
||||
pFile = fopen(config.c_str(), "r");
|
||||
|
||||
line = "";
|
||||
|
||||
char zw[1024];
|
||||
if (pFile != NULL)
|
||||
{
|
||||
fgets(zw, 1024, pFile);
|
||||
printf("%s", zw);
|
||||
line = std::string(zw);
|
||||
}
|
||||
|
||||
while ((line.size() > 0) && !(feof(pFile)))
|
||||
{
|
||||
cfc = CreateClassFlow(line);
|
||||
if (cfc)
|
||||
{
|
||||
cfc->ReadParameter(pFile, line);
|
||||
}
|
||||
else
|
||||
{
|
||||
fgets(zw, 1024, pFile);
|
||||
printf("%s", zw);
|
||||
line = std::string(zw);
|
||||
}
|
||||
}
|
||||
|
||||
fclose(pFile);
|
||||
|
||||
}
|
||||
|
||||
std::string ClassFlowControll::getActStatus(){
|
||||
return aktstatus;
|
||||
}
|
||||
|
||||
bool ClassFlowControll::doFlow(string time)
|
||||
{
|
||||
bool result = true;
|
||||
std::string zw_time;
|
||||
int repeat = 0;
|
||||
|
||||
for (int i = 0; i < FlowControll.size(); ++i)
|
||||
{
|
||||
zw_time = gettimestring("%Y%m%d-%H%M%S");
|
||||
aktstatus = zw_time + ": " + FlowControll[i]->name();
|
||||
string zw = "FlowControll.doFlow - " + FlowControll[i]->name();
|
||||
LogFile.WriteToFile(zw);
|
||||
if (!FlowControll[i]->doFlow(time)){
|
||||
repeat++;
|
||||
LogFile.WriteToFile("Fehler im vorheriger Schritt - wird zum " + to_string(repeat) + ". Mal wiederholt");
|
||||
i = -1; // Soll wieder bei i = 0 anfangen ==> komplett von vorne !!!
|
||||
result = false;
|
||||
if (repeat > 5) {
|
||||
LogFile.WriteToFile("Wiederholung 5x nicht erfolgreich --> reboot");
|
||||
doReboot();
|
||||
// Schritt wurde 5x wiederholt --> reboot
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
zw_time = gettimestring("%Y%m%d-%H%M%S");
|
||||
aktstatus = zw_time + ": Flow is done";
|
||||
return result;
|
||||
}
|
||||
|
||||
string ClassFlowControll::getReadout(bool _rawvalue = false, bool _noerror = false)
|
||||
{
|
||||
if (flowpostprocessing)
|
||||
return flowpostprocessing->getReadoutParam(_rawvalue, _noerror);
|
||||
|
||||
string zw = "";
|
||||
string result = "";
|
||||
|
||||
for (int i = 0; i < FlowControll.size(); ++i)
|
||||
{
|
||||
zw = FlowControll[i]->getReadout();
|
||||
if (zw.length() > 0)
|
||||
{
|
||||
if (result.length() == 0)
|
||||
result = zw;
|
||||
else
|
||||
result = result + "\t" + zw;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
string ClassFlowControll::GetPrevalue()
|
||||
{
|
||||
if (flowpostprocessing)
|
||||
{
|
||||
return flowpostprocessing->GetPreValue();
|
||||
}
|
||||
|
||||
return std::string();
|
||||
}
|
||||
|
||||
std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue)
|
||||
{
|
||||
float zw;
|
||||
char* p;
|
||||
|
||||
_newvalue = trim(_newvalue);
|
||||
// printf("Input UpdatePreValue: %s\n", _newvalue.c_str());
|
||||
|
||||
if (_newvalue.compare("0.0") == 0)
|
||||
{
|
||||
zw = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
zw = strtof(_newvalue.c_str(), &p);
|
||||
if (zw == 0)
|
||||
return "- Error in String to Value Conversion!!! Must be of format value=123.456";
|
||||
}
|
||||
|
||||
|
||||
if (flowpostprocessing)
|
||||
{
|
||||
flowpostprocessing->SavePreValue(zw);
|
||||
return to_string(zw);
|
||||
}
|
||||
|
||||
return std::string();
|
||||
}
|
||||
|
||||
bool ClassFlowControll::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
{
|
||||
std::vector<string> zerlegt;
|
||||
|
||||
aktparamgraph = trim(aktparamgraph);
|
||||
|
||||
if (aktparamgraph.size() == 0)
|
||||
if (!this->GetNextParagraph(pfile, aktparamgraph)){
|
||||
return false;
|
||||
}
|
||||
|
||||
// if ((aktparamgraph.compare("[Autotimer]") != 0) && (aktparamgraph.compare("[Debug]") != 0)) // Paragraph passt nich zu MakeImage
|
||||
if (aktparamgraph.compare("[Autotimer]") != 0) // Paragraph passt nich zu MakeImage
|
||||
return false;
|
||||
|
||||
// if ((toUpper(aktparamgraph) != "[AUTOTIMER]") && (toUpper(aktparamgraph) != ("[DEBUG]"))) // Paragraph passt nich zu MakeImage
|
||||
// return false;
|
||||
|
||||
while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
|
||||
{
|
||||
zerlegt = this->ZerlegeZeile(aktparamgraph);
|
||||
if ((toUpper(zerlegt[0]) == "AUTOSTART") && (zerlegt.size() > 1))
|
||||
{
|
||||
if (toUpper(zerlegt[1]) == "TRUE")
|
||||
{
|
||||
AutoStart = true;
|
||||
}
|
||||
}
|
||||
|
||||
if ((toUpper(zerlegt[0]) == "INTERVALL") && (zerlegt.size() > 1))
|
||||
{
|
||||
AutoIntervall = std::stof(zerlegt[1]);
|
||||
}
|
||||
|
||||
/*
|
||||
if ((toUpper(zerlegt[0]) == "LOGFILE") && (zerlegt.size() > 1))
|
||||
{
|
||||
if (toUpper(zerlegt[1]) == "TRUE")
|
||||
{
|
||||
LogFile.SwitchOnOff(true);
|
||||
printf("TurnLogFile On\n");
|
||||
}
|
||||
if (toUpper(zerlegt[1]) == "FALSE")
|
||||
{
|
||||
LogFile.SwitchOnOff(false);
|
||||
printf("TurnLogFile Off\n");
|
||||
}
|
||||
}
|
||||
*/
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
#include "ClassFlowControll.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <dirent.h>
|
||||
#include "ClassLogFile.h"
|
||||
#include "time_sntp.h"
|
||||
#include "Helper.h"
|
||||
#include "server_ota.h"
|
||||
|
||||
static const char* TAG = "flow_controll";
|
||||
|
||||
std::string ClassFlowControll::doSingleStep(std::string _stepname, std::string _host){
|
||||
std::string _classname = "";
|
||||
std::string result = "";
|
||||
@@ -20,6 +24,9 @@ std::string ClassFlowControll::doSingleStep(std::string _stepname, std::string _
|
||||
if (_stepname.compare("[Analog]") == 0){
|
||||
_classname = "ClassFlowAnalog";
|
||||
}
|
||||
if (_stepname.compare("[MQTT]") == 0){
|
||||
_classname = "ClassFlowMQTT";
|
||||
}
|
||||
// std::string zw = "Classname: " + _classname + "\n";
|
||||
// printf(zw.c_str());
|
||||
|
||||
@@ -80,6 +87,8 @@ ClassFlow* ClassFlowControll::CreateClassFlow(std::string _type)
|
||||
cfc = new ClassFlowAnalog(&FlowControll);
|
||||
if (toUpper(_type).compare("[DIGITS]") == 0)
|
||||
cfc = new ClassFlowDigit(&FlowControll);
|
||||
if (toUpper(_type).compare("[MQTT]") == 0)
|
||||
cfc = new ClassFlowMQTT(&FlowControll);
|
||||
if (toUpper(_type).compare("[POSTPROCESSING]") == 0)
|
||||
{
|
||||
cfc = new ClassFlowPostProcessing(&FlowControll);
|
||||
@@ -144,6 +153,8 @@ std::string ClassFlowControll::getActStatus(){
|
||||
|
||||
bool ClassFlowControll::doFlow(string time)
|
||||
{
|
||||
// CleanTempFolder(); // dazu muss man noch eine Rolling einführen
|
||||
|
||||
bool result = true;
|
||||
std::string zw_time;
|
||||
int repeat = 0;
|
||||
@@ -157,7 +168,7 @@ bool ClassFlowControll::doFlow(string time)
|
||||
if (!FlowControll[i]->doFlow(time)){
|
||||
repeat++;
|
||||
LogFile.WriteToFile("Fehler im vorheriger Schritt - wird zum " + to_string(repeat) + ". Mal wiederholt");
|
||||
i = i-2; // vorheriger Schritt muss wiederholt werden (vermutlich Bilder aufnehmen)
|
||||
i = -1; // vorheriger Schritt muss wiederholt werden (vermutlich Bilder aufnehmen)
|
||||
result = false;
|
||||
if (repeat > 5) {
|
||||
LogFile.WriteToFile("Wiederholung 5x nicht erfolgreich --> reboot");
|
||||
@@ -275,8 +286,41 @@ bool ClassFlowControll::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
{
|
||||
LogFile.SwitchOnOff(false);
|
||||
}
|
||||
}
|
||||
if ((toUpper(zerlegt[0]) == "LOGFILERETENTIONINDAYS") && (zerlegt.size() > 1))
|
||||
{
|
||||
LogFile.SetRetention(std::stoi(zerlegt[1]));
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int ClassFlowControll::CleanTempFolder() {
|
||||
const char* folderPath = "/sdcard/img_tmp";
|
||||
|
||||
ESP_LOGI(TAG, "Clean up temporary folder to avoid damage of sdcard sectors : %s", folderPath);
|
||||
DIR *dir = opendir(folderPath);
|
||||
if (!dir) {
|
||||
ESP_LOGE(TAG, "Failed to stat dir : %s", folderPath);
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct dirent *entry;
|
||||
int deleted = 0;
|
||||
while ((entry = readdir(dir)) != NULL) {
|
||||
std::string path = string(folderPath) + "/" + entry->d_name;
|
||||
if (entry->d_type == DT_REG) {
|
||||
if (unlink(path.c_str()) == 0) {
|
||||
deleted ++;
|
||||
} else {
|
||||
ESP_LOGE(TAG, "can't delete file : %s", path.c_str());
|
||||
}
|
||||
} else if (entry->d_type == DT_DIR) {
|
||||
deleted += removeFolder(path.c_str(), TAG);
|
||||
}
|
||||
}
|
||||
closedir(dir);
|
||||
ESP_LOGI(TAG, "%d files deleted", deleted);
|
||||
|
||||
return 0;
|
||||
}
|
||||
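To put the new retention handling in context: `ClassFlowControll::ReadParameter` reads the `[Autotimer]` paragraph, whose keys visible in this compare are `AutoStart`, `Intervall` and now `LogfileRetentionInDays` (passed to `LogFile.SetRetention`). A sketch with placeholder values follows; the interval of 10 minutes mirrors the default `AutoIntervall`, and the retention value is illustrative.

```
[Autotimer]
; illustrative placeholder values
AutoStart = true
Intervall = 10
LogfileRetentionInDays = 3
```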
@@ -8,6 +8,7 @@
|
||||
#include "ClassFlowDigit.h"
|
||||
#include "ClassFlowAnalog.h"
|
||||
#include "ClassFlowPostProcessing.h"
|
||||
#include "ClassFlowMQTT.h"
|
||||
|
||||
|
||||
class ClassFlowControll :
|
||||
@@ -41,6 +42,8 @@ public:
|
||||
std::vector<HTMLInfo*> GetAllDigital();
|
||||
std::vector<HTMLInfo*> GetAllAnalog();
|
||||
|
||||
int CleanTempFolder();
|
||||
|
||||
string name(){return "ClassFlowControll";};
|
||||
};
|
||||
|
||||
|
||||
@@ -13,23 +13,21 @@
|
||||
|
||||
#include "ClassLogFile.h"
|
||||
|
||||
ClassFlowDigit::ClassFlowDigit()
|
||||
static const char* TAG = "flow_digital";
|
||||
|
||||
ClassFlowDigit::ClassFlowDigit() : ClassFlowImage(TAG)
|
||||
{
|
||||
isLogImage = false;
|
||||
string cnnmodelfile = "";
|
||||
modelxsize = 1;
|
||||
modelysize = 1;
|
||||
ListFlowControll = NULL;
|
||||
}
|
||||
|
||||
ClassFlowDigit::ClassFlowDigit(std::vector<ClassFlow*>* lfc)
|
||||
ClassFlowDigit::ClassFlowDigit(std::vector<ClassFlow*>* lfc) : ClassFlowImage(lfc, TAG)
|
||||
{
|
||||
isLogImage = false;
|
||||
string cnnmodelfile = "";
|
||||
modelxsize = 1;
|
||||
modelysize = 1;
|
||||
ListFlowControll = NULL;
|
||||
ListFlowControll = lfc;
|
||||
}
|
||||
|
||||
string ClassFlowDigit::getReadout()
|
||||
@@ -66,8 +64,8 @@ bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
zerlegt = this->ZerlegeZeile(aktparamgraph);
|
||||
if ((zerlegt[0] == "LogImageLocation") && (zerlegt.size() > 1))
|
||||
{
|
||||
isLogImage = true;
|
||||
LogImageLocation = zerlegt[1];
|
||||
LogImageLocation = "/sdcard" + zerlegt[1];
|
||||
isLogImage = true;
|
||||
}
|
||||
if ((zerlegt[0] == "Model") && (zerlegt.size() > 1))
|
||||
{
|
||||
@@ -86,6 +84,7 @@ bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
neuroi->posy = std::stoi(zerlegt[2]);
|
||||
neuroi->deltax = std::stoi(zerlegt[3]);
|
||||
neuroi->deltay = std::stoi(zerlegt[4]);
|
||||
neuroi->resultklasse = -1;
|
||||
ROI.push_back(neuroi);
|
||||
}
|
||||
}
|
||||
@@ -127,6 +126,8 @@ bool ClassFlowDigit::doFlow(string time)
|
||||
|
||||
doNeuralNetwork(time);
|
||||
|
||||
RemoveOldLogs();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -152,7 +153,8 @@ bool ClassFlowDigit::doAlignAndCut(string time)
|
||||
if (input_roi.length() > 0){
|
||||
img_roi = new CImageBasis(input_roi);
|
||||
if (!img_roi->ImageOkay()){
|
||||
LogFile.WriteToFile("ClassFlowAnalog::doAlignAndCut ImageRoi not okay!");
|
||||
LogFile.WriteToFile("ClassFlowDigit::doAlignAndCut ImageRoi not okay!");
|
||||
delete caic;
|
||||
delete img_roi;
|
||||
return false;
|
||||
}
|
||||
@@ -192,6 +194,8 @@ bool ClassFlowDigit::doAlignAndCut(string time)
|
||||
|
||||
bool ClassFlowDigit::doNeuralNetwork(string time)
|
||||
{
|
||||
string logPath = CreateLogFolder(time);
|
||||
|
||||
string input = "/sdcard/img_tmp/alg.jpg";
|
||||
string ioresize = "/sdcard/img_tmp/resize.bmp";
|
||||
string output;
|
||||
@@ -219,16 +223,9 @@ bool ClassFlowDigit::doNeuralNetwork(string time)
|
||||
#ifndef OHNETFLITE
|
||||
ROI[i]->resultklasse = tflite->GetClassFromImage(ioresize);
|
||||
#endif
|
||||
printf("Result Digit%i: %d\n", i, ROI[i]->resultklasse);
|
||||
printf("Result Digit%i: %d\n", i, ROI[i]->resultklasse);
|
||||
|
||||
if (isLogImage)
|
||||
{
|
||||
nm = "/sdcard" + LogImageLocation + "/" + std::to_string(ROI[i]->resultklasse) + "/" + time + "_" + ROI[i]->name + ".jpg";
|
||||
output = "/sdcard/img_tmp/" + ROI[i]->name + ".jpg";
|
||||
output = FormatFileName(output);
|
||||
nm = FormatFileName(nm);
|
||||
CopyFile(output, nm);
|
||||
}
|
||||
LogImage(logPath, ROI[i]->name, NULL, &ROI[i]->resultklasse, time);
|
||||
}
|
||||
#ifndef OHNETFLITE
|
||||
delete tflite;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#pragma once
|
||||
#include "ClassFlow.h"
|
||||
#include "ClassFlowImage.h"
|
||||
#include "Helper.h"
|
||||
|
||||
#include <string>
|
||||
@@ -12,11 +12,9 @@ struct roi {
|
||||
};
|
||||
|
||||
class ClassFlowDigit :
|
||||
public ClassFlow
|
||||
public ClassFlowImage
|
||||
{
|
||||
protected:
|
||||
string LogImageLocation;
|
||||
bool isLogImage;
|
||||
std::vector<roi*> ROI;
|
||||
string cnnmodelfile;
|
||||
int modelxsize, modelysize;
|
||||
|
||||
code/lib/jomjol_flowcontroll/ClassFlowImage.cpp (new file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
#include "ClassFlowImage.h"
|
||||
#include <string>
|
||||
#include <string.h>
|
||||
#include <sys/stat.h>
|
||||
#include <dirent.h>
|
||||
#include "time_sntp.h"
|
||||
#include "ClassLogFile.h"
|
||||
|
||||
ClassFlowImage::ClassFlowImage(const char* logTag)
|
||||
{
|
||||
this->logTag = logTag;
|
||||
isLogImage = false;
|
||||
}
|
||||
|
||||
ClassFlowImage::ClassFlowImage(std::vector<ClassFlow*> * lfc, const char* logTag) : ClassFlow((std::vector<ClassFlow*>*)lfc)
|
||||
{
|
||||
this->logTag = logTag;
|
||||
isLogImage = false;
|
||||
}
|
||||
|
||||
string ClassFlowImage::CreateLogFolder(string time) {
|
||||
if (!isLogImage)
|
||||
return "";
|
||||
|
||||
string logPath = LogImageLocation + "/" + time.LOGFILE_TIME_FORMAT_DATE_EXTR + "/" + time.LOGFILE_TIME_FORMAT_HOUR_EXTR;
|
||||
isLogImage = mkdir_r(logPath.c_str(), S_IRWXU) == 0;
|
||||
if (!isLogImage) {
|
||||
ESP_LOGW(logTag, "Can't create log folder for analog images. Path %s", logPath.c_str());
LogFile.WriteToFile("Can't create log folder for analog images. Path " + logPath);
|
||||
}
|
||||
|
||||
return logPath;
|
||||
}
|
||||
|
||||
void ClassFlowImage::LogImage(string logPath, string name, float *resultFloat, int *resultInt, string time) {
|
||||
if (!isLogImage)
|
||||
return;
|
||||
|
||||
char buf[10];
|
||||
if (resultFloat != NULL) {
|
||||
sprintf(buf, "%.1f_", *resultFloat);
|
||||
} else if (resultInt != NULL) {
|
||||
sprintf(buf, "%d_", *resultInt);
|
||||
} else {
|
||||
buf[0] = '\0';
|
||||
}
|
||||
|
||||
string nm = logPath + "/" + buf + name + "_" + time + ".jpg";
|
||||
nm = FormatFileName(nm);
|
||||
string output = "/sdcard/img_tmp/" + name + ".jpg";
|
||||
output = FormatFileName(output);
|
||||
printf("save to file: %s\n", nm.c_str());
|
||||
CopyFile(output, nm);
|
||||
}
|
||||
|
||||
void ClassFlowImage::RemoveOldLogs()
|
||||
{
|
||||
if (!isLogImage)
|
||||
return;
|
||||
|
||||
ESP_LOGI(logTag, "remove old log images");
|
||||
if (logfileRetentionInDays == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
time_t rawtime;
|
||||
struct tm* timeinfo;
|
||||
char cmpfilename[30];
|
||||
|
||||
time(&rawtime);
|
||||
rawtime = addDays(rawtime, -logfileRetentionInDays);
|
||||
timeinfo = localtime(&rawtime);
|
||||
|
||||
strftime(cmpfilename, 30, LOGFILE_TIME_FORMAT, timeinfo);
|
||||
//ESP_LOGE(TAG, "log file name to compare: %s", cmpfilename);
|
||||
string folderName = string(cmpfilename).LOGFILE_TIME_FORMAT_DATE_EXTR;
|
||||
|
||||
DIR *dir = opendir(LogImageLocation.c_str());
|
||||
if (!dir) {
|
||||
ESP_LOGI(logTag, "Failed to stat dir : %s", LogImageLocation.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
struct dirent *entry;
|
||||
int deleted = 0;
|
||||
int notDeleted = 0;
|
||||
while ((entry = readdir(dir)) != NULL) {
|
||||
string folderPath = LogImageLocation + "/" + entry->d_name;
|
||||
if (entry->d_type == DT_DIR) {
|
||||
//ESP_LOGI(logTag, "Compare %s %s", entry->d_name, folderName.c_str());
|
||||
if ((strlen(entry->d_name) == folderName.length()) && (strcmp(entry->d_name, folderName.c_str()) < 0)) {
|
||||
deleted += removeFolder(folderPath.c_str(), logTag);
|
||||
} else {
|
||||
notDeleted ++;
|
||||
}
|
||||
}
|
||||
}
|
||||
ESP_LOGI(logTag, "%d older log files deleted. %d current log files not deleted.", deleted, notDeleted);
|
||||
closedir(dir);
|
||||
}
|
||||
|
||||
code/lib/jomjol_flowcontroll/ClassFlowImage.h (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
#pragma once
|
||||
#include "ClassFlow.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
class ClassFlowImage : public ClassFlow
|
||||
{
|
||||
protected:
|
||||
string LogImageLocation;
|
||||
bool isLogImage;
|
||||
unsigned short logfileRetentionInDays;
|
||||
const char* logTag;
|
||||
|
||||
string CreateLogFolder(string time);
|
||||
void LogImage(string logPath, string name, float *resultFloat, int *resultInt, string time);
|
||||
|
||||
public:
|
||||
ClassFlowImage(const char* logTag);
|
||||
ClassFlowImage(std::vector<ClassFlow*> * lfc, const char* logTag);
|
||||
|
||||
void RemoveOldLogs();
|
||||
};
|
||||
code/lib/jomjol_flowcontroll/ClassFlowMQTT.cpp (new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
#include "ClassFlowMQTT.h"
|
||||
#include "Helper.h"
|
||||
|
||||
#include "interface_mqtt.h"
|
||||
#include "ClassFlowPostProcessing.h"
|
||||
|
||||
#include <time.h>
|
||||
|
||||
ClassFlowMQTT::ClassFlowMQTT()
|
||||
{
|
||||
uri = "";
|
||||
topic = "";
|
||||
clientname = "watermeter";
|
||||
OldValue = "";
|
||||
flowpostprocessing = NULL;
|
||||
user = "";
|
||||
password = "";
|
||||
}
|
||||
|
||||
ClassFlowMQTT::ClassFlowMQTT(std::vector<ClassFlow*>* lfc)
|
||||
{
|
||||
uri = "";
|
||||
topic = "";
|
||||
clientname = "watermeter";
|
||||
OldValue = "";
|
||||
flowpostprocessing = NULL;
|
||||
user = "";
|
||||
password = "";
|
||||
|
||||
ListFlowControll = lfc;
|
||||
|
||||
for (int i = 0; i < ListFlowControll->size(); ++i)
|
||||
{
|
||||
if (((*ListFlowControll)[i])->name().compare("ClassFlowPostProcessing") == 0)
|
||||
{
|
||||
flowpostprocessing = (ClassFlowPostProcessing*) (*ListFlowControll)[i];
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
bool ClassFlowMQTT::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
{
|
||||
std::vector<string> zerlegt;
|
||||
|
||||
aktparamgraph = trim(aktparamgraph);
|
||||
|
||||
if (aktparamgraph.size() == 0)
|
||||
if (!this->GetNextParagraph(pfile, aktparamgraph))
|
||||
return false;
|
||||
|
||||
if (toUpper(aktparamgraph).compare("[MQTT]") != 0) // Paragraph passt nich zu MakeImage
|
||||
return false;
|
||||
|
||||
while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
|
||||
{
|
||||
zerlegt = this->ZerlegeZeile(aktparamgraph);
|
||||
if ((toUpper(zerlegt[0]) == "USER") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->user = zerlegt[1];
|
||||
}
|
||||
if ((toUpper(zerlegt[0]) == "PASSWORD") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->password = zerlegt[1];
|
||||
}
|
||||
if ((toUpper(zerlegt[0]) == "URI") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->uri = zerlegt[1];
|
||||
}
|
||||
if ((toUpper(zerlegt[0]) == "TOPIC") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->topic = zerlegt[1];
|
||||
}
|
||||
if ((toUpper(zerlegt[0]) == "CLIENTID") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->clientname = zerlegt[1];
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if ((uri.length() > 0) && (topic.length() > 0))
|
||||
{
|
||||
MQTTInit(uri, clientname, user, password);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool ClassFlowMQTT::doFlow(string zwtime)
|
||||
{
|
||||
std::string result;
|
||||
string zw = "";
|
||||
|
||||
if (flowpostprocessing)
|
||||
{
|
||||
result = flowpostprocessing->getReadoutParam(false, true);
|
||||
}
|
||||
else
|
||||
{
|
||||
for (int i = 0; i < ListFlowControll->size(); ++i)
|
||||
{
|
||||
zw = (*ListFlowControll)[i]->getReadout();
|
||||
if (zw.length() > 0)
|
||||
{
|
||||
if (result.length() == 0)
|
||||
result = zw;
|
||||
else
|
||||
result = result + "\t" + zw;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MQTTPublish(topic, result);
|
||||
|
||||
OldValue = result;
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
code/lib/jomjol_flowcontroll/ClassFlowMQTT.h (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
#pragma once
|
||||
#include "ClassFlow.h"
|
||||
|
||||
#include "ClassFlowPostProcessing.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
class ClassFlowMQTT :
|
||||
public ClassFlow
|
||||
{
|
||||
protected:
|
||||
std::string uri, topic, clientname;
|
||||
std::string OldValue;
|
||||
ClassFlowPostProcessing* flowpostprocessing;
|
||||
std::string user, password;
|
||||
|
||||
|
||||
public:
|
||||
ClassFlowMQTT();
|
||||
ClassFlowMQTT(std::vector<ClassFlow*>* lfc);
|
||||
bool ReadParameter(FILE* pfile, string& aktparamgraph);
|
||||
bool doFlow(string time);
|
||||
string name(){return "ClassFlowMQTT";};
|
||||
};
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
#include "ClassFlowMakeImage.h"
|
||||
#include "Helper.h"
|
||||
|
||||
|
||||
#include "CFindTemplate.h"
|
||||
#include "ClassControllCamera.h"
|
||||
|
||||
#include <time.h>
|
||||
|
||||
static const char* TAG = "flow_make_image";
|
||||
|
||||
esp_err_t ClassFlowMakeImage::camera_capture(){
|
||||
string nm = namerawimage;
|
||||
@@ -24,11 +24,8 @@ void ClassFlowMakeImage::takePictureWithFlash(int flashdauer)
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
ClassFlowMakeImage::ClassFlowMakeImage()
|
||||
ClassFlowMakeImage::ClassFlowMakeImage() : ClassFlowImage(TAG)
|
||||
{
|
||||
isLogImage = false;
|
||||
waitbeforepicture = 5;
|
||||
isImageSize = false;
|
||||
ImageQuality = -1;
|
||||
@@ -36,16 +33,13 @@ ClassFlowMakeImage::ClassFlowMakeImage()
|
||||
namerawimage = "/sdcard/img_tmp/raw.jpg";
|
||||
}
|
||||
|
||||
ClassFlowMakeImage::ClassFlowMakeImage(std::vector<ClassFlow*>* lfc)
|
||||
ClassFlowMakeImage::ClassFlowMakeImage(std::vector<ClassFlow*>* lfc) : ClassFlowImage(lfc, TAG)
|
||||
{
|
||||
isLogImage = false;
|
||||
waitbeforepicture = 5;
|
||||
isImageSize = false;
|
||||
ImageQuality = -1;
|
||||
TimeImageTaken = 0;
|
||||
namerawimage = "/sdcard/img_tmp/raw.jpg";
|
||||
|
||||
ListFlowControll = lfc;
|
||||
}
|
||||
|
||||
bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
@@ -66,8 +60,8 @@ bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
zerlegt = this->ZerlegeZeile(aktparamgraph);
|
||||
if ((zerlegt[0] == "LogImageLocation") && (zerlegt.size() > 1))
|
||||
{
|
||||
this->isLogImage = true;
|
||||
this->LogImageLocation = zerlegt[1];
|
||||
LogImageLocation = "/sdcard" + zerlegt[1];
|
||||
isLogImage = true;
|
||||
}
|
||||
if ((zerlegt[0] == "ImageQuality") && (zerlegt.size() > 1))
|
||||
this->ImageQuality = std::stod(zerlegt[1]);
|
||||
@@ -81,45 +75,6 @@ bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void ClassFlowMakeImage::CopyFile(string input, string output)
|
||||
{
|
||||
input = FormatFileName(input);
|
||||
output = FormatFileName(output);
|
||||
input = namerawimage;
|
||||
|
||||
|
||||
printf("Copy Input : %s\n", input.c_str());
|
||||
printf("Copy Output: %s\n", output.c_str());
|
||||
|
||||
char cTemp;
|
||||
FILE* fpSourceFile = fopen(input.c_str(), "rb");
|
||||
FILE* fpTargetFile = fopen(output.c_str(), "wb");
|
||||
|
||||
if (fpSourceFile == NULL)
|
||||
{
|
||||
printf("fpSourceFile == NULL\n");
|
||||
perror("Error");
|
||||
}
|
||||
|
||||
if (fpTargetFile == NULL)
|
||||
{
|
||||
printf("fpTargetFile == NULL\n");
|
||||
perror("Error");
|
||||
}
|
||||
|
||||
|
||||
while (fread(&cTemp, 1, 1, fpSourceFile) == 1)
|
||||
{
|
||||
fwrite(&cTemp, 1, 1, fpTargetFile);
|
||||
}
|
||||
|
||||
// Close The Files
|
||||
fclose(fpSourceFile);
|
||||
fclose(fpTargetFile);
|
||||
printf("Copy done\n");
|
||||
}
|
||||
|
||||
string ClassFlowMakeImage::getHTMLSingleStep(string host)
|
||||
{
|
||||
string result;
|
||||
@@ -133,6 +88,8 @@ bool ClassFlowMakeImage::doFlow(string zwtime)
|
||||
// TakeImage and Store into /image_tmp/raw.jpg TO BE DONE
|
||||
////////////////////////////////////////////////////////////////////
|
||||
|
||||
string logPath = CreateLogFolder(zwtime);
|
||||
|
||||
int flashdauer = (int) waitbeforepicture * 1000;
|
||||
|
||||
|
||||
@@ -140,16 +97,9 @@ bool ClassFlowMakeImage::doFlow(string zwtime)
|
||||
time(&TimeImageTaken);
|
||||
localtime(&TimeImageTaken);
|
||||
|
||||
LogImage(logPath, "raw", NULL, NULL, zwtime);
|
||||
|
||||
if (this->isLogImage)
|
||||
{
|
||||
string nm = "/sdcard" + this->LogImageLocation + "/" + zwtime + ".jpg";
|
||||
string input = "/sdcard/image_tmp/raw.jgp";
|
||||
printf("loginput from: %s to: %s\n", input.c_str(), nm.c_str());
|
||||
nm = FormatFileName(nm);
|
||||
input = FormatFileName(input);
|
||||
CopyFile(input, nm);
|
||||
}
|
||||
RemoveOldLogs();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
#pragma once
|
||||
#include "ClassFlow.h"
|
||||
#include "ClassFlowImage.h"
|
||||
#include "ClassControllCamera.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
static const char* TAG2 = "example";
|
||||
|
||||
#define BLINK_GPIO GPIO_NUM_4
|
||||
|
||||
#define CAMERA_MODEL_AI_THINKER
|
||||
@@ -13,11 +11,9 @@ static const char* TAG2 = "example";
|
||||
|
||||
|
||||
class ClassFlowMakeImage :
|
||||
public ClassFlow
|
||||
public ClassFlowImage
|
||||
{
|
||||
protected:
|
||||
string LogImageLocation;
|
||||
bool isLogImage;
|
||||
float waitbeforepicture;
|
||||
framesize_t ImageSize;
|
||||
bool isImageSize;
|
||||
|
||||
@@ -335,25 +335,23 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
|
||||
Value = std::stof(zw);
|
||||
if (checkDigitIncreaseConsistency)
|
||||
{
|
||||
// Value = checkDigitConsistency(Value, DecimalShift, isanalog);
|
||||
Value = checkDigitConsistency(Value, DecimalShift, isanalog);
|
||||
}
|
||||
|
||||
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
|
||||
|
||||
if ((!AllowNegativeRates) && (Value < PreValue))
|
||||
{
|
||||
error = "Negative Rate - Returned old value - read value: " + zwvalue;
|
||||
error = error + "Negative Rate - Returned old value - read value: " + zwvalue + " ";
|
||||
Value = PreValue;
|
||||
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
|
||||
}
|
||||
else
|
||||
|
||||
if (useMaxRateValue && (abs(Value - PreValue) > MaxRateValue))
|
||||
{
|
||||
if (useMaxRateValue && (abs(Value - PreValue) > MaxRateValue))
|
||||
{
|
||||
error = "Rate too high - Returned old value - read value: " + zwvalue;
|
||||
Value = PreValue;
|
||||
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
|
||||
}
|
||||
error = error + "Rate too high - Returned old value - read value: " + zwvalue + " ";
|
||||
Value = PreValue;
|
||||
zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
|
||||
}
|
||||
|
||||
ReturnValueNoError = zwvalue;
|
||||
@@ -426,7 +424,7 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
|
||||
float zw;
|
||||
|
||||
pot = _decilamshift;
|
||||
if (!_isanalog) // falls es keine analogwerte gibt, kann die letzte nicht bewerte werden
|
||||
if (!_isanalog) // falls es keine analogwerte gibt, kann die letzte nicht bewertet werden
|
||||
{
|
||||
pot++;
|
||||
}
|
||||
|
||||
@@ -1,8 +1,14 @@
|
||||
//#pragma warning(disable : 4996)
|
||||
|
||||
#include "Helper.h"
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <dirent.h>
|
||||
#include <string.h>
|
||||
#include <esp_log.h>
|
||||
|
||||
//#define ISWINDOWS_TRUE
|
||||
#define PATH_MAX_STRING_SIZE 256
|
||||
|
||||
using namespace std;
|
||||
|
||||
@@ -159,6 +165,63 @@ string getFileType(string filename)
|
||||
return zw;
|
||||
}
|
||||
|
||||
/* recursive mkdir */
|
||||
int mkdir_r(const char *dir, const mode_t mode) {
|
||||
char tmp[PATH_MAX_STRING_SIZE];
|
||||
char *p = NULL;
|
||||
struct stat sb;
|
||||
size_t len;
|
||||
|
||||
/* copy path */
|
||||
len = strnlen (dir, PATH_MAX_STRING_SIZE);
|
||||
if (len == 0 || len == PATH_MAX_STRING_SIZE) {
|
||||
return -1;
|
||||
}
|
||||
memcpy (tmp, dir, len);
|
||||
tmp[len] = '\0';
|
||||
|
||||
/* remove trailing slash */
|
||||
if(tmp[len - 1] == '/') {
|
||||
tmp[len - 1] = '\0';
|
||||
}
|
||||
|
||||
/* check if path exists and is a directory */
|
||||
if (stat (tmp, &sb) == 0) {
|
||||
if (S_ISDIR (sb.st_mode)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* recursive mkdir */
|
||||
for(p = tmp + 1; *p; p++) {
|
||||
if(*p == '/') {
|
||||
*p = 0;
|
||||
/* test path */
|
||||
if (stat(tmp, &sb) != 0) {
|
||||
/* path does not exist - create directory */
|
||||
if (mkdir(tmp, mode) < 0) {
|
||||
return -1;
|
||||
}
|
||||
} else if (!S_ISDIR(sb.st_mode)) {
|
||||
/* not a directory */
|
||||
return -1;
|
||||
}
|
||||
*p = '/';
|
||||
}
|
||||
}
|
||||
/* test path */
|
||||
if (stat(tmp, &sb) != 0) {
|
||||
/* path does not exist - create directory */
|
||||
if (mkdir(tmp, mode) < 0) {
|
||||
return -1;
|
||||
}
|
||||
} else if (!S_ISDIR(sb.st_mode)) {
|
||||
/* not a directory */
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
string toUpper(string in)
|
||||
{
|
||||
for (int i = 0; i < in.length(); ++i)
|
||||
@@ -173,3 +236,42 @@ float temperatureRead()
|
||||
{
|
||||
return (temprature_sens_read() - 32) / 1.8;
|
||||
}
|
||||
|
||||
time_t addDays(time_t startTime, int days) {
|
||||
struct tm* tm = localtime(&startTime);
|
||||
tm->tm_mday += days;
|
||||
return mktime(tm);
|
||||
}
|
||||
|
||||
int removeFolder(const char* folderPath, const char* logTag) {
|
||||
ESP_LOGI(logTag, "Delete folder %s", folderPath);
|
||||
|
||||
DIR *dir = opendir(folderPath);
|
||||
if (!dir) {
|
||||
ESP_LOGI(logTag, "Failed to stat dir : %s", folderPath);
|
||||
return -1;
|
||||
}
|
||||
|
||||
struct dirent *entry;
|
||||
int deleted = 0;
|
||||
while ((entry = readdir(dir)) != NULL) {
|
||||
std::string path = string(folderPath) + "/" + entry->d_name;
|
||||
if (entry->d_type == DT_REG) {
|
||||
if (unlink(path.c_str()) == 0) {
|
||||
deleted ++;
|
||||
} else {
|
||||
ESP_LOGE(logTag, "can't delete file : %s", path.c_str());
|
||||
}
|
||||
} else if (entry->d_type == DT_DIR) {
|
||||
deleted += removeFolder(path.c_str(), logTag);
|
||||
}
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
if (rmdir(folderPath) != 0) {
|
||||
ESP_LOGE(logTag, "can't delete file : %s", folderPath);
|
||||
}
|
||||
ESP_LOGI(logTag, "%d older log files in folder %s deleted.", deleted, folderPath);
|
||||
|
||||
return deleted;
|
||||
}
|
||||
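For orientation only: a minimal sketch of how the three new helpers above (mkdir_r, addDays, removeFolder) are meant to be used together. The paths, retention offset and log tag below are made-up example values and are not part of the commit.

#include "Helper.h"
#include <sys/stat.h>
#include <ctime>
#include <cstdio>

void example_helpers()
{
    // Create a nested folder, including missing parents; returns 0 on success.
    if (mkdir_r("/sdcard/log/message", S_IRWXU) != 0)
        printf("could not create log folder\n");

    // Compute a cut-off timestamp 10 days in the past.
    time_t now;
    time(&now);
    time_t cutoff = addDays(now, -10);
    (void) cutoff;

    // Recursively delete a folder and report how many files were removed.
    int deleted = removeFolder("/sdcard/log/old", "example");
    printf("%d files deleted\n", deleted);
}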
|
||||
@@ -17,6 +17,11 @@ bool ctype_space(const char c, string adddelimiter);
|
||||
|
||||
string getFileType(string filename);
|
||||
|
||||
int mkdir_r(const char *dir, const mode_t mode);
|
||||
int removeFolder(const char* folderPath, const char* logTag);
|
||||
|
||||
string toUpper(string in);
|
||||
|
||||
float temperatureRead();
|
||||
|
||||
time_t addDays(time_t startTime, int days);
|
||||
|
||||
@@ -391,11 +391,11 @@ CImageBasis::CImageBasis(std::string _image)
|
||||
channels = 3;
|
||||
externalImage = false;
|
||||
filename = _image;
|
||||
long freebefore = esp_get_free_heap_size();
|
||||
// long freebefore = esp_get_free_heap_size();
|
||||
|
||||
rgb_image = stbi_load(_image.c_str(), &width, &height, &bpp, channels);
|
||||
if (rgb_image == NULL)
|
||||
LogFile.WriteToFile("Image Load failed:" + _image + " FreeHeapSize before: " + to_string(freebefore) + " after: " + to_string(esp_get_free_heap_size()));
|
||||
// if (rgb_image == NULL)
|
||||
// LogFile.WriteToFile("Image Load failed:" + _image + " FreeHeapSize before: " + to_string(freebefore) + " after: " + to_string(esp_get_free_heap_size()));
|
||||
// printf("CImageBasis after load\n");
|
||||
// printf("w %d, h %d, b %d, c %d", this->width, this->height, this->bpp, this->channels);
|
||||
}
|
||||
|
||||
@@ -1,7 +1,14 @@
|
||||
#include "ClassLogFile.h"
|
||||
#include "time_sntp.h"
|
||||
#include <string.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <dirent.h>
|
||||
#include "Helper.h"
|
||||
|
||||
ClassLogFile LogFile("/sdcard/log.txt");
|
||||
static const char *TAG = "log";
|
||||
|
||||
ClassLogFile LogFile("/sdcard/log/message", "log_%Y-%m-%d.txt");
|
||||
|
||||
void ClassLogFile::WriteToDedicatedFile(std::string _fn, std::string info, bool _time)
|
||||
{
|
||||
@@ -13,39 +20,111 @@ void ClassLogFile::WriteToDedicatedFile(std::string _fn, std::string info, bool
|
||||
}
|
||||
|
||||
pFile = fopen(_fn.c_str(), "a+");
|
||||
if (pFile!=NULL) {
|
||||
if (_time)
|
||||
{
|
||||
time_t rawtime;
|
||||
struct tm* timeinfo;
|
||||
char buffer[80];
|
||||
|
||||
if (_time)
|
||||
{
|
||||
time_t rawtime;
|
||||
struct tm* timeinfo;
|
||||
char buffer[80];
|
||||
time(&rawtime);
|
||||
timeinfo = localtime(&rawtime);
|
||||
|
||||
time(&rawtime);
|
||||
timeinfo = localtime(&rawtime);
|
||||
strftime(buffer, 80, "%Y-%m-%d_%H-%M-%S", timeinfo);
|
||||
|
||||
strftime(buffer, 80, "%Y-%m-%d_%H-%M-%S", timeinfo);
|
||||
zwtime = std::string(buffer);
|
||||
info = zwtime + ": " + info;
|
||||
}
|
||||
fputs(info.c_str(), pFile);
|
||||
fputs("\n", pFile);
|
||||
|
||||
zwtime = std::string(buffer);
|
||||
info = zwtime + ": " + info;
|
||||
fclose(pFile);
|
||||
} else {
|
||||
ESP_LOGI(TAG, "Can't open log file %s", _fn.c_str());
|
||||
}
|
||||
fputs(info.c_str(), pFile);
|
||||
fputs("\n", pFile);
|
||||
|
||||
fclose(pFile);
|
||||
}
|
||||
|
||||
void ClassLogFile::SwitchOnOff(bool _doLogFile){
|
||||
doLogFile = _doLogFile;
|
||||
};
|
||||
|
||||
void ClassLogFile::SetRetention(unsigned short _retentionInDays){
|
||||
retentionInDays = _retentionInDays;
|
||||
};
|
||||
|
||||
void ClassLogFile::WriteToFile(std::string info, bool _time)
|
||||
{
|
||||
WriteToDedicatedFile(logfile, info, _time);
|
||||
struct stat path_stat;
|
||||
if (stat(logroot.c_str(), &path_stat) != 0) {
|
||||
ESP_LOGI(TAG, "Create log folder: %s", logroot.c_str());
|
||||
if (mkdir_r(logroot.c_str(), S_IRWXU) == -1) {
|
||||
ESP_LOGI(TAG, "Can't create log foolder");
|
||||
}
|
||||
}
|
||||
|
||||
time_t rawtime;
|
||||
struct tm* timeinfo;
|
||||
char buffer[30];
|
||||
|
||||
time(&rawtime);
|
||||
timeinfo = localtime(&rawtime);
|
||||
|
||||
strftime(buffer, 30, logfile.c_str(), timeinfo);
|
||||
std::string logpath = logroot + "/" + buffer;
|
||||
|
||||
WriteToDedicatedFile(logpath, info, _time);
|
||||
}
|
||||
|
||||
ClassLogFile::ClassLogFile(std::string _logfile)
|
||||
void ClassLogFile::RemoveOld()
|
||||
{
|
||||
logfile = _logfile;
|
||||
if (retentionInDays == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
time_t rawtime;
|
||||
struct tm* timeinfo;
|
||||
char cmpfilename[30];
|
||||
|
||||
time(&rawtime);
|
||||
rawtime = addDays(rawtime, -retentionInDays);
|
||||
timeinfo = localtime(&rawtime);
|
||||
|
||||
strftime(cmpfilename, 30, logfile.c_str(), timeinfo);
|
||||
//ESP_LOGE(TAG, "log file name to compare: %s", cmpfilename);
|
||||
|
||||
DIR *dir = opendir(logroot.c_str());
|
||||
if (!dir) {
|
||||
ESP_LOGI(TAG, "Failed to stat dir : %s", logroot.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
struct dirent *entry;
|
||||
int deleted = 0;
|
||||
int notDeleted = 0;
|
||||
while ((entry = readdir(dir)) != NULL) {
|
||||
if (entry->d_type == DT_REG) {
|
||||
//ESP_LOGI(TAG, "list log file : %s %s", entry->d_name, cmpfilename);
|
||||
if ((strlen(entry->d_name) == strlen(cmpfilename)) && (strcmp(entry->d_name, cmpfilename) < 0)) {
|
||||
ESP_LOGI(TAG, "delete log file : %s", entry->d_name);
|
||||
std::string filepath = logroot + "/" + entry->d_name;
|
||||
if (unlink(filepath.c_str()) == 0) {
|
||||
deleted ++;
|
||||
} else {
|
||||
ESP_LOGE(TAG, "can't delete file : %s", entry->d_name);
|
||||
}
|
||||
} else {
|
||||
notDeleted ++;
|
||||
}
|
||||
}
|
||||
}
|
||||
ESP_LOGI(TAG, "%d older log files deleted. %d current log files not deleted.", deleted, notDeleted);
|
||||
closedir(dir);
|
||||
}
|
||||
|
||||
ClassLogFile::ClassLogFile(std::string _logroot, std::string _logfile)
|
||||
{
|
||||
logroot = _logroot;
|
||||
logfile = _logfile;
|
||||
doLogFile = true;
|
||||
}
|
||||
retentionInDays = 10;
|
||||
}
|
||||
|
||||
@@ -5,15 +5,19 @@
|
||||
class ClassLogFile
|
||||
{
|
||||
private:
|
||||
std::string logroot;
|
||||
std::string logfile;
|
||||
bool doLogFile;
|
||||
unsigned short retentionInDays;
|
||||
public:
|
||||
ClassLogFile(std::string _logfile);
|
||||
ClassLogFile(std::string _logpath, std::string _logfile);
|
||||
|
||||
void SwitchOnOff(bool _doLogFile);
|
||||
void SetRetention(unsigned short _retentionInDays);
|
||||
|
||||
void WriteToFile(std::string info, bool _time = true);
|
||||
void WriteToDedicatedFile(std::string _fn, std::string info, bool _time = true);
|
||||
void RemoveOld();
|
||||
};
|
||||
|
||||
extern ClassLogFile LogFile;
|
||||
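For orientation only: a minimal sketch of the rolling-log interface declared above, using the global LogFile instance created in ClassLogFile.cpp. The retention value and message text are illustrative, not taken from the commit.

#include "ClassLogFile.h"

void example_logging()
{
    LogFile.SwitchOnOff(true);            // enable writing to the SD card
    LogFile.SetRetention(5);              // keep daily log files for 5 days
    LogFile.WriteToFile("flow started");  // goes to /sdcard/log/message/log_YYYY-MM-DD.txt
    LogFile.RemoveOld();                  // drop files older than the retention window
}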
82
code/lib/jomjol_mqtt/interface_mqtt.cpp
Normal file
@@ -0,0 +1,82 @@
|
||||
#include "interface_mqtt.h"
|
||||
|
||||
|
||||
#include "esp_log.h"
|
||||
#include "mqtt_client.h"
|
||||
#include "ClassLogFile.h"
|
||||
|
||||
static const char *TAG = "interface_mqtt";
|
||||
|
||||
bool debugdetail = true;
|
||||
|
||||
// #define CONFIG_BROKER_URL "mqtt://192.168.178.43:1883"
|
||||
|
||||
esp_mqtt_event_id_t esp_mmqtt_ID = MQTT_EVENT_ANY;
|
||||
|
||||
bool mqtt_connected = false;
|
||||
esp_mqtt_client_handle_t client = NULL;
|
||||
|
||||
void MQTTPublish(std::string _key, std::string _content){
|
||||
if (client && mqtt_connected) {
|
||||
int msg_id;
|
||||
std::string zw;
|
||||
msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, 0);
|
||||
zw = "sent publish successful in MQTTPublish, msg_id=" + std::to_string(msg_id) + ", " + _key + ", " + _content;
|
||||
if (debugdetail) LogFile.WriteToFile(zw);
|
||||
ESP_LOGI(TAG, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
|
||||
}
|
||||
else {
|
||||
ESP_LOGI(TAG, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static esp_err_t mqtt_event_handler_cb(esp_mqtt_event_handle_t event)
|
||||
{
|
||||
switch (event->event_id) {
|
||||
case MQTT_EVENT_CONNECTED:
|
||||
ESP_LOGI(TAG, "MQTT_EVENT_CONNECTED");
|
||||
mqtt_connected = true;
|
||||
break;
|
||||
case MQTT_EVENT_DISCONNECTED:
|
||||
ESP_LOGI(TAG, "MQTT_EVENT_DISCONNECTED");
|
||||
break;
|
||||
case MQTT_EVENT_PUBLISHED:
|
||||
ESP_LOGI(TAG, "MQTT_EVENT_PUBLISHED, msg_id=%d", event->msg_id);
|
||||
break;
|
||||
case MQTT_EVENT_DATA:
|
||||
ESP_LOGI(TAG, "MQTT_EVENT_DATA");
|
||||
printf("TOPIC=%.*s\r\n", event->topic_len, event->topic);
|
||||
printf("DATA=%.*s\r\n", event->data_len, event->data);
|
||||
break;
|
||||
case MQTT_EVENT_ERROR:
|
||||
ESP_LOGI(TAG, "MQTT_EVENT_ERROR");
|
||||
break;
|
||||
default:
|
||||
ESP_LOGI(TAG, "Other event id:%d", event->event_id);
|
||||
break;
|
||||
}
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
static void mqtt_event_handler(void *handler_args, esp_event_base_t base, int32_t event_id, void *event_data) {
|
||||
ESP_LOGD(TAG, "Event dispatched from event loop base=%s, event_id=%d", base, event_id);
|
||||
mqtt_event_handler_cb((esp_mqtt_event_handle_t) event_data);
|
||||
}
|
||||
|
||||
void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password){
|
||||
esp_mqtt_client_config_t mqtt_cfg = {
|
||||
.uri = _mqttURI.c_str(),
|
||||
.client_id = _clientid.c_str(),
|
||||
};
|
||||
|
||||
if (_user.length() && _password.length()){
|
||||
mqtt_cfg.username = _user.c_str();
|
||||
mqtt_cfg.password = _password.c_str();
|
||||
printf("Connect to MQTT: %s, %s", mqtt_cfg.username, mqtt_cfg.password);
|
||||
};
|
||||
|
||||
client = esp_mqtt_client_init(&mqtt_cfg);
|
||||
esp_mqtt_client_register_event(client, esp_mmqtt_ID, mqtt_event_handler, client);
|
||||
esp_mqtt_client_start(client);
|
||||
}
|
||||
4
code/lib/jomjol_mqtt/interface_mqtt.h
Normal file
@@ -0,0 +1,4 @@
|
||||
#include <string>
|
||||
|
||||
void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user = "", std::string _password = "");
|
||||
void MQTTPublish(std::string _key, std::string _content);
|
||||
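For orientation only: a minimal sketch of the two calls declared above. The broker URI, client id, credentials and topic are made-up example values; note that MQTTPublish() only sends once the client has reported MQTT_EVENT_CONNECTED (see interface_mqtt.cpp above).

#include "interface_mqtt.h"

void example_mqtt()
{
    // User and password default to "" and may be omitted.
    MQTTInit("mqtt://192.168.178.43:1883", "watermeter", "user", "secret");

    // Publish a reading as plain text to the given topic.
    MQTTPublish("watermeter/value", "123.456");
}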
@@ -2,8 +2,12 @@
|
||||
|
||||
#include "bitmap_image.hpp"
|
||||
|
||||
#include "ClassLogFile.h"
|
||||
|
||||
#include <sys/stat.h>
|
||||
|
||||
bool debugdetailtflite = false;
|
||||
|
||||
float CTfLiteClass::GetOutputValue(int nr)
|
||||
{
|
||||
TfLiteTensor* output2 = this->interpreter->output(0);
|
||||
@@ -109,7 +113,11 @@ void CTfLiteClass::Invoke()
|
||||
|
||||
bool CTfLiteClass::LoadInputImage(std::string _fn)
|
||||
{
|
||||
std::string zw = "ClassFlowAnalog::doNeuralNetwork nach Load Image: " + _fn;
|
||||
// LogFile.WriteToFile(zw);
|
||||
bitmap_image image(_fn);
|
||||
if (debugdetailtflite) LogFile.WriteToFile(zw);
|
||||
|
||||
unsigned int w = image.width();
|
||||
unsigned int h = image.height();
|
||||
unsigned char red, green, blue;
|
||||
@@ -135,13 +143,17 @@ bool CTfLiteClass::LoadInputImage(std::string _fn)
|
||||
// printf("BMP: %f %f %f\n", (float) red, (float) green, (float) blue);
|
||||
|
||||
}
|
||||
|
||||
if (debugdetailtflite) LogFile.WriteToFile("After loading into input");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void CTfLiteClass::MakeAllocate()
|
||||
{
|
||||
static tflite::ops::micro::AllOpsResolver resolver;
|
||||
// static tflite::ops::micro::AllOpsResolver resolver;
|
||||
static tflite::AllOpsResolver resolver;
|
||||
this->interpreter = new tflite::MicroInterpreter(this->model, resolver, this->tensor_arena, this->kTensorArenaSize, this->error_reporter);
|
||||
|
||||
TfLiteStatus allocate_status = this->interpreter->AllocateTensors();
|
||||
@@ -232,6 +244,8 @@ CTfLiteClass::CTfLiteClass()
|
||||
CTfLiteClass::~CTfLiteClass()
|
||||
{
|
||||
delete this->tensor_arena;
|
||||
delete this->interpreter;
|
||||
delete this->error_reporter;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
exit(1); \
|
||||
}
|
||||
|
||||
#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
|
||||
#include "tensorflow/lite/micro/all_ops_resolver.h"
|
||||
#include "tensorflow/lite/micro/micro_error_reporter.h"
|
||||
#include "tensorflow/lite/micro/micro_interpreter.h"
|
||||
#include "tensorflow/lite/schema/schema_generated.h"
|
||||
@@ -39,7 +39,8 @@ class CTfLiteClass
|
||||
const tflite::Model* model;
|
||||
tflite::MicroInterpreter* interpreter;
|
||||
TfLiteTensor* output = nullptr;
|
||||
static tflite::ops::micro::AllOpsResolver *resolver;
|
||||
// static tflite::ops::micro::AllOpsResolver *resolver;
|
||||
static tflite::AllOpsResolver resolver;
|
||||
|
||||
int kTensorArenaSize;
|
||||
uint8_t *tensor_arena;
|
||||
|
||||
@@ -75,24 +75,18 @@ void setup_time(void)
|
||||
time(&now);
|
||||
localtime_r(&now, &timeinfo);
|
||||
// Is time set? If not, tm_year will be (1970 - 1900).
|
||||
if (timeinfo.tm_year < (2016 - 1900)) {
|
||||
if ((timeinfo.tm_year < (2016 - 1900)) || setTimeAlwaysOnReboot) {
|
||||
ESP_LOGI(TAG, "Time is not set yet. Connecting to WiFi and getting time over NTP.");
|
||||
initialize_sntp();
|
||||
obtain_time();
|
||||
// update 'now' variable with current time
|
||||
time(&now);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (setTimeAlwaysOnReboot)
|
||||
{
|
||||
obtain_time();
|
||||
}
|
||||
}
|
||||
char strftime_buf[64];
|
||||
|
||||
// Set timezone to Berlin Standard Time
|
||||
setenv("TZ", "UTC+9", 1);
|
||||
// setenv("TZ", "Europe/Berlin", 1);
|
||||
tzset();
|
||||
localtime_r(&now, &timeinfo);
|
||||
strftime(strftime_buf, sizeof(strftime_buf), "%c", &timeinfo);
|
||||
@@ -130,4 +124,4 @@ static void initialize_sntp(void)
|
||||
sntp_setservername(0, "pool.ntp.org");
|
||||
sntp_set_time_sync_notification_cb(time_sync_notification_cb);
|
||||
sntp_init();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,8 +23,8 @@ if(NOT DEFINED ENV{IDF_PATH})
|
||||
endif()
|
||||
|
||||
idf_component_register(
|
||||
SRCS tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_optional_debug_tools.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/activations.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/all_ops_resolver.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/micro/testing/test_utils.cc
|
||||
INCLUDE_DIRS . third_party/gemmlowp third_party/flatbuffers/include third_party/ruy third_party/kissfft)
|
||||
SRCS tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/micro/testing/test_conv_model.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/activations.cc
|
||||
INCLUDE_DIRS . third_party/gemmlowp third_party/flatbuffers/include third_party/ruy)
|
||||
|
||||
# Reduce the level of paranoia to be able to compile TF sources
|
||||
target_compile_options(${COMPONENT_LIB} PRIVATE
|
||||
@@ -32,6 +32,7 @@ target_compile_options(${COMPONENT_LIB} PRIVATE
|
||||
-Wno-missing-field-initializers
|
||||
-Wno-type-limits)
|
||||
|
||||
target_compile_options(${COMPONENT_LIB} PRIVATE -std=c11 -DTF_LITE_STATIC_MEMORY -O3 -Wno-nonnull -Wno-nonnull -Wno-nonnull -Wno-nonnull)
|
||||
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -DTF_LITE_STATIC_MEMORY -O3 -Wno-return-type -Wno-strict-aliasing -Wno-ignored-qualifiers -Wno-return-type -Wno-strict-aliasing -Wno-ignored-qualifiers -Wno-return-type -Wno-strict-aliasing -Wno-return-type -Wno-strict-aliasing >)
|
||||
target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter)
|
||||
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter >)
|
||||
target_compile_options(${COMPONENT_LIB} INTERFACE $<$<IN_LIST:-DTF_LITE_STATIC_MEMORY,$<TARGET_PROPERTY:${COMPONENT_LIB},COMPILE_OPTIONS>>:-DTF_LITE_STATIC_MEMORY>)
|
||||
target_link_libraries(${COMPONENT_LIB} PRIVATE -lm)
|
||||
|
||||
331
code/lib/tfmicro/fixedpoint/fixedpoint_neon.h
Normal file
@@ -0,0 +1,331 @@
|
||||
// Copyright 2015 The Gemmlowp Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// fixedpoint_neon.h: optimized NEON specializations of the templates
|
||||
// in fixedpoint.h.
|
||||
|
||||
#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_
|
||||
#define GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_
|
||||
|
||||
#include <arm_neon.h>
|
||||
|
||||
namespace gemmlowp {
|
||||
|
||||
template <>
|
||||
struct FixedPointRawTypeTraits<int32x4_t> {
|
||||
typedef std::int32_t ScalarRawType;
|
||||
static constexpr int kLanes = 4;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct FixedPointRawTypeTraits<int16x8_t> {
|
||||
typedef std::int16_t ScalarRawType;
|
||||
static constexpr int kLanes = 8;
|
||||
};
|
||||
|
||||
template <>
|
||||
inline int32x4_t BitAnd(int32x4_t a, int32x4_t b) {
|
||||
return vandq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t BitAnd(int16x8_t a, int16x8_t b) {
|
||||
return vandq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t BitOr(int32x4_t a, int32x4_t b) {
|
||||
return vorrq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t BitOr(int16x8_t a, int16x8_t b) {
|
||||
return vorrq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t BitXor(int32x4_t a, int32x4_t b) {
|
||||
return veorq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t BitXor(int16x8_t a, int16x8_t b) {
|
||||
return veorq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t BitNot(int32x4_t a) {
|
||||
return veorq_s32(a, vdupq_n_s32(-1));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t BitNot(int16x8_t a) {
|
||||
return veorq_s16(a, vdupq_n_s16(-1));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t Add(int32x4_t a, int32x4_t b) {
|
||||
return vaddq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t Add(int16x8_t a, int16x8_t b) {
|
||||
return vaddq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t Sub(int32x4_t a, int32x4_t b) {
|
||||
return vsubq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t Sub(int16x8_t a, int16x8_t b) {
|
||||
return vsubq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t Neg(int32x4_t a) {
|
||||
return vnegq_s32(a);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t Neg(int16x8_t a) {
|
||||
return vnegq_s16(a);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t ShiftLeft(int32x4_t a, int offset) {
|
||||
return vshlq_s32(a, vdupq_n_s32(offset));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t ShiftLeft(int16x8_t a, int offset) {
|
||||
return vshlq_s16(a, vdupq_n_s16(offset));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t ShiftRight(int32x4_t a, int offset) {
|
||||
return vshlq_s32(a, vdupq_n_s32(-offset));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t ShiftRight(int16x8_t a, int offset) {
|
||||
return vshlq_s16(a, vdupq_n_s16(-offset));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t SelectUsingMask(int32x4_t if_mask, int32x4_t then_val,
|
||||
int32x4_t else_val) {
|
||||
return vbslq_s32(vreinterpretq_u32_s32(if_mask), then_val, else_val);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t SelectUsingMask(int16x8_t if_mask, int16x8_t then_val,
|
||||
int16x8_t else_val) {
|
||||
return vbslq_s16(vreinterpretq_u16_s16(if_mask), then_val, else_val);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfEqual(int32x4_t a, int32x4_t b) {
|
||||
return vreinterpretq_s32_u32(vceqq_s32(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfEqual(int16x8_t a, int16x8_t b) {
|
||||
return vreinterpretq_s16_u16(vceqq_s16(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfNotEqual(int32x4_t a, int32x4_t b) {
|
||||
return BitNot(MaskIfEqual(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfNotEqual(int16x8_t a, int16x8_t b) {
|
||||
return BitNot(MaskIfEqual(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfZero(int32x4_t a) {
|
||||
return MaskIfEqual(a, vdupq_n_s32(0));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfZero(int16x8_t a) {
|
||||
return MaskIfEqual(a, vdupq_n_s16(0));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfNonZero(int32x4_t a) {
|
||||
return vreinterpretq_s32_u32(vtstq_s32(a, a));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfNonZero(int16x8_t a) {
|
||||
return vreinterpretq_s16_u16(vtstq_s16(a, a));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfGreaterThan(int32x4_t a, int32x4_t b) {
|
||||
return vreinterpretq_s32_u32(vcgtq_s32(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfGreaterThan(int16x8_t a, int16x8_t b) {
|
||||
return vreinterpretq_s16_u16(vcgtq_s16(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfGreaterThanOrEqual(int32x4_t a, int32x4_t b) {
|
||||
return vreinterpretq_s32_u32(vcgeq_s32(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfGreaterThanOrEqual(int16x8_t a, int16x8_t b) {
|
||||
return vreinterpretq_s16_u16(vcgeq_s16(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfLessThan(int32x4_t a, int32x4_t b) {
|
||||
return vreinterpretq_s32_u32(vcltq_s32(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfLessThan(int16x8_t a, int16x8_t b) {
|
||||
return vreinterpretq_s16_u16(vcltq_s16(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t MaskIfLessThanOrEqual(int32x4_t a, int32x4_t b) {
|
||||
return vreinterpretq_s32_u32(vcleq_s32(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t MaskIfLessThanOrEqual(int16x8_t a, int16x8_t b) {
|
||||
return vreinterpretq_s16_u16(vcleq_s16(a, b));
|
||||
}
|
||||
|
||||
template <>
|
||||
inline bool All(int32x4_t a) {
|
||||
a = vandq_s32(a, vextq_s32(a, a, 1));
|
||||
a = vandq_s32(a, vextq_s32(a, a, 2));
|
||||
return vgetq_lane_s32(a, 0);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline bool All(int16x8_t a) {
|
||||
a = vandq_s16(a, vextq_s16(a, a, 1));
|
||||
a = vandq_s16(a, vextq_s16(a, a, 2));
|
||||
a = vandq_s16(a, vextq_s16(a, a, 4));
|
||||
return vgetq_lane_s16(a, 0);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline bool Any(int32x4_t a) {
|
||||
a = vorrq_s32(a, vextq_s32(a, a, 1));
|
||||
a = vorrq_s32(a, vextq_s32(a, a, 2));
|
||||
return vgetq_lane_s32(a, 0);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline bool Any(int16x8_t a) {
|
||||
a = vorrq_s16(a, vextq_s16(a, a, 1));
|
||||
a = vorrq_s16(a, vextq_s16(a, a, 2));
|
||||
a = vorrq_s16(a, vextq_s16(a, a, 4));
|
||||
return vgetq_lane_s16(a, 0);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t RoundingHalfSum(int32x4_t a, int32x4_t b) {
|
||||
return vrhaddq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t RoundingHalfSum(int16x8_t a, int16x8_t b) {
|
||||
return vrhaddq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t SaturatingRoundingDoublingHighMul(int32x4_t a, int32x4_t b) {
|
||||
return vqrdmulhq_s32(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t SaturatingRoundingDoublingHighMul(int16x8_t a, int16x8_t b) {
|
||||
return vqrdmulhq_s16(a, b);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int32x4_t RoundingDivideByPOT(int32x4_t x, int exponent) {
|
||||
const int32x4_t shift_vec = vdupq_n_s32(-exponent);
|
||||
const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift_vec), 31);
|
||||
const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
|
||||
return vrshlq_s32(fixed_up_x, shift_vec);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t RoundingDivideByPOT(int16x8_t x, int exponent) {
|
||||
const int16x8_t shift_vec = vdupq_n_s16(-exponent);
|
||||
const int16x8_t fixup = vshrq_n_s16(vandq_s16(x, shift_vec), 15);
|
||||
const int16x8_t fixed_up_x = vqaddq_s16(x, fixup);
|
||||
return vrshlq_s16(fixed_up_x, shift_vec);
|
||||
}
|
||||
|
||||
template <int Exponent>
|
||||
struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int32x4_t, 1> {
|
||||
static int32x4_t eval(int32x4_t x) { return vqshlq_n_s32(x, Exponent); }
|
||||
};
|
||||
|
||||
template <int Exponent>
|
||||
struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int32x4_t, -1> {
|
||||
static int32x4_t eval(int32x4_t x) {
|
||||
const int32x4_t fixup = vshrq_n_s32(x, 31);
|
||||
const int32x4_t fixed_up_x = vqaddq_s32(x, fixup);
|
||||
return vrshrq_n_s32(fixed_up_x, -Exponent);
|
||||
}
|
||||
};
|
||||
|
||||
template <int Exponent>
|
||||
struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int16x8_t, 1> {
|
||||
static int16x8_t eval(int16x8_t x) { return vqshlq_n_s16(x, Exponent); }
|
||||
};
|
||||
|
||||
template <int Exponent>
|
||||
struct ImplSaturatingRoundingMultiplyByPOT<Exponent, int16x8_t, -1> {
|
||||
static int16x8_t eval(int16x8_t x) {
|
||||
const int16x8_t fixup = vshrq_n_s16(x, 15);
|
||||
const int16x8_t fixed_up_x = vqaddq_s16(x, fixup);
|
||||
return vrshrq_n_s16(fixed_up_x, -Exponent);
|
||||
}
|
||||
};
|
||||
|
||||
template <>
|
||||
inline int32x4_t Dup<int32x4_t>(std::int32_t x) {
|
||||
return vdupq_n_s32(x);
|
||||
}
|
||||
|
||||
template <>
|
||||
inline int16x8_t Dup<int16x8_t>(std::int16_t x) {
|
||||
return vdupq_n_s16(x);
|
||||
}
|
||||
|
||||
// So far this is only needed for int16.
|
||||
template <>
|
||||
inline int16x8_t SaturatingAdd(int16x8_t a, int16x8_t b) {
|
||||
return vqaddq_s16(a, b);
|
||||
}
|
||||
|
||||
} // end namespace gemmlowp
|
||||
|
||||
#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_
|
||||
@@ -46,14 +46,17 @@
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
|
||||
#if defined(__unix__) && !defined(FLATBUFFERS_LOCALE_INDEPENDENT)
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#ifdef _STLPORT_VERSION
|
||||
#define FLATBUFFERS_CPP98_STL
|
||||
#endif
|
||||
#ifndef FLATBUFFERS_CPP98_STL
|
||||
#include <functional>
|
||||
#endif
|
||||
|
||||
#include "flatbuffers/stl_emulation.h"
|
||||
#ifdef __ANDROID__
|
||||
#include <android/api-level.h>
|
||||
#endif
|
||||
|
||||
#if defined(__ICCARM__)
|
||||
#include <intrinsics.h>
|
||||
@@ -154,10 +157,12 @@ namespace flatbuffers {
|
||||
defined(__clang__)
|
||||
#define FLATBUFFERS_FINAL_CLASS final
|
||||
#define FLATBUFFERS_OVERRIDE override
|
||||
#define FLATBUFFERS_EXPLICIT_CPP11 explicit
|
||||
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t
|
||||
#else
|
||||
#define FLATBUFFERS_FINAL_CLASS
|
||||
#define FLATBUFFERS_OVERRIDE
|
||||
#define FLATBUFFERS_EXPLICIT_CPP11
|
||||
#define FLATBUFFERS_VTABLE_UNDERLYING_TYPE
|
||||
#endif
|
||||
|
||||
@@ -165,10 +170,14 @@ namespace flatbuffers {
|
||||
(!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \
|
||||
(defined(__cpp_constexpr) && __cpp_constexpr >= 200704)
|
||||
#define FLATBUFFERS_CONSTEXPR constexpr
|
||||
#define FLATBUFFERS_CONSTEXPR_CPP11 constexpr
|
||||
#define FLATBUFFERS_CONSTEXPR_DEFINED
|
||||
#else
|
||||
#define FLATBUFFERS_CONSTEXPR const
|
||||
#define FLATBUFFERS_CONSTEXPR_CPP11
|
||||
#endif
|
||||
|
||||
// This macro is never used in code!
|
||||
#if (defined(__cplusplus) && __cplusplus >= 201402L) || \
|
||||
(defined(__cpp_constexpr) && __cpp_constexpr >= 201304)
|
||||
#define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR
|
||||
@@ -194,6 +203,16 @@ namespace flatbuffers {
|
||||
#define FLATBUFFERS_DELETE_FUNC(func) private: func;
|
||||
#endif
|
||||
|
||||
// Check if we can use template aliases
|
||||
// Not possible if Microsoft Compiler before 2012
|
||||
// Possible is the language feature __cpp_alias_templates is defined well
|
||||
// Or possible if the C++ std is C+11 or newer
|
||||
#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
|
||||
|| (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
|
||||
|| (defined(__cplusplus) && __cplusplus >= 201103L)
|
||||
#define FLATBUFFERS_TEMPLATES_ALIASES
|
||||
#endif
|
||||
|
||||
#ifndef FLATBUFFERS_HAS_STRING_VIEW
|
||||
// Only provide flatbuffers::string_view if __has_include can be used
|
||||
// to detect a header that provides an implementation
|
||||
@@ -236,10 +255,8 @@ namespace flatbuffers {
|
||||
|
||||
#ifndef FLATBUFFERS_LOCALE_INDEPENDENT
|
||||
// Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}.
|
||||
// They are part of the POSIX-2008 but not part of the C/C++ standard.
|
||||
// GCC/Clang have definition (_XOPEN_SOURCE>=700) if POSIX-2008.
|
||||
#if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \
|
||||
(defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE>=700)))
|
||||
(defined(_XOPEN_VERSION) && (_XOPEN_VERSION>=700)) && (!defined(__ANDROID_API__) || (defined(__ANDROID_API__) && (__ANDROID_API__>=21))))
|
||||
#define FLATBUFFERS_LOCALE_INDEPENDENT 1
|
||||
#else
|
||||
#define FLATBUFFERS_LOCALE_INDEPENDENT 0
|
||||
@@ -309,6 +326,7 @@ typedef uintmax_t largest_scalar_t;
|
||||
#define FLATBUFFERS_MAX_ALIGNMENT 16
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#pragma warning(disable: 4351) // C4351: new behavior: elements of array ... will be default initialized
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable: 4127) // C4127: conditional expression is constant
|
||||
#endif
|
||||
@@ -374,6 +392,13 @@ T ReadScalar(const void *p) {
|
||||
return EndianScalar(*reinterpret_cast<const T *>(p));
|
||||
}
|
||||
|
||||
// See https://github.com/google/flatbuffers/issues/5950
|
||||
|
||||
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wstringop-overflow"
|
||||
#endif
|
||||
|
||||
template<typename T>
|
||||
// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details.
|
||||
__supress_ubsan__("alignment")
|
||||
@@ -386,6 +411,10 @@ template<typename T> __supress_ubsan__("alignment") void WriteScalar(void *p, Of
|
||||
*reinterpret_cast<uoffset_t *>(p) = EndianScalar(t.o);
|
||||
}
|
||||
|
||||
#if (FLATBUFFERS_GCC >= 100000) && (FLATBUFFERS_GCC < 110000)
|
||||
#pragma GCC diagnostic pop
|
||||
#endif
|
||||
|
||||
// Computes how many bytes you'd have to pad to be able to write an
|
||||
// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in
|
||||
// memory).
|
||||
|
||||
@@ -18,6 +18,11 @@
|
||||
#define FLATBUFFERS_H_
|
||||
|
||||
#include "flatbuffers/base.h"
|
||||
#include "flatbuffers/stl_emulation.h"
|
||||
|
||||
#ifndef FLATBUFFERS_CPP98_STL
|
||||
#include <functional>
|
||||
#endif
|
||||
|
||||
#if defined(FLATBUFFERS_NAN_DEFAULTS)
|
||||
# include <cmath>
|
||||
@@ -581,6 +586,14 @@ static inline const char *GetCstring(const String *str) {
|
||||
return str ? str->c_str() : "";
|
||||
}
|
||||
|
||||
#ifdef FLATBUFFERS_HAS_STRING_VIEW
|
||||
// Convenience function to get string_view from a String returning an empty
|
||||
// string_view on null pointer.
|
||||
static inline flatbuffers::string_view GetStringView(const String *str) {
|
||||
return str ? str->string_view() : flatbuffers::string_view();
|
||||
}
|
||||
#endif // FLATBUFFERS_HAS_STRING_VIEW
|
||||
|
||||
// Allocator interface. This is flatbuffers-specific and meant only for
|
||||
// `vector_downward` usage.
|
||||
class Allocator {
|
||||
@@ -1211,7 +1224,7 @@ class FlatBufferBuilder {
|
||||
/// you call Finish()). You can use this information if you need to embed
|
||||
/// a FlatBuffer in some other buffer, such that you can later read it
|
||||
/// without first having to copy it into its own buffer.
|
||||
size_t GetBufferMinAlignment() {
|
||||
size_t GetBufferMinAlignment() const {
|
||||
Finished();
|
||||
return minalign_;
|
||||
}
|
||||
@@ -1295,6 +1308,11 @@ class FlatBufferBuilder {
|
||||
TrackField(field, off);
|
||||
}
|
||||
|
||||
template<typename T> void AddElement(voffset_t field, T e) {
|
||||
auto off = PushElement(e);
|
||||
TrackField(field, off);
|
||||
}
|
||||
|
||||
template<typename T> void AddOffset(voffset_t field, Offset<T> off) {
|
||||
if (off.IsNull()) return; // Don't store.
|
||||
AddElement(field, ReferTo(off.o), static_cast<uoffset_t>(0));
|
||||
@@ -1599,6 +1617,9 @@ class FlatBufferBuilder {
|
||||
// causing the wrong overload to be selected, remove it.
|
||||
AssertScalarT<T>();
|
||||
StartVector(len, sizeof(T));
|
||||
if (len == 0) {
|
||||
return Offset<Vector<T>>(EndVector(len));
|
||||
}
|
||||
// clang-format off
|
||||
#if FLATBUFFERS_LITTLEENDIAN
|
||||
PushBytes(reinterpret_cast<const uint8_t *>(v), len * sizeof(T));
|
||||
@@ -1795,8 +1816,8 @@ class FlatBufferBuilder {
|
||||
return a.KeyCompareLessThan(&b);
|
||||
}
|
||||
|
||||
private:
|
||||
StructKeyComparator &operator=(const StructKeyComparator &);
|
||||
FLATBUFFERS_DELETE_FUNC(
|
||||
StructKeyComparator &operator=(const StructKeyComparator &))
|
||||
};
|
||||
/// @endcond
|
||||
|
||||
@@ -1871,10 +1892,7 @@ class FlatBufferBuilder {
|
||||
vector_downward &buf_;
|
||||
|
||||
private:
|
||||
TableKeyComparator &operator=(const TableKeyComparator &other) {
|
||||
buf_ = other.buf_;
|
||||
return *this;
|
||||
}
|
||||
FLATBUFFERS_DELETE_FUNC(TableKeyComparator &operator=(const TableKeyComparator &other))
|
||||
};
|
||||
/// @endcond
|
||||
|
||||
@@ -2269,8 +2287,8 @@ class Verifier FLATBUFFERS_FINAL_CLASS {
|
||||
|
||||
template<typename T>
|
||||
bool VerifyBufferFromStart(const char *identifier, size_t start) {
|
||||
if (identifier && (size_ < 2 * sizeof(flatbuffers::uoffset_t) ||
|
||||
!BufferHasIdentifier(buf_ + start, identifier))) {
|
||||
if (identifier && !Check((size_ >= 2 * sizeof(flatbuffers::uoffset_t) &&
|
||||
BufferHasIdentifier(buf_ + start, identifier)))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -2452,12 +2470,26 @@ class Table {
|
||||
return field_offset ? reinterpret_cast<P>(p) : nullptr;
|
||||
}
|
||||
|
||||
template<typename Raw, typename Face>
|
||||
flatbuffers::Optional<Face> GetOptional(voffset_t field) const {
|
||||
auto field_offset = GetOptionalFieldOffset(field);
|
||||
auto p = data_ + field_offset;
|
||||
return field_offset ? Optional<Face>(static_cast<Face>(ReadScalar<Raw>(p)))
|
||||
: Optional<Face>();
|
||||
}
|
||||
|
||||
template<typename T> bool SetField(voffset_t field, T val, T def) {
|
||||
auto field_offset = GetOptionalFieldOffset(field);
|
||||
if (!field_offset) return IsTheSameAs(val, def);
|
||||
WriteScalar(data_ + field_offset, val);
|
||||
return true;
|
||||
}
|
||||
template<typename T> bool SetField(voffset_t field, T val) {
|
||||
auto field_offset = GetOptionalFieldOffset(field);
|
||||
if (!field_offset) return false;
|
||||
WriteScalar(data_ + field_offset, val);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SetPointer(voffset_t field, const uint8_t *val) {
|
||||
auto field_offset = GetOptionalFieldOffset(field);
|
||||
@@ -2525,6 +2557,17 @@ class Table {
|
||||
uint8_t data_[1];
|
||||
};
|
||||
|
||||
// This specialization allows avoiding warnings like:
|
||||
// MSVC C4800: type: forcing value to bool 'true' or 'false'.
|
||||
template<>
|
||||
inline flatbuffers::Optional<bool> Table::GetOptional<uint8_t, bool>(
|
||||
voffset_t field) const {
|
||||
auto field_offset = GetOptionalFieldOffset(field);
|
||||
auto p = data_ + field_offset;
|
||||
return field_offset ? Optional<bool>(ReadScalar<uint8_t>(p) != 0)
|
||||
: Optional<bool>();
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void FlatBufferBuilder::Required(Offset<T> table, voffset_t field) {
|
||||
auto table_ptr = reinterpret_cast<const Table *>(buf_.data_at(table.o));
|
||||
@@ -2704,7 +2747,7 @@ inline const char * const *ElementaryTypeNames() {
|
||||
// Basic type info cost just 16bits per field!
|
||||
struct TypeCode {
|
||||
uint16_t base_type : 4; // ElementaryType
|
||||
uint16_t is_vector : 1;
|
||||
uint16_t is_repeating : 1; // Either vector (in table) or array (in struct)
|
||||
int16_t sequence_ref : 11; // Index into type_refs below, or -1 for none.
|
||||
};
|
||||
|
||||
@@ -2720,6 +2763,7 @@ struct TypeTable {
|
||||
size_t num_elems; // of type_codes, values, names (but not type_refs).
|
||||
const TypeCode *type_codes; // num_elems count
|
||||
const TypeFunction *type_refs; // less than num_elems entries (see TypeCode).
|
||||
const int16_t *array_sizes; // less than num_elems entries (see TypeCode).
|
||||
const int64_t *values; // Only set for non-consecutive enum/union or structs.
|
||||
const char *const *names; // Only set if compiled with --reflect-names.
|
||||
};
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#define FLATBUFFERS_STL_EMULATION_H_
|
||||
|
||||
// clang-format off
|
||||
#include "flatbuffers/base.h"
|
||||
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
@@ -25,6 +26,17 @@
|
||||
#include <memory>
|
||||
#include <limits>
|
||||
|
||||
// Detect C++17 compatible compiler.
|
||||
// __cplusplus >= 201703L - a compiler has support of 'static inline' variables.
|
||||
#if defined(FLATBUFFERS_USE_STD_OPTIONAL) \
|
||||
|| (defined(__cplusplus) && __cplusplus >= 201703L) \
|
||||
|| (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))
|
||||
#include <optional>
|
||||
#ifndef FLATBUFFERS_USE_STD_OPTIONAL
|
||||
#define FLATBUFFERS_USE_STD_OPTIONAL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
|
||||
#define FLATBUFFERS_CPP98_STL
|
||||
#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL)
|
||||
@@ -33,16 +45,6 @@
|
||||
#include <cctype>
|
||||
#endif // defined(FLATBUFFERS_CPP98_STL)
|
||||
|
||||
// Check if we can use template aliases
|
||||
// Not possible if Microsoft Compiler before 2012
|
||||
// Possible is the language feature __cpp_alias_templates is defined well
|
||||
// Or possible if the C++ std is C+11 or newer
|
||||
#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \
|
||||
|| (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \
|
||||
|| (defined(__cplusplus) && __cplusplus >= 201103L)
|
||||
#define FLATBUFFERS_TEMPLATES_ALIASES
|
||||
#endif
|
||||
|
||||
// This header provides backwards compatibility for C++98 STLs like stlport.
|
||||
namespace flatbuffers {
|
||||
|
||||
@@ -190,7 +192,7 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
|
||||
// MSVC 2010 doesn't support C++11 aliases.
|
||||
// We're manually "aliasing" the class here as we want to bring unique_ptr
|
||||
// into the flatbuffers namespace. We have unique_ptr in the flatbuffers
|
||||
// namespace we have a completely independent implemenation (see below)
|
||||
// namespace we have a completely independent implementation (see below)
|
||||
// for C++98 STL implementations.
|
||||
template <class T> class unique_ptr : public std::unique_ptr<T> {
|
||||
public:
|
||||
@@ -302,6 +304,146 @@ inline void vector_emplace_back(std::vector<T> *vector, V &&data) {
|
||||
|
||||
#endif // !FLATBUFFERS_CPP98_STL
|
||||
|
||||
#ifdef FLATBUFFERS_USE_STD_OPTIONAL
|
||||
template<class T>
|
||||
using Optional = std::optional<T>;
|
||||
using nullopt_t = std::nullopt_t;
|
||||
inline constexpr nullopt_t nullopt = std::nullopt;
|
||||
|
||||
#else
|
||||
// Limited implementation of Optional<T> type for a scalar T.
|
||||
// This implementation limited by trivial types compatible with
|
||||
// std::is_arithmetic<T> or std::is_enum<T> type traits.
|
||||
|
||||
// A tag to indicate an empty flatbuffers::optional<T>.
|
||||
struct nullopt_t {
|
||||
explicit FLATBUFFERS_CONSTEXPR_CPP11 nullopt_t(int) {}
|
||||
};
|
||||
|
||||
#if defined(FLATBUFFERS_CONSTEXPR_DEFINED)
|
||||
namespace internal {
|
||||
template <class> struct nullopt_holder {
|
||||
static constexpr nullopt_t instance_ = nullopt_t(0);
|
||||
};
|
||||
template<class Dummy>
|
||||
constexpr nullopt_t nullopt_holder<Dummy>::instance_;
|
||||
}
|
||||
static constexpr const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
|
||||
|
||||
#else
|
||||
namespace internal {
|
||||
template <class> struct nullopt_holder {
|
||||
static const nullopt_t instance_;
|
||||
};
|
||||
template<class Dummy>
|
||||
const nullopt_t nullopt_holder<Dummy>::instance_ = nullopt_t(0);
|
||||
}
|
||||
static const nullopt_t &nullopt = internal::nullopt_holder<void>::instance_;
|
||||
|
||||
#endif
|
||||
|
||||
template<class T>
|
||||
class Optional FLATBUFFERS_FINAL_CLASS {
|
||||
// Non-scalar 'T' would extremely complicated Optional<T>.
|
||||
// Use is_scalar<T> checking because flatbuffers flatbuffers::is_arithmetic<T>
|
||||
// isn't implemented.
|
||||
static_assert(flatbuffers::is_scalar<T>::value, "unexpected type T");
|
||||
|
||||
public:
|
||||
~Optional() {}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 Optional() FLATBUFFERS_NOEXCEPT
|
||||
: value_(), has_value_(false) {}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 Optional(nullopt_t) FLATBUFFERS_NOEXCEPT
|
||||
: value_(), has_value_(false) {}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 Optional(T val) FLATBUFFERS_NOEXCEPT
|
||||
: value_(val), has_value_(true) {}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 Optional(const Optional &other) FLATBUFFERS_NOEXCEPT
|
||||
: value_(other.value_), has_value_(other.has_value_) {}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(const Optional &other) FLATBUFFERS_NOEXCEPT {
|
||||
value_ = other.value_;
|
||||
has_value_ = other.has_value_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(nullopt_t) FLATBUFFERS_NOEXCEPT {
|
||||
value_ = T();
|
||||
has_value_ = false;
|
||||
return *this;
|
||||
}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP14 Optional &operator=(T val) FLATBUFFERS_NOEXCEPT {
|
||||
value_ = val;
|
||||
has_value_ = true;
|
||||
return *this;
|
||||
}
|
||||
|
||||
void reset() FLATBUFFERS_NOEXCEPT {
|
||||
*this = nullopt;
|
||||
}
|
||||
|
||||
void swap(Optional &other) FLATBUFFERS_NOEXCEPT {
|
||||
std::swap(value_, other.value_);
|
||||
std::swap(has_value_, other.has_value_);
|
||||
}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 FLATBUFFERS_EXPLICIT_CPP11 operator bool() const FLATBUFFERS_NOEXCEPT {
|
||||
return has_value_;
|
||||
}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 bool has_value() const FLATBUFFERS_NOEXCEPT {
|
||||
return has_value_;
|
||||
}
|
||||
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 const T& operator*() const FLATBUFFERS_NOEXCEPT {
|
||||
return value_;
|
||||
}
|
||||
|
||||
const T& value() const {
|
||||
FLATBUFFERS_ASSERT(has_value());
|
||||
return value_;
|
||||
}
|
||||
|
||||
T value_or(T default_value) const FLATBUFFERS_NOEXCEPT {
|
||||
return has_value() ? value_ : default_value;
|
||||
}
|
||||
|
||||
private:
|
||||
T value_;
|
||||
bool has_value_;
|
||||
};
|
||||
|
||||
template<class T>
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& opt, nullopt_t) FLATBUFFERS_NOEXCEPT {
|
||||
return !opt;
|
||||
}
|
||||
template<class T>
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(nullopt_t, const Optional<T>& opt) FLATBUFFERS_NOEXCEPT {
|
||||
return !opt;
|
||||
}
|
||||
|
||||
template<class T, class U>
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const U& rhs) FLATBUFFERS_NOEXCEPT {
|
||||
return static_cast<bool>(lhs) && (*lhs == rhs);
|
||||
}
|
||||
|
||||
template<class T, class U>
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const T& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
|
||||
return static_cast<bool>(rhs) && (lhs == *rhs);
|
||||
}
|
||||
|
||||
template<class T, class U>
|
||||
FLATBUFFERS_CONSTEXPR_CPP11 bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) FLATBUFFERS_NOEXCEPT {
|
||||
return static_cast<bool>(lhs) != static_cast<bool>(rhs)
|
||||
? false
|
||||
: !static_cast<bool>(lhs) ? false : (*lhs == *rhs);
|
||||
}
|
||||
#endif // FLATBUFFERS_USE_STD_OPTIONAL
|
||||
|
||||
} // namespace flatbuffers
|
||||
|
||||
#endif // FLATBUFFERS_STL_EMULATION_H_
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
Copyright (c) 2003-2010 Mark Borgerding
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
* Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@@ -1,164 +0,0 @@
|
||||
/*
|
||||
Copyright (c) 2003-2010, Mark Borgerding
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
* Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/* kiss_fft.h
|
||||
defines kiss_fft_scalar as either short or a float type
|
||||
and defines
|
||||
typedef struct { kiss_fft_scalar r; kiss_fft_scalar i; }kiss_fft_cpx; */
|
||||
#include "kiss_fft.h"
|
||||
#include <limits.h>
|
||||
|
||||
#define MAXFACTORS 32
|
||||
/* e.g. an fft of length 128 has 4 factors
|
||||
as far as kissfft is concerned
|
||||
4*4*4*2
|
||||
*/
|
||||
|
||||
struct kiss_fft_state{
|
||||
int nfft;
|
||||
int inverse;
|
||||
int factors[2*MAXFACTORS];
|
||||
kiss_fft_cpx twiddles[1];
|
||||
};
|
||||
|
||||
/*
|
||||
Explanation of macros dealing with complex math:
|
||||
|
||||
C_MUL(m,a,b) : m = a*b
|
||||
C_FIXDIV( c , div ) : if a fixed point impl., c /= div. noop otherwise
|
||||
C_SUB( res, a,b) : res = a - b
|
||||
C_SUBFROM( res , a) : res -= a
|
||||
C_ADDTO( res , a) : res += a
|
||||
* */
|
||||
#ifdef FIXED_POINT
|
||||
#if (FIXED_POINT==32)
|
||||
# define FRACBITS 31
|
||||
# define SAMPPROD int64_t
|
||||
#define SAMP_MAX 2147483647
|
||||
#else
|
||||
# define FRACBITS 15
|
||||
# define SAMPPROD int32_t
|
||||
#define SAMP_MAX 32767
|
||||
#endif
|
||||
|
||||
#define SAMP_MIN -SAMP_MAX
|
||||
|
||||
#if defined(CHECK_OVERFLOW)
|
||||
# define CHECK_OVERFLOW_OP(a,op,b) \
|
||||
if ( (SAMPPROD)(a) op (SAMPPROD)(b) > SAMP_MAX || (SAMPPROD)(a) op (SAMPPROD)(b) < SAMP_MIN ) { \
|
||||
fprintf(stderr,"WARNING:overflow @ " __FILE__ "(%d): (%d " #op" %d) = %ld\n",__LINE__,(a),(b),(SAMPPROD)(a) op (SAMPPROD)(b) ); }
|
||||
#endif
|
||||
|
||||
|
||||
# define smul(a,b) ( (SAMPPROD)(a)*(b) )
|
||||
# define sround( x ) (kiss_fft_scalar)( ( (x) + (1<<(FRACBITS-1)) ) >> FRACBITS )
|
||||
|
||||
# define S_MUL(a,b) sround( smul(a,b) )
|
||||
|
||||
# define C_MUL(m,a,b) \
|
||||
do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \
|
||||
(m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0)
|
||||
|
||||
# define DIVSCALAR(x,k) \
|
||||
(x) = sround( smul( x, SAMP_MAX/k ) )
|
||||
|
||||
# define C_FIXDIV(c,div) \
|
||||
do { DIVSCALAR( (c).r , div); \
|
||||
DIVSCALAR( (c).i , div); }while (0)
|
||||
|
||||
# define C_MULBYSCALAR( c, s ) \
|
||||
do{ (c).r = sround( smul( (c).r , s ) ) ;\
|
||||
(c).i = sround( smul( (c).i , s ) ) ; }while(0)
|
||||
|
||||
#else /* not FIXED_POINT*/
|
||||
|
||||
# define S_MUL(a,b) ( (a)*(b) )
|
||||
#define C_MUL(m,a,b) \
|
||||
do{ (m).r = (a).r*(b).r - (a).i*(b).i;\
|
||||
(m).i = (a).r*(b).i + (a).i*(b).r; }while(0)
|
||||
# define C_FIXDIV(c,div) /* NOOP */
|
||||
# define C_MULBYSCALAR( c, s ) \
|
||||
do{ (c).r *= (s);\
|
||||
(c).i *= (s); }while(0)
|
||||
#endif
|
||||
|
||||
#ifndef CHECK_OVERFLOW_OP
|
||||
# define CHECK_OVERFLOW_OP(a,op,b) /* noop */
|
||||
#endif
|
||||
|
||||
#define C_ADD( res, a,b)\
|
||||
do { \
|
||||
CHECK_OVERFLOW_OP((a).r,+,(b).r)\
|
||||
CHECK_OVERFLOW_OP((a).i,+,(b).i)\
|
||||
(res).r=(a).r+(b).r; (res).i=(a).i+(b).i; \
|
||||
}while(0)
|
||||
#define C_SUB( res, a,b)\
|
||||
do { \
|
||||
CHECK_OVERFLOW_OP((a).r,-,(b).r)\
|
||||
CHECK_OVERFLOW_OP((a).i,-,(b).i)\
|
||||
(res).r=(a).r-(b).r; (res).i=(a).i-(b).i; \
|
||||
}while(0)
|
||||
#define C_ADDTO( res , a)\
|
||||
do { \
|
||||
CHECK_OVERFLOW_OP((res).r,+,(a).r)\
|
||||
CHECK_OVERFLOW_OP((res).i,+,(a).i)\
|
||||
(res).r += (a).r; (res).i += (a).i;\
|
||||
}while(0)
|
||||
|
||||
#define C_SUBFROM( res , a)\
|
||||
do {\
|
||||
CHECK_OVERFLOW_OP((res).r,-,(a).r)\
|
||||
CHECK_OVERFLOW_OP((res).i,-,(a).i)\
|
||||
(res).r -= (a).r; (res).i -= (a).i; \
|
||||
}while(0)
|
||||
|
||||
|
||||
#ifdef FIXED_POINT
|
||||
# define KISS_FFT_COS(phase) floor(.5+SAMP_MAX * cos (phase))
|
||||
# define KISS_FFT_SIN(phase) floor(.5+SAMP_MAX * sin (phase))
|
||||
# define HALF_OF(x) ((x)>>1)
|
||||
#elif defined(USE_SIMD)
|
||||
# define KISS_FFT_COS(phase) _mm_set1_ps( cos(phase) )
|
||||
# define KISS_FFT_SIN(phase) _mm_set1_ps( sin(phase) )
|
||||
# define HALF_OF(x) ((x)*_mm_set1_ps(.5))
|
||||
#else
|
||||
# define KISS_FFT_COS(phase) (kiss_fft_scalar) cos(phase)
|
||||
# define KISS_FFT_SIN(phase) (kiss_fft_scalar) sin(phase)
|
||||
# define HALF_OF(x) ((x)*.5)
|
||||
#endif
|
||||
|
||||
#define kf_cexp(x,phase) \
|
||||
do{ \
|
||||
(x)->r = KISS_FFT_COS(phase);\
|
||||
(x)->i = KISS_FFT_SIN(phase);\
|
||||
}while(0)
|
||||
|
||||
|
||||
/* a debugging function */
|
||||
#define pcpx(c)\
|
||||
fprintf(stderr,"%g + %gi\n",(double)((c)->r),(double)((c)->i) )
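The comment block and macros above form kissfft's entire complex-arithmetic layer. A minimal hypothetical sketch (not part of the upstream file) of how they compose, valid for both the fixed-point and float builds:

static void complex_macro_demo(void)
{
    kiss_fft_cpx a = {3, 1};   /* 3 + 1i; kiss_fft_scalar type depends on FIXED_POINT */
    kiss_fft_cpx b = {2, 5};   /* 2 + 5i */
    kiss_fft_cpx m, w;
    C_FIXDIV(a, 2);            /* fixed-point builds scale a down by 2; no-op for float */
    C_MUL(m, a, b);            /* m = a * b (complex multiply) */
    C_ADDTO(m, b);             /* m += b */
    C_SUBFROM(m, a);           /* m -= a */
    kf_cexp(&w, 0.25);         /* w = cos(0.25) + i*sin(0.25), scaled for fixed point */
    pcpx(&m);                  /* debug print: "<real> + <imag>i" */
    (void)w;
}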
|
||||
|
||||
|
||||
#ifdef KISS_FFT_USE_ALLOCA
|
||||
// define this to allow use of alloca instead of malloc for temporary buffers
|
||||
// Temporary buffers are used in two case:
|
||||
// 1. FFT sizes that have "bad" factors. i.e. not 2,3 and 5
|
||||
// 2. "in-place" FFTs. Notice the quotes, since kissfft does not really do an in-place transform.
|
||||
#include <alloca.h>
|
||||
#define KISS_FFT_TMP_ALLOC(nbytes) alloca(nbytes)
|
||||
#define KISS_FFT_TMP_FREE(ptr)
|
||||
#else
|
||||
#define KISS_FFT_TMP_ALLOC(nbytes) KISS_FFT_MALLOC(nbytes)
|
||||
#define KISS_FFT_TMP_FREE(ptr) KISS_FFT_FREE(ptr)
|
||||
#endif
|
||||
@@ -1,131 +0,0 @@
|
||||
#ifndef KISS_FFT_H
|
||||
#define KISS_FFT_H
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <math.h>
|
||||
#include <string.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
ATTENTION!
|
||||
If you would like:
|
||||
-- a utility that will handle the caching of fft objects
|
||||
-- real-only (no imaginary time component ) FFT
|
||||
-- a multi-dimensional FFT
|
||||
-- a command-line utility to perform ffts
|
||||
-- a command-line utility to perform fast-convolution filtering
|
||||
|
||||
Then see kfc.h kiss_fftr.h kiss_fftnd.h fftutil.c kiss_fastfir.c
|
||||
in the tools/ directory.
|
||||
*/
|
||||
|
||||
#ifdef USE_SIMD
|
||||
# include <xmmintrin.h>
|
||||
# define kiss_fft_scalar __m128
|
||||
#define KISS_FFT_MALLOC(nbytes) _mm_malloc(nbytes,16)
|
||||
#define KISS_FFT_FREE _mm_free
|
||||
#else
|
||||
#define KISS_FFT_MALLOC(X) (void*)(0) /* Patched. */
|
||||
#define KISS_FFT_FREE(X) /* Patched. */
|
||||
#endif
|
||||
|
||||
|
||||
// Patched automatically by download_dependencies.sh so default is 16 bit.
|
||||
#ifndef FIXED_POINT
|
||||
#define FIXED_POINT (16)
|
||||
#endif
|
||||
// End patch.
|
||||
|
||||
#ifdef FIXED_POINT
|
||||
#include <stdint.h> /* Patched. */
|
||||
#include <sys/types.h>
|
||||
# if (FIXED_POINT == 32)
|
||||
# define kiss_fft_scalar int32_t
|
||||
# else
|
||||
# define kiss_fft_scalar int16_t
|
||||
# endif
|
||||
#else
|
||||
# ifndef kiss_fft_scalar
|
||||
/* default is float */
|
||||
# define kiss_fft_scalar float
|
||||
# endif
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
kiss_fft_scalar r;
|
||||
kiss_fft_scalar i;
|
||||
}kiss_fft_cpx;
|
||||
|
||||
typedef struct kiss_fft_state* kiss_fft_cfg;
|
||||
|
||||
/*
|
||||
* kiss_fft_alloc
|
||||
*
|
||||
* Initialize a FFT (or IFFT) algorithm's cfg/state buffer.
|
||||
*
|
||||
* typical usage: kiss_fft_cfg mycfg=kiss_fft_alloc(1024,0,NULL,NULL);
|
||||
*
|
||||
* The return value from fft_alloc is a cfg buffer used internally
|
||||
* by the fft routine or NULL.
|
||||
*
|
||||
* If lenmem is NULL, then kiss_fft_alloc will allocate a cfg buffer using malloc.
|
||||
* The returned value should be free()d when done to avoid memory leaks.
|
||||
*
|
||||
* The state can be placed in a user supplied buffer 'mem':
|
||||
* If lenmem is not NULL and mem is not NULL and *lenmem is large enough,
|
||||
* then the function places the cfg in mem and the size used in *lenmem
|
||||
* and returns mem.
|
||||
*
|
||||
* If lenmem is not NULL and ( mem is NULL or *lenmem is not large enough),
|
||||
* then the function returns NULL and places the minimum cfg
|
||||
* buffer size in *lenmem.
|
||||
* */
|
||||
|
||||
kiss_fft_cfg kiss_fft_alloc(int nfft,int inverse_fft,void * mem,size_t * lenmem);
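A hypothetical sketch (not part of the header) of the two allocation modes described above. Since KISS_FFT_MALLOC is patched to a null pointer in this vendored copy, the caller-supplied-buffer mode is presumably the one that remains usable:

static kiss_fft_cfg make_forward_cfg(int nfft)
{
    size_t lenmem = 0;
    void *mem;
    /* mem == NULL and *lenmem too small: the call only reports the required
       size in lenmem and returns NULL. */
    kiss_fft_alloc(nfft, 0 /* forward */, NULL, &lenmem);
    mem = malloc(lenmem);   /* stdlib.h is already included above */
    /* Second call places the cfg inside the caller-owned buffer and returns mem;
       the caller later releases it with free(). */
    return kiss_fft_alloc(nfft, 0, mem, &lenmem);
}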
|
||||
|
||||
/*
|
||||
* kiss_fft(cfg,in_out_buf)
|
||||
*
|
||||
* Perform an FFT on a complex input buffer.
|
||||
* for a forward FFT,
|
||||
* fin should be f[0] , f[1] , ... ,f[nfft-1]
|
||||
* fout will be F[0] , F[1] , ... ,F[nfft-1]
|
||||
* Note that each element is complex and can be accessed like
|
||||
f[k].r and f[k].i
|
||||
* */
|
||||
void kiss_fft(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout);
|
||||
|
||||
/*
|
||||
A more generic version of the above function. It reads its input from every Nth sample.
|
||||
* */
|
||||
void kiss_fft_stride(kiss_fft_cfg cfg,const kiss_fft_cpx *fin,kiss_fft_cpx *fout,int fin_stride);
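For example (hypothetical; the interleaved stereo layout is an assumption for illustration), a stride of 2 transforms only one channel of an L/R-interleaved buffer:

static void left_channel_fft(kiss_fft_cfg cfg, const kiss_fft_cpx *interleaved_lr,
                             kiss_fft_cpx *fout)
{
    /* interleaved_lr holds L,R,L,R,...; reading every 2nd sample feeds only
       the left channel into the transform. */
    kiss_fft_stride(cfg, interleaved_lr, fout, 2);
}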
|
||||
|
||||
/* If kiss_fft_alloc allocated a buffer, it is one contiguous
|
||||
buffer and can be simply free()d when no longer needed*/
|
||||
#define kiss_fft_free free
|
||||
|
||||
/*
|
||||
Cleans up some memory that gets managed internally. Not necessary to call, but it might clean up
|
||||
your compiler output to call this before you exit.
|
||||
*/
|
||||
void kiss_fft_cleanup(void);
|
||||
|
||||
|
||||
/*
|
||||
* Returns the smallest integer k, such that k>=n and k has only "fast" factors (2,3,5)
|
||||
*/
|
||||
int kiss_fft_next_fast_size(int n);
|
||||
|
||||
/* for real ffts, we need an even size */
|
||||
#define kiss_fftr_next_fast_size_real(n) \
|
||||
(kiss_fft_next_fast_size( ((n)+1)>>1)<<1)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -1,46 +0,0 @@
|
||||
#ifndef KISS_FTR_H
|
||||
#define KISS_FTR_H
|
||||
|
||||
#include "kiss_fft.h"
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
|
||||
Real optimized version can save about 45% cpu time vs. complex fft of a real seq.
|
||||
|
||||
|
||||
|
||||
*/
|
||||
|
||||
typedef struct kiss_fftr_state *kiss_fftr_cfg;
|
||||
|
||||
|
||||
kiss_fftr_cfg kiss_fftr_alloc(int nfft,int inverse_fft,void * mem, size_t * lenmem);
|
||||
/*
|
||||
nfft must be even
|
||||
|
||||
If you don't care to allocate space, use mem = lenmem = NULL
|
||||
*/
|
||||
|
||||
|
||||
void kiss_fftr(kiss_fftr_cfg cfg,const kiss_fft_scalar *timedata,kiss_fft_cpx *freqdata);
|
||||
/*
|
||||
input timedata has nfft scalar points
|
||||
output freqdata has nfft/2+1 complex points
|
||||
*/
|
||||
|
||||
void kiss_fftri(kiss_fftr_cfg cfg,const kiss_fft_cpx *freqdata,kiss_fft_scalar *timedata);
|
||||
/*
|
||||
input freqdata has nfft/2+1 complex points
|
||||
output timedata has nfft scalar points
|
||||
*/
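A hypothetical round-trip sketch of the two calls above (the cfg objects and buffers are assumed to be set up by the caller; nfft must be even as noted):

static void real_fft_roundtrip(kiss_fftr_cfg fwd, kiss_fftr_cfg inv,
                               kiss_fft_scalar *timedata, kiss_fft_cpx *freqdata)
{
    /* nfft real samples in timedata -> nfft/2+1 complex bins in freqdata */
    kiss_fftr(fwd, timedata, freqdata);
    /* ...inspect or modify freqdata (e.g. filtering)... */
    /* nfft/2+1 complex bins back to nfft real samples */
    kiss_fftri(inv, freqdata, timedata);
}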
|
||||
|
||||
#define kiss_fftr_free free
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
@@ -1,203 +0,0 @@
|
||||
/* Copyright 2020 Google LLC. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#ifndef RUY_RUY_PROFILER_INSTRUMENTATION_H_
|
||||
#define RUY_RUY_PROFILER_INSTRUMENTATION_H_
|
||||
|
||||
#ifdef RUY_PROFILER
|
||||
#include <cstdio>
|
||||
#include <mutex>
|
||||
#include <vector>
|
||||
#endif
|
||||
|
||||
namespace ruy {
|
||||
namespace profiler {
|
||||
|
||||
#ifdef RUY_PROFILER
|
||||
|
||||
// A label is how a code scope is annotated to appear in profiles.
|
||||
// The stacks that are sampled by the profiler are stacks of such labels.
|
||||
// A label consists of a literal string, plus optional integer arguments.
|
||||
class Label {
|
||||
public:
|
||||
Label() {}
|
||||
template <typename... Args>
|
||||
explicit Label(Args... args) {
|
||||
Set(args...);
|
||||
}
|
||||
void Set(const char* format) {
|
||||
format_ = format;
|
||||
args_count_ = 0;
|
||||
}
|
||||
template <typename... Args>
|
||||
void Set(const char* format, Args... args) {
|
||||
format_ = format;
|
||||
args_count_ = sizeof...(args);
|
||||
SetArgs(0, args...);
|
||||
}
|
||||
|
||||
void operator=(const Label& other);
|
||||
|
||||
bool operator==(const Label& other) const;
|
||||
|
||||
std::string Formatted() const;
|
||||
const char* format() const { return format_; }
|
||||
|
||||
private:
|
||||
void SetArgs(int position, int arg0) { args_[position] = arg0; }
|
||||
|
||||
template <typename... Args>
|
||||
void SetArgs(int position, int arg0, Args... args) {
|
||||
SetArgs(position, arg0);
|
||||
SetArgs(position + 1, args...);
|
||||
}
|
||||
|
||||
static constexpr int kMaxArgs = 4;
|
||||
const char* format_ = nullptr;
|
||||
int args_count_ = 0;
|
||||
int args_[kMaxArgs];
|
||||
};
|
||||
|
||||
namespace detail {
|
||||
|
||||
// Forward-declaration, see class ThreadStack below.
|
||||
class ThreadStack;
|
||||
|
||||
bool& GlobalIsProfilerRunning();
|
||||
|
||||
// Returns the global vector of pointers to all stacks, there being one stack
|
||||
// per thread executing instrumented code.
|
||||
std::vector<ThreadStack*>* GlobalAllThreadStacks();
|
||||
|
||||
// Returns the mutex to be locked around any access to GlobalAllThreadStacks().
|
||||
std::mutex* GlobalsMutex();
|
||||
|
||||
// Returns the thread-local stack, specific to the current thread.
|
||||
ThreadStack* ThreadLocalThreadStack();
|
||||
|
||||
// This 'stack' is what may be more appropriately called a 'pseudostack':
|
||||
// It contains Label entries that are 'manually' entered by instrumentation
|
||||
// code. It's unrelated to real call stacks.
|
||||
struct Stack {
|
||||
std::uint32_t id = 0;
|
||||
static constexpr int kMaxSize = 64;
|
||||
int size = 0;
|
||||
Label labels[kMaxSize];
|
||||
};
|
||||
|
||||
// Returns the buffer byte size required by CopyToSample.
|
||||
int GetBufferSize(const Stack& stack);
|
||||
|
||||
// Copies this Stack into a byte buffer, called a 'sample'.
|
||||
void CopyToBuffer(const Stack& stack, char* dst);
|
||||
|
||||
// Populates this Stack from an existing sample buffer, typically
|
||||
// produced by CopyToSample.
|
||||
void ReadFromBuffer(const char* src, Stack* stack);
|
||||
|
||||
// ThreadStack is meant to be used as a thread-local singleton, assigning to
|
||||
// each thread a Stack object holding its pseudo-stack of profile labels,
|
||||
// plus a mutex allowing to synchronize accesses to this pseudo-stack between
|
||||
// this thread and a possible profiler thread sampling it.
|
||||
class ThreadStack {
|
||||
public:
|
||||
ThreadStack();
|
||||
~ThreadStack();
|
||||
|
||||
const Stack& stack() const { return stack_; }
|
||||
|
||||
// Returns the mutex to lock around any access to this stack. Each stack is
|
||||
// accessed by potentially two threads: the thread that it belongs to
|
||||
// (which calls Push and Pop) and the profiler thread during profiling
|
||||
// (which calls CopyToSample).
|
||||
std::mutex& Mutex() const { return mutex_; }
|
||||
|
||||
// Pushes a new label on the top of this Stack.
|
||||
template <typename... Args>
|
||||
void Push(Args... args) {
|
||||
// This mutex locking is needed to guard against race conditions as both
|
||||
// the current thread and the profiler thread may be concurrently accessing
|
||||
// this stack. In addition to that, this mutex locking also serves the other
|
||||
// purpose of acting as a barrier (of compiler code reordering, of runtime
|
||||
// CPU instruction reordering, and of memory access reordering), which
|
||||
// gives a measure of correctness to this profiler. The downside is some
|
||||
// latency. As this lock will be uncontended most of the time, the cost
|
||||
// should be roughly that of a sequentially-consistent atomic access,
|
||||
// comparable to an access to the level of CPU data cache that is shared
|
||||
// among all cores, typically 60 cycles on current ARM CPUs, plus side
|
||||
// effects from barrier instructions.
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
// Avoid overrunning the stack, even in 'release' builds. This profiling
|
||||
// instrumentation code should not ship in release builds anyway, the
|
||||
// overhead of this check is negligible, and overrunning a stack array would
|
||||
// be bad.
|
||||
if (stack_.size >= Stack::kMaxSize) {
|
||||
abort();
|
||||
}
|
||||
stack_.labels[stack_.size++].Set(args...);
|
||||
}
|
||||
|
||||
// Pops the top-most label from this Stack.
|
||||
void Pop() {
|
||||
// See the comment in Push about this lock. While it would be tempting to
|
||||
// try to remove this lock and just atomically decrement size_ with a
|
||||
// store-release, that would not necessarily be a substitute for all of the
|
||||
// purposes that this lock serves, or if it was done carefully to serve all
|
||||
// of the same purposes, then that wouldn't be faster than this (mostly
|
||||
// uncontended) lock.
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
stack_.size--;
|
||||
}
|
||||
|
||||
private:
|
||||
mutable std::mutex mutex_;
|
||||
Stack stack_;
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
// RAII user-facing way to construct Labels associated with their life scope
|
||||
// and get them pushed to / popped from the current thread stack.
|
||||
class ScopeLabel {
|
||||
public:
|
||||
template <typename... Args>
|
||||
ScopeLabel(Args... args) : thread_stack_(detail::ThreadLocalThreadStack()) {
|
||||
thread_stack_->Push(args...);
|
||||
}
|
||||
|
||||
~ScopeLabel() { thread_stack_->Pop(); }
|
||||
|
||||
private:
|
||||
detail::ThreadStack* thread_stack_;
|
||||
};
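A hypothetical usage sketch (the function and label text are illustrative, not part of the header): the label is pushed for the lifetime of the scope and is attributed to any samples the profiler takes while it is live.

void PackBlock(int rows, int cols) {
  ruy::profiler::ScopeLabel label("PackBlock %dx%d", rows, cols);
  // ... work attributed to "PackBlock <rows>x<cols>" while `label` is in scope ...
}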
|
||||
|
||||
#else // no RUY_PROFILER
|
||||
|
||||
class ScopeLabel {
|
||||
public:
|
||||
template <typename... Args>
|
||||
explicit ScopeLabel(Args...) {}
|
||||
|
||||
// This destructor is needed to consistently silence clang's -Wunused-variable
|
||||
// which seems to trigger semi-randomly.
|
||||
~ScopeLabel() {}
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace profiler
|
||||
} // namespace ruy
|
||||
|
||||
#endif // RUY_RUY_PROFILER_INSTRUMENTATION_H_
|
||||
@@ -21,7 +21,7 @@ limitations under the License.
|
||||
// Also update tensorflow/tensorflow.bzl and
|
||||
// tensorflow/tools/pip_package/setup.py
|
||||
#define TF_MAJOR_VERSION 2
|
||||
#define TF_MINOR_VERSION 1
|
||||
#define TF_MINOR_VERSION 5
|
||||
#define TF_PATCH_VERSION 0
|
||||
|
||||
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
|
||||
@@ -108,7 +108,7 @@ limitations under the License.
|
||||
|
||||
#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0
|
||||
#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0
|
||||
#define TF_GRAPH_DEF_VERSION 389 // Updated: 2020/5/2
|
||||
#define TF_GRAPH_DEF_VERSION 578 // Updated: 2020/11/7
|
||||
|
||||
// Checkpoint compatibility versions (the versions field in SavedSliceMeta).
|
||||
//
|
||||
|
||||
@@ -67,8 +67,9 @@ typedef struct {
|
||||
typedef enum {
|
||||
kTfLiteActNone = 0,
|
||||
kTfLiteActRelu,
|
||||
kTfLiteActRelu1, // min(max(-1, x), 1)
|
||||
kTfLiteActRelu6, // min(max(0, x), 6)
|
||||
kTfLiteActReluN1To1, // min(max(-1, x), 1)
|
||||
kTfLiteActRelu1 = kTfLiteActReluN1To1, // kTfLiteActRelu1 will be deprecated.
|
||||
kTfLiteActRelu6, // min(max(0, x), 6)
|
||||
kTfLiteActTanh,
|
||||
kTfLiteActSignBit,
|
||||
kTfLiteActSigmoid,
|
||||
@@ -198,6 +199,8 @@ typedef struct {
|
||||
|
||||
typedef struct {
|
||||
TfLiteFusedActivation activation;
|
||||
// Parameter added for the version 4.
|
||||
bool pot_scale_int16;
|
||||
} TfLiteAddParams;
|
||||
|
||||
typedef struct {
|
||||
@@ -219,6 +222,8 @@ typedef struct {
|
||||
|
||||
typedef struct {
|
||||
TfLiteFusedActivation activation;
|
||||
// Parameter added for the version 5.
|
||||
bool pot_scale_int16;
|
||||
} TfLiteSubParams;
|
||||
|
||||
typedef struct {
|
||||
@@ -297,6 +302,7 @@ typedef struct {
|
||||
|
||||
typedef struct {
|
||||
bool align_corners;
|
||||
bool half_pixel_centers;
|
||||
} TfLiteResizeNearestNeighborParams;
|
||||
|
||||
typedef struct {
|
||||
@@ -459,6 +465,15 @@ typedef struct {
|
||||
int body_subgraph_index;
|
||||
} TfLiteWhileParams;
|
||||
|
||||
typedef struct {
|
||||
bool exclusive;
|
||||
bool reverse;
|
||||
} TfLiteCumsumParams;
|
||||
|
||||
typedef struct {
|
||||
int init_subgraph_index;
|
||||
} TfLiteCallOnceParams;
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif // __cplusplus
|
||||
|
||||
@@ -79,7 +79,8 @@ TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
|
||||
void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }
|
||||
|
||||
void TfLiteTensorDataFree(TfLiteTensor* t) {
|
||||
if (t->allocation_type == kTfLiteDynamic) {
|
||||
if (t->allocation_type == kTfLiteDynamic ||
|
||||
t->allocation_type == kTfLitePersistentRo) {
|
||||
free(t->data.raw);
|
||||
}
|
||||
t->data.raw = NULL;
|
||||
@@ -172,7 +173,8 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
|
||||
}
|
||||
|
||||
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
|
||||
if (tensor->allocation_type != kTfLiteDynamic) {
|
||||
if (tensor->allocation_type != kTfLiteDynamic &&
|
||||
tensor->allocation_type != kTfLitePersistentRo) {
|
||||
return;
|
||||
}
|
||||
// TODO(b/145340303): Tensor data should be aligned.
|
||||
@@ -205,6 +207,8 @@ const char* TfLiteTypeGetName(TfLiteType type) {
|
||||
return "BOOL";
|
||||
case kTfLiteComplex64:
|
||||
return "COMPLEX64";
|
||||
case kTfLiteComplex128:
|
||||
return "COMPLEX128";
|
||||
case kTfLiteString:
|
||||
return "STRING";
|
||||
case kTfLiteFloat16:
|
||||
|
||||
@@ -29,6 +29,9 @@ limitations under the License.
|
||||
// TfLiteDelegate - allows delegation of nodes to alternative backends.
|
||||
//
|
||||
// Some abstractions in this file are created and managed by Interpreter.
|
||||
//
|
||||
// NOTE: The order of values in these structs are "semi-ABI stable". New values
|
||||
// should be added only to the end of structs and never reordered.
|
||||
|
||||
#ifndef TENSORFLOW_LITE_C_COMMON_H_
|
||||
#define TENSORFLOW_LITE_C_COMMON_H_
|
||||
@@ -43,8 +46,18 @@ extern "C" {
|
||||
|
||||
typedef enum TfLiteStatus {
|
||||
kTfLiteOk = 0,
|
||||
|
||||
// Generally referring to an error in the runtime (i.e. interpreter)
|
||||
kTfLiteError = 1,
|
||||
kTfLiteDelegateError = 2
|
||||
|
||||
// Generally referring to an error from a TfLiteDelegate itself.
|
||||
kTfLiteDelegateError = 2,
|
||||
|
||||
// Generally referring to an error in applying a delegate due to
|
||||
// incompatibility between runtime and delegate, e.g., this error is returned
|
||||
// when trying to apply a TfLite delegate onto a model graph that's already
|
||||
// immutable.
|
||||
kTfLiteApplicationError = 3
|
||||
} TfLiteStatus;
|
||||
|
||||
// The list of external context types known to TF Lite. This list exists solely
|
||||
@@ -55,7 +68,7 @@ typedef enum TfLiteExternalContextType {
|
||||
kTfLiteEigenContext = 0, // include eigen_support.h to use.
|
||||
kTfLiteGemmLowpContext = 1, // include gemm_support.h to use.
|
||||
kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support.
|
||||
kTfLiteCpuBackendContext = 3, // include cpu_backend_support.h to use.
|
||||
kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use.
|
||||
kTfLiteMaxExternalContexts = 4
|
||||
} TfLiteExternalContextType;
|
||||
|
||||
@@ -83,8 +96,9 @@ typedef struct TfLiteIntArray {
|
||||
int size;
|
||||
// gcc 6.1+ have a bug where flexible members aren't properly handled
|
||||
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
|
||||
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
|
||||
__GNUC_MINOR__ >= 1
|
||||
#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
|
||||
__GNUC_MINOR__ >= 1) || \
|
||||
defined(HEXAGON) || (__clang_major__ == 7 && __clang_minor__ == 1)
|
||||
int data[0];
|
||||
#else
|
||||
int data[];
|
||||
@@ -122,6 +136,7 @@ typedef struct TfLiteFloatArray {
|
||||
int size;
|
||||
// gcc 6.1+ have a bug where flexible members aren't properly handled
|
||||
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
|
||||
// This also applies to the toolchain used for Qualcomm Hexagon DSPs.
|
||||
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
|
||||
__GNUC_MINOR__ >= 1
|
||||
float data[0];
|
||||
@@ -200,6 +215,7 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
|
||||
// the current function, while also reporting the location of the error.
|
||||
// `a` and `b` may be evaluated more than once, so no side effects or
|
||||
// extremely expensive computations should be done.
|
||||
// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
|
||||
#define TF_LITE_ENSURE_EQ(context, a, b) \
|
||||
do { \
|
||||
if ((a) != (b)) { \
|
||||
@@ -219,6 +235,17 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \
|
||||
do { \
|
||||
auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \
|
||||
if (delta > epsilon) { \
|
||||
TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \
|
||||
__FILE__, __LINE__, #a, #b, static_cast<double>(a), \
|
||||
static_cast<double>(b)); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
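A hypothetical sketch of the new TF_LITE_ENSURE_NEAR macro inside a kernel helper (the function and scale names are illustrative); on failure the macro logs through TF_LITE_KERNEL_LOG and returns kTfLiteError from the enclosing function:

TfLiteStatus CheckRequantization(TfLiteContext* context, double input_scale,
                                 double output_scale) {
  TF_LITE_ENSURE_NEAR(context, input_scale, output_scale, 1e-5);
  return kTfLiteOk;
}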
|
||||
|
||||
#define TF_LITE_ENSURE_OK(context, status) \
|
||||
do { \
|
||||
const TfLiteStatus s = (status); \
|
||||
@@ -227,11 +254,32 @@ void TfLiteFloatArrayFree(TfLiteFloatArray* a);
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// Define TFL_CAPI_EXPORT macro to export a function properly with a shared
|
||||
// library.
|
||||
#ifdef SWIG
|
||||
#define TFL_CAPI_EXPORT
|
||||
#else
|
||||
#if defined(_WIN32)
|
||||
#ifdef TFL_COMPILE_LIBRARY
|
||||
#define TFL_CAPI_EXPORT __declspec(dllexport)
|
||||
#else
|
||||
#define TFL_CAPI_EXPORT __declspec(dllimport)
|
||||
#endif // TFL_COMPILE_LIBRARY
|
||||
#else
|
||||
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
|
||||
#endif // _WIN32
|
||||
#endif // SWIG
|
||||
|
||||
// Single-precision complex data type compatible with the C99 definition.
|
||||
typedef struct TfLiteComplex64 {
|
||||
float re, im; // real and imaginary parts, respectively.
|
||||
} TfLiteComplex64;
|
||||
|
||||
// Double-precision complex data type compatible with the C99 definition.
|
||||
typedef struct TfLiteComplex128 {
|
||||
double re, im; // real and imaginary parts, respectively.
|
||||
} TfLiteComplex128;
|
||||
|
||||
// Half precision data type compatible with the C99 definition.
|
||||
typedef struct TfLiteFloat16 {
|
||||
uint16_t data;
|
||||
@@ -251,6 +299,7 @@ typedef enum {
|
||||
kTfLiteInt8 = 9,
|
||||
kTfLiteFloat16 = 10,
|
||||
kTfLiteFloat64 = 11,
|
||||
kTfLiteComplex128 = 12,
|
||||
} TfLiteType;
|
||||
|
||||
// Return the name of a given type, for error reporting purposes.
|
||||
@@ -307,26 +356,39 @@ typedef union TfLitePtrUnion {
|
||||
int64_t* i64;
|
||||
float* f;
|
||||
TfLiteFloat16* f16;
|
||||
double* f64;
|
||||
char* raw;
|
||||
const char* raw_const;
|
||||
uint8_t* uint8;
|
||||
bool* b;
|
||||
int16_t* i16;
|
||||
TfLiteComplex64* c64;
|
||||
TfLiteComplex128* c128;
|
||||
int8_t* int8;
|
||||
/* Only use this member. */
|
||||
void* data;
|
||||
} TfLitePtrUnion;
|
||||
|
||||
// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
|
||||
// data (or data externally allocated). kTfLiteArenaRw is arena allocated
|
||||
// data. kTfLiteDynamic is for tensors that are allocated during evaluation.
|
||||
// Memory allocation strategies.
|
||||
// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated.
|
||||
// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence,
|
||||
// and available during eval.
|
||||
// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and
|
||||
// only available during eval.
|
||||
// * kTfLiteDynamic: Allocated during eval, or for string tensors.
|
||||
// * kTfLitePersistentRo: Allocated and populated during prepare. This is
|
||||
// useful for tensors that can be computed during prepare and treated
|
||||
// as constant inputs for downstream ops (also in prepare).
|
||||
// * kTfLiteCustom: Custom memory allocation provided by the user. See
|
||||
// TfLiteCustomAllocation below.
|
||||
typedef enum TfLiteAllocationType {
|
||||
kTfLiteMemNone = 0,
|
||||
kTfLiteMmapRo,
|
||||
kTfLiteArenaRw,
|
||||
kTfLiteArenaRwPersistent,
|
||||
kTfLiteDynamic,
|
||||
kTfLitePersistentRo,
|
||||
kTfLiteCustom,
|
||||
} TfLiteAllocationType;
|
||||
|
||||
// The delegates should use zero or positive integers to represent handles.
|
||||
@@ -359,8 +421,18 @@ typedef struct TfLiteSparsity {
|
||||
int dim_metadata_size;
|
||||
} TfLiteSparsity;
|
||||
|
||||
// An tensor in the interpreter system which is a wrapper around a buffer of
|
||||
// Defines a custom memory allocation not owned by the runtime.
|
||||
// `data` should be aligned to kDefaultTensorAlignment defined in
|
||||
// lite/util.h. (Currently 64 bytes)
|
||||
// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage.
|
||||
typedef struct TfLiteCustomAllocation {
|
||||
void* data;
|
||||
size_t bytes;
|
||||
} TfLiteCustomAllocation;
|
||||
|
||||
// A tensor in the interpreter system which is a wrapper around a buffer of
|
||||
// data including a dimensionality (or NULL if not currently defined).
|
||||
#ifndef TF_LITE_STATIC_MEMORY
|
||||
typedef struct TfLiteTensor {
|
||||
// The data type specification for data stored in `data`. This affects
|
||||
// what member of `data` union should be used.
|
||||
@@ -426,31 +498,6 @@ typedef struct TfLiteTensor {
|
||||
const TfLiteIntArray* dims_signature;
|
||||
} TfLiteTensor;
|
||||
|
||||
#ifndef TF_LITE_STATIC_MEMORY
|
||||
// Free data memory of tensor `t`.
|
||||
void TfLiteTensorDataFree(TfLiteTensor* t);
|
||||
|
||||
// Free quantization data.
|
||||
void TfLiteQuantizationFree(TfLiteQuantization* quantization);
|
||||
|
||||
// Free sparsity parameters.
|
||||
void TfLiteSparsityFree(TfLiteSparsity* sparsity);
|
||||
|
||||
// Free memory of tensor `t`.
|
||||
void TfLiteTensorFree(TfLiteTensor* t);
|
||||
|
||||
// Set all of a tensor's fields (and free any previously allocated data).
|
||||
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
|
||||
TfLiteQuantizationParams quantization, char* buffer,
|
||||
size_t size, TfLiteAllocationType allocation_type,
|
||||
const void* allocation, bool is_variable,
|
||||
TfLiteTensor* tensor);
|
||||
|
||||
// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
|
||||
// types other than kTfLiteDynamic will be ignored.
|
||||
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
|
||||
// A structure representing an instance of a node.
|
||||
// This structure only exhibits the inputs, outputs and user defined data, not
|
||||
// other features like the type.
|
||||
@@ -487,6 +534,130 @@ typedef struct TfLiteNode {
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
struct TfLiteDelegate* delegate;
|
||||
} TfLiteNode;
|
||||
#else // defined(TF_LITE_STATIC_MEMORY)?
|
||||
// NOTE: This flag is opt-in only at compile time.
|
||||
//
|
||||
// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
|
||||
// contains only the minimum fields required to initialize and prepare a micro
|
||||
// inference graph. The fields in this struct have been ordered from
|
||||
// largest-to-smallest for optimal struct sizeof.
|
||||
//
|
||||
// This struct does not use:
|
||||
// - allocation
|
||||
// - buffer_handle
|
||||
// - data_is_stale
|
||||
// - delegate
|
||||
// - dims_signature
|
||||
// - name
|
||||
// - sparsity
|
||||
typedef struct TfLiteTensor {
|
||||
// TODO(b/155784997): Consider consolidating these quantization fields:
|
||||
// Quantization information. Replaces params field above.
|
||||
TfLiteQuantization quantization;
|
||||
|
||||
// Quantization information.
|
||||
TfLiteQuantizationParams params;
|
||||
|
||||
// A union of data pointers. The appropriate type should be used for a typed
|
||||
// tensor based on `type`.
|
||||
TfLitePtrUnion data;
|
||||
|
||||
// A pointer to a structure representing the dimensionality interpretation
|
||||
// that the buffer should have. NOTE: the product of elements of `dims`
|
||||
// and the element datatype size should be equal to `bytes` below.
|
||||
TfLiteIntArray* dims;
|
||||
|
||||
// The number of bytes required to store the data of this Tensor. I.e.
|
||||
// (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
|
||||
// type is kTfLiteFloat32 and dims = {3, 2} then
|
||||
// bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
|
||||
size_t bytes;
|
||||
|
||||
// The data type specification for data stored in `data`. This affects
|
||||
// what member of `data` union should be used.
|
||||
TfLiteType type;
|
||||
|
||||
// How memory is mapped
|
||||
// kTfLiteMmapRo: Memory mapped read only.
|
||||
// i.e. weights
|
||||
// kTfLiteArenaRw: Arena allocated read write memory
|
||||
// (i.e. temporaries, outputs).
|
||||
TfLiteAllocationType allocation_type;
|
||||
|
||||
// True if the tensor is a variable.
|
||||
bool is_variable;
|
||||
} TfLiteTensor;
|
||||
|
||||
// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
|
||||
// only the minimum fields required to represent a node.
|
||||
//
|
||||
// This struct does not use:
|
||||
// - delegate
|
||||
// - intermediates
|
||||
// - temporaries
|
||||
typedef struct TfLiteNode {
|
||||
// Inputs to this node expressed as indices into the simulator's tensors.
|
||||
TfLiteIntArray* inputs;
|
||||
|
||||
// Outputs to this node expressed as indices into the simulator's tensors.
|
||||
TfLiteIntArray* outputs;
|
||||
|
||||
// Opaque data provided by the node implementer through `Registration.init`.
|
||||
void* user_data;
|
||||
|
||||
// Opaque data provided to the node if the node is a builtin. This is usually
|
||||
// a structure defined in builtin_op_data.h
|
||||
void* builtin_data;
|
||||
|
||||
// Custom initial data. This is the opaque data provided in the flatbuffer.
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
const void* custom_initial_data;
|
||||
int custom_initial_data_size;
|
||||
} TfLiteNode;
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
|
||||
// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
|
||||
// of information required for a kernel to run during TfLiteRegistration::Eval.
|
||||
// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM
|
||||
// builds with this flag by default internally.
|
||||
typedef struct TfLiteEvalTensor {
|
||||
// A union of data pointers. The appropriate type should be used for a typed
|
||||
// tensor based on `type`.
|
||||
TfLitePtrUnion data;
|
||||
|
||||
// A pointer to a structure representing the dimensionality interpretation
|
||||
// that the buffer should have.
|
||||
TfLiteIntArray* dims;
|
||||
|
||||
// The data type specification for data stored in `data`. This affects
|
||||
// what member of `data` union should be used.
|
||||
TfLiteType type;
|
||||
} TfLiteEvalTensor;
|
||||
|
||||
#ifndef TF_LITE_STATIC_MEMORY
|
||||
// Free data memory of tensor `t`.
|
||||
void TfLiteTensorDataFree(TfLiteTensor* t);
|
||||
|
||||
// Free quantization data.
|
||||
void TfLiteQuantizationFree(TfLiteQuantization* quantization);
|
||||
|
||||
// Free sparsity parameters.
|
||||
void TfLiteSparsityFree(TfLiteSparsity* sparsity);
|
||||
|
||||
// Free memory of tensor `t`.
|
||||
void TfLiteTensorFree(TfLiteTensor* t);
|
||||
|
||||
// Set all of a tensor's fields (and free any previously allocated data).
|
||||
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
|
||||
TfLiteQuantizationParams quantization, char* buffer,
|
||||
size_t size, TfLiteAllocationType allocation_type,
|
||||
const void* allocation, bool is_variable,
|
||||
TfLiteTensor* tensor);
|
||||
|
||||
// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
|
||||
// types other than kTfLiteDynamic will be ignored.
|
||||
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
//
|
||||
@@ -578,12 +749,11 @@ typedef struct TfLiteContext {
|
||||
void* profiler;
|
||||
|
||||
// Allocate persistent buffer which has the same life time as the interpreter.
|
||||
// Returns nullptr on failure.
|
||||
// The memory is allocated from heap for TFL, and from tail in TFLM.
|
||||
// If *ptr is not nullptr, the pointer will be reallocated.
|
||||
// This method is only available in Prepare stage.
|
||||
// This method is only available in Init or Prepare stage.
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
TfLiteStatus (*AllocatePersistentBuffer)(struct TfLiteContext* ctx,
|
||||
size_t bytes, void** ptr);
|
||||
void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
|
||||
|
||||
// Allocate a buffer which will be deallocated right after invoke phase.
|
||||
// The memory is allocated from heap in TFL, and from volatile arena in TFLM.
|
||||
@@ -638,6 +808,18 @@ typedef struct TfLiteContext {
|
||||
TfLiteStatus (*PreviewDelegatePartitioning)(
|
||||
struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
|
||||
TfLiteDelegateParams** partition_params_array, int* num_partitions);
|
||||
|
||||
// Returns a TfLiteTensor struct for a given index.
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
// WARNING: This method may not be available on all platforms.
|
||||
TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
|
||||
int tensor_idx);
|
||||
|
||||
// Returns a TfLiteEvalTensor struct for a given index.
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
// WARNING: This method may not be available on all platforms.
|
||||
TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
|
||||
int tensor_idx);
|
||||
} TfLiteContext;
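A hypothetical sketch of the new AllocatePersistentBuffer signature shown above, called from an op's init hook (OpData is an illustrative per-op struct, not part of this header):

struct OpData { int multiplier; };  // illustrative per-op scratch state

void* OpInit(TfLiteContext* context, const char* /*buffer*/, size_t /*length*/) {
  // The allocation lives as long as the interpreter; nullptr signals failure.
  return context->AllocatePersistentBuffer(context, sizeof(OpData));
}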
|
||||
|
||||
typedef struct TfLiteRegistration {
|
||||
@@ -712,7 +894,26 @@ typedef enum TfLiteDelegateFlags {
|
||||
//
|
||||
// If the delegate isn't capable of handling dynamic tensors, this flag needs
|
||||
// to be set to false.
|
||||
kTfLiteDelegateFlagsAllowDynamicTensors = 1
|
||||
kTfLiteDelegateFlagsAllowDynamicTensors = 1,
|
||||
|
||||
// This flag can be used by delegates (that allow dynamic tensors) to ensure
|
||||
// applicable tensor shapes are automatically propagated in the case of tensor
|
||||
// resizing.
|
||||
// This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
|
||||
// of a delegate kernel will have correct shapes before its Prepare() method
|
||||
// is called. The runtime leverages TFLite builtin ops in the original
|
||||
// execution plan to propagate shapes.
|
||||
//
|
||||
// A few points to note:
|
||||
// 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
|
||||
// false, this one is redundant since the delegate kernels are re-initialized
|
||||
// every time tensors are resized.
|
||||
// 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
|
||||
// work is required to prepare the original execution plan.
|
||||
// 3. This flag requires that the original execution plan only have ops with
|
||||
// valid registrations (and not 'dummy' custom ops like with Flex).
|
||||
// WARNING: This feature is experimental and subject to change.
|
||||
kTfLiteDelegateFlagsRequirePropagatedShapes = 2
|
||||
} TfLiteDelegateFlags;
|
||||
|
||||
// WARNING: This is an experimental interface that is subject to change.
|
||||
@@ -731,8 +932,9 @@ typedef struct TfLiteDelegate {
|
||||
struct TfLiteDelegate* delegate);
|
||||
|
||||
// Copy the data from delegate buffer handle into raw memory of the given
|
||||
// 'tensor'. This cannot be null. The delegate is allowed to allocate the raw
|
||||
// bytes as long as it follows the rules for kTfLiteDynamic tensors.
|
||||
// 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
|
||||
// long as it follows the rules for kTfLiteDynamic tensors, in which case this
|
||||
// cannot be null.
|
||||
TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
|
||||
struct TfLiteDelegate* delegate,
|
||||
TfLiteBufferHandle buffer_handle,
|
||||
|
||||
File diff suppressed because it is too large
@@ -19,9 +19,12 @@ limitations under the License.
|
||||
// flatbuffer serialization format into in-memory values that are used by the
|
||||
// runtime API and interpreter.
|
||||
|
||||
#include <cstddef>
|
||||
#include <new>
|
||||
#include <type_traits>
|
||||
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/core/api/error_reporter.h"
|
||||
#include "tensorflow/lite/core/api/op_resolver.h"
|
||||
#include "tensorflow/lite/schema/schema_generated.h"
|
||||
|
||||
namespace tflite {
|
||||
@@ -42,7 +45,7 @@ class BuiltinDataAllocator {
|
||||
// platform targets support that properly.
|
||||
static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
|
||||
void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
|
||||
return new (allocated_memory) T;
|
||||
return new (allocated_memory) T();
|
||||
}
|
||||
|
||||
virtual ~BuiltinDataAllocator() {}
|
||||
@@ -66,6 +69,196 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
||||
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
|
||||
ErrorReporter* error_reporter);
|
||||
|
||||
TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseConcatenation(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseFullyConnected(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseGreaterEqual(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseL2Normalization(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseResizeBilinear(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseStridedSlice(const Operator* op,
|
||||
ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator,
|
||||
void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
|
||||
BuiltinDataAllocator* allocator, void** builtin_data);
|
||||
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
|
||||
|
||||
@@ -15,6 +15,11 @@ limitations under the License.
|
||||
|
||||
#include "tensorflow/lite/core/api/op_resolver.h"
|
||||
|
||||
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/core/api/error_reporter.h"
|
||||
#include "tensorflow/lite/schema/schema_utils.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
TfLiteStatus GetRegistrationFromOpCode(
|
||||
@@ -22,7 +27,7 @@ TfLiteStatus GetRegistrationFromOpCode(
|
||||
ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
|
||||
TfLiteStatus status = kTfLiteOk;
|
||||
*registration = nullptr;
|
||||
auto builtin_code = opcode->builtin_code();
|
||||
auto builtin_code = GetBuiltinCode(opcode);
|
||||
int version = opcode->version();
|
||||
|
||||
if (builtin_code > BuiltinOperator_MAX ||
|
||||
|
||||
@@ -15,6 +15,8 @@ limitations under the License.
|
||||
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
|
||||
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/core/api/error_reporter.h"
|
||||
#include "tensorflow/lite/schema/schema_generated.h"
|
||||
@@ -32,6 +34,16 @@ class OpResolver {
|
||||
/// Finds the op registration of a custom operator by op name.
|
||||
virtual const TfLiteRegistration* FindOp(const char* op,
|
||||
int version) const = 0;
|
||||
|
||||
// Returns optional delegates for resolving and handling ops in the flatbuffer
|
||||
// model. This may be used in addition to the standard TfLiteRegistration
|
||||
// lookup for graph resolution.
|
||||
using TfLiteDelegatePtrVector =
|
||||
std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
|
||||
virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
|
||||
return TfLiteDelegatePtrVector();
|
||||
}
|
||||
|
||||
virtual ~OpResolver() {}
|
||||
};
|
||||
|
||||
|
||||
194
code/lib/tfmicro/tensorflow/lite/core/api/profiler.h
Normal file
@@ -0,0 +1,194 @@
|
||||
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_
|
||||
#define TENSORFLOW_LITE_CORE_API_PROFILER_H_
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace tflite {
|
||||
|
||||
// A simple utility for enabling profiled event tracing in TensorFlow Lite.
|
||||
class Profiler {
|
||||
public:
|
||||
// As a given Profiler instance might only be interested in certain event
|
||||
// types, we define each event type value to allow a Profiler to use
|
||||
// bitmasking bitwise operations to determine whether an event should be
|
||||
// recorded or not.
|
||||
enum class EventType {
|
||||
// Default event type, the metadata field has no special significance.
|
||||
DEFAULT = 1,
|
||||
|
||||
// The event is an operator invocation and the event_metadata field is the
|
||||
// index of operator node.
|
||||
OPERATOR_INVOKE_EVENT = 2,
|
||||
|
||||
// The event is an invocation for an internal operator of a TFLite delegate.
|
||||
// The event_metadata field is the index of operator node that's specific to
|
||||
// the delegate.
|
||||
DELEGATE_OPERATOR_INVOKE_EVENT = 4,
|
||||
|
||||
// The event is a recording of runtime instrumentation such as the overall
|
||||
// TFLite runtime status, the TFLite delegate status (if a delegate
|
||||
// is applied), and the overall model inference latency etc.
|
||||
// Note, the delegate status and overall status are stored as separate
|
||||
// event_metadata fields. In particular, the delegate status is encoded
|
||||
// as DelegateStatus::full_status().
|
||||
GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8,
|
||||
};
|
||||
|
||||
virtual ~Profiler() {}
|
||||
|
||||
// Signals the beginning of an event and returns a handle to the profile
|
||||
// event. The `event_metadata1` and `event_metadata2` have different
|
||||
// interpretations based on the actual Profiler instance and the `event_type`.
|
||||
// For example, as for the 'SubgraphAwareProfiler' defined in
|
||||
// lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT,
|
||||
// `event_metadata1` represents the index of a TFLite node, and
|
||||
// `event_metadata2` represents the index of the subgraph that this event
|
||||
// comes from.
|
||||
virtual uint32_t BeginEvent(const char* tag, EventType event_type,
|
||||
int64_t event_metadata1,
|
||||
int64_t event_metadata2) = 0;
|
||||
// Similar w/ the above, but `event_metadata2` defaults to 0.
|
||||
uint32_t BeginEvent(const char* tag, EventType event_type,
|
||||
int64_t event_metadata) {
|
||||
return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0);
|
||||
}
|
||||
|
||||
// Signals an end to the specified profile event with 'event_metadata's. This
|
||||
// is useful when 'event_metadata's are not available when the event begins
|
||||
// or when one wants to overwrite the 'event_metadata's set at the beginning.
|
||||
virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1,
|
||||
int64_t event_metadata2) {}
|
||||
// Signals an end to the specified profile event.
|
||||
virtual void EndEvent(uint32_t event_handle) = 0;
|
||||
|
||||
// Appends an event of type 'event_type' with 'tag' and 'event_metadata'
|
||||
// which started at 'start' and ended at 'end'
|
||||
// Note:
|
||||
// In cases where ProfileSummarizer and tensorflow::StatsCalculator are used
|
||||
// they assume the value is in "usec", if in any case subclasses
|
||||
// didn't put usec, then the values are not meaningful.
|
||||
// TODO karimnosseir: Revisit and make the function more clear.
|
||||
void AddEvent(const char* tag, EventType event_type, uint64_t start,
|
||||
uint64_t end, int64_t event_metadata) {
|
||||
AddEvent(tag, event_type, start, end, event_metadata,
|
||||
/*event_metadata2*/ 0);
|
||||
}
|
||||
|
||||
virtual void AddEvent(const char* tag, EventType event_type, uint64_t start,
|
||||
uint64_t end, int64_t event_metadata1,
|
||||
int64_t event_metadata2) {}
|
||||
|
||||
protected:
|
||||
friend class ScopedProfile;
|
||||
};
|
||||
|
||||
// Adds a profile event to `profiler` that begins with the construction
|
||||
// of the object and ends when the object goes out of scope.
|
||||
// The lifetime of tag should be at least the lifetime of `profiler`.
|
||||
// `profiler` may be null, in which case nothing is profiled.
|
||||
class ScopedProfile {
|
||||
public:
|
||||
ScopedProfile(Profiler* profiler, const char* tag,
|
||||
Profiler::EventType event_type = Profiler::EventType::DEFAULT,
|
||||
int64_t event_metadata = 0)
|
||||
: profiler_(profiler), event_handle_(0) {
|
||||
if (profiler) {
|
||||
event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata);
|
||||
}
|
||||
}
|
||||
|
||||
~ScopedProfile() {
|
||||
if (profiler_) {
|
||||
profiler_->EndEvent(event_handle_);
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
Profiler* profiler_;
|
||||
uint32_t event_handle_;
|
||||
};
|
||||
|
||||
class ScopedOperatorProfile : public ScopedProfile {
|
||||
public:
|
||||
ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
|
||||
: ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
|
||||
static_cast<uint32_t>(node_index)) {}
|
||||
};
|
||||
|
||||
class ScopedDelegateOperatorProfile : public ScopedProfile {
|
||||
public:
|
||||
ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag,
|
||||
int node_index)
|
||||
: ScopedProfile(profiler, tag,
|
||||
Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT,
|
||||
static_cast<uint32_t>(node_index)) {}
|
||||
};
|
||||
|
||||
class ScopedRuntimeInstrumentationProfile : public ScopedProfile {
|
||||
public:
|
||||
ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag)
|
||||
: ScopedProfile(
|
||||
profiler, tag,
|
||||
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {}
|
||||
|
||||
void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) {
|
||||
if (profiler_) {
|
||||
delegate_status_ = delegate_status;
|
||||
interpreter_status_ = interpreter_status;
|
||||
}
|
||||
}
|
||||
|
||||
~ScopedRuntimeInstrumentationProfile() {
|
||||
if (profiler_) {
|
||||
profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
int64_t delegate_status_;
|
||||
int64_t interpreter_status_;
|
||||
};
|
||||
|
||||
} // namespace tflite
|
||||
|
||||
#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr
|
||||
#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr)
|
||||
|
||||
#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \
|
||||
tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
|
||||
(profiler), (tag))
|
||||
|
||||
#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
|
||||
tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
|
||||
(profiler), (tag), (node_index))
|
||||
|
||||
#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \
|
||||
tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \
|
||||
_profile_, __COUNTER__)((profiler), (tag), (node_index))
|
||||
|
||||
#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \
|
||||
profiler, tag, delegate_status, interpreter_status) \
|
||||
do { \
|
||||
if (!profiler) { \
|
||||
const auto handle = profiler->BeginEvent( \
|
||||
tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \
|
||||
delegate_status, interpreter_status); \
|
||||
profiler->EndEvent(handle); \
|
||||
} \
|
||||
} while (false);
|
||||
|
||||
#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_
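// --------------------------------------------------------------------------
// Illustrative usage sketch (not part of the TensorFlow sources above): how
// the scoped helpers are typically used, assuming a hypothetical Profiler
// subclass `CountingProfiler` that merely counts events. The class name,
// counters and helper function are inventions for this example only.
namespace profiler_example {

class CountingProfiler : public tflite::Profiler {
 public:
  uint32_t BeginEvent(const char* tag, EventType event_type,
                      int64_t event_metadata1,
                      int64_t event_metadata2) override {
    return ++begun_;  // The handle is just the running event count.
  }
  void EndEvent(uint32_t event_handle) override { ++ended_; }

  uint32_t begun_ = 0;
  uint32_t ended_ = 0;
};

inline void RunProfiledOp(tflite::Profiler* profiler) {
  // Starts an OPERATOR_INVOKE_EVENT here; EndEvent() runs automatically when
  // `scope` is destroyed at the end of the function. A null profiler is fine.
  tflite::ScopedOperatorProfile scope(profiler, "example_op", /*node_index=*/0);
  // ... work attributed to "example_op" ...
}

}  // namespace profiler_example
// --------------------------------------------------------------------------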
@@ -17,6 +17,8 @@ limitations under the License.
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
|
||||
|
||||
@@ -55,9 +55,12 @@ inline void GetActivationMinMax(FusedActivationFunctionType ac,
|
||||
}
|
||||
}
|
||||
|
||||
inline float ActivationFunctionWithMinMax(float x, float output_activation_min,
|
||||
float output_activation_max) {
|
||||
return std::min(std::max(x, output_activation_min), output_activation_max);
|
||||
template <typename T>
|
||||
inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
|
||||
T output_activation_max) {
|
||||
using std::max;
|
||||
using std::min;
|
||||
return min(max(x, output_activation_min), output_activation_max);
|
||||
}
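// Illustrative example (not from the TensorFlow sources): with a ReLU6-style
// clamp, ActivationFunctionWithMinMax(7.0f, 0.0f, 6.0f) yields 6.0f and
// ActivationFunctionWithMinMax(-1.0f, 0.0f, 6.0f) yields 0.0f.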
|
||||
|
||||
// Legacy function, left for compatibility only.
|
||||
@@ -135,23 +138,24 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
|
||||
#endif
|
||||
}
|
||||
|
||||
inline int32 MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
int32 x, int32 quantized_multiplier, int left_shift) {
|
||||
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
int32_t x, int32_t quantized_multiplier, int left_shift) {
|
||||
using gemmlowp::RoundingDivideByPOT;
|
||||
using gemmlowp::SaturatingRoundingDoublingHighMul;
|
||||
return RoundingDivideByPOT(
|
||||
SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
|
||||
}
|
||||
|
||||
inline int32 MultiplyByQuantizedMultiplierGreaterThanOne(
|
||||
int32 x, int32 quantized_multiplier, int left_shift) {
|
||||
inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
|
||||
int32_t x, int32_t quantized_multiplier, int left_shift) {
|
||||
using gemmlowp::SaturatingRoundingDoublingHighMul;
|
||||
return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
|
||||
quantized_multiplier);
|
||||
}
|
||||
|
||||
inline int32 MultiplyByQuantizedMultiplier(int32 x, int32 quantized_multiplier,
|
||||
int shift) {
|
||||
inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
|
||||
int32_t quantized_multiplier,
|
||||
int shift) {
|
||||
using gemmlowp::RoundingDivideByPOT;
|
||||
using gemmlowp::SaturatingRoundingDoublingHighMul;
|
||||
int left_shift = shift > 0 ? shift : 0;
|
||||
@@ -161,16 +165,16 @@ inline int32 MultiplyByQuantizedMultiplier(int32 x, int32 quantized_multiplier,
|
||||
right_shift);
|
||||
}
|
||||
|
||||
inline int32 MultiplyByQuantizedMultiplier(int64_t x,
|
||||
int32 quantized_multiplier,
|
||||
int shift) {
|
||||
inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
|
||||
int32_t quantized_multiplier,
|
||||
int shift) {
|
||||
// Inputs:
|
||||
// - quantized_multiplier has fixed point at bit 31
|
||||
// - shift is -31 to +7 (negative for right shift)
|
||||
//
|
||||
// Assumptions: The following input ranges are assumed
|
||||
// - quantize_scale>=0 (the usual range is (1<<30) to (1<<31)-1)
|
||||
// - scaling is chosen so final scaled result fits in int32
|
||||
// - scaling is chosen so final scaled result fits in int32_t
|
||||
// - input x is in the range -(1<<47) <= x < (1<<47)
|
||||
assert(quantized_multiplier >= 0);
|
||||
assert(shift >= -31 && shift < 8);
|
||||
@@ -215,9 +219,9 @@ inline int CountLeadingSignBits(T integer_input) {
|
||||
using U = typename std::make_unsigned<T>::type;
|
||||
return integer_input >= 0
|
||||
? CountLeadingZeros(static_cast<U>(integer_input)) - 1
|
||||
: integer_input != std::numeric_limits<T>::min()
|
||||
? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
|
||||
: 0;
|
||||
: integer_input != std::numeric_limits<T>::min()
|
||||
? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
|
||||
: 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -237,8 +241,12 @@ inline Integer FloorLog2(Integer n) {
|
||||
|
||||
// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
|
||||
// softmax
|
||||
inline void gen_lut(const std::function<double(double)>& func, double min,
|
||||
double max, int16_t* table, const int num) {
|
||||
// func - the function to build the LUT for (e.g exp(x))
|
||||
// min,max - table limits
|
||||
// table - pointer to buffer
|
||||
// num - number of elements in the LUT
|
||||
inline void gen_lut(double (*func)(double), double min, double max,
|
||||
int16_t* table, const int num) {
|
||||
// size of table should equal to num + 1
|
||||
// last element only for slope calculation
|
||||
double step = (max - min) / (num - 1);
|
||||
@@ -259,7 +267,35 @@ inline void gen_lut(const std::function<double(double)>& func, double min,
|
||||
std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
|
||||
}
|
||||
|
||||
// int16 func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
|
||||
// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
|
||||
// softmax
|
||||
// func - the function to build the LUT for (e.g. exp(x))
|
||||
// min,max - table limits
|
||||
// table - pointer to buffer
|
||||
// num - number of elements in the LUT
|
||||
inline void gen_lut(float (*func)(float), float min, float max, int16_t* table,
|
||||
const int num) {
|
||||
// size of table should equal to num + 1
|
||||
// last element only for slope calculation
|
||||
float step = (max - min) / (num - 1);
|
||||
float half_step = step / 2.0f;
|
||||
for (int i = 0; i < num - 1; i++) {
|
||||
float sample_val = TfLiteRound(func(min + i * step) * 32768.0f);
|
||||
float midpoint_interp_val =
|
||||
TfLiteRound((func(min + (i + 1) * step) * 32768.0f +
|
||||
TfLiteRound(func(min + i * step) * 32768.0f)) /
|
||||
2.0f);
|
||||
float midpoint_val =
|
||||
TfLiteRound(func(min + i * step + half_step) * 32768.0f);
|
||||
float midpoint_err = midpoint_interp_val - midpoint_val;
|
||||
float bias = TfLiteRound(midpoint_err / 2.0f);
|
||||
table[i] = std::min(std::max(sample_val - bias, -32768.0f), 32767.0f);
|
||||
}
|
||||
table[num - 1] = std::min(
|
||||
std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f);
|
||||
}
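// Illustrative usage sketch (not part of the TensorFlow sources above): how a
// caller might build a small exp() lookup table with the float overload of
// gen_lut(). The table size, input range and helper name are assumptions for
// this example only; <cmath> is assumed to be included.
inline void BuildExampleExpLut() {
  static int16_t exp_table[65];  // 64 LUT entries plus one slope element.
  tflite::gen_lut([](float x) { return std::exp(x); }, /*min=*/-8.0f,
                  /*max=*/0.0f, exp_table, /*num=*/64);
}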
|
||||
|
||||
// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
|
||||
inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
|
||||
// 512 base value, lut[513] only for calculate slope
|
||||
uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
|
||||
@@ -410,6 +446,23 @@ SaturatingRoundingMultiplyByPOTParam(
|
||||
SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
|
||||
}
|
||||
|
||||
// Convert int32_t multiplier to int16_t with rounding.
|
||||
inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
|
||||
int16_t* multiplier_int16_t) {
|
||||
TFLITE_DCHECK_GE(multiplier_int32_t, 0);
|
||||
static constexpr int32_t kRoundingOffset = 1 << 15;
|
||||
if (multiplier_int32_t >=
|
||||
std::numeric_limits<int32_t>::max() - kRoundingOffset) {
|
||||
*multiplier_int16_t = std::numeric_limits<int16_t>::max();
|
||||
return;
|
||||
}
|
||||
const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16;
|
||||
TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset);
|
||||
TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset);
|
||||
*multiplier_int16_t = result;
|
||||
TFLITE_DCHECK_EQ(*multiplier_int16_t, result);
|
||||
}
|
||||
|
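// Illustrative example (not from the TensorFlow sources): for
// multiplier_int32_t = 1610612736 (0.75 in Q31 format),
// DownScaleInt32ToInt16Multiplier() yields (1610612736 + (1 << 15)) >> 16
// = 24576, i.e. 0.75 in Q15 format.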
||||
// Minimum output bits to accommodate log of maximum input range. It actually
|
||||
// does not matter if one considers, say, [-64,64] or [-64,64).
|
||||
//
|
||||
@@ -418,15 +471,13 @@ SaturatingRoundingMultiplyByPOTParam(
|
||||
// ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ...
|
||||
// ceil(log(abs( log(2.^(0:127))+1 ))/log(2))]
|
||||
constexpr int min_log_x_output_bits(int input_bits) {
|
||||
return input_bits > 90
|
||||
? 7
|
||||
: input_bits > 44
|
||||
? 6
|
||||
: input_bits > 21
|
||||
? 5
|
||||
: input_bits > 10
|
||||
? 4
|
||||
: input_bits > 4 ? 3 : input_bits > 1 ? 2 : 1;
|
||||
return input_bits > 90 ? 7
|
||||
: input_bits > 44 ? 6
|
||||
: input_bits > 21 ? 5
|
||||
: input_bits > 10 ? 4
|
||||
: input_bits > 4 ? 3
|
||||
: input_bits > 1 ? 2
|
||||
: 1;
|
||||
}
|
||||
|
||||
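// Illustrative check (not from the TensorFlow sources): for a 31-bit input the
// largest magnitude of log(x) is about ln(2^31) ~= 21.5, so
// min_log_x_output_bits(31) returns 5 (2^5 = 32 covers 21.5, 2^4 = 16 does not).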
// Although currently the name of this function says that it cannot handle
|
||||
@@ -434,17 +485,17 @@ constexpr int min_log_x_output_bits(int input_bits) {
|
||||
// x_max is the largest representable input. In other words, the output range
|
||||
// is symmetric.
|
||||
template <int OutputIntegerBits, int InputIntegerBits>
|
||||
inline gemmlowp::FixedPoint<int32, OutputIntegerBits>
|
||||
inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
|
||||
log_x_for_x_greater_than_or_equal_to_1_impl(
|
||||
gemmlowp::FixedPoint<int32, InputIntegerBits> input_val) {
|
||||
// assert(__builtin_clz(0u) >= std::numeric_limits<uint32>::digits - 1);
|
||||
// assert(__builtin_clz(0u) <= std::numeric_limits<uint32>::digits);
|
||||
using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
|
||||
gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
|
||||
// assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
|
||||
// assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
|
||||
using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
|
||||
// The reason for accumulating the result with an extra bit of headroom is
|
||||
// that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
|
||||
// recip_denom will otherwise introduce an error.
|
||||
static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
|
||||
using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumIntegerBits>;
|
||||
using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;
|
||||
|
||||
const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
|
||||
FixedPoint0, 1488522236, std::log(2.0));
|
||||
@@ -472,10 +523,10 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
|
||||
// required shift "ourselves" instead of using, say, Rescale.
|
||||
FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
|
||||
// z_a_pow_2 = input_integer_bits - z_a_headroom;
|
||||
int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32>(z_a.raw()));
|
||||
int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
|
||||
FixedPoint0 r_a_tmp =
|
||||
SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
|
||||
const int32 r_a_raw =
|
||||
const int32_t r_a_raw =
|
||||
SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
|
||||
// z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
|
||||
// z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
|
||||
@@ -487,8 +538,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
|
||||
|
||||
// z_b is treated like z_a, but premultiplying by sqrt(0.5).
|
||||
FixedPoint0 z_b = z_a * sqrt_half;
|
||||
int z_b_headroom = CountLeadingZeros(static_cast<uint32>(z_b.raw())) - 1;
|
||||
const int32 r_b_raw =
|
||||
int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
|
||||
const int32_t r_b_raw =
|
||||
SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
|
||||
const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
|
||||
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
|
||||
@@ -516,9 +567,9 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
|
||||
}
|
||||
|
||||
template <int OutputIntegerBits, int InputIntegerBits>
|
||||
inline gemmlowp::FixedPoint<int32, OutputIntegerBits>
|
||||
inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
|
||||
log_x_for_x_greater_than_or_equal_to_1(
|
||||
gemmlowp::FixedPoint<int32, InputIntegerBits> input_val) {
|
||||
gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
|
||||
static_assert(
|
||||
OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
|
||||
"Output integer bits must be sufficient to accommodate logs of inputs.");
|
||||
@@ -527,25 +578,25 @@ log_x_for_x_greater_than_or_equal_to_1(
|
||||
input_val);
|
||||
}
|
||||
|
||||
inline int32 GetReciprocal(int32 x, int x_integer_digits,
|
||||
int* num_bits_over_unit) {
|
||||
int headroom_plus_one = CountLeadingZeros(static_cast<uint32>(x));
|
||||
inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
|
||||
int* num_bits_over_unit) {
|
||||
int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
|
||||
// This is the number of bits to the left of the binary point above 1.0.
|
||||
// Consider x=1.25. In that case shifted_scale=0.8 and
|
||||
// no later adjustment will be needed.
|
||||
*num_bits_over_unit = x_integer_digits - headroom_plus_one;
|
||||
const int32 shifted_sum_minus_one =
|
||||
static_cast<int32>((static_cast<uint32>(x) << headroom_plus_one) -
|
||||
(static_cast<uint32>(1) << 31));
|
||||
const int32_t shifted_sum_minus_one =
|
||||
static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
|
||||
(static_cast<uint32_t>(1) << 31));
|
||||
|
||||
gemmlowp::FixedPoint<int32, 0> shifted_scale =
|
||||
gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
|
||||
gemmlowp::one_over_one_plus_x_for_x_in_0_1(
|
||||
gemmlowp::FixedPoint<int32, 0>::FromRaw(shifted_sum_minus_one));
|
||||
gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
|
||||
return shifted_scale.raw();
|
||||
}
|
||||
|
||||
inline void GetInvSqrtQuantizedMultiplierExp(int32 input, int reverse_shift,
|
||||
int32* output_inv_sqrt,
|
||||
inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
|
||||
int32_t* output_inv_sqrt,
|
||||
int* output_shift) {
|
||||
TFLITE_DCHECK_GE(input, 0);
|
||||
if (input <= 1) {
|
||||
@@ -565,7 +616,7 @@ inline void GetInvSqrtQuantizedMultiplierExp(int32 input, int reverse_shift,
|
||||
++*output_shift;
|
||||
}
|
||||
const unsigned max_left_shift_bits =
|
||||
CountLeadingZeros(static_cast<uint32>(input)) - 1;
|
||||
CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
|
||||
const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
|
||||
const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
|
||||
*output_shift -= left_shift_bit_pairs;
|
||||
@@ -577,8 +628,8 @@ inline void GetInvSqrtQuantizedMultiplierExp(int32 input, int reverse_shift,
|
||||
using gemmlowp::SaturatingRoundingMultiplyByPOT;
|
||||
// Using 3 integer bits gives us enough room for the internal arithmetic in
|
||||
// this Newton-Raphson iteration.
|
||||
using F3 = FixedPoint<int32, 3>;
|
||||
using F0 = FixedPoint<int32, 0>;
|
||||
using F3 = FixedPoint<int32_t, 3>;
|
||||
using F0 = FixedPoint<int32_t, 0>;
|
||||
const F3 fixedpoint_input = F3::FromRaw(input >> 1);
|
||||
const F3 fixedpoint_half_input =
|
||||
SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
|
||||
@@ -645,6 +696,13 @@ inline int SubscriptToIndex(const NdArrayDesc<5>& desc, int indexes[5]) {
|
||||
indexes[4] * desc.strides[4];
|
||||
}
|
||||
|
||||
inline int SubscriptToIndex(const NdArrayDesc<8>& desc, int indexes[8]) {
|
||||
return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
|
||||
indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
|
||||
indexes[4] * desc.strides[4] + indexes[5] * desc.strides[5] +
|
||||
indexes[6] * desc.strides[6] + indexes[7] * desc.strides[7];
|
||||
}
|
||||
|
||||
// Given the dimensions of the operands for an element-wise binary broadcast,
|
||||
// adjusts them so that they can be directly iterated over with simple loops.
|
||||
// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
|
||||
|
||||
@@ -76,13 +76,15 @@ limitations under the License.
|
||||
#define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT
|
||||
#endif
|
||||
|
||||
// TODO(ahentz): Clean up.
|
||||
#ifndef TF_LITE_STATIC_MEMORY
|
||||
// TODO(b/162019032): Consider removing these type-aliases.
|
||||
using int8 = std::int8_t;
|
||||
using uint8 = std::uint8_t;
|
||||
using int16 = std::int16_t;
|
||||
using uint16 = std::uint16_t;
|
||||
using int32 = std::int32_t;
|
||||
using uint32 = std::uint32_t;
|
||||
#endif // !defined(TF_LITE_STATIC_MEMORY)
|
||||
|
||||
// TFLITE_DEPRECATED()
|
||||
//
|
||||
|
||||
@@ -19,8 +19,9 @@ limitations under the License.
|
||||
|
||||
namespace tflite {
|
||||
|
||||
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
|
||||
(defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO)
|
||||
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
|
||||
(defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \
|
||||
defined(__ZEPHYR__)
|
||||
#define TF_LITE_GLOBAL_STD_PREFIX
|
||||
#else
|
||||
#define TF_LITE_GLOBAL_STD_PREFIX std
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
||||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -12,16 +12,24 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
// Optional debugging functionality. For small sized binaries, these are not
|
||||
// needed.
|
||||
#ifndef TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_
|
||||
#define TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
|
||||
|
||||
#include "tensorflow/lite/micro/micro_interpreter.h"
|
||||
#include <cmath>
|
||||
|
||||
namespace tflite {
|
||||
// Prints a dump of what tensors and what nodes are in the interpreter.
|
||||
void PrintInterpreterState(MicroInterpreter* interpreter);
|
||||
|
||||
#if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
|
||||
inline float TfLiteMax(const float& x, const float& y) {
|
||||
return std::max(x, y);
|
||||
}
|
||||
#else
|
||||
template <class T>
|
||||
inline T TfLiteMax(const T& x, const T& y) {
|
||||
return std::fmax(x, y);
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_MICRO_MICRO_OPTIONAL_DEBUG_TOOLS_H_
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
|
||||
code/lib/tfmicro/tensorflow/lite/kernels/internal/min.h (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
|
||||
|
||||
#include <cmath>
|
||||
|
||||
namespace tflite {
|
||||
|
||||
#if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__)
|
||||
inline float TfLiteMin(const float& x, const float& y) {
|
||||
return std::min(x, y);
|
||||
}
|
||||
#else
|
||||
template <class T>
|
||||
inline T TfLiteMin(const T& x, const T& y) {
|
||||
return std::fmin(x, y);
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
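// Illustrative note (not from the TensorFlow sources): on most targets the
// template overloads above are selected, e.g. tflite::TfLiteMax(3, 7) returns
// 7 via std::fmax and tflite::TfLiteMin(0.5f, 2.0f) returns 0.5f via std::fmin;
// when TF_LITE_USE_GLOBAL_MIN / TF_LITE_USE_GLOBAL_MAX or __ZEPHYR__ is
// defined, only the float overloads backed by std::min / std::max are used.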
|
||||
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_H_
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
|
||||
|
||||
#include <complex>
|
||||
#include <vector>
|
||||
@@ -21,7 +21,6 @@ limitations under the License.
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
#include "tensorflow/lite/string_util.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
@@ -76,12 +75,12 @@ class VectorOfTensors {
|
||||
|
||||
// A list of quantized tensors in a format that can be used by kernels like
|
||||
// split and concatenation.
|
||||
class VectorOfQuantizedTensors : public VectorOfTensors<uint8> {
|
||||
class VectorOfQuantizedTensors : public VectorOfTensors<uint8_t> {
|
||||
public:
|
||||
// Build with the tensors in 'tensor_list'.
|
||||
VectorOfQuantizedTensors(const TfLiteContext& context,
|
||||
const TfLiteIntArray& tensor_list)
|
||||
: VectorOfTensors<uint8>(context, tensor_list) {
|
||||
: VectorOfTensors<uint8_t>(context, tensor_list) {
|
||||
for (int i = 0; i < tensor_list.size; ++i) {
|
||||
TfLiteTensor* t = &context.tensors[tensor_list.data[i]];
|
||||
zero_point_.push_back(t->params.zero_point);
|
||||
@@ -90,10 +89,10 @@ class VectorOfQuantizedTensors : public VectorOfTensors<uint8> {
|
||||
}
|
||||
|
||||
const float* scale() const { return scale_.data(); }
|
||||
const int32* zero_point() const { return zero_point_.data(); }
|
||||
const int32_t* zero_point() const { return zero_point_.data(); }
|
||||
|
||||
private:
|
||||
std::vector<int32> zero_point_;
|
||||
std::vector<int32_t> zero_point_;
|
||||
std::vector<float> scale_;
|
||||
};
|
||||
|
||||
@@ -119,26 +118,6 @@ class SequentialTensorWriter {
|
||||
T* output_ptr_;
|
||||
};
|
||||
|
||||
template <>
|
||||
class SequentialTensorWriter<string> {
|
||||
public:
|
||||
SequentialTensorWriter(const TfLiteTensor* input, TfLiteTensor* output)
|
||||
: input_(input), output_(output) {}
|
||||
~SequentialTensorWriter() { buffer_.WriteToTensor(output_, nullptr); }
|
||||
|
||||
void Write(int position) { this->WriteN(position, 1); }
|
||||
void WriteN(int position, int len) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
buffer_.AddString(GetString(input_, position + i));
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
const TfLiteTensor* input_;
|
||||
TfLiteTensor* output_;
|
||||
DynamicBuffer buffer_;
|
||||
};
|
||||
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_H_
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
|
||||
@@ -342,13 +342,13 @@ void NudgeQuantizationRange(const float min, const float max,
|
||||
const float quant_max_float = static_cast<float>(quant_max);
|
||||
*nudged_scale = (max - min) / (quant_max_float - quant_min_float);
|
||||
const float zero_point_from_min = quant_min_float - min / *nudged_scale;
|
||||
uint16 nudged_zero_point;
|
||||
uint16_t nudged_zero_point;
|
||||
if (zero_point_from_min < quant_min_float) {
|
||||
nudged_zero_point = static_cast<uint16>(quant_min);
|
||||
nudged_zero_point = static_cast<uint16_t>(quant_min);
|
||||
} else if (zero_point_from_min > quant_max_float) {
|
||||
nudged_zero_point = static_cast<uint16>(quant_max);
|
||||
nudged_zero_point = static_cast<uint16_t>(quant_max);
|
||||
} else {
|
||||
nudged_zero_point = static_cast<uint16>(TfLiteRound(zero_point_from_min));
|
||||
nudged_zero_point = static_cast<uint16_t>(TfLiteRound(zero_point_from_min));
|
||||
}
|
||||
*nudged_min = (quant_min_float - nudged_zero_point) * (*nudged_scale);
|
||||
*nudged_max = (quant_max_float - nudged_zero_point) * (*nudged_scale);
|
||||
|
||||
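// Illustrative worked example (not from the TensorFlow sources): with
// min = -1.0f, max = 1.0f and an 8-bit range quant_min = 0, quant_max = 255,
// the nudged scale becomes 2/255 ~= 0.00784, zero_point_from_min = 127.5 is
// rounded to a nudged zero point of 128, and the nudged range ends up as
// roughly [-1.0039, 0.9961], so that 0.0 is exactly representable.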
@@ -51,34 +51,39 @@ inline void Add(const ArithmeticParams& params,
|
||||
|
||||
// Element-wise add that can often be used for inner loop of broadcast add as
|
||||
// well as the non-broadcast add.
|
||||
|
||||
// This function is used for 8-bit as well as for 16-bit, but the accumulator
|
||||
// is 32-bit for both cases. The overflow does not happen due to the
|
||||
// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
|
||||
template <typename T>
|
||||
inline void AddElementwise(int size, const ArithmeticParams& params,
|
||||
const uint8* input1_data, const uint8* input2_data,
|
||||
uint8* output_data) {
|
||||
TFLITE_DCHECK_GT(params.input1_offset, -256);
|
||||
TFLITE_DCHECK_GT(params.input2_offset, -256);
|
||||
TFLITE_DCHECK_LT(params.input1_offset, 256);
|
||||
TFLITE_DCHECK_LT(params.input2_offset, 256);
|
||||
const T* input1_data, const T* input2_data,
|
||||
T* output_data) {
|
||||
TFLITE_DCHECK_GT(params.input1_offset, -std::numeric_limits<T>::max());
|
||||
TFLITE_DCHECK_GT(params.input2_offset, -std::numeric_limits<T>::max());
|
||||
TFLITE_DCHECK_LT(params.input1_offset, std::numeric_limits<T>::max());
|
||||
TFLITE_DCHECK_LT(params.input2_offset, std::numeric_limits<T>::max());
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const int32 input1_val = params.input1_offset + input1_data[i];
|
||||
const int32 input2_val = params.input2_offset + input2_data[i];
|
||||
const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t input1_val = params.input1_offset + input1_data[i];
|
||||
const int32_t input2_val = params.input2_offset + input2_data[i];
|
||||
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, params.input1_multiplier, params.input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, params.input2_multiplier, params.input2_shift);
|
||||
const int32 raw_sum = scaled_input1_val + scaled_input2_val;
|
||||
const int32 raw_output =
|
||||
const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
|
||||
const int32_t raw_output =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
raw_sum, params.output_multiplier, params.output_shift) +
|
||||
params.output_offset;
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, raw_output));
|
||||
output_data[i] = static_cast<uint8>(clamped_output);
|
||||
output_data[i] = static_cast<T>(clamped_output);
|
||||
}
|
||||
}
|
||||
|
||||
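// Illustrative summary (not from the TensorFlow sources) of the requantization
// pipeline implemented by AddElementwise() above, writing M(v, m, s) for
// MultiplyByQuantizedMultiplierSmallerThanOneExp(v, m, s):
//   out[i] = clamp( M( M((in1[i] + off1) << left_shift, m1, s1)
//                    + M((in2[i] + off2) << left_shift, m2, s2),
//                    m_out, s_out) + off_out )
// where the offsets, multipliers and shifts come from ArithmeticParams and
// clamp() applies quantized_activation_min / quantized_activation_max.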
@@ -86,40 +91,40 @@ inline void AddElementwise(int size, const ArithmeticParams& params,
|
||||
// broadcast add, so that, for example, scalar-broadcast with batch will still
|
||||
// be fast.
|
||||
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
|
||||
uint8 input1_data, const uint8* input2_data,
|
||||
uint8* output_data) {
|
||||
uint8_t input1_data, const uint8_t* input2_data,
|
||||
uint8_t* output_data) {
|
||||
TFLITE_DCHECK_GT(params.input1_offset, -256);
|
||||
TFLITE_DCHECK_GT(params.input2_offset, -256);
|
||||
TFLITE_DCHECK_LT(params.input1_offset, 256);
|
||||
TFLITE_DCHECK_LT(params.input2_offset, 256);
|
||||
|
||||
const int32 input1_val = params.input1_offset + input1_data;
|
||||
const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t input1_val = params.input1_offset + input1_data;
|
||||
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, params.input1_multiplier, params.input1_shift);
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const int32 input2_val = params.input2_offset + input2_data[i];
|
||||
const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t input2_val = params.input2_offset + input2_data[i];
|
||||
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, params.input2_multiplier, params.input2_shift);
|
||||
const int32 raw_sum = scaled_input1_val + scaled_input2_val;
|
||||
const int32 raw_output =
|
||||
const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
|
||||
const int32_t raw_output =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
raw_sum, params.output_multiplier, params.output_shift) +
|
||||
params.output_offset;
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, raw_output));
|
||||
output_data[i] = static_cast<uint8>(clamped_output);
|
||||
output_data[i] = static_cast<uint8_t>(clamped_output);
|
||||
}
|
||||
}
|
||||
|
||||
inline void Add(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape, const uint8* input1_data,
|
||||
const RuntimeShape& input2_shape, const uint8* input2_data,
|
||||
const RuntimeShape& output_shape, uint8* output_data) {
|
||||
const RuntimeShape& input1_shape, const uint8_t* input1_data,
|
||||
const RuntimeShape& input2_shape, const uint8_t* input2_data,
|
||||
const RuntimeShape& output_shape, uint8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
const int flat_size =
|
||||
@@ -132,24 +137,53 @@ inline void Add(const ArithmeticParams& params,
|
||||
AddElementwise(flat_size, params, input1_data, input2_data, output_data);
|
||||
}
|
||||
|
||||
inline void AddGeneralParamScale(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const int16_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const int16_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int16_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
const int flat_size =
|
||||
MatchingElementsSize(input1_shape, input2_shape, output_shape);
|
||||
|
||||
int max_value = std::numeric_limits<int16_t>::max();
|
||||
|
||||
TFLITE_DCHECK_GT(params.input1_offset, -max_value);
|
||||
TFLITE_DCHECK_GT(params.input2_offset, -max_value);
|
||||
TFLITE_DCHECK_LT(params.input1_offset, max_value);
|
||||
TFLITE_DCHECK_LT(params.input2_offset, max_value);
|
||||
AddElementwise(flat_size, params, input1_data, input2_data, output_data);
|
||||
}
|
||||
|
||||
inline void Add(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape, const int16* input1_data,
|
||||
const RuntimeShape& input2_shape, const int16* input2_data,
|
||||
const RuntimeShape& output_shape, int16* output_data) {
|
||||
const RuntimeShape& input1_shape, const int16_t* input1_data,
|
||||
const RuntimeShape& input2_shape, const int16_t* input2_data,
|
||||
const RuntimeShape& output_shape, int16_t* output_data,
|
||||
bool pot_scale = true) {
|
||||
if (!pot_scale) {
|
||||
AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
|
||||
input2_data, output_shape, output_data);
|
||||
return;
|
||||
}
|
||||
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
|
||||
const int input1_shift = params.input1_shift;
|
||||
const int flat_size =
|
||||
MatchingElementsSize(input1_shape, input2_shape, output_shape);
|
||||
const int16 output_activation_min = params.quantized_activation_min;
|
||||
const int16 output_activation_max = params.quantized_activation_max;
|
||||
const int16_t output_activation_min = params.quantized_activation_min;
|
||||
const int16_t output_activation_max = params.quantized_activation_max;
|
||||
|
||||
TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
|
||||
TFLITE_DCHECK_LE(input1_shift, 0);
|
||||
TFLITE_DCHECK_LE(params.input2_shift, 0);
|
||||
const int16* not_shift_input = input1_shift == 0 ? input1_data : input2_data;
|
||||
const int16* shift_input = input1_shift == 0 ? input2_data : input1_data;
|
||||
const int16_t* not_shift_input =
|
||||
input1_shift == 0 ? input1_data : input2_data;
|
||||
const int16_t* shift_input = input1_shift == 0 ? input2_data : input1_data;
|
||||
const int input_right_shift =
|
||||
input1_shift == 0 ? -params.input2_shift : -input1_shift;
|
||||
|
||||
@@ -161,8 +195,8 @@ inline void Add(const ArithmeticParams& params,
|
||||
F0 scaled_input = F0::FromRaw(
|
||||
gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
|
||||
F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
|
||||
const int16 raw_output = result.raw();
|
||||
const int16 clamped_output = std::min(
|
||||
const int16_t raw_output = result.raw();
|
||||
const int16_t clamped_output = std::min(
|
||||
output_activation_max, std::max(output_activation_min, raw_output));
|
||||
output_data[i] = clamped_output;
|
||||
}
|
||||
@@ -218,11 +252,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
|
||||
|
||||
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const int32* input1_data,
|
||||
const int32_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const int32* input2_data,
|
||||
const int32_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int32* output_data) {
|
||||
int32_t* output_data) {
|
||||
NdArrayDesc<4> desc1;
|
||||
NdArrayDesc<4> desc2;
|
||||
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
|
||||
@@ -257,13 +291,14 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
|
||||
}
|
||||
}
|
||||
|
||||
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const uint8* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const uint8* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
// This function is used for 8-bit as well as for 16-bit, but the accumulator
|
||||
// is 32-bit for both cases. The overflow does not happen due to the
|
||||
// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
|
||||
template <typename T>
|
||||
inline void BroadcastAdd4DSlow(
|
||||
const ArithmeticParams& params, const RuntimeShape& input1_shape,
|
||||
const T* input1_data, const RuntimeShape& input2_shape,
|
||||
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
|
||||
NdArrayDesc<4> desc1;
|
||||
NdArrayDesc<4> desc2;
|
||||
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
|
||||
@@ -286,34 +321,34 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
|
||||
for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
|
||||
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
|
||||
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
|
||||
const int32 input1_val =
|
||||
const int32_t input1_val =
|
||||
params.input1_offset +
|
||||
input1_data[SubscriptToIndex(desc1, b, y, x, c)];
|
||||
const int32 input2_val =
|
||||
const int32_t input2_val =
|
||||
params.input2_offset +
|
||||
input2_data[SubscriptToIndex(desc2, b, y, x, c)];
|
||||
const int32 shifted_input1_val =
|
||||
const int32_t shifted_input1_val =
|
||||
input1_val * (1 << params.left_shift);
|
||||
const int32 shifted_input2_val =
|
||||
const int32_t shifted_input2_val =
|
||||
input2_val * (1 << params.left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, params.input1_multiplier,
|
||||
params.input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, params.input2_multiplier,
|
||||
params.input2_shift);
|
||||
const int32 raw_sum = scaled_input1_val + scaled_input2_val;
|
||||
const int32 raw_output =
|
||||
const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
|
||||
const int32_t raw_output =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
raw_sum, params.output_multiplier, params.output_shift) +
|
||||
params.output_offset;
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, raw_output));
|
||||
output_data[Offset(extended_output_shape, b, y, x, c)] =
|
||||
static_cast<uint8>(clamped_output);
|
||||
static_cast<T>(clamped_output);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -322,11 +357,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
|
||||
|
||||
inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
|
||||
const RuntimeShape& unswitched_input1_shape,
|
||||
const uint8* unswitched_input1_data,
|
||||
const uint8_t* unswitched_input1_data,
|
||||
const RuntimeShape& unswitched_input2_shape,
|
||||
const uint8* unswitched_input2_data,
|
||||
const uint8_t* unswitched_input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
uint8_t* output_data) {
|
||||
ArithmeticParams switched_params = unswitched_params;
|
||||
switched_params.input1_offset = unswitched_params.input2_offset;
|
||||
switched_params.input1_multiplier = unswitched_params.input2_multiplier;
|
||||
@@ -341,18 +376,18 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
|
||||
|
||||
const ArithmeticParams& params =
|
||||
use_unswitched ? unswitched_params : switched_params;
|
||||
const uint8* input1_data =
|
||||
const uint8_t* input1_data =
|
||||
use_unswitched ? unswitched_input1_data : unswitched_input2_data;
|
||||
const uint8* input2_data =
|
||||
const uint8_t* input2_data =
|
||||
use_unswitched ? unswitched_input2_data : unswitched_input1_data;
|
||||
|
||||
// Fivefold nested loops. The second input resets its position for each
|
||||
// iteration of the second loop. The first input resets its position at the
|
||||
// beginning of the fourth loop. The innermost loop is an elementwise add of
|
||||
// sections of the arrays.
|
||||
uint8* output_data_ptr = output_data;
|
||||
const uint8* input1_data_ptr = input1_data;
|
||||
const uint8* input2_data_reset = input2_data;
|
||||
uint8_t* output_data_ptr = output_data;
|
||||
const uint8_t* input1_data_ptr = input1_data;
|
||||
const uint8_t* input2_data_reset = input2_data;
|
||||
// In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
|
||||
// between input shapes. y3 for input 1 is always broadcast, and so the
|
||||
// dimension there is 1, whereas optionally y1 might be broadcast for input 2.
|
||||
@@ -368,7 +403,7 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
|
||||
// General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
|
||||
// dimension.
|
||||
for (int i0 = 0; i0 < y0; ++i0) {
|
||||
const uint8* input2_data_ptr;
|
||||
const uint8_t* input2_data_ptr;
|
||||
for (int i1 = 0; i1 < y1; ++i1) {
|
||||
input2_data_ptr = input2_data_reset;
|
||||
for (int i2 = 0; i2 < y2; ++i2) {
|
||||
@@ -397,7 +432,7 @@ inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
|
||||
// for y4 == 1 and the loop over y3 is contained within the
|
||||
// AddScalarBroadcast function.
|
||||
for (int i0 = 0; i0 < y0; ++i0) {
|
||||
const uint8* input2_data_ptr;
|
||||
const uint8_t* input2_data_ptr;
|
||||
for (int i1 = 0; i1 < y1; ++i1) {
|
||||
input2_data_ptr = input2_data_reset;
|
||||
for (int i2 = 0; i2 < y2; ++i2) {
|
||||
|
||||
@@ -18,7 +18,6 @@ limitations under the License.
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
#include "tensorflow/lite/string_util.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
@@ -51,18 +50,6 @@ inline bool LessEqualFn(T lhs, T rhs) {
|
||||
return lhs <= rhs;
|
||||
}
|
||||
|
||||
inline bool StringRefEqualFn(const StringRef& lhs, const StringRef& rhs) {
|
||||
if (lhs.len != rhs.len) return false;
|
||||
for (int i = 0; i < lhs.len; ++i) {
|
||||
if (lhs.str[i] != rhs.str[i]) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool StringRefNotEqualFn(const StringRef& lhs, const StringRef& rhs) {
|
||||
return !StringRefEqualFn(lhs, rhs);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
using ComparisonFn = bool (*)(T, T);
|
||||
|
||||
@@ -78,22 +65,6 @@ inline void ComparisonImpl(
|
||||
}
|
||||
}
|
||||
|
||||
template <bool (*F)(const StringRef&, const StringRef&)>
|
||||
inline void ComparisonStringImpl(const RuntimeShape& input1_shape,
|
||||
const TfLiteTensor* input1,
|
||||
const RuntimeShape& input2_shape,
|
||||
const TfLiteTensor* input2,
|
||||
const RuntimeShape& output_shape,
|
||||
bool* output_data) {
|
||||
const int64_t flatsize =
|
||||
MatchingFlatSize(input1_shape, input2_shape, output_shape);
|
||||
for (int64_t i = 0; i < flatsize; ++i) {
|
||||
const auto lhs = GetString(input1, i);
|
||||
const auto rhs = GetString(input2, i);
|
||||
output_data[i] = F(lhs, rhs);
|
||||
}
|
||||
}
|
||||
|
||||
template <ComparisonFn<float> F>
|
||||
inline void Comparison(const ComparisonParams& op_params,
|
||||
const RuntimeShape& input1_shape,
|
||||
@@ -105,30 +76,30 @@ inline void Comparison(const ComparisonParams& op_params,
|
||||
input2_data, output_shape, output_data);
|
||||
}
|
||||
|
||||
template <typename T, ComparisonFn<int32> F>
|
||||
template <typename T, ComparisonFn<int32_t> F>
|
||||
inline void ComparisonWithScaling(
|
||||
const ComparisonParams& op_params, const RuntimeShape& input1_shape,
|
||||
const T* input1_data, const RuntimeShape& input2_shape,
|
||||
const T* input2_data, const RuntimeShape& output_shape, bool* output_data) {
|
||||
int left_shift = op_params.left_shift;
|
||||
int32 input1_offset = op_params.input1_offset;
|
||||
int32 input1_multiplier = op_params.input1_multiplier;
|
||||
int32_t input1_offset = op_params.input1_offset;
|
||||
int32_t input1_multiplier = op_params.input1_multiplier;
|
||||
int input1_shift = op_params.input1_shift;
|
||||
int32 input2_offset = op_params.input2_offset;
|
||||
int32 input2_multiplier = op_params.input2_multiplier;
|
||||
int32_t input2_offset = op_params.input2_offset;
|
||||
int32_t input2_multiplier = op_params.input2_multiplier;
|
||||
int input2_shift = op_params.input2_shift;
|
||||
|
||||
const int64_t flatsize =
|
||||
MatchingFlatSize(input1_shape, input2_shape, output_shape);
|
||||
for (int64_t i = 0; i < flatsize; ++i) {
|
||||
const int32 input1_val = input1_offset + input1_data[i];
|
||||
const int32 input2_val = input2_offset + input2_data[i];
|
||||
const int32 shifted_input1_val = input1_val * (1 << left_shift);
|
||||
const int32 shifted_input2_val = input2_val * (1 << left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t input1_val = input1_offset + input1_data[i];
|
||||
const int32_t input2_val = input2_offset + input2_data[i];
|
||||
const int32_t shifted_input1_val = input1_val * (1 << left_shift);
|
||||
const int32_t shifted_input2_val = input2_val * (1 << left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, input1_multiplier, input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, input2_multiplier, input2_shift);
|
||||
output_data[i] = F(scaled_input1_val, scaled_input2_val);
|
||||
@@ -180,31 +151,6 @@ inline void BroadcastComparison4DSlowImpl(
|
||||
}
|
||||
}
|
||||
|
||||
template <bool (*F)(const StringRef&, const StringRef&)>
|
||||
inline void BroadcastComparison4DSlowStringImpl(
|
||||
const RuntimeShape& unextended_input1_shape, const TfLiteTensor* input1,
|
||||
const RuntimeShape& unextended_input2_shape, const TfLiteTensor* input2,
|
||||
const RuntimeShape& unextended_output_shape, bool* output_data) {
|
||||
const BroadcastComparison4DSlowCommon dims =
|
||||
BroadcastComparison4DSlowPreprocess(unextended_input1_shape,
|
||||
unextended_input2_shape,
|
||||
unextended_output_shape);
|
||||
|
||||
for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
|
||||
for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
|
||||
for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
|
||||
for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
|
||||
const auto lhs =
|
||||
GetString(input1, SubscriptToIndex(dims.desc1, b, y, x, c));
|
||||
const auto rhs =
|
||||
GetString(input2, SubscriptToIndex(dims.desc2, b, y, x, c));
|
||||
output_data[Offset(dims.output_shape, b, y, x, c)] = F(lhs, rhs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <ComparisonFn<float> F>
|
||||
inline void BroadcastComparison4DSlow(const ComparisonParams& op_params,
|
||||
const RuntimeShape& input1_shape,
|
||||
@@ -218,7 +164,7 @@ inline void BroadcastComparison4DSlow(const ComparisonParams& op_params,
|
||||
output_shape, output_data);
|
||||
}
|
||||
|
||||
template <typename T, ComparisonFn<int32> F>
|
||||
template <typename T, ComparisonFn<int32_t> F>
|
||||
inline void BroadcastComparison4DSlowWithScaling(
|
||||
const ComparisonParams& op_params,
|
||||
const RuntimeShape& unextended_input1_shape, const T* input1_data,
|
||||
@@ -230,29 +176,29 @@ inline void BroadcastComparison4DSlowWithScaling(
|
||||
unextended_output_shape);
|
||||
|
||||
int left_shift = op_params.left_shift;
|
||||
int32 input1_offset = op_params.input1_offset;
|
||||
int32 input1_multiplier = op_params.input1_multiplier;
|
||||
int32_t input1_offset = op_params.input1_offset;
|
||||
int32_t input1_multiplier = op_params.input1_multiplier;
|
||||
int input1_shift = op_params.input1_shift;
|
||||
int32 input2_offset = op_params.input2_offset;
|
||||
int32 input2_multiplier = op_params.input2_multiplier;
|
||||
int32_t input2_offset = op_params.input2_offset;
|
||||
int32_t input2_multiplier = op_params.input2_multiplier;
|
||||
int input2_shift = op_params.input2_shift;
|
||||
|
||||
for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
|
||||
for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
|
||||
for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
|
||||
for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
|
||||
const int32 input1_val =
|
||||
const int32_t input1_val =
|
||||
input1_offset +
|
||||
input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)];
|
||||
const int32 input2_val =
|
||||
const int32_t input2_val =
|
||||
input2_offset +
|
||||
input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)];
|
||||
const int32 shifted_input1_val = input1_val * (1 << left_shift);
|
||||
const int32 shifted_input2_val = input2_val * (1 << left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t shifted_input1_val = input1_val * (1 << left_shift);
|
||||
const int32_t shifted_input2_val = input2_val * (1 << left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, input1_multiplier, input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, input2_multiplier, input2_shift);
|
||||
output_data[Offset(dims.output_shape, b, y, x, c)] =
|
||||
|
||||
@@ -74,14 +74,14 @@ inline void Concatenation(const ConcatenationParams& params,
|
||||
// when optimizing this routine further.
|
||||
inline void ConcatenationWithScaling(const ConcatenationParams& params,
|
||||
const RuntimeShape* const* input_shapes,
|
||||
const uint8* const* input_data,
|
||||
const uint8_t* const* input_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
uint8_t* output_data) {
|
||||
int axis = params.axis;
|
||||
const int32* input_zeropoint = params.input_zeropoint;
|
||||
const int32_t* input_zeropoint = params.input_zeropoint;
|
||||
const float* input_scale = params.input_scale;
|
||||
int inputs_count = params.inputs_count;
|
||||
const int32 output_zeropoint = params.output_zeropoint;
|
||||
const int32_t output_zeropoint = params.output_zeropoint;
|
||||
const float output_scale = params.output_scale;
|
||||
|
||||
const int concat_dimensions = output_shape.DimensionsCount();
|
||||
@@ -110,11 +110,11 @@ inline void ConcatenationWithScaling(const ConcatenationParams& params,
|
||||
}
|
||||
|
||||
const float inverse_output_scale = 1.f / output_scale;
|
||||
uint8* output_ptr = output_data;
|
||||
uint8_t* output_ptr = output_data;
|
||||
for (int k = 0; k < outer_size; k++) {
|
||||
for (int i = 0; i < inputs_count; ++i) {
|
||||
const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size;
|
||||
const uint8* input_ptr = input_data[i] + k * copy_size;
|
||||
const uint8_t* input_ptr = input_data[i] + k * copy_size;
|
||||
if (input_zeropoint[i] == output_zeropoint &&
|
||||
input_scale[i] == output_scale) {
|
||||
memcpy(output_ptr, input_ptr, copy_size);
|
||||
|
||||
@@ -59,28 +59,31 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
const int output_width = output_shape.Dims(2);
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
const int in_y_origin = (out_y * stride_height) - pad_height;
for (int out_x = 0; out_x < output_width; ++out_x) {
const int in_x_origin = (out_x * stride_width) - pad_width;
for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
float total = 0.f;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;

// Zero padding by omitting the areas outside the image.
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);

if (!is_point_inside_image) {
continue;
}

for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
const int in_y =
in_y_origin + dilation_height_factor * filter_y;
// If the location is outside the bounds of the input image,
// use zero as a default value.
if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height)) {
float input_value = input_data[Offset(
input_shape, batch, in_y, in_x, in_channel)];
float filter_value =
filter_data[Offset(filter_shape, out_channel, filter_y,
filter_x, in_channel)];
total += (input_value * filter_value);
}
float input_value = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
float filter_value = filter_data[Offset(
filter_shape, out_channel, filter_y, filter_x, in_channel)];
total += (input_value * filter_value);
}
}
}
@@ -99,11 +102,11 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
}

inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data, const RuntimeShape& im2col_shape,
uint8* im2col_data, void* cpu_backend_context) {
const uint8_t* input_data, const RuntimeShape& filter_shape,
const uint8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
uint8_t* output_data, const RuntimeShape& im2col_shape,
uint8_t* im2col_data, void* cpu_backend_context) {
(void)cpu_backend_context;  // only used in optimized code.
(void)im2col_data;   // only used in optimized code.
(void)im2col_shape;  // only used in optimized code.
@@ -113,13 +116,13 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
const int dilation_height_factor = params.dilation_height_factor;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);

TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
@@ -139,29 +142,32 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
const int output_width = output_shape.Dims(2);
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
const int in_y_origin = (out_y * stride_height) - pad_height;
for (int out_x = 0; out_x < output_width; ++out_x) {
const int in_x_origin = (out_x * stride_width) - pad_width;
for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
int32 acc = 0;
int32_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;

// Zero padding by omitting the areas outside the image.
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);

if (!is_point_inside_image) {
continue;
}

for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
const int in_y =
in_y_origin + dilation_height_factor * filter_y;
// If the location is outside the bounds of the input image,
// use zero as a default value.
if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height)) {
int32 input_val = input_data[Offset(input_shape, batch, in_y,
int32_t input_val = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
int32 filter_val =
filter_data[Offset(filter_shape, out_channel, filter_y,
filter_x, in_channel)];
acc +=
(filter_val + filter_offset) * (input_val + input_offset);
}
int32_t filter_val = filter_data[Offset(
filter_shape, out_channel, filter_y, filter_x, in_channel)];
acc +=
(filter_val + filter_offset) * (input_val + input_offset);
}
}
}
@@ -174,7 +180,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
static_cast<uint8>(acc);
static_cast<uint8_t>(acc);
}
}
}
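Editor's note: the quantized Conv kernel above accumulates (filter_val + filter_offset) * (input_val + input_offset) in 32 bits and then rescales the accumulator back to the uint8 output range with output_multiplier/output_shift and output_offset. A minimal sketch of that requantization step, using a plain (truncating) float scale instead of TFLite's fixed-point MultiplyByQuantizedMultiplier; the names below are illustrative and not part of the diff:

#include <algorithm>
#include <cstdint>

// Maps the int32 accumulator of a quantized conv back to uint8, assuming the
// combined input*filter/output scale has been folded into effective_scale.
inline uint8_t RequantizeToUint8(int32_t acc, float effective_scale,
                                 int32_t output_offset,
                                 int32_t activation_min, int32_t activation_max) {
  int32_t out = static_cast<int32_t>(acc * effective_scale) + output_offset;
  out = std::max(out, activation_min);   // clamp to the fused activation range
  out = std::min(out, activation_max);
  return static_cast<uint8_t>(out);
}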
@@ -220,7 +226,7 @@ inline void HybridConvPerChannel(
for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
int32 acc = 0;
int32_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
@@ -231,9 +237,9 @@ inline void HybridConvPerChannel(
// use zero as a default value.
if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height)) {
int32 input_val = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
int32 filter_val =
int32_t input_val = input_data[Offset(
input_shape, batch, in_y, in_x, in_channel)];
int32_t filter_val =
filter_data[Offset(filter_shape, out_channel, filter_y,
filter_x, in_channel)];
acc += filter_val * (input_val - input_offset[batch]);
@@ -258,5 +264,4 @@ inline void HybridConvPerChannel(
}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_

@@ -62,21 +62,21 @@ namespace reference_ops {
namespace depthwise_conv {

template <DepthwiseConvOutputRounding output_rounding>
inline int32 DepthwiseConvRound(int32 x, int32 quantized_multiplier,
int shift) {
inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier,
int shift) {
TFLITE_DCHECK_NE(output_rounding, DepthwiseConvOutputRounding::kNone);
return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}

template <>
inline int32 DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
int32 x, int32 quantized_multiplier, int shift) {
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
int32_t x, int32_t quantized_multiplier, int shift) {
return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}

template <>
inline int32 DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
int32 x, int32 quantized_multiplier, int shift) {
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
int32_t x, int32_t quantized_multiplier, int shift) {
using gemmlowp::SaturatingRoundingDoublingHighMul;
const int left_shift = shift > 0 ? shift : 0;
const int right_shift = shift > 0 ? 0 : -shift;
@@ -89,13 +89,12 @@ inline int32 DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(

template <DepthwiseConvOutputRounding output_rounding>
struct DepthwiseConvBasicKernel {
static inline void Run(const DepthwiseParams& params,
const RuntimeShape& input_shape,
const uint8* input_data,
const RuntimeShape& filter_shape,
const uint8* filter_data,
const RuntimeShape& bias_shape, const int32* bias_data,
const RuntimeShape& output_shape, uint8* output_data) {
static inline void Run(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8_t* input_data, const RuntimeShape& filter_shape,
const uint8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
uint8_t* output_data) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int dilation_width_factor = params.dilation_width_factor;
@@ -103,12 +102,12 @@ struct DepthwiseConvBasicKernel {
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -135,7 +134,7 @@ struct DepthwiseConvBasicKernel {
const int oc = m + ic * depth_multiplier;
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
int32 acc = 0;
int32_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x =
@@ -146,9 +145,9 @@ struct DepthwiseConvBasicKernel {
// use zero as a default value.
if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height)) {
int32 input_val =
int32_t input_val =
input_data[Offset(input_shape, b, in_y, in_x, ic)];
int32 filter_val = filter_data[Offset(
int32_t filter_val = filter_data[Offset(
filter_shape, 0, filter_y, filter_x, oc)];
acc += (filter_val + filter_offset) *
(input_val + input_offset);
@@ -164,7 +163,7 @@ struct DepthwiseConvBasicKernel {
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
output_data[Offset(output_shape, b, out_y, out_x, oc)] =
static_cast<uint8>(acc);
static_cast<uint8_t>(acc);
}
}
}
@@ -176,10 +175,10 @@ struct DepthwiseConvBasicKernel {
// MultiplyByQuantizedMultiplier or DepthwiseConvRound function.
static inline void RunPerChannel(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int8* output_data) {
const int8_t* input_data, const RuntimeShape& filter_shape,
const int8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
int8_t* output_data) {
// Get parameters.
// TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro.
const int stride_width = params.stride_width;
@@ -189,12 +188,12 @@ struct DepthwiseConvBasicKernel {
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const int32 input_offset = params.input_offset;
const int32 output_offset = params.output_offset;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32* output_multiplier = params.output_multiplier_per_channel;
const int32* output_shift = params.output_shift_per_channel;
const int32_t input_offset = params.input_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
const int32_t* output_multiplier = params.output_multiplier_per_channel;
const int32_t* output_shift = params.output_shift_per_channel;

// Check dimensions of the tensors.
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
@@ -222,7 +221,7 @@ struct DepthwiseConvBasicKernel {
const int output_channel = m + in_channel * depth_multiplier;
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
int32 acc = 0;
int32_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x =
@@ -234,17 +233,18 @@ struct DepthwiseConvBasicKernel {
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);
if (is_point_inside_image) {
int32 input_val = input_data[Offset(
int32_t input_val = input_data[Offset(
input_shape, batch, in_y, in_x, in_channel)];
int32 filter_val = filter_data[Offset(
int32_t filter_val = filter_data[Offset(
filter_shape, 0, filter_y, filter_x, output_channel)];
// Accumulate with 32 bits accumulator.
// In the nudging process during model quantization, we
// force real value of 0.0 be represented by a quantized
// value. This guarantees that the input_offset is a int8,
// even though it is represented using int32. int32 += int8
// * (int8 - int8) so the highest value we can get from each
// accumulation is [-127, 127] * ([-128, 127] -
// value. This guarantees that the input_offset is a int8_t,
// even though it is represented using int32_t. int32_t +=
// int8_t
// * (int8_t - int8_t) so the highest value we can get from
// each accumulation is [-127, 127] * ([-128, 127] -
// [-128, 127]), which is [-32512, 32512]. log2(32512)
// = 14.98, which means we can accumulate at least 2^16
// multiplications without overflow. The accumulator is
@@ -279,10 +279,10 @@ struct DepthwiseConvBasicKernel {

inline void DepthwiseConv(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data) {
const uint8_t* input_data, const RuntimeShape& filter_shape,
const uint8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
uint8_t* output_data) {
return depthwise_conv::DepthwiseConvBasicKernel<
DepthwiseConvOutputRounding::kAwayFromZero>::Run(params, input_shape,
input_data, filter_shape,
@@ -32,12 +32,12 @@ inline void Dequantize(const tflite::DequantizationParams& op_params,
const RuntimeShape& input_shape,
const InputT* input_data,
const RuntimeShape& output_shape, OutputT* output_data) {
int32 zero_point = op_params.zero_point;
int32_t zero_point = op_params.zero_point;
const double scale = op_params.scale;
const int flat_size = MatchingFlatSize(input_shape, output_shape);

for (int i = 0; i < flat_size; i++) {
const int32 val = input_data[i];
const int32_t val = input_data[i];
const OutputT result = static_cast<OutputT>(scale * (val - zero_point));
output_data[i] = result;
}
@@ -52,11 +52,11 @@ inline void PerChannelDequantize(
// Ensure flat size is same.
MatchingFlatSize(input_shape, output_shape);

const int32* zero_point = op_params.zero_point;
const int32_t* zero_point = op_params.zero_point;
const float* scale = op_params.scale;
const int32 quantized_dimension = op_params.quantized_dimension;
const int32 num_dims = input_shape.DimensionsCount();
const int32* dims_data = input_shape.DimsData();
const int32_t quantized_dimension = op_params.quantized_dimension;
const int32_t num_dims = input_shape.DimensionsCount();
const int32_t* dims_data = input_shape.DimsData();
std::vector<int> current_dim(num_dims, 0);

do {
@@ -64,7 +64,7 @@ inline void PerChannelDequantize(
ReducedOutputOffset(num_dims, reinterpret_cast<const int*>(dims_data),
current_dim.data(), 0, nullptr);
const int channel = current_dim[quantized_dimension];
const int32 val = input_data[offset];
const int32_t val = input_data[offset];
const float result =
static_cast<float>(scale[channel] * (val - zero_point[channel]));
output_data[offset] = result;
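Editor's note: both Dequantize variants above apply the same affine mapping, real = scale * (quantized - zero_point); the per-channel version merely looks up scale and zero_point by the channel index along quantized_dimension. A minimal self-contained sketch of the per-tensor case (illustrative names, not the TFLite API):

#include <cstdint>

// real_value = scale * (quantized_value - zero_point)
inline float DequantizeOne(int8_t q, float scale, int32_t zero_point) {
  return scale * (static_cast<int32_t>(q) - zero_point);
}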
@@ -61,17 +61,17 @@ inline void FullyConnected(

inline void FullyConnected(
const FullyConnectedParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data) {
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const uint8_t* input_data, const RuntimeShape& filter_shape,
const uint8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
uint8_t* output_data) {
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);

@@ -89,10 +89,10 @@ inline void FullyConnected(
const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
for (int b = 0; b < batches; ++b) {
for (int out_c = 0; out_c < output_depth; ++out_c) {
int32 acc = 0;
int32_t acc = 0;
for (int d = 0; d < accum_depth; ++d) {
int32 input_val = input_data[b * accum_depth + d];
int32 filter_val = filter_data[out_c * accum_depth + d];
int32_t input_val = input_data[b * accum_depth + d];
int32_t filter_val = filter_data[out_c * accum_depth + d];
acc += (filter_val + filter_offset) * (input_val + input_offset);
}
if (bias_data) {
@@ -102,24 +102,24 @@ inline void FullyConnected(
acc += output_offset;
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
output_data[out_c + output_depth * b] = static_cast<uint8>(acc);
output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc);
}
}
}

inline void FullyConnected(
const FullyConnectedParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int16* output_data) {
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const uint8_t* input_data, const RuntimeShape& filter_shape,
const uint8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
int16_t* output_data) {
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;

TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
TFLITE_DCHECK_EQ(output_offset, 0);
@@ -138,20 +138,21 @@ inline void FullyConnected(
for (int out_c = 0; out_c < output_depth; ++out_c) {
// Internal accumulation.
// Initialize accumulator with the bias-value.
int32 accum = bias_data[out_c];
int32_t accum = bias_data[out_c];
// Accumulation loop.
for (int d = 0; d < accum_depth; ++d) {
int16 input_val = input_data[b * accum_depth + d] + input_offset;
int16 filter_val = filter_data[out_c * accum_depth + d] + filter_offset;
int16_t input_val = input_data[b * accum_depth + d] + input_offset;
int16_t filter_val =
filter_data[out_c * accum_depth + d] + filter_offset;
accum += filter_val * input_val;
}
// Down-scale the final int32 accumulator to the scale used by our
// Down-scale the final int32_t accumulator to the scale used by our
// (16-bit, typically 3 integer bits) fixed-point format. The quantized
// multiplier and shift here have been pre-computed offline
// (e.g. by toco).
accum =
MultiplyByQuantizedMultiplier(accum, output_multiplier, output_shift);
// Saturate, cast to int16, and store to output array.
// Saturate, cast to int16_t, and store to output array.
accum = std::max(accum, output_activation_min - output_offset);
accum = std::min(accum, output_activation_max - output_offset);
accum += output_offset;
@@ -162,14 +163,14 @@ inline void FullyConnected(

inline void ShuffledFullyConnected(
const FullyConnectedParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& weights_shape,
const uint8* shuffled_weights_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int16* output_data, uint8* shuffled_input_workspace_data) {
const int32 output_multiplier = params.output_multiplier;
const uint8_t* input_data, const RuntimeShape& weights_shape,
const uint8_t* shuffled_weights_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
int16_t* output_data, uint8_t* shuffled_input_workspace_data) {
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);

TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
@@ -190,7 +191,7 @@ inline void ShuffledFullyConnected(
TFLITE_DCHECK((output_depth % 4) == 0);

// Shuffling and xoring of input activations into the workspace buffer
uint8* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
uint8_t* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
if (batches == 1) {
for (int i = 0; i < accum_depth; i++) {
shuffled_input_workspace_data[i] = input_data[i] ^ 0x80;
@@ -198,13 +199,13 @@ inline void ShuffledFullyConnected(
} else if (batches == 4) {
for (int c = 0; c < accum_depth; c += 16) {
for (int b = 0; b < 4; b++) {
const uint8* src_data_ptr = input_data + b * accum_depth + c;
const uint8_t* src_data_ptr = input_data + b * accum_depth + c;
for (int j = 0; j < 16; j++) {
uint8 src_val = *src_data_ptr++;
uint8_t src_val = *src_data_ptr++;
// Flip the sign bit, so that the kernel will only need to
// reinterpret these uint8 values as int8, getting for free the
// reinterpret these uint8_t values as int8_t, getting for free the
// subtraction of the zero_point value 128.
uint8 dst_val = src_val ^ 0x80;
uint8_t dst_val = src_val ^ 0x80;
*shuffled_input_workspace_ptr++ = dst_val;
}
}
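Editor's note: the shuffled-weights path above relies on the fact that XOR-ing a uint8_t with 0x80 and then reinterpreting the result as int8_t is the same as subtracting the zero point 128. A small self-contained check of that identity (illustrative only, assumes the usual two's-complement int8_t):

#include <cassert>
#include <cstdint>

int main() {
  for (int v = 0; v < 256; ++v) {
    const uint8_t u = static_cast<uint8_t>(v);
    const int8_t flipped = static_cast<int8_t>(u ^ 0x80);  // sign-bit flip, reinterpreted
    assert(static_cast<int>(flipped) == v - 128);          // equals subtracting the zero point
  }
  return 0;
}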
@@ -216,62 +217,62 @@ inline void ShuffledFullyConnected(

// Actual computation
if (batches == 1) {
int16* output_ptr = output_data;
int16_t* output_ptr = output_data;
// Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
// so that just reinterpreting them as int8 values is equivalent to
// so that just reinterpreting them as int8_t values is equivalent to
// subtracting 128 from them, thus implementing for free the subtraction of
// the zero_point value 128.
const int8* shuffled_weights_ptr =
reinterpret_cast<const int8*>(shuffled_weights_data);
const int8_t* shuffled_weights_ptr =
reinterpret_cast<const int8_t*>(shuffled_weights_data);
// Likewise, we preshuffled and pre-xored the input data above.
const int8* shuffled_input_data =
reinterpret_cast<const int8*>(shuffled_input_workspace_data);
const int8_t* shuffled_input_data =
reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
for (int c = 0; c < output_depth; c += 4) {
// Internal accumulation.
// Initialize accumulator with the bias-value.
int32 accum[4] = {0};
int32_t accum[4] = {0};
// Accumulation loop.
for (int d = 0; d < accum_depth; d += 16) {
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 16; j++) {
int8 input_val = shuffled_input_data[d + j];
int8 weights_val = *shuffled_weights_ptr++;
int8_t input_val = shuffled_input_data[d + j];
int8_t weights_val = *shuffled_weights_ptr++;
accum[i] += weights_val * input_val;
}
}
}
for (int i = 0; i < 4; i++) {
// Add bias value
int32 acc = accum[i] + bias_data[c + i];
// Down-scale the final int32 accumulator to the scale used by our
int32_t acc = accum[i] + bias_data[c + i];
// Down-scale the final int32_t accumulator to the scale used by our
// (16-bit, typically 3 integer bits) fixed-point format. The quantized
// multiplier and shift here have been pre-computed offline
// (e.g. by toco).
acc =
MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
// Saturate, cast to int16, and store to output array.
// Saturate, cast to int16_t, and store to output array.
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
output_ptr[c + i] = acc;
}
}
} else if (batches == 4) {
int16* output_ptr = output_data;
int16_t* output_ptr = output_data;
// Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
// so that just reinterpreting them as int8 values is equivalent to
// so that just reinterpreting them as int8_t values is equivalent to
// subtracting 128 from them, thus implementing for free the subtraction of
// the zero_point value 128.
const int8* shuffled_weights_ptr =
reinterpret_cast<const int8*>(shuffled_weights_data);
const int8_t* shuffled_weights_ptr =
reinterpret_cast<const int8_t*>(shuffled_weights_data);
// Likewise, we preshuffled and pre-xored the input data above.
const int8* shuffled_input_data =
reinterpret_cast<const int8*>(shuffled_input_workspace_data);
const int8_t* shuffled_input_data =
reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
for (int c = 0; c < output_depth; c += 4) {
const int8* shuffled_input_ptr = shuffled_input_data;
const int8_t* shuffled_input_ptr = shuffled_input_data;
// Accumulation loop.
// Internal accumulation.
// Initialize accumulator with the bias-value.
int32 accum[4][4];
int32_t accum[4][4];
for (int i = 0; i < 4; i++) {
for (int b = 0; b < 4; b++) {
accum[i][b] = 0;
@@ -281,8 +282,8 @@ inline void ShuffledFullyConnected(
for (int i = 0; i < 4; i++) {
for (int b = 0; b < 4; b++) {
for (int j = 0; j < 16; j++) {
int8 input_val = shuffled_input_ptr[16 * b + j];
int8 weights_val = shuffled_weights_ptr[16 * i + j];
int8_t input_val = shuffled_input_ptr[16 * b + j];
int8_t weights_val = shuffled_weights_ptr[16 * i + j];
accum[i][b] += weights_val * input_val;
}
}
@@ -293,14 +294,14 @@ inline void ShuffledFullyConnected(
for (int i = 0; i < 4; i++) {
for (int b = 0; b < 4; b++) {
// Add bias value
int32 acc = accum[i][b] + bias_data[c + i];
// Down-scale the final int32 accumulator to the scale used by our
int32_t acc = accum[i][b] + bias_data[c + i];
// Down-scale the final int32_t accumulator to the scale used by our
// (16-bit, typically 3 integer bits) fixed-point format. The
// quantized multiplier and shift here have been pre-computed offline
// (e.g. by toco).
acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
output_shift);
// Saturate, cast to int16, and store to output array.
// Saturate, cast to int16_t, and store to output array.
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
output_ptr[b * output_depth + c + i] = acc;

@@ -0,0 +1,166 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_

#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

inline int16_t SaturatingLeftShift(int16_t value, int amount) {
int32_t result = static_cast<int32_t>(value) * (1 << amount);
result = std::min<int32_t>(result, std::numeric_limits<int16_t>::max());
result = std::max<int32_t>(result, std::numeric_limits<int16_t>::min());
return result;
}

// Similar to ARM instruction SQDMULH.
// Similar to gemmlowp::SaturatingRoundingDoublingHighMul except
// rounding to zero instead of to nearest (SQRDMULH).
inline std::int16_t SaturatingDoublingHighMul(std::int16_t a, std::int16_t b) {
bool overflow = a == b && a == std::numeric_limits<std::int16_t>::min();
std::int32_t a_32(a);
std::int32_t b_32(b);
std::int32_t ab_32 = a_32 * b_32;
std::int16_t ab_x2_high16 = static_cast<std::int16_t>((ab_32) / (1 << 15));
return overflow ? std::numeric_limits<std::int16_t>::max() : ab_x2_high16;
}

template <typename T>
inline void HardSwish(const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("ReferenceHardSwish/Float");
auto matching_size = MatchingFlatSize(input_shape, output_shape);
const T* in_end = input_data + matching_size;
for (; input_data < in_end; input_data++, output_data++) {
const float in = *input_data;
*output_data =
in * std::min(static_cast<T>(6), std::max(static_cast<T>(0), in + 3)) /
6;
}
}

template <typename T>
inline void HardSwish(const HardSwishParams& params,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("ReferenceHardSwish/Quantized");

const int flat_size = MatchingFlatSize(input_shape, output_shape);

for (int i = 0; i < flat_size; i++) {
const int16_t input_value = input_data[i] - params.input_zero_point;
// Left-shift as much as we can without overflow/saturation to put
// significant bits in the high bits of our 16-bit fixedpoint values, so
// that fixed-point approximate computations below are as accurate as
// possible.
const int16_t input_value_on_hires_input_scale = input_value * (1 << 7);
// Compute the input value on essentially the output scale, just not
// right-shifted yet. This is the value that we'll use in the (x >= +3)
// case, and that in the general case we'll multiply against the "relu-ish"
// fixed-point multiplier in [0, 1].
const int16_t input_value_on_preshift_output_scale =
gemmlowp::SaturatingRoundingDoublingHighMul(
input_value_on_hires_input_scale,
params.output_multiplier_fixedpoint_int16);
// Now compute the "relu-ish multiplier". In the (-3 <= x <= +3) case, that
// is just an affine rescaling of x from [-3, 3] to [0, 1]. In the general
// case, it is just that plus saturation at the boundaries of [-3, 3].
// First, we rescale from [-3, 3] to [-1, 1], saturating.
// That is done by rescaling the input value with a fixed-point multiplier
// (reluish_multiplier_fixedpoint) and bit-shift such that we represent
// that input value on the scale where the real value 3.0f is represented
// by the quantized value 32768. (+32768 is actually not representable as
// int16_t, so this saturates at +32767, and that is seen empirically to be
// a negligible contribution to numerical error/bias).
//
// This code is careful to correctly implement any magnitude of multiplier,
// involving either a right shift or a left shift, with correct saturation
// behavior in the left-shift case. This forces this code to be more
// complicated, but is necessary for real applications: a partially
// trained quantized MobileNet v3-small model that motivated this code
// exhibits some large [min, max] range boundaries, of the order of
// magnitude of 10 or 100 depending on layers.
//
// The next few lines are basically just an ordinary
// MultiplyByQuantizedMultiplier, except that we are more careful here
// about the fine details of saturation when left-shifting, because here
// overflow in left-shift is a common case, not an anomaly as
// MultiplyByQuantizedMultiplier assumes.
int16_t reluish_value = input_value_on_hires_input_scale;
// Shift left, saturating, as much as we can while ensuring that this
// saturation will not contribute to the result. That is, left shift amount
// reduced by 1.
if (params.reluish_multiplier_exponent > 0) {
reluish_value = SaturatingLeftShift(
reluish_value, params.reluish_multiplier_exponent - 1);
}
// Apply the fixed-point multiplier, dividing the value by a divisor
// ranging in [1, 2].
reluish_value = gemmlowp::SaturatingRoundingDoublingHighMul(
reluish_value, params.reluish_multiplier_fixedpoint_int16);
// Apply the last bit of left-shift. Thus, in the left-shifting case, if
// any saturation affects the result, it is happening here --- any
// saturation having occurred above is overwritten here, not affecting the
// result.
if (params.reluish_multiplier_exponent > 0) {
reluish_value = SaturatingLeftShift(reluish_value, 1);
}
// Shift right, in the right-shifting case.
if (params.reluish_multiplier_exponent < 0) {
reluish_value = gemmlowp::RoundingDivideByPOT(
reluish_value, -params.reluish_multiplier_exponent);
}
// At this point we have rescaled the value into a 16bit fixedpoint
// reluish_value in [-1, 1].
// We now convert that to a 16bit fixedpoint value in [0, 1].
reluish_value = (reluish_value + (1 << 15)) >> 1;
// Use of SaturatingDoublingHighMul here is important to cancel the biases
// from the above SaturatingRoundingDoublingHighMul.
//
// On a partially trained MobileNet-v3-small,
//
//                                       | bias on    | ImageNet
//                                       | quantized  | Top-1
// Operation used here                   | values     | accuracy (50k)
// --------------------------------------+------------+-----------
// SaturatingDoublingHighMul             | -0.0024    | 58.920
// SaturatingRoundingDoublingHighMul     | -0.0067    | 58.064
//
// In activations_test, this is covered by this testcase:
// QuantizedActivationsOpTest.HardSwishBias
//
const int16_t preshift_output_value = SaturatingDoublingHighMul(
reluish_value, input_value_on_preshift_output_scale);
// We were so far operating on the pre-shift output scale. Now we finally
// apply that output shift, arriving at the final output scale.
int16_t output_value = gemmlowp::RoundingDivideByPOT(
preshift_output_value, -params.output_multiplier_exponent);
output_value += params.output_zero_point;
output_value =
std::min<int16_t>(output_value, std::numeric_limits<T>::max());
output_value =
std::max<int16_t>(output_value, std::numeric_limits<T>::min());
output_data[i] = output_value;
}
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
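Editor's note: the quantized HardSwish kernel above is a fixed-point approximation of the float definition that the template at the top of the same file computes directly, hard_swish(x) = x * relu6(x + 3) / 6. A minimal float sketch of that reference formula (illustrative only):

#include <algorithm>

// hard_swish(x) = x * min(6, max(0, x + 3)) / 6
inline float HardSwishRef(float x) {
  return x * std::min(6.0f, std::max(0.0f, x + 3.0f)) / 6.0f;
}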
@@ -23,34 +23,41 @@ limitations under the License.
namespace tflite {
namespace reference_integer_ops {

inline void CheckArithmeticParams(const ArithmeticParams& params) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// Input offset is negative input zero point. Activation tensors are
// asymmetric quantized so they span the full int8 range.
TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_GE(-params.input2_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits<int8_t>::max());
}

// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
inline void AddElementwise(int size, const ArithmeticParams& params,
const int8_t* input1_data, const int8_t* input2_data,
int8_t* output_data) {
const int32_t int8_max_value = std::numeric_limits<int8_t>::max();
TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value);
TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value);
TFLITE_DCHECK_LE(params.input1_offset, int8_max_value);
TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
CheckArithmeticParams(params);

for (int i = 0; i < size; ++i) {
const int32 input1_val = params.input1_offset + input1_data[i];
const int32 input2_val = params.input2_offset + input2_data[i];
const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
const int32 scaled_input1_val =
const int32_t input1_val = params.input1_offset + input1_data[i];
const int32_t input2_val = params.input2_offset + input2_data[i];
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
const int32_t scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input1_val, params.input1_multiplier, params.input1_shift);
const int32 scaled_input2_val =
const int32_t scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_input2_val, params.input2_multiplier, params.input2_shift);
const int32 raw_sum = scaled_input1_val + scaled_input2_val;
const int32 raw_output =
const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
raw_sum, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32 clamped_output =
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<int8_t>(clamped_output);
@@ -61,16 +68,11 @@ inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int8_t* input1_data,
const RuntimeShape& input2_shape, const int8_t* input2_data,
const RuntimeShape& output_shape, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
CheckArithmeticParams(params);

const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);

const int32_t int8_max_value = std::numeric_limits<int8_t>::max();
TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value);
TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value);
TFLITE_DCHECK_LE(params.input1_offset, int8_max_value);
TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}

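Editor's note: AddElementwise above first brings both int8 inputs onto a common higher-precision scale (left_shift plus a per-input multiplier) before summing and requantizing in fixed point. Conceptually it computes the following, sketched here with plain float scales rather than TFLite's fixed-point path (names are illustrative, not from the diff):

#include <algorithm>
#include <cmath>
#include <cstdint>

// out_q = clamp(round((s1*(q1 - z1) + s2*(q2 - z2)) / s_out) + z_out)
inline int8_t QuantizedAddOne(int8_t q1, float s1, int32_t z1,
                              int8_t q2, float s2, int32_t z2,
                              float s_out, int32_t z_out) {
  const float real_sum = s1 * (q1 - z1) + s2 * (q2 - z2);
  int32_t out = static_cast<int32_t>(std::lround(real_sum / s_out)) + z_out;
  out = std::max<int32_t>(-128, std::min<int32_t>(127, out));
  return static_cast<int8_t>(out);
}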
@@ -22,27 +22,27 @@ namespace reference_integer_ops {

// Fixed-point per-channel-quantization convolution reference kernel.
inline void ConvPerChannel(
const ConvParams& params, const int32* output_multiplier,
const int32* output_shift, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int8* output_data) {
const ConvParams& params, const int32_t* output_multiplier,
const int32_t* output_shift, const RuntimeShape& input_shape,
const int8_t* input_data, const RuntimeShape& filter_shape,
const int8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
int8_t* output_data) {
// Get parameters.
const int32 input_offset = params.input_offset;  // r = s(q - Z)
const int32_t input_offset = params.input_offset;  // r = s(q - Z)
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int32 output_offset = params.output_offset;
const int32_t output_offset = params.output_offset;

// Set min and max value of the output.
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;

// Sanity check.
// Consistency check.
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -63,45 +63,47 @@ inline void ConvPerChannel(
const int output_width = output_shape.Dims(2);
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
const int in_y_origin = (out_y * stride_height) - pad_height;
for (int out_x = 0; out_x < output_width; ++out_x) {
const int in_x_origin = (out_x * stride_width) - pad_width;
for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
int32 acc = 0;
int32_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;

// Zero padding by omitting the areas outside the image.
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);

if (!is_point_inside_image) {
continue;
}

for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
const int in_y =
in_y_origin + dilation_height_factor * filter_y;
// Zero padding by omitting the areas outside the image.
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);
if (is_point_inside_image) {
int32 input_val = input_data[Offset(input_shape, batch, in_y,
int32_t input_val = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
int32 filter_val =
filter_data[Offset(filter_shape, out_channel, filter_y,
filter_x, in_channel)];
// Accumulate with 32 bits accumulator.
// In the nudging process during model quantization, we force
// real value of 0.0 be represented by a quantized value. This
// guarantees that the input_offset is a int8, even though it
// is represented using int32.
// int32 += int8 * (int8 - int8) so the highest value we can
// get from each accumulation is [-127, 127] * ([-128, 127] -
// [-128, 127]), which is [-32512, 32512]. log2(32512)
// = 14.98, which means we can accumulate at least 2^16
// multiplications without overflow. The accumulator is
// applied to a filter so the accumulation logic will hold as
// long as the filter size (filter_y * filter_x * in_channel)
// does not exceed 2^16, which is the case in all the models
// we have seen so far.
// TODO(jianlijianli): Add a check to make sure the
// accumulator depth is smaller than 2^16.
acc += filter_val * (input_val + input_offset);
}
int32_t filter_val = filter_data[Offset(
filter_shape, out_channel, filter_y, filter_x, in_channel)];
// Accumulate with 32 bits accumulator.
// In the nudging process during model quantization, we force
// real value of 0.0 be represented by a quantized value. This
// guarantees that the input_offset is a int8_t, even though
// it is represented using int32_t. int32_t += int8_t *
// (int8_t - int8_t) so the highest value we can get from each
// accumulation is [-127, 127] * ([-128, 127] -
// [-128, 127]), which is [-32512, 32512]. log2(32512)
// = 14.98, which means we can accumulate at least 2^16
// multiplications without overflow. The accumulator is
// applied to a filter so the accumulation logic will hold as
// long as the filter size (filter_y * filter_x * in_channel)
// does not exceed 2^16, which is the case in all the models
// we have seen so far.
// TODO(jianlijianli): Add a check to make sure the
// accumulator depth is smaller than 2^16.
acc += filter_val * (input_val + input_offset);
}
}
}
@@ -125,12 +127,12 @@ inline void ConvPerChannel(
// Fixed-point per-channel-quantization convolution reference kernel.
// 16-bit data and 8-bit filter
inline void ConvPerChannel(
const ConvParams& params, const int32* output_multiplier,
const int32* output_shift, const RuntimeShape& input_shape,
const int16* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const ConvParams& params, const int32_t* output_multiplier,
const int32_t* output_shift, const RuntimeShape& input_shape,
const int16_t* input_data, const RuntimeShape& filter_shape,
const int8_t* filter_data, const RuntimeShape& bias_shape,
const std::int64_t* bias_data, const RuntimeShape& output_shape,
int16* output_data) {
int16_t* output_data) {
// Get parameters.
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
@@ -140,10 +142,10 @@ inline void ConvPerChannel(
const int pad_height = params.padding_values.height;

// Set min and max value of the output.
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;

// Sanity check.
// Consistency check.
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -164,35 +166,37 @@ inline void ConvPerChannel(
const int output_width = output_shape.Dims(2);
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
const int in_y_origin = (out_y * stride_height) - pad_height;
for (int out_x = 0; out_x < output_width; ++out_x) {
const int in_x_origin = (out_x * stride_width) - pad_width;
for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
std::int64_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;

// Zero padding by omitting the areas outside the image.
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);

if (!is_point_inside_image) {
continue;
}

for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
const int in_y =
in_y_origin + dilation_height_factor * filter_y;
// Zero padding by omitting the areas outside the image.
const bool is_point_inside_image =
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);
if (is_point_inside_image) {
int32 input_val = input_data[Offset(input_shape, batch, in_y,
int32_t input_val = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
int32 filter_val =
filter_data[Offset(filter_shape, out_channel, filter_y,
filter_x, in_channel)];
// Accumulate with 64 bits accumulator.
// int64 += int8 * int16 so the highest value we can
// get from each accumulation is [-127, 127] * ([-32768,
// 32767] -
// [-32768, 32767]), which is [-8322945, 8322945].
// log2(8322945) = 22.99.
acc += filter_val * input_val;
}
int32_t filter_val = filter_data[Offset(
filter_shape, out_channel, filter_y, filter_x, in_channel)];
// Accumulate with 64 bits accumulator.
// int64_t += int8_t * int16_t so the highest value we can
// get from each accumulation is [-127, 127] * ([-32768,
// 32767] -
// [-32768, 32767]), which is [-8322945, 8322945].
// log2(8322945) = 22.99.
acc += filter_val * input_val;
}
}
}

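Editor's note: a quick arithmetic check of the per-multiply bound quoted in the 64-bit accumulator comment above (int8_t * (int16_t - int16_t)), as a self-contained snippet:

#include <cstdint>

// 127 * (32767 - (-32768)) = 127 * 65535 = 8322945, and log2(8322945) ~ 22.99,
// so an int64_t accumulator has ample headroom for realistic filter sizes.
static_assert(127LL * 65535LL == 8322945LL, "per-multiply bound from the kernel comment");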
@@ -20,12 +20,12 @@ limitations under the License.
namespace tflite {
namespace reference_integer_ops {
inline void DepthwiseConvPerChannel(
const DepthwiseParams& params, const int32* output_multiplier,
const int32* output_shift, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int8* output_data) {
const DepthwiseParams& params, const int32_t* output_multiplier,
const int32_t* output_shift, const RuntimeShape& input_shape,
const int8_t* input_data, const RuntimeShape& filter_shape,
const int8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
int8_t* output_data) {
// Get parameters.
// TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro.
const int stride_width = params.stride_width;
@@ -35,10 +35,10 @@ inline void DepthwiseConvPerChannel(
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const int32 input_offset = params.input_offset;
const int32 output_offset = params.output_offset;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t input_offset = params.input_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;

// Check dimensions of the tensors.
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
@@ -66,7 +66,7 @@ inline void DepthwiseConvPerChannel(
const int output_channel = m + in_channel * depth_multiplier;
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
int32 acc = 0;
int32_t acc = 0;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
@@ -77,17 +77,17 @@ inline void DepthwiseConvPerChannel(
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);
if (is_point_inside_image) {
int32 input_val = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
int32 filter_val = filter_data[Offset(
int32_t input_val = input_data[Offset(
input_shape, batch, in_y, in_x, in_channel)];
int32_t filter_val = filter_data[Offset(
filter_shape, 0, filter_y, filter_x, output_channel)];
// Accumulate with 32 bits accumulator.
// In the nudging process during model quantization, we force
// real value of 0.0 be represented by a quantized value. This
// guarantees that the input_offset is a int8, even though it
// is represented using int32.
// int32 += int8 * (int8 - int8) so the highest value we can
// get from each accumulation is [-127, 127] * ([-128, 127] -
// guarantees that the input_offset is a int8_t, even though
// it is represented using int32_t. int32_t += int8_t *
// (int8_t - int8_t) so the highest value we can get from each
// accumulation is [-127, 127] * ([-128, 127] -
// [-128, 127]), which is [-32512, 32512]. log2(32512)
// = 14.98, which means we can accumulate at least 2^16
// multiplications without overflow. The accumulator is
@@ -120,12 +120,12 @@ inline void DepthwiseConvPerChannel(
}

inline void DepthwiseConvPerChannel(
const DepthwiseParams& params, const int32* output_multiplier,
const int32* output_shift, const RuntimeShape& input_shape,
const int16* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const DepthwiseParams& params, const int32_t* output_multiplier,
const int32_t* output_shift, const RuntimeShape& input_shape,
const int16_t* input_data, const RuntimeShape& filter_shape,
const int8_t* filter_data, const RuntimeShape& bias_shape,
const std::int64_t* bias_data, const RuntimeShape& output_shape,
int16* output_data) {
int16_t* output_data) {
// Get parameters.
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
@@ -134,8 +134,8 @@ inline void DepthwiseConvPerChannel(
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;

// Check dimensions of the tensors.
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
@@ -174,9 +174,9 @@ inline void DepthwiseConvPerChannel(
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height);
if (is_point_inside_image) {
int32 input_val = input_data[Offset(input_shape, batch, in_y,
in_x, in_channel)];
int32 filter_val = filter_data[Offset(
int32_t input_val = input_data[Offset(
input_shape, batch, in_y, in_x, in_channel)];
|
||||
int32_t filter_val = filter_data[Offset(
|
||||
filter_shape, 0, filter_y, filter_x, output_channel)];
|
||||
// Accumulate with 64 bits accumulator.
|
||||
// We assume maximum of 2^16 accumulations as with the 8-bit
|
||||
@@ -190,7 +190,7 @@ inline void DepthwiseConvPerChannel(
|
||||
if (bias_data) {
|
||||
acc += bias_data[output_channel];
|
||||
}
|
||||
int32 scaled_acc = MultiplyByQuantizedMultiplier(
|
||||
int32_t scaled_acc = MultiplyByQuantizedMultiplier(
|
||||
acc, output_multiplier[output_channel],
|
||||
output_shift[output_channel]);
|
||||
scaled_acc = std::max(scaled_acc, output_activation_min);
|
||||
@@ -207,8 +207,8 @@ inline void DepthwiseConvPerChannel(
|
||||
|
||||
inline void DepthwiseConvHybridPerChannel(
|
||||
const DepthwiseParams& params, float* scaling_factors_ptr,
|
||||
const RuntimeShape& input_shape, const int8* input_data,
|
||||
const RuntimeShape& filter_shape, const int8* filter_data,
|
||||
const RuntimeShape& input_shape, const int8_t* input_data,
|
||||
const RuntimeShape& filter_shape, const int8_t* filter_data,
|
||||
const RuntimeShape& bias_shape, const float* bias_data,
|
||||
const RuntimeShape& output_shape, float* output_data,
|
||||
const float* per_channel_scale, int32_t* input_offset) {
|
||||
@@ -247,7 +247,7 @@ inline void DepthwiseConvHybridPerChannel(
|
||||
const int output_channel = m + in_channel * depth_multiplier;
|
||||
const int in_x_origin = (out_x * stride_width) - pad_width;
|
||||
const int in_y_origin = (out_y * stride_height) - pad_height;
|
||||
int32 acc = 0;
|
||||
int32_t acc = 0;
|
||||
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
|
||||
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
|
||||
const int in_x = in_x_origin + dilation_width_factor * filter_x;
|
||||
@@ -258,9 +258,9 @@ inline void DepthwiseConvHybridPerChannel(
|
||||
(in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
|
||||
(in_y < input_height);
|
||||
if (is_point_inside_image) {
|
||||
int32 input_val = input_data[Offset(input_shape, batch, in_y,
|
||||
in_x, in_channel)];
|
||||
int32 filter_val = filter_data[Offset(
|
||||
int32_t input_val = input_data[Offset(
|
||||
input_shape, batch, in_y, in_x, in_channel)];
|
||||
int32_t filter_val = filter_data[Offset(
|
||||
filter_shape, 0, filter_y, filter_x, output_channel)];
|
||||
acc += filter_val * (input_val - input_offset[batch]);
|
||||
}
|
||||
|
||||
@@ -24,15 +24,15 @@ inline void FullyConnected(
|
||||
const FullyConnectedParams& params, const RuntimeShape& input_shape,
|
||||
const int8_t* input_data, const RuntimeShape& filter_shape,
|
||||
const int8_t* filter_data, const RuntimeShape& bias_shape,
|
||||
const int32* bias_data, const RuntimeShape& output_shape,
|
||||
const int32_t* bias_data, const RuntimeShape& output_shape,
|
||||
int8_t* output_data) {
|
||||
const int32 input_offset = params.input_offset;
|
||||
const int32 filter_offset = params.weights_offset;
|
||||
const int32 output_offset = params.output_offset;
|
||||
const int32 output_multiplier = params.output_multiplier;
|
||||
const int32_t input_offset = params.input_offset;
|
||||
const int32_t filter_offset = params.weights_offset;
|
||||
const int32_t output_offset = params.output_offset;
|
||||
const int32_t output_multiplier = params.output_multiplier;
|
||||
const int output_shift = params.output_shift;
|
||||
const int32 output_activation_min = params.quantized_activation_min;
|
||||
const int32 output_activation_max = params.quantized_activation_max;
|
||||
const int32_t output_activation_min = params.quantized_activation_min;
|
||||
const int32_t output_activation_max = params.quantized_activation_max;
|
||||
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
|
||||
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
|
||||
|
||||
@@ -44,10 +44,10 @@ inline void FullyConnected(
|
||||
const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
|
||||
for (int b = 0; b < batches; ++b) {
|
||||
for (int out_c = 0; out_c < output_depth; ++out_c) {
|
||||
int32 acc = 0;
|
||||
int32_t acc = 0;
|
||||
for (int d = 0; d < accum_depth; ++d) {
|
||||
int32 input_val = input_data[b * accum_depth + d];
|
||||
int32 filter_val = filter_data[out_c * accum_depth + d];
|
||||
int32_t input_val = input_data[b * accum_depth + d];
|
||||
int32_t filter_val = filter_data[out_c * accum_depth + d];
|
||||
acc += (filter_val + filter_offset) * (input_val + input_offset);
|
||||
}
|
||||
if (bias_data) {
|
||||
@@ -68,11 +68,11 @@ inline void FullyConnected(
|
||||
const int8_t* filter_data, const RuntimeShape& bias_shape,
|
||||
const int64_t* bias_data, const RuntimeShape& output_shape,
|
||||
int16_t* output_data) {
|
||||
const int32 filter_offset = params.weights_offset;
|
||||
const int32 output_multiplier = params.output_multiplier;
|
||||
const int32_t filter_offset = params.weights_offset;
|
||||
const int32_t output_multiplier = params.output_multiplier;
|
||||
const int output_shift = params.output_shift;
|
||||
const int32 output_activation_min = params.quantized_activation_min;
|
||||
const int32 output_activation_max = params.quantized_activation_max;
|
||||
const int32_t output_activation_min = params.quantized_activation_min;
|
||||
const int32_t output_activation_max = params.quantized_activation_max;
|
||||
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
|
||||
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
|
||||
|
||||
@@ -86,8 +86,8 @@ inline void FullyConnected(
|
||||
for (int out_c = 0; out_c < output_depth; ++out_c) {
|
||||
int64_t acc = 0;
|
||||
for (int d = 0; d < accum_depth; ++d) {
|
||||
int32 input_val = input_data[b * accum_depth + d];
|
||||
int32 filter_val = filter_data[out_c * accum_depth + d];
|
||||
int32_t input_val = input_data[b * accum_depth + d];
|
||||
int32_t filter_val = filter_data[out_c * accum_depth + d];
|
||||
acc += (filter_val + filter_offset) * input_val;
|
||||
}
|
||||
if (bias_data) {
|
||||
|
||||
@@ -21,8 +21,8 @@ namespace tflite {
|
||||
namespace reference_integer_ops {
|
||||
|
||||
inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
|
||||
int32_t depth, const int8* input_data,
|
||||
int8* output_data) {
|
||||
int32_t depth, const int8_t* input_data,
|
||||
int8_t* output_data) {
|
||||
static constexpr int8_t kMinInt8 = std::numeric_limits<int8_t>::min();
|
||||
static constexpr int8_t kMaxInt8 = std::numeric_limits<int8_t>::max();
|
||||
// The output scale must be in sync with Prepare().
|
||||
@@ -30,7 +30,7 @@ inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
|
||||
// to [-1, 127/128].
|
||||
static constexpr int32_t kOutputScale = 7;
|
||||
for (int outer_index = 0; outer_index < outer_size; ++outer_index) {
|
||||
// int32 = (int8 - int8) ^ 2.
|
||||
// int32_t = (int8_t - int8_t) ^ 2.
|
||||
// ([-128, 127] - [-128, 127]) ^ 2 = [0, (2^8 - 1)^2] so the accumulator is
|
||||
// safe from overflowing in at least 2^16 steps.
|
||||
int32_t acc = 0;
|
||||
@@ -55,7 +55,7 @@ inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
|
||||
std::min(static_cast<int32_t>(kMaxInt8),
|
||||
std::max(static_cast<int32_t>(kMinInt8), output_in_q24));
|
||||
output_data[depth * outer_index + inner_index] =
|
||||
static_cast<int8>(output_in_q24);
|
||||
static_cast<int8_t>(output_in_q24);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,12 +58,15 @@ inline void Logistic(int32_t input_zero_point, int32_t input_range_radius,
}
}

inline void Logistic(int32_t input_size, const int16_t* ptr_input_data,
                     int16_t* ptr_output_data) {
inline void Logistic(int32_t input_multiplier, int32_t input_size,
                     const int16_t* ptr_input_data, int16_t* ptr_output_data) {
// We use the LUT for sigmoid and take into account, that
// tanh(x) = 2*sigmoid(2*x) - 1

int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1;

for (int i = 0; i < input_size; ++i, ptr_input_data++, ptr_output_data++) {
int32_t input_data = *ptr_input_data;
int32_t input_data = (*ptr_input_data) * input_data_mul;

// Scale by 3/4 to expand range [-8,8]->[-10.7,10.7] and
// we do interpolation on unsigned values.
@@ -72,13 +75,20 @@ inline void Logistic(int32_t input_size, const int16_t* ptr_input_data,
// We divide by 2 power of 9, because
// we need to divide by 2 in power of 7 for
// the input conversion + 1/4 from the scale above.
uint8_t uh = abs_input_data >> 9;
uint32_t ua = sigmoid_table_uint16[uh];
uint32_t ub = sigmoid_table_uint16[uh + 1];
uint32_t ut = abs_input_data & 0x1ff;
// Define uh as uint32_t type not to make this function overflow.
uint32_t uh = abs_input_data >> 9;
uint32_t result;

// Interpolation is done using the fractional bit.
uint32_t result = (ua << 9) + ut * (ub - ua);
if (uh >= 255) {
  // Saturate to maximum.
  result = 0x7FFF << 10;
} else {
  uint32_t ua = sigmoid_table_uint16[uh];
  uint32_t ub = sigmoid_table_uint16[uh + 1];
  uint32_t ut = abs_input_data & 0x1ff;
  // Interpolation is done using the fractional bit.
  result = (ua << 9) + ut * (ub - ua);
}

result = (input_data >= 0) ? (result + (1 << 9))
                           : ((1 << (16 + 9)) - result + (1 << 9) - 1);

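The hunk above guards the sigmoid table lookup against indexing past the end of sigmoid_table_uint16 and saturates instead. A minimal sketch of the same lookup-and-interpolate pattern follows; the table contents and input range are made up for illustration and are not the library's actual table:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// 256-entry uint16 sigmoid table plus one guard entry; linear interpolation
// uses the low 9 bits of the index, mirroring the patched Logistic() above.
static uint16_t table[256 + 1];

static void BuildTable() {
  for (int i = 0; i <= 256; ++i) {
    const float x = 10.7f * static_cast<float>(i) / 256.0f;  // positive half only
    table[i] =
        static_cast<uint16_t>(std::round(65535.0f / (1.0f + std::exp(-x))));
  }
}

// abs_input plays the role of abs_input_data in the kernel.
static uint32_t InterpolateWithSaturation(uint32_t abs_input) {
  const uint32_t uh = abs_input >> 9;  // kept wider than uint8_t on purpose
  if (uh >= 255) {
    return 0x7FFFu << 10;              // saturate instead of reading past the table
  }
  const uint32_t ua = table[uh];
  const uint32_t ub = table[uh + 1];
  const uint32_t ut = abs_input & 0x1FF;  // 9 fractional bits
  return (ua << 9) + ut * (ub - ua);      // linear interpolation between entries
}

int main() {
  BuildTable();
  std::printf("mid   = %u\n", InterpolateWithSaturation(3u * 8000u));
  std::printf("limit = %u\n", InterpolateWithSaturation(0xFFFFFFFFu));  // guard path
  return 0;
}
```
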
@@ -0,0 +1,77 @@
|
||||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
|
||||
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
|
||||
namespace tflite {
|
||||
namespace reference_integer_ops {
|
||||
|
||||
template <typename integer_type>
|
||||
inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier,
|
||||
int32_t shift, const RuntimeShape& unextended_input_shape,
|
||||
const integer_type* input_data, int32_t input_zero_point,
|
||||
const RuntimeShape& unextended_output_shape,
|
||||
integer_type* output_data, int32_t output_zero_point) {
|
||||
// Current implementation only supports dimension equals 4 and simultaneous
|
||||
// reduction over width and height.
|
||||
TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
|
||||
TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
|
||||
const RuntimeShape input_shape =
|
||||
RuntimeShape::ExtendedShape(4, unextended_input_shape);
|
||||
const RuntimeShape output_shape =
|
||||
RuntimeShape::ExtendedShape(4, unextended_output_shape);
|
||||
const int output_batch = output_shape.Dims(0);
|
||||
const int output_height = output_shape.Dims(1);
|
||||
const int output_width = output_shape.Dims(2);
|
||||
const int output_depth = output_shape.Dims(3);
|
||||
const int input_height = input_shape.Dims(1);
|
||||
const int input_width = input_shape.Dims(2);
|
||||
const int num_elements_in_axis = input_width * input_height;
|
||||
|
||||
TFLITE_CHECK_EQ(op_params.axis_count, 2);
|
||||
TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
|
||||
(op_params.axis[0] == 2 && op_params.axis[1] == 1));
|
||||
TFLITE_CHECK_EQ(output_height, 1);
|
||||
TFLITE_CHECK_EQ(output_width, 1);
|
||||
|
||||
static constexpr int32_t kMinInt = std::numeric_limits<integer_type>::min();
|
||||
static constexpr int32_t kMaxInt = std::numeric_limits<integer_type>::max();
|
||||
|
||||
for (int out_b = 0; out_b < output_batch; ++out_b) {
|
||||
for (int out_d = 0; out_d < output_depth; ++out_d) {
|
||||
int32_t acc = 0;
|
||||
for (int in_h = 0; in_h < input_height; ++in_h) {
|
||||
for (int in_w = 0; in_w < input_width; ++in_w) {
|
||||
acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)] -
|
||||
input_zero_point;
|
||||
}
|
||||
}
|
||||
acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
|
||||
acc = acc > 0 ? (acc + num_elements_in_axis / 2) / num_elements_in_axis
|
||||
: (acc - num_elements_in_axis / 2) / num_elements_in_axis;
|
||||
acc += output_zero_point;
|
||||
acc = std::min(std::max(acc, kMinInt), kMaxInt);
|
||||
output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
|
||||
static_cast<integer_type>(acc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace reference_integer_ops
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
|
||||
@@ -27,14 +27,14 @@ inline void MulElementwise(int size, const ArithmeticParams& params,
|
||||
const T* input1_data, const T* input2_data,
|
||||
T* output_data) {
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const int32 input1_val = params.input1_offset + input1_data[i];
|
||||
const int32 input2_val = params.input2_offset + input2_data[i];
|
||||
const int32 unclamped_result =
|
||||
const int32_t input1_val = params.input1_offset + input1_data[i];
|
||||
const int32_t input2_val = params.input2_offset + input2_data[i];
|
||||
const int32_t unclamped_result =
|
||||
params.output_offset +
|
||||
MultiplyByQuantizedMultiplier(input1_val * input2_val,
|
||||
params.output_multiplier,
|
||||
params.output_shift);
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, unclamped_result));
|
||||
output_data[i] = static_cast<T>(clamped_output);
|
||||
@@ -57,13 +57,13 @@ inline void Mul(const ArithmeticParams& params,
|
||||
|
||||
// Mul with 16 bit inputs and int8_t outputs.
|
||||
inline void Mul(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape, const int16* input1_data,
|
||||
const RuntimeShape& input2_shape, const int16* input2_data,
|
||||
const RuntimeShape& input1_shape, const int16_t* input1_data,
|
||||
const RuntimeShape& input2_shape, const int16_t* input2_data,
|
||||
const RuntimeShape& output_shape, int8_t* output_data) {
|
||||
ruy::profiler::ScopeLabel label("Mul/Int16Int8");
|
||||
int32 output_offset = params.output_offset;
|
||||
int32 output_activation_min = params.quantized_activation_min;
|
||||
int32 output_activation_max = params.quantized_activation_max;
|
||||
int32_t output_offset = params.output_offset;
|
||||
int32_t output_activation_min = params.quantized_activation_min;
|
||||
int32_t output_activation_max = params.quantized_activation_max;
|
||||
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
|
||||
|
||||
const int flat_size =
|
||||
@@ -75,12 +75,12 @@ inline void Mul(const ArithmeticParams& params,
|
||||
|
||||
F0 unclamped_result =
|
||||
F0::FromRaw(input1_data[i]) * F0::FromRaw(input2_data[i]);
|
||||
int16 rescaled_result =
|
||||
int16_t rescaled_result =
|
||||
gemmlowp::RoundingDivideByPOT(unclamped_result.raw(), 8);
|
||||
int16 clamped_result =
|
||||
std::min<int16>(output_activation_max - output_offset, rescaled_result);
|
||||
clamped_result =
|
||||
std::max<int16>(output_activation_min - output_offset, clamped_result);
|
||||
int16_t clamped_result = std::min<int16_t>(
|
||||
output_activation_max - output_offset, rescaled_result);
|
||||
clamped_result = std::max<int16_t>(output_activation_min - output_offset,
|
||||
clamped_result);
|
||||
output_data[i] = output_offset + clamped_result;
|
||||
}
|
||||
}
|
||||
@@ -104,18 +104,18 @@ inline void BroadcastMul4DSlow(
|
||||
for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
|
||||
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
|
||||
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
|
||||
const int32 input1_val =
|
||||
const int32_t input1_val =
|
||||
params.input1_offset +
|
||||
input1_data[SubscriptToIndex(desc1, b, y, x, c)];
|
||||
const int32 input2_val =
|
||||
const int32_t input2_val =
|
||||
params.input2_offset +
|
||||
input2_data[SubscriptToIndex(desc2, b, y, x, c)];
|
||||
const int32 unclamped_result =
|
||||
const int32_t unclamped_result =
|
||||
params.output_offset +
|
||||
MultiplyByQuantizedMultiplier(input1_val * input2_val,
|
||||
params.output_multiplier,
|
||||
params.output_shift);
|
||||
const int32 clamped_output = std::min(
|
||||
const int32_t clamped_output = std::min(
|
||||
params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, unclamped_result));
|
||||
output_data[Offset(extended_output_shape, b, y, x, c)] =
|
||||
|
||||
@@ -22,8 +22,9 @@ namespace tflite {
|
||||
namespace reference_integer_ops {
|
||||
|
||||
inline void AveragePool(const PoolParams& params,
|
||||
const RuntimeShape& input_shape, const int8* input_data,
|
||||
const RuntimeShape& output_shape, int8* output_data) {
|
||||
const RuntimeShape& input_shape,
|
||||
const int8_t* input_data,
|
||||
const RuntimeShape& output_shape, int8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
|
||||
@@ -52,7 +53,7 @@ inline void AveragePool(const PoolParams& params,
|
||||
const int filter_y_start = std::max(0, -in_y_origin);
|
||||
const int filter_y_end =
|
||||
std::min(params.filter_height, input_height - in_y_origin);
|
||||
int32 acc = 0;
|
||||
int32_t acc = 0;
|
||||
int filter_count = 0;
|
||||
for (int filter_y = filter_y_start; filter_y < filter_y_end;
|
||||
++filter_y) {
|
||||
@@ -71,7 +72,7 @@ inline void AveragePool(const PoolParams& params,
|
||||
acc = std::max(acc, params.quantized_activation_min);
|
||||
acc = std::min(acc, params.quantized_activation_max);
|
||||
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
|
||||
static_cast<int8>(acc);
|
||||
static_cast<int8_t>(acc);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -79,8 +80,8 @@ inline void AveragePool(const PoolParams& params,
|
||||
}
|
||||
|
||||
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
const int8* input_data, const RuntimeShape& output_shape,
|
||||
int8* output_data) {
|
||||
const int8_t* input_data, const RuntimeShape& output_shape,
|
||||
int8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
TFLITE_DCHECK_GE(params.quantized_activation_min,
|
||||
@@ -137,8 +138,9 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
|
||||
inline void AveragePool(const PoolParams& params,
|
||||
const RuntimeShape& input_shape,
|
||||
const int16* input_data,
|
||||
const RuntimeShape& output_shape, int16* output_data) {
|
||||
const int16_t* input_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int16_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
|
||||
@@ -167,7 +169,7 @@ inline void AveragePool(const PoolParams& params,
|
||||
const int filter_y_start = std::max(0, -in_y_origin);
|
||||
const int filter_y_end =
|
||||
std::min(params.filter_height, input_height - in_y_origin);
|
||||
int32 acc = 0;
|
||||
int32_t acc = 0;
|
||||
int filter_count = 0;
|
||||
for (int filter_y = filter_y_start; filter_y < filter_y_end;
|
||||
++filter_y) {
|
||||
@@ -186,7 +188,7 @@ inline void AveragePool(const PoolParams& params,
|
||||
acc = std::max(acc, params.quantized_activation_min);
|
||||
acc = std::min(acc, params.quantized_activation_max);
|
||||
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
|
||||
static_cast<int16>(acc);
|
||||
static_cast<int16_t>(acc);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -194,8 +196,8 @@ inline void AveragePool(const PoolParams& params,
|
||||
}
|
||||
|
||||
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
const int16* input_data, const RuntimeShape& output_shape,
|
||||
int16* output_data) {
|
||||
const int16_t* input_data, const RuntimeShape& output_shape,
|
||||
int16_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
TFLITE_DCHECK_GE(params.quantized_activation_min,
|
||||
|
||||
@@ -0,0 +1,110 @@
|
||||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
|
||||
|
||||
#include <limits>
|
||||
|
||||
#include "fixedpoint/fixedpoint.h"
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
|
||||
namespace tflite {
|
||||
namespace reference_integer_ops {
|
||||
|
||||
inline void Tanh(int32_t input_zero_point, int32_t input_range_radius,
|
||||
int32_t input_multiplier, int32_t input_shift,
|
||||
const RuntimeShape& input_shape, const int8_t* input_data,
|
||||
const RuntimeShape& output_shape, int8_t* output_data) {
|
||||
// Integer bits must be in sync with Prepare() function.
|
||||
static constexpr int32_t kInputIntegerBits = 4;
|
||||
static constexpr int32_t kOutputScale = 7;
|
||||
static constexpr int32_t kMinInt8 = std::numeric_limits<int8_t>::min();
|
||||
static constexpr int32_t kMaxInt8 = std::numeric_limits<int8_t>::max();
|
||||
using F4 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;
|
||||
|
||||
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
|
||||
for (int i = 0; i < flat_size; ++i) {
|
||||
const int32_t input =
|
||||
static_cast<int32_t>(input_data[i]) - input_zero_point;
|
||||
if (input <= -input_range_radius) {
|
||||
output_data[i] = kMinInt8;
|
||||
} else if (input >= input_range_radius) {
|
||||
output_data[i] = kMaxInt8;
|
||||
} else {
|
||||
const int32_t input_in_q4 =
|
||||
MultiplyByQuantizedMultiplier(input, input_multiplier, input_shift);
|
||||
const int32_t output_in_q0 =
|
||||
gemmlowp::tanh(F4::FromRaw(input_in_q4)).raw();
|
||||
|
||||
// Rescale and downcast.
|
||||
using gemmlowp::RoundingDivideByPOT;
|
||||
int32_t output_in_q24 =
|
||||
RoundingDivideByPOT(output_in_q0, 31 - kOutputScale);
|
||||
output_in_q24 = std::min(std::max(output_in_q24, kMinInt8), kMaxInt8);
|
||||
output_data[i] = static_cast<int8_t>(output_in_q24);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void Tanh(int32_t input_multiplier, int32_t input_left_shift,
|
||||
const RuntimeShape& input_shape, const int16_t* ptr_input_data,
|
||||
const RuntimeShape& output_shape, int16_t* ptr_output_data) {
|
||||
// We use the LUT for sigmoid and take into account, that
|
||||
// tanh(x) = 2*sigmoid(2*x) - 1
|
||||
|
||||
int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1;
|
||||
|
||||
int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
|
||||
for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) {
|
||||
int32_t input_data = (*ptr_input_data) * input_data_mul;
|
||||
|
||||
if (input_left_shift == 1) {
|
||||
input_data <<= 1;
|
||||
}
|
||||
|
||||
// Scale by 3/4 to expand range [-8,8]->[-10.7,10.7].
|
||||
uint32_t abs_input_data = 3 * abs(input_data);
|
||||
uint32_t uh = abs_input_data >> 8;
|
||||
int32_t result;
|
||||
|
||||
if (uh >= 255) {
|
||||
// Saturate to maximum.
|
||||
result = 0xFFFF << 8;
|
||||
} else {
|
||||
uint32_t ua = sigmoid_table_uint16[uh];
|
||||
uint32_t ub = sigmoid_table_uint16[uh + 1];
|
||||
|
||||
uint8_t ut = abs_input_data & 0xFF;
|
||||
|
||||
result = (ua << 8) + ut * (ub - ua);
|
||||
}
|
||||
|
||||
result = (input_data >= 0)
|
||||
? (result - (1 << (14 + 9)) + (1 << (9 - 2)))
|
||||
: (-result + (1 << (14 + 9)) + (1 << (9 - 2)) - 1);
|
||||
|
||||
// Convert back to 16-bit.
|
||||
result >>= (9 - 1);
|
||||
|
||||
*ptr_output_data = result;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace reference_integer_ops
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
|
||||
@@ -52,40 +52,39 @@ inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
|
||||
|
||||
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
|
||||
const RuntimeShape& input_shape,
|
||||
const uint8* input_data,
|
||||
const uint8_t* input_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
uint8_t* output_data) {
|
||||
const int trailing_dim = input_shape.DimensionsCount() - 1;
|
||||
const int depth =
|
||||
MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
|
||||
const int outer_size =
|
||||
MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
|
||||
const int32 input_zero_point = op_params.input_zero_point;
|
||||
const int32_t input_zero_point = op_params.input_zero_point;
|
||||
|
||||
for (int i = 0; i < outer_size; ++i) {
|
||||
int32 square_l2_norm = 0;
|
||||
int32_t square_l2_norm = 0;
|
||||
for (int c = 0; c < depth; c++) {
|
||||
int32 diff = input_data[depth * i + c] - input_zero_point;
|
||||
int32_t diff = input_data[depth * i + c] - input_zero_point;
|
||||
square_l2_norm += diff * diff;
|
||||
}
|
||||
int32 inv_l2norm_multiplier;
|
||||
int32_t inv_l2norm_multiplier;
|
||||
int inv_l2norm_shift;
|
||||
GetInvSqrtQuantizedMultiplierExp(square_l2_norm, kReverseShift,
|
||||
&inv_l2norm_multiplier, &inv_l2norm_shift);
|
||||
for (int c = 0; c < depth; c++) {
|
||||
int32 diff = input_data[depth * i + c] - input_zero_point;
|
||||
int32 rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
int32_t diff = input_data[depth * i + c] - input_zero_point;
|
||||
int32_t rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
128 * diff, inv_l2norm_multiplier, inv_l2norm_shift);
|
||||
int32 unclamped_output_val = 128 + rescaled_diff;
|
||||
int32 output_val =
|
||||
std::min(static_cast<int32>(255),
|
||||
std::max(static_cast<int32>(0), unclamped_output_val));
|
||||
output_data[depth * i + c] = static_cast<uint8>(output_val);
|
||||
int32_t unclamped_output_val = 128 + rescaled_diff;
|
||||
int32_t output_val =
|
||||
std::min(static_cast<int32_t>(255),
|
||||
std::max(static_cast<int32_t>(0), unclamped_output_val));
|
||||
output_data[depth * i + c] = static_cast<uint8_t>(output_val);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // namespace reference_ops
|
||||
} // namespace tflite
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_
|
||||
|
||||
@@ -66,8 +66,8 @@ inline void Logistic(const LogisticParams&, const RuntimeShape& input_shape,
|
||||
}
|
||||
|
||||
inline void Logistic(const LogisticParams& params,
|
||||
const RuntimeShape& input_shape, const int16* input_data,
|
||||
const RuntimeShape& output_shape, int16* output_data) {
|
||||
const RuntimeShape& input_shape, const int16_t* input_data,
|
||||
const RuntimeShape& output_shape, int16_t* output_data) {
|
||||
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
|
||||
for (int i = 0; i < flat_size; i++) {
|
||||
@@ -84,12 +84,12 @@ inline void Logistic(const LogisticParams& params,
|
||||
}
|
||||
}
|
||||
|
||||
// Quantized int8 logistic activation. Cheats by dequantizing and requantizing
|
||||
// around the floating point logistic method. This implementation is slow on
|
||||
// platforms without a floating point unit.
|
||||
// Quantized int8_t logistic activation. Cheats by dequantizing and
|
||||
// requantizing around the floating point logistic method. This implementation
|
||||
// is slow on platforms without a floating point unit.
|
||||
|
||||
// TODO(b/141211002): Delete this int8 implementation once we can reuse the
|
||||
// approach used in TFLite for int8 Logistic.
|
||||
// TODO(b/141211002): Delete this int8_t implementation once we can reuse the
|
||||
// approach used in TFLite for int8_t Logistic.
|
||||
inline void Logistic(const RuntimeShape& input_shape, const int8_t* input_data,
|
||||
float input_scale, int input_zero_point,
|
||||
const RuntimeShape& output_shape, int8_t* output_data,
|
||||
|
||||
@@ -24,20 +24,20 @@ namespace reference_ops {
|
||||
// Element-wise mul that can often be used for inner loop of broadcast Mul as
|
||||
// well as the non-broadcast Mul.
|
||||
inline void MulElementwise(int size, const ArithmeticParams& params,
|
||||
const uint8* input1_data, const uint8* input2_data,
|
||||
uint8* output_data) {
|
||||
const uint8_t* input1_data,
|
||||
const uint8_t* input2_data, uint8_t* output_data) {
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const int32 input1_val = params.input1_offset + input1_data[i];
|
||||
const int32 input2_val = params.input2_offset + input2_data[i];
|
||||
const int32 unclamped_result =
|
||||
const int32_t input1_val = params.input1_offset + input1_data[i];
|
||||
const int32_t input2_val = params.input2_offset + input2_data[i];
|
||||
const int32_t unclamped_result =
|
||||
params.output_offset +
|
||||
MultiplyByQuantizedMultiplier(input1_val * input2_val,
|
||||
params.output_multiplier,
|
||||
params.output_shift);
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, unclamped_result));
|
||||
output_data[i] = static_cast<uint8>(clamped_output);
|
||||
output_data[i] = static_cast<uint8_t>(clamped_output);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,9 +60,9 @@ inline void Mul(const ArithmeticParams& params,
|
||||
}
|
||||
|
||||
inline void Mul(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape, const uint8* input1_data,
|
||||
const RuntimeShape& input2_shape, const uint8* input2_data,
|
||||
const RuntimeShape& output_shape, uint8* output_data) {
|
||||
const RuntimeShape& input1_shape, const uint8_t* input1_data,
|
||||
const RuntimeShape& input2_shape, const uint8_t* input2_data,
|
||||
const RuntimeShape& output_shape, uint8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
const int flat_size =
|
||||
@@ -73,11 +73,11 @@ inline void Mul(const ArithmeticParams& params,
|
||||
|
||||
inline void BroadcastMul4DSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const uint8* input1_data,
|
||||
const uint8_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const uint8* input2_data,
|
||||
const uint8_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
uint8_t* output_data) {
|
||||
NdArrayDesc<4> desc1;
|
||||
NdArrayDesc<4> desc2;
|
||||
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
|
||||
@@ -89,22 +89,22 @@ inline void BroadcastMul4DSlow(const ArithmeticParams& params,
|
||||
for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
|
||||
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
|
||||
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
|
||||
const int32 input1_val =
|
||||
const int32_t input1_val =
|
||||
params.input1_offset +
|
||||
input1_data[SubscriptToIndex(desc1, b, y, x, c)];
|
||||
const int32 input2_val =
|
||||
const int32_t input2_val =
|
||||
params.input2_offset +
|
||||
input2_data[SubscriptToIndex(desc2, b, y, x, c)];
|
||||
const int32 unclamped_result =
|
||||
const int32_t unclamped_result =
|
||||
params.output_offset +
|
||||
MultiplyByQuantizedMultiplier(input1_val * input2_val,
|
||||
params.output_multiplier,
|
||||
params.output_shift);
|
||||
const int32 clamped_output = std::min(
|
||||
const int32_t clamped_output = std::min(
|
||||
params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, unclamped_result));
|
||||
output_data[Offset(extended_output_shape, b, y, x, c)] =
|
||||
static_cast<uint8>(clamped_output);
|
||||
static_cast<uint8_t>(clamped_output);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,8 +32,8 @@ constexpr int PadKernelMaxDimensionCount() { return 4; }
|
||||
// equivalent to a simple input1_data. For Pad, it should point to a zero
|
||||
// value.
|
||||
//
|
||||
// Note that two typenames are required, so that T=P=int32 is considered a
|
||||
// specialization distinct from P=int32.
|
||||
// Note that two typenames are required, so that T=P=int32_t is considered a
|
||||
// specialization distinct from P=int32_t.
|
||||
template <typename T, typename P>
|
||||
inline void PadImpl(const tflite::PadParams& op_params,
|
||||
const RuntimeShape& input_shape, const T* input_data,
|
||||
@@ -116,11 +116,11 @@ inline void Pad(const tflite::PadParams& op_params,
|
||||
output_data);
|
||||
}
|
||||
|
||||
// The second (pad-value) input can be int32 when, say, the first is uint8.
|
||||
// The second (pad-value) input can be int32_t when, say, the first is uint8_t.
|
||||
template <typename T>
|
||||
inline void Pad(const tflite::PadParams& op_params,
|
||||
const RuntimeShape& input_shape, const T* input_data,
|
||||
const int32* pad_value_ptr, const RuntimeShape& output_shape,
|
||||
const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
|
||||
T* output_data) {
|
||||
const T converted_pad_value = static_cast<T>(*pad_value_ptr);
|
||||
PadImpl(op_params, input_shape, input_data, &converted_pad_value,
|
||||
@@ -130,40 +130,18 @@ inline void Pad(const tflite::PadParams& op_params,
|
||||
// This version avoids conflicting template matching.
|
||||
template <>
|
||||
inline void Pad(const tflite::PadParams& op_params,
|
||||
const RuntimeShape& input_shape, const int32* input_data,
|
||||
const int32* pad_value_ptr, const RuntimeShape& output_shape,
|
||||
int32* output_data) {
|
||||
const RuntimeShape& input_shape, const int32_t* input_data,
|
||||
const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
|
||||
int32_t* output_data) {
|
||||
PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
|
||||
output_data);
|
||||
}
|
||||
|
||||
// One could make all PadImageStyle calls simply delegate the work to the
|
||||
// ordinary Pad. However, it is better that the reference code asserts false in
|
||||
// similar cases.
|
||||
template <typename T, typename P>
|
||||
inline void PadImageStyle(const tflite::PadParams& op_params,
|
||||
const RuntimeShape& input_shape, const T* input_data,
|
||||
const P* pad_value_ptr,
|
||||
const RuntimeShape& output_shape, T* output_data) {
|
||||
TFLITE_ASSERT_FALSE;
|
||||
}
|
||||
|
||||
template <typename P>
|
||||
inline void PadImageStyle(const tflite::PadParams& op_params,
|
||||
const RuntimeShape& input_shape,
|
||||
const uint8* input_data, const P* pad_value_ptr,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape,
|
||||
output_data);
|
||||
}
|
||||
|
||||
template <typename P>
|
||||
inline void PadImageStyle(const tflite::PadParams& op_params,
|
||||
const RuntimeShape& input_shape,
|
||||
const int8_t* input_data, const P* pad_value_ptr,
|
||||
const RuntimeShape& output_shape,
|
||||
int8_t* output_data) {
|
||||
Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape,
|
||||
output_data);
|
||||
}
|
||||
|
||||
@@ -78,8 +78,9 @@ inline void AveragePool(const PoolParams& params,
|
||||
|
||||
inline void AveragePool(const PoolParams& params,
|
||||
const RuntimeShape& input_shape,
|
||||
const uint8* input_data,
|
||||
const RuntimeShape& output_shape, uint8* output_data) {
|
||||
const uint8_t* input_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
|
||||
@@ -108,7 +109,7 @@ inline void AveragePool(const PoolParams& params,
|
||||
const int filter_y_start = std::max(0, -in_y_origin);
|
||||
const int filter_y_end =
|
||||
std::min(params.filter_height, input_height - in_y_origin);
|
||||
int32 acc = 0;
|
||||
int32_t acc = 0;
|
||||
int filter_count = 0;
|
||||
for (int filter_y = filter_y_start; filter_y < filter_y_end;
|
||||
++filter_y) {
|
||||
@@ -125,7 +126,7 @@ inline void AveragePool(const PoolParams& params,
|
||||
acc = std::max(acc, params.quantized_activation_min);
|
||||
acc = std::min(acc, params.quantized_activation_max);
|
||||
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
|
||||
static_cast<uint8>(acc);
|
||||
static_cast<uint8_t>(acc);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -237,8 +238,8 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
}
|
||||
|
||||
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
const uint8* input_data, const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
const uint8_t* input_data, const RuntimeShape& output_shape,
|
||||
uint8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
TFLITE_DCHECK_GE(params.quantized_activation_min, 0);
|
||||
@@ -269,7 +270,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
const int filter_y_start = std::max(0, -in_y_origin);
|
||||
const int filter_y_end =
|
||||
std::min(params.filter_height, input_height - in_y_origin);
|
||||
uint8 max = 0;
|
||||
uint8_t max = 0;
|
||||
for (int filter_y = filter_y_start; filter_y < filter_y_end;
|
||||
++filter_y) {
|
||||
for (int filter_x = filter_x_start; filter_x < filter_x_end;
|
||||
@@ -281,10 +282,10 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
|
||||
input_data[Offset(input_shape, batch, in_y, in_x, channel)]);
|
||||
}
|
||||
}
|
||||
max = std::max<uint8>(max, params.quantized_activation_min);
|
||||
max = std::min<uint8>(max, params.quantized_activation_max);
|
||||
max = std::max<uint8_t>(max, params.quantized_activation_min);
|
||||
max = std::min<uint8_t>(max, params.quantized_activation_max);
|
||||
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
|
||||
static_cast<uint8>(max);
|
||||
static_cast<uint8_t>(max);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ namespace tflite {
|
||||
|
||||
namespace reference_ops {
|
||||
|
||||
// Broadcast prelu to output_shape for quantized uint8/int8 data.
|
||||
// Broadcast prelu to output_shape for quantized uint8_t/int8_t data.
|
||||
template <typename T>
|
||||
inline void BroadcastPrelu4DSlow(
|
||||
const PreluParams& params, const RuntimeShape& input_shape,
|
||||
@@ -44,24 +44,26 @@ inline void BroadcastPrelu4DSlow(
|
||||
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
|
||||
int output_index = Offset(extended_output_shape, b, y, x, c);
|
||||
int input_index = SubscriptToIndex(desc1, b, y, x, c);
|
||||
const int32 input_value =
|
||||
const int32_t input_value =
|
||||
params.input_offset + input_data[input_index];
|
||||
int32 output_value;
|
||||
int32_t output_value;
|
||||
if (input_value >= 0) {
|
||||
output_value = input_value;
|
||||
output_value = MultiplyByQuantizedMultiplier(
|
||||
input_value, params.output_multiplier_1, params.output_shift_1);
|
||||
} else {
|
||||
auto alpha_index = SubscriptToIndex(desc2, b, y, x, c);
|
||||
const int32 alpha_value =
|
||||
const int32_t alpha_value =
|
||||
params.alpha_offset + alpha_data[alpha_index];
|
||||
|
||||
output_value = MultiplyByQuantizedMultiplier(
|
||||
input_value * alpha_value, params.output_multiplier,
|
||||
params.output_shift);
|
||||
input_value * alpha_value, params.output_multiplier_2,
|
||||
params.output_shift_2);
|
||||
}
|
||||
output_value += params.output_offset;
|
||||
|
||||
const int32 quantized_min = std::numeric_limits<T>::min();
|
||||
const int32 quantized_max = std::numeric_limits<T>::max();
|
||||
const int32 clamped_output =
|
||||
const int32_t quantized_min = std::numeric_limits<T>::min();
|
||||
const int32_t quantized_max = std::numeric_limits<T>::max();
|
||||
const int32_t clamped_output =
|
||||
std::min(quantized_max, std::max(quantized_min, output_value));
|
||||
output_data[output_index] = static_cast<T>(clamped_output);
|
||||
}
|
||||
@@ -70,6 +72,37 @@ inline void BroadcastPrelu4DSlow(
}
}

template <typename T>
inline void Prelu(const PreluParams& params, const RuntimeShape& input_shape,
                  const T* input_data, const RuntimeShape& alpha_shape,
                  const T* alpha_data, const RuntimeShape& output_shape,
                  T* output_data) {
  const int32_t quantized_min = std::numeric_limits<T>::min();
  const int32_t quantized_max = std::numeric_limits<T>::max();

  const int flat_size =
      MatchingElementsSize(input_shape, alpha_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    const int32_t input_value = params.input_offset + input_data[i];
    int32_t output_value;
    if (input_value >= 0) {
      output_value = MultiplyByQuantizedMultiplier(
          input_value, params.output_multiplier_1, params.output_shift_1);
    } else {
      const int32_t alpha_value = params.alpha_offset + alpha_data[i];

      output_value = MultiplyByQuantizedMultiplier(input_value * alpha_value,
                                                   params.output_multiplier_2,
                                                   params.output_shift_2);
    }
    output_value += params.output_offset;

    const int32_t clamped_output =
        std::min(quantized_max, std::max(quantized_min, output_value));
    output_data[i] = static_cast<T>(clamped_output);
  }
}

} // namespace reference_ops
} // namespace tflite
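The new Prelu() rescales the two branches with separate quantized multipliers: output_multiplier_1/output_shift_1 for x >= 0 and output_multiplier_2/output_shift_2 for x * alpha. A rough standalone sketch of that per-element math follows; the quantization parameters are invented and a real-valued helper stands in for MultiplyByQuantizedMultiplier:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Stand-in for MultiplyByQuantizedMultiplier: rescale with a real-valued
// multiplier and a power-of-two shift (the kernel uses fixed-point int32).
static int32_t Rescale(int32_t v, double multiplier, int shift) {
  return static_cast<int32_t>(
      std::lround(v * multiplier * std::ldexp(1.0, shift)));
}

int main() {
  // Invented quantization parameters, for illustration only.
  const int32_t input_offset = 5, alpha_offset = 0, output_offset = -1;
  const double m1 = 0.5, m2 = 0.01;  // analogues of output_multiplier_1 / _2
  const int s1 = 0, s2 = 0;          // analogues of output_shift_1 / _2

  const int8_t input[] = {40, -20};
  const int8_t alpha[] = {10, 10};

  for (int i = 0; i < 2; ++i) {
    const int32_t input_value = input_offset + input[i];
    int32_t output_value;
    if (input_value >= 0) {
      output_value = Rescale(input_value, m1, s1);
    } else {
      const int32_t alpha_value = alpha_offset + alpha[i];
      output_value = Rescale(input_value * alpha_value, m2, s2);
    }
    output_value += output_offset;
    output_value = std::min<int32_t>(127, std::max<int32_t>(-128, output_value));
    std::printf("y[%d] = %d\n", i, output_value);
  }
  return 0;
}
```
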
@@ -76,6 +76,10 @@ inline bool ProcessBroadcastShapes(const RuntimeShape& shape0,
|
||||
BroadcastableOpCategory::kFirstInputBroadcastsFast &&
|
||||
params->broadcast_category !=
|
||||
BroadcastableOpCategory::kSecondInputBroadcastsFast) {
|
||||
// This is unreachable because at least one else clause in the above loop
|
||||
// must be reached.
|
||||
TFLITE_DCHECK(false);
|
||||
params->broadcast_category = BroadcastableOpCategory::kNonBroadcast;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,11 @@ limitations under the License.
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
|
||||
|
||||
#include <algorithm>
|
||||
#include <limits>
|
||||
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/compatibility.h"
|
||||
#include "tensorflow/lite/kernels/internal/cppmath.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
|
||||
@@ -29,18 +33,18 @@ inline void AffineQuantize(const tflite::QuantizationParams& op_params,
|
||||
const InputT* input_data,
|
||||
const RuntimeShape& output_shape,
|
||||
OutputT* output_data) {
|
||||
const int32 zero_point = op_params.zero_point;
|
||||
const int32_t zero_point = op_params.zero_point;
|
||||
const double scale = op_params.scale;
|
||||
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
static constexpr int32 min_val = std::numeric_limits<OutputT>::min();
|
||||
static constexpr int32 max_val = std::numeric_limits<OutputT>::max();
|
||||
static constexpr int32_t min_val = std::numeric_limits<OutputT>::min();
|
||||
static constexpr int32_t max_val = std::numeric_limits<OutputT>::max();
|
||||
|
||||
for (int i = 0; i < flat_size; i++) {
|
||||
const InputT val = input_data[i];
|
||||
int32 unclamped =
|
||||
static_cast<int32>(TfLiteRound(val / static_cast<float>(scale))) +
|
||||
int32_t unclamped =
|
||||
static_cast<int32_t>(TfLiteRound(val / static_cast<float>(scale))) +
|
||||
zero_point;
|
||||
int32 clamped = std::min(std::max(unclamped, min_val), max_val);
|
||||
int32_t clamped = std::min(std::max(unclamped, min_val), max_val);
|
||||
output_data[i] = clamped;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ limitations under the License.
|
||||
#include "ruy/profiler/instrumentation.h" // from @ruy
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/cppmath.h"
|
||||
#include "tensorflow/lite/kernels/internal/max.h"
|
||||
#include "tensorflow/lite/kernels/internal/min.h"
|
||||
#include "tensorflow/lite/kernels/internal/quantization_util.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
|
||||
@@ -68,6 +70,9 @@ inline bool ResolveAxis(const int num_dims, const int* axis,
|
||||
// eg: For num_dims=3, [0, 1, 2] is the same as [-3, -2, -1] */
|
||||
int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
|
||||
TFLITE_DCHECK(current >= 0 && current < num_dims);
|
||||
if (current < 0 || current >= num_dims) {
|
||||
return false;
|
||||
}
|
||||
bool is_dup = false;
|
||||
for (int j = 0; j < *out_num_axis; ++j) {
|
||||
if (out_axis[j] == current) {
|
||||
@@ -127,6 +132,11 @@ inline bool ReduceGeneric(const T* input_data, const int* input_dims,
|
||||
bool keep_dims, int* temp_index, int* resolved_axis,
|
||||
T init_value,
|
||||
T reducer(const T current, const T in)) {
|
||||
// Return early when input shape has zero dim.
|
||||
for (int i = 0; i < input_num_dims; ++i) {
|
||||
if (input_dims[i] == 0) return true;
|
||||
}
|
||||
|
||||
// Reset output data.
|
||||
if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
|
||||
output_data)) {
|
||||
@@ -184,11 +194,11 @@ inline bool Mean(const T* input_data, const int* input_dims,
|
||||
}
|
||||
|
||||
// Calculate mean by dividing output_data by num of aggregated element.
|
||||
U num_elements_in_axis = 1;
|
||||
size_t num_elements_in_axis = 1;
|
||||
for (int idx = 0; idx < num_resolved_axis; ++idx) {
|
||||
size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
|
||||
// Overflow prevention.
|
||||
if (current > (std::numeric_limits<U>::max() / num_elements_in_axis)) {
|
||||
if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
|
||||
return false;
|
||||
}
|
||||
num_elements_in_axis *= current;
|
||||
@@ -249,9 +259,9 @@ inline void Mean(const tflite::MeanParams& op_params,
|
||||
|
||||
inline void Mean(const tflite::MeanParams& op_params,
|
||||
const RuntimeShape& unextended_input_shape,
|
||||
const uint8_t* input_data, int32 input_zero_point,
|
||||
const uint8_t* input_data, int32_t input_zero_point,
|
||||
float input_scale, const RuntimeShape& unextended_output_shape,
|
||||
uint8_t* output_data, int32 output_zero_point,
|
||||
uint8_t* output_data, int32_t output_zero_point,
|
||||
float output_scale) {
|
||||
ruy::profiler::ScopeLabel label("Mean4D/Uint8");
|
||||
|
||||
@@ -280,9 +290,9 @@ inline void Mean(const tflite::MeanParams& op_params,
|
||||
constexpr int32_t kMinValue = std::numeric_limits<uint8_t>::min();
|
||||
constexpr int32_t kMaxValue = std::numeric_limits<uint8_t>::max();
|
||||
|
||||
int32 bias =
|
||||
int32_t bias =
|
||||
output_zero_point -
|
||||
static_cast<int32>(input_zero_point * input_scale / output_scale);
|
||||
static_cast<int32_t>(input_zero_point * input_scale / output_scale);
|
||||
double real_scale =
|
||||
static_cast<double>(input_scale / (num_elements_in_axis * output_scale));
|
||||
|
||||
@@ -291,7 +301,7 @@ inline void Mean(const tflite::MeanParams& op_params,
|
||||
QuantizeMultiplier(real_scale, &multiplier, &shift);
|
||||
for (int out_b = 0; out_b < output_batch; ++out_b) {
|
||||
for (int out_d = 0; out_d < output_depth; ++out_d) {
|
||||
int32 acc = 0;
|
||||
int32_t acc = 0;
|
||||
for (int in_h = 0; in_h < input_height; ++in_h) {
|
||||
for (int in_w = 0; in_w < input_width; ++in_w) {
|
||||
acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
|
||||
@@ -310,18 +320,21 @@ inline void Mean(const tflite::MeanParams& op_params,
|
||||
// It does so in two stages, first calculates the sum of elements along the axis
|
||||
// then divides it by the number of element in axis for quantized values.
|
||||
template <typename T, typename U>
|
||||
inline bool QuantizedMeanOrSum(const T* input_data, int32 input_zero_point,
|
||||
inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point,
|
||||
float input_scale, const int* input_dims,
|
||||
const int input_num_dims, T* output_data,
|
||||
int32 output_zero_point, float output_scale,
|
||||
int32_t output_zero_point, float output_scale,
|
||||
const int* output_dims,
|
||||
const int output_num_dims, const int* axis,
|
||||
const int num_axis_dimensions, bool keep_dims,
|
||||
int* temp_index, int* resolved_axis, U* temp_sum,
|
||||
bool compute_sum) {
|
||||
const bool uint8_case = std::is_same<T, int8_t>::value;
|
||||
const bool uint8_case = std::is_same<T, uint8_t>::value;
|
||||
const bool int16_case = std::is_same<T, int16_t>::value;
|
||||
if (uint8_case) {
|
||||
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Uint8" : "Mean/Uint8");
|
||||
} else if (int16_case) {
|
||||
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int16" : "Mean/Int16");
|
||||
} else {
|
||||
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int8" : "Mean/Int8");
|
||||
}
|
||||
@@ -354,11 +367,11 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32 input_zero_point,
|
||||
}
|
||||
|
||||
// Calculate mean by dividing output_data by num of aggregated element.
|
||||
U num_elements_in_axis = 1;
|
||||
size_t num_elements_in_axis = 1;
|
||||
for (int idx = 0; idx < num_resolved_axis; ++idx) {
|
||||
size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
|
||||
// Overflow prevention.
|
||||
if (current > (std::numeric_limits<U>::max() / num_elements_in_axis)) {
|
||||
if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
|
||||
return false;
|
||||
}
|
||||
num_elements_in_axis *= current;
|
||||
@@ -368,8 +381,7 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32 input_zero_point,
const float scale = input_scale / output_scale;
if (compute_sum) {
  // TODO(b/116341117): Eliminate float and do this completely in 8bit.
  const float bias =
      -input_zero_point * scale * num_elements_in_axis + 0.5f;
  const float bias = -input_zero_point * scale * num_elements_in_axis;
  for (size_t idx = 0; idx < num_outputs; ++idx) {
    const U value =
        static_cast<U>(TfLiteRound(temp_sum[idx] * scale + bias)) +
@@ -377,15 +389,15 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32 input_zero_point,
    output_data[idx] = static_cast<T>(value);
  }
} else {
  const float bias = -input_zero_point * scale + 0.5f;
  const float bias = -input_zero_point * scale;
  for (size_t idx = 0; idx < num_outputs; ++idx) {
    float float_mean = static_cast<float>(temp_sum[idx]) /
                       static_cast<float>(num_elements_in_axis);
    float result =
        std::min(TfLiteRound(float_mean * scale + bias) + output_zero_point,
                 static_cast<float>(std::numeric_limits<T>::max()));
    result =
        std::max(result, static_cast<float>(std::numeric_limits<T>::min()));
    float result = TfLiteMin(
        TfLiteRound(float_mean * scale + bias) + output_zero_point,
        static_cast<float>(std::numeric_limits<T>::max()));
    result = TfLiteMax(result,
                       static_cast<float>(std::numeric_limits<T>::min()));
    output_data[idx] = static_cast<T>(result);
  }
}

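The change above drops the manual +0.5f bias because TfLiteRound already rounds to nearest, and routes the clamping through TfLiteMin/TfLiteMax. A small sketch of the mean-branch rescaling follows; the scales and zero points are assumptions for illustration, and std::round stands in for TfLiteRound:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative rescaling as done by the mean branch: sum the quantized
// inputs, convert through the input/output scales, round to nearest, then
// clamp to the int8 output range.
int main() {
  const std::vector<int8_t> input = {12, -7, 33, 5};
  const float input_scale = 0.05f, output_scale = 0.1f;
  const int32_t input_zero_point = -2, output_zero_point = 3;

  int32_t sum = 0;
  for (int8_t v : input) sum += v;

  const float scale = input_scale / output_scale;
  const float bias = -input_zero_point * scale;  // no +0.5f: rounding happens below
  const float mean = static_cast<float>(sum) / static_cast<float>(input.size());
  float result = std::round(mean * scale + bias) + output_zero_point;
  result = std::min(result, 127.0f);
  result = std::max(result, -128.0f);
  std::printf("quantized mean = %d\n", static_cast<int>(result));
  return 0;
}
```
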
@@ -17,28 +17,30 @@ limitations under the License.

#include <cmath>

#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

inline int32 GetNearestNeighbor(const int input_value, const int32 input_size,
                                const int32 output_size,
                                const bool align_corners,
                                const bool half_pixel_centers) {
inline int32_t GetNearestNeighbor(const int input_value,
                                  const int32_t input_size,
                                  const int32_t output_size,
                                  const bool align_corners,
                                  const bool half_pixel_centers) {
  const float scale =
      (align_corners && output_size > 1)
          ? (input_size - 1) / static_cast<float>(output_size - 1)
          : input_size / static_cast<float>(output_size);
  const float offset = half_pixel_centers ? 0.5f : 0.0f;
  int32 output_value = std::min(
  int32_t output_value = std::min(
      align_corners
          ? static_cast<int32>(std::round((input_value + offset) * scale))
          : static_cast<int32>(std::floor((input_value + offset) * scale)),
          ? static_cast<int32_t>(TfLiteRound((input_value + offset) * scale))
          : static_cast<int32_t>(std::floor((input_value + offset) * scale)),
      input_size - 1);
  if (half_pixel_centers) {
    output_value = std::max(static_cast<int32>(0), output_value);
    output_value = std::max(static_cast<int32_t>(0), output_value);
  }
  return output_value;
}
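GetNearestNeighbor maps an output coordinate back to the nearest source coordinate, with the scale and rounding chosen by align_corners and half_pixel_centers. A stand-alone sketch of the same mapping (hypothetical name, std::round instead of TfLiteRound), plus the result it gives for a simple 4-to-8 upscale:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Maps one output index to its nearest source index, as in the code above.
int32_t NearestSourceIndex(int out_index, int32_t input_size,
                           int32_t output_size, bool align_corners,
                           bool half_pixel_centers) {
  const float scale =
      (align_corners && output_size > 1)
          ? (input_size - 1) / static_cast<float>(output_size - 1)
          : input_size / static_cast<float>(output_size);
  const float offset = half_pixel_centers ? 0.5f : 0.0f;
  int32_t v = std::min(
      align_corners
          ? static_cast<int32_t>(std::round((out_index + offset) * scale))
          : static_cast<int32_t>(std::floor((out_index + offset) * scale)),
      input_size - 1);
  if (half_pixel_centers) {
    v = std::max(static_cast<int32_t>(0), v);
  }
  return v;
}

// With input_size = 4, output_size = 8 and both flags false, the scale is
// 0.5 and output columns 0..7 map to source columns 0,0,1,1,2,2,3,3.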
@@ -47,7 +49,7 @@ template <typename T>
|
||||
inline void ResizeNearestNeighbor(
|
||||
const tflite::ResizeNearestNeighborParams& op_params,
|
||||
const RuntimeShape& unextended_input_shape, const T* input_data,
|
||||
const RuntimeShape& output_size_shape, const int32* output_size_data,
|
||||
const RuntimeShape& output_size_shape, const int32_t* output_size_data,
|
||||
const RuntimeShape& unextended_output_shape, T* output_data) {
|
||||
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
|
||||
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
|
||||
@@ -57,16 +59,16 @@ inline void ResizeNearestNeighbor(
|
||||
const RuntimeShape output_shape =
|
||||
RuntimeShape::ExtendedShape(4, unextended_output_shape);
|
||||
|
||||
int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
|
||||
int32 input_height = input_shape.Dims(1);
|
||||
int32 input_width = input_shape.Dims(2);
|
||||
int32 depth = MatchingDim(input_shape, 3, output_shape, 3);
|
||||
int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
|
||||
int32_t input_height = input_shape.Dims(1);
|
||||
int32_t input_width = input_shape.Dims(2);
|
||||
int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
|
||||
|
||||
// The Tensorflow version of this op allows resize on the width and height
|
||||
// axis only.
|
||||
TFLITE_DCHECK_EQ(output_size_shape.FlatSize(), 2);
|
||||
int32 output_height = output_size_data[0];
|
||||
int32 output_width = output_size_data[1];
|
||||
int32_t output_height = output_size_data[0];
|
||||
int32_t output_width = output_size_data[1];
|
||||
|
||||
const int col_offset = input_shape.Dims(3);
|
||||
const int row_offset = input_shape.Dims(2) * col_offset;
|
||||
@@ -76,14 +78,14 @@ inline void ResizeNearestNeighbor(
|
||||
T* output_ptr = output_data;
|
||||
for (int b = 0; b < batches; ++b) {
|
||||
for (int y = 0; y < output_height; ++y) {
|
||||
int32 in_y = GetNearestNeighbor(y, input_height, output_height,
|
||||
op_params.align_corners,
|
||||
op_params.half_pixel_centers);
|
||||
const T* y_input_ptr = input_ptr + in_y * row_offset;
|
||||
for (int x = 0; x < output_width; ++x) {
|
||||
int32 in_x = GetNearestNeighbor(x, input_width, output_width,
|
||||
int32_t in_y = GetNearestNeighbor(y, input_height, output_height,
|
||||
op_params.align_corners,
|
||||
op_params.half_pixel_centers);
|
||||
const T* y_input_ptr = input_ptr + in_y * row_offset;
|
||||
for (int x = 0; x < output_width; ++x) {
|
||||
int32_t in_x = GetNearestNeighbor(x, input_width, output_width,
|
||||
op_params.align_corners,
|
||||
op_params.half_pixel_centers);
|
||||
const T* x_input_ptr = y_input_ptr + in_x * col_offset;
|
||||
memcpy(output_ptr, x_input_ptr, depth * sizeof(T));
|
||||
output_ptr += depth;
|
||||
|
||||
@@ -16,7 +16,6 @@ limitations under the License.
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_
|
||||
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
|
||||
#include "fixedpoint/fixedpoint.h"
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
@@ -49,26 +48,27 @@ inline void Softmax(const SoftmaxParams& params,
    // Compute sum.
    float sum = 0.f;
    for (int c = 0; c < depth; ++c) {
      sum += std::exp((input_data[i * depth + c] - max) *
                      static_cast<float>(params.beta));
      const float exp_c = std::exp((input_data[i * depth + c] - max) *
                                   static_cast<float>(params.beta));
      output_data[i * depth + c] = exp_c;
      sum += exp_c;
    }

    // Compute result.
    for (int c = 0; c < depth; ++c) {
      output_data[i * depth + c] = std::exp((input_data[i * depth + c] - max) *
                                            static_cast<float>(params.beta)) /
                                   sum;
      output_data[i * depth + c] = output_data[i * depth + c] / sum;
    }
  }
}

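The reworked float path evaluates each exponential once, parks it in output_data, and then normalizes, rather than calling std::exp again in the second loop. A minimal sketch of that two-pass scheme over a single row (hypothetical signature, not the TFLite one):

#include <algorithm>
#include <cmath>

// Two-pass softmax over one row: cache exp(x - max) in the output buffer,
// then normalize in place. Mirrors the restructuring shown above.
void SoftmaxRow(const float* input, float* output, int depth, float beta) {
  float max_val = input[0];
  for (int c = 1; c < depth; ++c) {
    max_val = std::max(max_val, input[c]);  // subtract max for stability
  }
  float sum = 0.f;
  for (int c = 0; c < depth; ++c) {
    const float e = std::exp((input[c] - max_val) * beta);
    output[c] = e;                          // reuse output as scratch
    sum += e;
  }
  for (int c = 0; c < depth; ++c) {
    output[c] /= sum;                       // single normalization pass
  }
}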
// Quantized softmax with int8/uint8 input and int8/uint8/int16 output.
|
||||
// Quantized softmax with int8_t/uint8_t input and int8_t/uint8_t/int16_t
|
||||
// output.
|
||||
template <typename InputT, typename OutputT>
|
||||
inline void Softmax(const SoftmaxParams& params,
|
||||
const RuntimeShape& input_shape, const InputT* input_data,
|
||||
const RuntimeShape& output_shape, OutputT* output_data) {
|
||||
const int32 input_beta_multiplier = params.input_multiplier;
|
||||
const int32 input_beta_left_shift = params.input_left_shift;
|
||||
const int32_t input_beta_multiplier = params.input_multiplier;
|
||||
const int32_t input_beta_left_shift = params.input_left_shift;
|
||||
const int diff_min = params.diff_min;
|
||||
// The representation chosen for the input to the exp() function is Q5.26.
|
||||
// We need to leave extra space since values that we skip might be as large as
|
||||
@@ -78,9 +78,10 @@ inline void Softmax(const SoftmaxParams& params,
|
||||
static const int kScaledDiffIntegerBits = 5;
|
||||
static const int kAccumulationIntegerBits = 12;
|
||||
using FixedPointScaledDiff =
|
||||
gemmlowp::FixedPoint<int32, kScaledDiffIntegerBits>;
|
||||
using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
|
||||
using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
|
||||
gemmlowp::FixedPoint<int32_t, kScaledDiffIntegerBits>;
|
||||
using FixedPointAccum =
|
||||
gemmlowp::FixedPoint<int32_t, kAccumulationIntegerBits>;
|
||||
using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
|
||||
|
||||
const int trailing_dim = input_shape.DimensionsCount() - 1;
|
||||
const int outer_size =
|
||||
@@ -96,10 +97,10 @@ inline void Softmax(const SoftmaxParams& params,
|
||||
|
||||
FixedPointAccum sum_of_exps = FixedPointAccum::Zero();
|
||||
for (int c = 0; c < depth; ++c) {
|
||||
int32 input_diff =
|
||||
static_cast<int32>(input_data[i * depth + c]) - max_in_row;
|
||||
int32_t input_diff =
|
||||
static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
|
||||
if (input_diff >= diff_min) {
|
||||
const int32 input_diff_rescaled =
|
||||
const int32_t input_diff_rescaled =
|
||||
MultiplyByQuantizedMultiplierGreaterThanOne(
|
||||
input_diff, input_beta_multiplier, input_beta_left_shift);
|
||||
const FixedPointScaledDiff scaled_diff_f8 =
|
||||
@@ -114,28 +115,28 @@ inline void Softmax(const SoftmaxParams& params,
|
||||
sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit));
|
||||
|
||||
for (int c = 0; c < depth; ++c) {
|
||||
int32 input_diff =
|
||||
static_cast<int32>(input_data[i * depth + c]) - max_in_row;
|
||||
int32_t input_diff =
|
||||
static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
|
||||
if (input_diff >= diff_min) {
|
||||
const int32 input_diff_rescaled =
|
||||
const int32_t input_diff_rescaled =
|
||||
MultiplyByQuantizedMultiplierGreaterThanOne(
|
||||
input_diff, input_beta_multiplier, input_beta_left_shift);
|
||||
const FixedPointScaledDiff scaled_diff_f8 =
|
||||
FixedPointScaledDiff::FromRaw(input_diff_rescaled);
|
||||
|
||||
FixedPoint0 exp_in_0 = exp_on_negative_values(scaled_diff_f8);
|
||||
int32 unsat_output = gemmlowp::RoundingDivideByPOT(
|
||||
int32_t unsat_output = gemmlowp::RoundingDivideByPOT(
|
||||
(shifted_scale * exp_in_0).raw(),
|
||||
num_bits_over_unit + 31 - (sizeof(OutputT) * 8));
|
||||
|
||||
const int32 shifted_output =
|
||||
const int32_t shifted_output =
|
||||
unsat_output +
|
||||
static_cast<int32>(std::numeric_limits<OutputT>::min());
|
||||
static_cast<int32_t>(std::numeric_limits<OutputT>::min());
|
||||
|
||||
output_data[i * depth + c] = static_cast<OutputT>(std::max(
|
||||
std::min(shifted_output,
|
||||
static_cast<int32>(std::numeric_limits<OutputT>::max())),
|
||||
static_cast<int32>(std::numeric_limits<OutputT>::min())));
|
||||
static_cast<int32_t>(std::numeric_limits<OutputT>::max())),
|
||||
static_cast<int32_t>(std::numeric_limits<OutputT>::min())));
|
||||
} else {
|
||||
output_data[i * depth + c] = std::numeric_limits<OutputT>::min();
|
||||
}
|
||||
@@ -143,7 +144,24 @@ inline void Softmax(const SoftmaxParams& params,
|
||||
}
|
||||
}
|
||||
|
||||
// Quantized softmax with int16 input and int16 output.
// Computes exp(input - max_input)
inline int16_t SoftMaxCalculateExp(const SoftmaxParams& params,
                                   const int16_t* input_data, const int depth,
                                   int16_t max_in_row, int i, int c) {
  int32_t input_diff = input_data[i * depth + c] - max_in_row;
  // scale the input_diff such that [-65535, 0] correspond to [-10.0, 0.0]
  // exp lut generated with range [-10, 0], as exp(-10) is negligible.
  int32_t scaled_diff = MultiplyByQuantizedMultiplier(
      input_diff, params.input_multiplier, params.input_left_shift);
  // recenter to [-32768, 32767]
  int32_t sym_scaled_diff = scaled_diff + 32767;
  int16_t sat_sym_scaled_diff =
      std::min(std::max(sym_scaled_diff, static_cast<int32_t>(-32768)),
               static_cast<int32_t>(32767));
  // apply the exp() LUT activation function
  return generic_int16_table_lookup(sat_sym_scaled_diff, params.exp_lut);
}
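The helper above maps the scaled difference, which lies in [-65535, 0] after MultiplyByQuantizedMultiplier, into the symmetric int16_t range expected by the exp lookup table by adding 32767 and saturating. The recentering step on its own, as a small sketch (hypothetical helper name):

#include <algorithm>
#include <cstdint>

// Recenters a value in [-65535, 0] to [-32768, 32767] with saturation,
// as done before the exp() table lookup above. For example, a scaled
// difference of 0 becomes 32767 and -65535 becomes -32768.
int16_t RecenterToInt16(int32_t scaled_diff) {
  const int32_t sym = scaled_diff + 32767;
  return static_cast<int16_t>(
      std::min(std::max(sym, static_cast<int32_t>(-32768)),
               static_cast<int32_t>(32767)));
}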
// Quantized softmax with int16_t input and int16_t output.
|
||||
inline void SoftmaxInt16(const SoftmaxParams& params,
|
||||
const RuntimeShape& input_shape,
|
||||
const int16_t* input_data,
|
||||
@@ -162,28 +180,16 @@ inline void SoftmaxInt16(const SoftmaxParams& params,
|
||||
max_in_row = std::max(max_in_row, input_data[i * depth + c]);
|
||||
}
|
||||
|
||||
// Compute exp(input - max_input)
|
||||
std::vector<int16_t> exp_result_Q015(depth);
|
||||
// This loops computes the exp values and their sum. We will need the exp
|
||||
// values later on in the function so we cache them in the output_data
|
||||
// buffer. This is an optimization done to avoid calculating the exp values
|
||||
// twice making use of the output_data buffer as scratch memory.
|
||||
int32_t sum_of_exps = 0; // Q16.15 fixed point format.
|
||||
int16_t* exp_results_Q015 = output_data + i * depth;
|
||||
for (int c = 0; c < depth; ++c) {
|
||||
int32_t input_diff = input_data[i * depth + c] - max_in_row;
|
||||
// scale the input_diff such that [-65535, 0] correspond to [-10.0, 0.0]
|
||||
int32_t scaled_diff = MultiplyByQuantizedMultiplier(
|
||||
input_diff, params.input_multiplier, params.input_left_shift);
|
||||
// recenter to [-32768, 32767]
|
||||
int32_t sym_scaled_diff = scaled_diff + 32767;
|
||||
int16_t sat_sym_scaled_diff =
|
||||
std::min(std::max(sym_scaled_diff, static_cast<int32_t>(-32768)),
|
||||
static_cast<int32_t>(32767));
|
||||
// apply the exp() LUT activation function
|
||||
exp_result_Q015[c] =
|
||||
generic_int16_table_lookup(sat_sym_scaled_diff, params.exp_lut);
|
||||
}
|
||||
|
||||
// sum_of_exps is a Q16.15 fixed point format.
|
||||
int32_t sum_of_exps = 0;
|
||||
for (int c = 0; c < depth; ++c) {
|
||||
// Q16.15 + Q0.15
|
||||
sum_of_exps += exp_result_Q015[c];
|
||||
exp_results_Q015[c] =
|
||||
SoftMaxCalculateExp(params, input_data, depth, max_in_row, i, c);
|
||||
sum_of_exps += exp_results_Q015[c];
|
||||
}
|
||||
|
||||
// Compute the reciprocal 1/sum_of_exps
|
||||
@@ -209,7 +215,7 @@ inline void SoftmaxInt16(const SoftmaxParams& params,
|
||||
for (int c = 0; c < depth; ++c) {
|
||||
uint8_t right_shift = 31 - headroom_plus_one;
|
||||
int64_t round = 1 << (right_shift - 1);
|
||||
int32_t result = (static_cast<int64_t>(exp_result_Q015[c]) *
|
||||
int32_t result = (static_cast<int64_t>(exp_results_Q015[c]) *
|
||||
static_cast<int64_t>(reciprocal_scale_Q015) +
|
||||
round) >>
|
||||
right_shift;
|
||||
|
||||
@@ -16,8 +16,10 @@ limitations under the License.
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_
|
||||
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/compatibility.h"
|
||||
#include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
namespace reference_ops {
|
||||
|
||||
@@ -15,9 +15,15 @@ limitations under the License.
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_
|
||||
|
||||
#include "fixedpoint/fixedpoint.h"
|
||||
#include <stdint.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <limits>
|
||||
|
||||
#include "ruy/profiler/instrumentation.h" // from @ruy
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/compatibility.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
|
||||
namespace tflite {
|
||||
|
||||
@@ -41,11 +47,11 @@ inline void SubNonBroadcast(const ArithmeticParams& params,
|
||||
|
||||
inline void SubNonBroadcast(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const int32* input1_data,
|
||||
const int32_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const int32* input2_data,
|
||||
const int32_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int32* output_data) {
|
||||
int32_t* output_data) {
|
||||
const int flat_size =
|
||||
MatchingElementsSize(input1_shape, input2_shape, output_shape);
|
||||
for (int i = 0; i < flat_size; ++i) {
|
||||
@@ -106,12 +112,12 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
template <int N = 5>
|
||||
inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const uint8* input1_data,
|
||||
const uint8_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const uint8* input2_data,
|
||||
const uint8_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
uint8* output_data) {
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/uint8");
|
||||
uint8_t* output_data) {
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/uint8_t");
|
||||
TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
|
||||
TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
|
||||
TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
|
||||
@@ -134,28 +140,28 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
// nesting loops such that the innermost loop has the smallest stride for the
|
||||
// best cache behavior.
|
||||
auto sub_func = [&](int indexes[N]) {
|
||||
const int32 input1_val =
|
||||
const int32_t input1_val =
|
||||
params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
|
||||
const int32 input2_val =
|
||||
const int32_t input2_val =
|
||||
params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
|
||||
const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, params.input1_multiplier, params.input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, params.input2_multiplier, params.input2_shift);
|
||||
const int32 raw_sub = scaled_input1_val - scaled_input2_val;
|
||||
const int32 raw_output =
|
||||
const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
|
||||
const int32_t raw_output =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
raw_sub, params.output_multiplier, params.output_shift) +
|
||||
params.output_offset;
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, raw_output));
|
||||
output_data[SubscriptToIndex(output_desc, indexes)] =
|
||||
static_cast<uint8>(clamped_output);
|
||||
static_cast<uint8_t>(clamped_output);
|
||||
};
|
||||
NDOpsHelper<N>(output_desc, sub_func);
|
||||
}
|
||||
@@ -163,12 +169,12 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
template <int N = 5>
|
||||
inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const int32* input1_data,
|
||||
const int32_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const int32* input2_data,
|
||||
const int32_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int32* output_data) {
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/int32");
|
||||
int32_t* output_data) {
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/int32_t");
|
||||
TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
|
||||
TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
|
||||
TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
|
||||
@@ -208,7 +214,7 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
const int8_t* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int8_t* output_data) {
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/int8");
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/int8_t");
|
||||
NdArrayDesc<N> desc1;
|
||||
NdArrayDesc<N> desc2;
|
||||
NdArrayDesc<N> output_desc;
|
||||
@@ -254,6 +260,45 @@ inline void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
NDOpsHelper<N>(output_desc, sub_func);
|
||||
}
|
||||
|
||||
template <int N = 5>
|
||||
void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const int64_t* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const int64_t* input2_data,
|
||||
const RuntimeShape& output_shape, int64_t* output_data) {
|
||||
ruy::profiler::ScopeLabel label("BroadcastSubSlow/int64_t");
|
||||
TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N);
|
||||
TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N);
|
||||
TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N);
|
||||
NdArrayDesc<N> desc1;
|
||||
NdArrayDesc<N> desc2;
|
||||
NdArrayDesc<N> output_desc;
|
||||
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
|
||||
&desc2);
|
||||
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc);
|
||||
|
||||
// In Tensorflow, the dimensions are canonically named (batch_number, row,
|
||||
// col, channel), with extents (batches, height, width, depth), with the
|
||||
// trailing dimension changing most rapidly (channels has the smallest stride,
|
||||
// typically 1 element).
|
||||
//
|
||||
// In generated C code, we store arrays with the dimensions reversed. The
|
||||
// first dimension has smallest stride.
|
||||
//
|
||||
// We name our variables by their Tensorflow convention, but generate C code
|
||||
// nesting loops such that the innermost loop has the smallest stride for the
|
||||
// best cache behavior.
|
||||
auto sub_func = [&](int indexes[N]) {
|
||||
output_data[SubscriptToIndex(output_desc, indexes)] =
|
||||
ActivationFunctionWithMinMax(
|
||||
input1_data[SubscriptToIndex(desc1, indexes)] -
|
||||
input2_data[SubscriptToIndex(desc2, indexes)],
|
||||
params.int64_activation_min, params.int64_activation_max);
|
||||
};
|
||||
NDOpsHelper<N>(output_desc, sub_func);
|
||||
}
|
||||
|
||||
template <typename T, int N = 5>
|
||||
void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape, const T* input1_data,
|
||||
@@ -294,33 +339,33 @@ void BroadcastSubSlow(const ArithmeticParams& params,
|
||||
// Element-wise Sub that can often be used for inner loop of broadcast sub as
|
||||
// well as the non-broadcast sub.
|
||||
inline void SubElementwise(int size, const ArithmeticParams& params,
|
||||
const uint8* input1_data, const uint8* input2_data,
|
||||
uint8* output_data) {
|
||||
const uint8_t* input1_data,
|
||||
const uint8_t* input2_data, uint8_t* output_data) {
|
||||
TFLITE_DCHECK_GT(params.input1_offset, -256);
|
||||
TFLITE_DCHECK_GT(params.input2_offset, -256);
|
||||
TFLITE_DCHECK_LT(params.input1_offset, 256);
|
||||
TFLITE_DCHECK_LT(params.input2_offset, 256);
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const int32 input1_val = params.input1_offset + input1_data[i];
|
||||
const int32 input2_val = params.input2_offset + input2_data[i];
|
||||
const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t input1_val = params.input1_offset + input1_data[i];
|
||||
const int32_t input2_val = params.input2_offset + input2_data[i];
|
||||
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, params.input1_multiplier, params.input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, params.input2_multiplier, params.input2_shift);
|
||||
const int32 raw_sub = scaled_input1_val - scaled_input2_val;
|
||||
const int32 raw_output =
|
||||
const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
|
||||
const int32_t raw_output =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
raw_sub, params.output_multiplier, params.output_shift) +
|
||||
params.output_offset;
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, raw_output));
|
||||
output_data[i] = static_cast<uint8>(clamped_output);
|
||||
output_data[i] = static_cast<uint8_t>(clamped_output);
|
||||
}
|
||||
}
|
||||
|
||||
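Each quantized SubElementwise variant follows the same pipeline: add the input offsets, shift left into extra headroom, rescale both operands with their fixed-point multipliers, subtract, rescale to the output, add the output offset, and clamp. A float-domain sketch of the equivalent computation shows the intent; the integer version replaces the real-valued scales with the multiplier/shift pairs carried in ArithmeticParams (hypothetical helper, not the TFLite API):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize both inputs, subtract in real values, requantize to the
// output scale, then clamp to the uint8_t range.
uint8_t DequantSubRequant(uint8_t a, uint8_t b,
                          float scale_a, int zero_a,
                          float scale_b, int zero_b,
                          float scale_out, int zero_out) {
  const float real_a = scale_a * (static_cast<int>(a) - zero_a);
  const float real_b = scale_b * (static_cast<int>(b) - zero_b);
  const float real_diff = real_a - real_b;
  const int32_t q =
      static_cast<int32_t>(std::round(real_diff / scale_out)) + zero_out;
  return static_cast<uint8_t>(
      std::min<int32_t>(255, std::max<int32_t>(0, q)));
}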
@@ -336,22 +381,22 @@ inline void SubElementwise(int size, const ArithmeticParams& params,
|
||||
TFLITE_DCHECK_LE(params.input2_offset, int8_max_value);
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
const int32 input1_val = params.input1_offset + input1_data[i];
|
||||
const int32 input2_val = params.input2_offset + input2_data[i];
|
||||
const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32 scaled_input1_val =
|
||||
const int32_t input1_val = params.input1_offset + input1_data[i];
|
||||
const int32_t input2_val = params.input2_offset + input2_data[i];
|
||||
const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
|
||||
const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
|
||||
const int32_t scaled_input1_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input1_val, params.input1_multiplier, params.input1_shift);
|
||||
const int32 scaled_input2_val =
|
||||
const int32_t scaled_input2_val =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
shifted_input2_val, params.input2_multiplier, params.input2_shift);
|
||||
const int32 raw_sub = scaled_input1_val - scaled_input2_val;
|
||||
const int32 raw_output =
|
||||
const int32_t raw_sub = scaled_input1_val - scaled_input2_val;
|
||||
const int32_t raw_output =
|
||||
MultiplyByQuantizedMultiplierSmallerThanOneExp(
|
||||
raw_sub, params.output_multiplier, params.output_shift) +
|
||||
params.output_offset;
|
||||
const int32 clamped_output =
|
||||
const int32_t clamped_output =
|
||||
std::min(params.quantized_activation_max,
|
||||
std::max(params.quantized_activation_min, raw_output));
|
||||
output_data[i] = static_cast<int8_t>(clamped_output);
|
||||
@@ -359,9 +404,9 @@ inline void SubElementwise(int size, const ArithmeticParams& params,
|
||||
}
|
||||
|
||||
inline void Sub(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape, const uint8* input1_data,
|
||||
const RuntimeShape& input2_shape, const uint8* input2_data,
|
||||
const RuntimeShape& output_shape, uint8* output_data) {
|
||||
const RuntimeShape& input1_shape, const uint8_t* input1_data,
|
||||
const RuntimeShape& input2_shape, const uint8_t* input2_data,
|
||||
const RuntimeShape& output_shape, uint8_t* output_data) {
|
||||
TFLITE_DCHECK_LE(params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
const int flat_size =
|
||||
@@ -428,40 +473,43 @@ void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape,
|
||||
}
|
||||
}
|
||||
|
||||
inline void SubWithActivation(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const int32* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const int32* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
int32* output_data) {
|
||||
inline void SetActivationMinMax(const ArithmeticParams& params,
|
||||
int32_t* activation_min,
|
||||
int32_t* activation_max) {
|
||||
*activation_min = params.quantized_activation_min;
|
||||
*activation_max = params.quantized_activation_max;
|
||||
}
|
||||
|
||||
inline void SetActivationMinMax(const ArithmeticParams& params,
|
||||
float* activation_min, float* activation_max) {
|
||||
*activation_min = params.float_activation_min;
|
||||
*activation_max = params.float_activation_max;
|
||||
}
|
||||
|
||||
inline void SetActivationMinMax(const ArithmeticParams& params,
|
||||
int64_t* activation_min,
|
||||
int64_t* activation_max) {
|
||||
*activation_min = params.int64_activation_min;
|
||||
*activation_max = params.int64_activation_max;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void SubWithActivation(
|
||||
const ArithmeticParams& params, const RuntimeShape& input1_shape,
|
||||
const T* input1_data, const RuntimeShape& input2_shape,
|
||||
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
|
||||
ruy::profiler::ScopeLabel label("SubWithActivation");
|
||||
const int flat_size =
|
||||
MatchingElementsSize(input1_shape, input2_shape, output_shape);
|
||||
T activation_min, activation_max;
|
||||
SetActivationMinMax(params, &activation_min, &activation_max);
|
||||
|
||||
for (int i = 0; i < flat_size; ++i) {
|
||||
output_data[i] = ActivationFunctionWithMinMax(
|
||||
input1_data[i] - input2_data[i], params.quantized_activation_min,
|
||||
params.quantized_activation_max);
|
||||
input1_data[i] - input2_data[i], activation_min, activation_max);
|
||||
}
|
||||
}
|
||||
|
||||
inline void SubWithActivation(const ArithmeticParams& params,
|
||||
const RuntimeShape& input1_shape,
|
||||
const float* input1_data,
|
||||
const RuntimeShape& input2_shape,
|
||||
const float* input2_data,
|
||||
const RuntimeShape& output_shape,
|
||||
float* output_data) {
|
||||
const int flat_size =
|
||||
MatchingElementsSize(input1_shape, input2_shape, output_shape);
|
||||
for (int i = 0; i < flat_size; ++i) {
|
||||
output_data[i] = ActivationFunctionWithMinMax(
|
||||
input1_data[i] - input2_data[i], params.float_activation_min,
|
||||
params.float_activation_max);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // namespace reference_ops
|
||||
} // namespace tflite
|
||||
|
||||
|
||||
@@ -0,0 +1,129 @@
|
||||
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_
|
||||
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_
|
||||
|
||||
#include <cmath>
|
||||
|
||||
#include "fixedpoint/fixedpoint.h"
|
||||
#include "tensorflow/lite/kernels/internal/common.h"
|
||||
#include "tensorflow/lite/kernels/internal/cppmath.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
#include "tensorflow/lite/kernels/op_macros.h"
|
||||
|
||||
namespace tflite {
|
||||
namespace reference_ops {
|
||||
|
||||
inline void Tanh(const RuntimeShape& input_shape, const float* input_data,
|
||||
const RuntimeShape& output_shape, float* output_data) {
|
||||
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
|
||||
for (int i = 0; i < flat_size; i++) {
|
||||
float val = input_data[i];
|
||||
float result = std::tanh(val);
|
||||
output_data[i] = result;
|
||||
}
|
||||
}
|
||||
|
||||
// Convenience version that allows, for example, generated-code calls to be
|
||||
// uniform between data types.
|
||||
inline void Tanh(const TanhParams&, const RuntimeShape& input_shape,
|
||||
const float* input_data, const RuntimeShape& output_shape,
|
||||
float* output_data) {
|
||||
// Drop params: not needed.
|
||||
Tanh(input_shape, input_data, output_shape, output_data);
|
||||
}
|
||||
|
||||
inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape,
|
||||
const int16_t* input_data, const RuntimeShape& output_shape,
|
||||
int16_t* output_data) {
|
||||
const int input_left_shift = params.input_left_shift;
|
||||
// Support for shifts is limited until we have a parameterized version of
|
||||
// SaturatingRoundingMultiplyByPOT().
|
||||
TFLITE_DCHECK_GE(input_left_shift, 0);
|
||||
TFLITE_DCHECK_LE(input_left_shift, 1);
|
||||
|
||||
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
|
||||
// F0 uses 0 integer bits, range [-1, 1].
|
||||
// This is the return type of math functions such as tanh, logistic,
|
||||
// whose range is in [-1, 1].
|
||||
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
|
||||
// F3 uses 3 integer bits, range [-8, 8], the input range expected here.
|
||||
using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
|
||||
|
||||
if (input_left_shift == 0) {
|
||||
for (int i = 0; i < flat_size; i++) {
|
||||
F3 input = F3::FromRaw(input_data[i]);
|
||||
F0 output = gemmlowp::tanh(input);
|
||||
output_data[i] = output.raw();
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < flat_size; i++) {
|
||||
F3 input = F3::FromRaw(
|
||||
gemmlowp::SaturatingRoundingMultiplyByPOT<1>(input_data[i]));
|
||||
F0 output = gemmlowp::tanh(input);
|
||||
output_data[i] = output.raw();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape,
|
||||
const uint8_t* input_data, const RuntimeShape& output_shape,
|
||||
uint8_t* output_data) {
|
||||
const int32_t input_zero_point = params.input_zero_point;
|
||||
const int32_t input_range_radius = params.input_range_radius;
|
||||
const int32_t input_multiplier = params.input_multiplier;
|
||||
const int input_left_shift = params.input_left_shift;
|
||||
const int32_t output_zero_point = 128;
|
||||
const int flat_size = MatchingFlatSize(input_shape, output_shape);
|
||||
|
||||
for (int i = 0; i < flat_size; i++) {
|
||||
const uint8_t input_val_u8 = input_data[i];
|
||||
const int32_t input_val_centered =
|
||||
static_cast<int32_t>(input_val_u8) - input_zero_point;
|
||||
uint8_t output_val;
|
||||
if (input_val_centered <= -input_range_radius) {
|
||||
output_val = 0;
|
||||
} else if (input_val_centered >= input_range_radius) {
|
||||
output_val = 255;
|
||||
} else {
|
||||
const int32_t input_val_rescaled =
|
||||
MultiplyByQuantizedMultiplierGreaterThanOne(
|
||||
input_val_centered, input_multiplier, input_left_shift);
|
||||
using FixedPoint4 = gemmlowp::FixedPoint<int32_t, 4>;
|
||||
using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
|
||||
const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
|
||||
const FixedPoint0 output_val_f0 = gemmlowp::tanh(input_val_f4);
|
||||
// Convert from Q0.31 to Q24.7.
|
||||
using gemmlowp::RoundingDivideByPOT;
|
||||
int32_t output_val_s32 = RoundingDivideByPOT(output_val_f0.raw(), 24);
|
||||
output_val_s32 += output_zero_point;
|
||||
if (output_val_s32 == 256) {
|
||||
output_val_s32 = 255;
|
||||
}
|
||||
// Reinterpret as Q0.7, encoded in uint8_t.
|
||||
TFLITE_DCHECK_GE(output_val_s32, 0);
|
||||
TFLITE_DCHECK_LE(output_val_s32, 255);
|
||||
output_val = static_cast<uint8_t>(output_val_s32);
|
||||
}
|
||||
output_data[i] = output_val;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace reference_ops
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_
|
||||
@@ -18,6 +18,7 @@ limitations under the License.
|
||||
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
|
||||
#include "tensorflow/lite/kernels/internal/compatibility.h"
|
||||
#include "tensorflow/lite/kernels/internal/types.h"
|
||||
|
||||
@@ -69,8 +70,8 @@ inline void StridedSlicePadIndices(tflite::StridedSliceParams* p,
|
||||
}
|
||||
|
||||
// Return the index for the first element along that axis. This index will be a
// positive integer between [0, axis_size - 1] that can be used to index
// directly into the data.
// positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0)
// that can be used to index directly into the data.
inline int StartForAxis(const tflite::StridedSliceParams& params,
                        const RuntimeShape& input_shape, int axis) {
  const auto begin_mask = params.begin_mask;
@@ -102,7 +103,13 @@ inline int StartForAxis(const tflite::StridedSliceParams& params,
  }

  // Clamping
  start = Clamp(start, 0, axis_size - 1);
  if (strides[axis] > 0) {
    // Forward iteration
    start = Clamp(start, 0, axis_size);
  } else {
    // Backward iteration
    start = Clamp(start, -1, axis_size - 1);
  }

  return start;
}

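The widened clamp matters for negative strides: a reverse slice has to be able to start at axis_size - 1 and stop at the sentinel index -1, while a forward slice may now start at axis_size and yield an empty result. A compact sketch of the stride-aware clamp (hypothetical helper name):

#include <algorithm>

// Clamp a slice start index the way the code above does: a positive stride
// may start at axis_size (empty slice), a negative stride may reach the
// sentinel index -1.
int ClampSliceStart(int start, int stride, int axis_size) {
  return stride > 0 ? std::min(std::max(start, 0), axis_size)
                    : std::min(std::max(start, -1), axis_size - 1);
}

// Example: reversing a 5-element axis uses start = 4, sentinel stop = -1,
// stride = -1, visiting indices 4, 3, 2, 1, 0.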
@@ -24,24 +24,29 @@ limitations under the License.
|
||||
|
||||
namespace tflite {
|
||||
|
||||
enum class FusedActivationFunctionType : uint8 { kNone, kRelu6, kRelu1, kRelu };
|
||||
enum class PaddingType : uint8 { kNone, kSame, kValid };
|
||||
enum class FusedActivationFunctionType : uint8_t {
|
||||
kNone,
|
||||
kRelu6,
|
||||
kRelu1,
|
||||
kRelu
|
||||
};
|
||||
enum class PaddingType : uint8_t { kNone, kSame, kValid };
|
||||
|
||||
struct PaddingValues {
|
||||
int16 width;
|
||||
int16 height;
|
||||
int16_t width;
|
||||
int16_t height;
|
||||
// offset is used for calculating "remaining" padding, for example, `width`
|
||||
// is 1 and `width_offset` is 1, so padding_left is 1 while padding_right is
|
||||
// 1 + 1 = 2.
|
||||
int16 width_offset;
|
||||
int16_t width_offset;
|
||||
// Same as width_offset except it's over the height dimension.
|
||||
int16 height_offset;
|
||||
int16_t height_offset;
|
||||
};
|
||||
|
||||
// This enumeration allows for non-default formats for the weights array
|
||||
// of a fully-connected operator, allowing the use of special optimized
|
||||
// runtime paths.
|
||||
enum class FullyConnectedWeightsFormat : uint8 {
|
||||
enum class FullyConnectedWeightsFormat : uint8_t {
|
||||
// Default format (flat 2D layout, the inner contiguous dimension
|
||||
// is input_depth, the outer non-contiguous dimension is output_depth)
|
||||
kDefault,
|
||||
@@ -88,11 +93,11 @@ enum class FullyConnectedWeightsFormat : uint8 {
|
||||
// maximize arithmetic throughput.
|
||||
//
|
||||
// Finally, the 'Int8' part in the name refers to the fact that this
|
||||
// weights format has each weights value encoded as a signed int8 value,
|
||||
// even if the data type of the weights buffer is uint8. This is intended
|
||||
// weights format has each weights value encoded as a signed int8_t value,
|
||||
// even if the data type of the weights buffer is uint8_t. This is intended
|
||||
// to save runtime kernels the effort to have to XOR the top bit of these
|
||||
// bytes before using them in signed arithmetic, see this file for more
|
||||
// explanations on the 'signed int8 trick' in matrix multiplication kernels:
|
||||
// explanations on the 'signed int8_t trick' in matrix multiplication kernels:
|
||||
//
|
||||
// tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc
|
||||
//
|
||||
@@ -111,7 +116,7 @@ enum class FullyConnectedWeightsFormat : uint8 {
|
||||
// the real 0 value, and scale designates the difference between the real values
|
||||
// corresponding to consecutive quantized values differing by 1.
|
||||
struct QuantizationParams {
|
||||
int32 zero_point = 0;
|
||||
int32_t zero_point = 0;
|
||||
double scale = 0.0;
|
||||
};
|
||||
|
||||
@@ -140,20 +145,20 @@ class RuntimeShape {
|
||||
if (dimensions_count > kMaxSmallSize) {
|
||||
#ifdef TF_LITE_STATIC_MEMORY
|
||||
TFLITE_CHECK(false && "No shape resizing supported on this platform");
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
dims_pointer_ = new int32[dimensions_count];
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
dims_pointer_ = new int32_t[dimensions_count];
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
}
|
||||
}
|
||||
|
||||
RuntimeShape(int shape_size, int32 value) : size_(0) {
|
||||
RuntimeShape(int shape_size, int32_t value) : size_(0) {
|
||||
Resize(shape_size);
|
||||
for (int i = 0; i < shape_size; ++i) {
|
||||
SetDim(i, value);
|
||||
}
|
||||
}
|
||||
|
||||
RuntimeShape(int dimensions_count, const int32* dims_data) : size_(0) {
|
||||
RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) {
|
||||
ReplaceWith(dimensions_count, dims_data);
|
||||
}
|
||||
|
||||
@@ -165,33 +170,34 @@ class RuntimeShape {
|
||||
// rolls out.
|
||||
RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) {
|
||||
if (size_ > kMaxSmallSize) {
|
||||
dims_pointer_ = new int32[size_];
|
||||
dims_pointer_ = new int32_t[size_];
|
||||
}
|
||||
std::memcpy(DimsData(), other.DimsData(), sizeof(int32) * size_);
|
||||
std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_);
|
||||
}
|
||||
|
||||
bool operator==(const RuntimeShape& comp) const {
|
||||
return this->size_ == comp.size_ &&
|
||||
std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32)) == 0;
|
||||
std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) ==
|
||||
0;
|
||||
}
|
||||
|
||||
~RuntimeShape() {
|
||||
if (size_ > kMaxSmallSize) {
|
||||
#ifdef TF_LITE_STATIC_MEMORY
|
||||
TFLITE_CHECK(false && "No shape resizing supported on this platform");
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
delete[] dims_pointer_;
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
}
|
||||
}
|
||||
|
||||
inline int32 DimensionsCount() const { return size_; }
|
||||
inline int32 Dims(int i) const {
|
||||
inline int32_t DimensionsCount() const { return size_; }
|
||||
inline int32_t Dims(int i) const {
|
||||
TFLITE_DCHECK_GE(i, 0);
|
||||
TFLITE_DCHECK_LT(i, size_);
|
||||
return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i];
|
||||
}
|
||||
inline void SetDim(int i, int32 val) {
|
||||
inline void SetDim(int i, int32_t val) {
|
||||
TFLITE_DCHECK_GE(i, 0);
|
||||
TFLITE_DCHECK_LT(i, size_);
|
||||
if (size_ > kMaxSmallSize) {
|
||||
@@ -201,20 +207,20 @@ class RuntimeShape {
|
||||
}
|
||||
}
|
||||
|
||||
inline int32* DimsData() {
|
||||
inline int32_t* DimsData() {
|
||||
return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
|
||||
}
|
||||
inline const int32* DimsData() const {
|
||||
inline const int32_t* DimsData() const {
|
||||
return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
|
||||
}
|
||||
// The caller must ensure that the shape is no bigger than 5-D.
|
||||
inline const int32* DimsDataUpTo5D() const { return dims_; }
|
||||
inline const int32_t* DimsDataUpTo5D() const { return dims_; }
|
||||
|
||||
inline void Resize(int dimensions_count) {
|
||||
if (size_ > kMaxSmallSize) {
|
||||
#ifdef TF_LITE_STATIC_MEMORY
|
||||
TFLITE_CHECK(false && "No shape resizing supported on this platform");
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
delete[] dims_pointer_;
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
}
|
||||
@@ -222,16 +228,16 @@ class RuntimeShape {
|
||||
if (dimensions_count > kMaxSmallSize) {
|
||||
#ifdef TF_LITE_STATIC_MEMORY
|
||||
TFLITE_CHECK(false && "No shape resizing supported on this platform");
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
dims_pointer_ = new int32[dimensions_count];
|
||||
#else // TF_LITE_STATIC_MEMORY
|
||||
dims_pointer_ = new int32_t[dimensions_count];
|
||||
#endif // TF_LITE_STATIC_MEMORY
|
||||
}
|
||||
}
|
||||
|
||||
inline void ReplaceWith(int dimensions_count, const int32* dims_data) {
|
||||
inline void ReplaceWith(int dimensions_count, const int32_t* dims_data) {
|
||||
Resize(dimensions_count);
|
||||
int32* dst_dims = DimsData();
|
||||
std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32));
|
||||
int32_t* dst_dims = DimsData();
|
||||
std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
@@ -239,7 +245,7 @@ class RuntimeShape {
|
||||
const int dimensions_count =
|
||||
std::distance(src_iterable.begin(), src_iterable.end());
|
||||
Resize(dimensions_count);
|
||||
int32* data = DimsData();
|
||||
int32_t* data = DimsData();
|
||||
for (auto it : src_iterable) {
|
||||
*data = it;
|
||||
++data;
|
||||
@@ -288,13 +294,13 @@ class RuntimeShape {
|
||||
SetDim(i, pad_value);
|
||||
}
|
||||
std::memcpy(DimsData() + size_increase, shape.DimsData(),
|
||||
sizeof(int32) * shape.DimensionsCount());
|
||||
sizeof(int32_t) * shape.DimensionsCount());
|
||||
}
|
||||
|
||||
  int32 size_;
  int32_t size_;
  union {
    int32 dims_[kMaxSmallSize];
    int32* dims_pointer_;
    int32_t dims_[kMaxSmallSize];
    int32_t* dims_pointer_;
  };
};

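RuntimeShape keeps up to kMaxSmallSize dimensions inline in the union and only heap-allocates larger shapes, which is why Resize, ReplaceWith, and the destructor all branch on size_ > kMaxSmallSize. A stripped-down sketch of the same small-buffer layout (hypothetical class, copying omitted for brevity):

#include <cstdint>

class SmallShape {
 public:
  static constexpr int kMaxSmallSize = 5;

  explicit SmallShape(int dimensions_count) : size_(dimensions_count) {
    if (size_ > kMaxSmallSize) {
      dims_pointer_ = new int32_t[size_];  // heap fallback for big shapes
    }
  }
  ~SmallShape() {
    if (size_ > kMaxSmallSize) {
      delete[] dims_pointer_;
    }
  }
  SmallShape(const SmallShape&) = delete;
  SmallShape& operator=(const SmallShape&) = delete;

  int32_t* DimsData() {
    return size_ > kMaxSmallSize ? dims_pointer_ : dims_;
  }

 private:
  int32_t size_;
  union {
    int32_t dims_[kMaxSmallSize];  // inline storage, no allocation
    int32_t* dims_pointer_;        // used only when size_ > kMaxSmallSize
  };
};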
@@ -432,7 +438,7 @@ int MatchingArraySize(const ArrayType1& array1, int index1,
|
||||
inline int MatchingDim(const RuntimeShape& shape1, int index1,
|
||||
const RuntimeShape& shape2, int index2) {
|
||||
TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2));
|
||||
return shape1.Dims(index1);
|
||||
return std::min(shape1.Dims(index1), shape2.Dims(index2));
|
||||
}
|
||||
|
||||
template <typename... Args>
|
||||
@@ -713,7 +719,7 @@ void ComputeStrides(Dims<N>* dims) {
|
||||
}
|
||||
}
|
||||
|
||||
enum class BroadcastableOpCategory : uint8 {
|
||||
enum class BroadcastableOpCategory : uint8_t {
|
||||
kNone,
|
||||
kNonBroadcast, // Matching input shapes.
|
||||
kFirstInputBroadcastsFast, // Fivefold nested loops.
|
||||
@@ -729,21 +735,21 @@ static_assert(sizeof(MinMax) == 8, "");
|
||||
|
||||
struct ActivationParams {
|
||||
FusedActivationFunctionType activation_type;
|
||||
// uint8, etc, activation params.
|
||||
int32 quantized_activation_min;
|
||||
int32 quantized_activation_max;
|
||||
// uint8_t, etc, activation params.
|
||||
int32_t quantized_activation_min;
|
||||
int32_t quantized_activation_max;
|
||||
};
|
||||
|
||||
struct ReluParams : public ActivationParams {
|
||||
int32 input_offset;
|
||||
int32 output_offset;
|
||||
int32 output_multiplier;
|
||||
int32 output_shift;
|
||||
int32_t input_offset;
|
||||
int32_t output_offset;
|
||||
int32_t output_multiplier;
|
||||
int output_shift;
|
||||
};
|
||||
|
||||
// Styles of resizing op usages. For example, kImageStyle can be used with a Pad
|
||||
// op for pattern-specific optimization.
|
||||
enum class ResizingCategory : uint8 {
|
||||
enum class ResizingCategory : uint8_t {
|
||||
kNone,
|
||||
kImageStyle, // 4D, operating on inner dimensions, say {0, a, b, 0}.
|
||||
kGenericResize,
|
||||
@@ -753,24 +759,29 @@ enum class ResizingCategory : uint8 {
|
||||
struct ArithmeticParams {
|
||||
// Shape dependent / common to data / op types.
|
||||
BroadcastableOpCategory broadcast_category;
|
||||
// uint8 inference params.
|
||||
int32 input1_offset;
|
||||
int32 input2_offset;
|
||||
int32 output_offset;
|
||||
int32 output_multiplier;
|
||||
// uint8_t inference params.
|
||||
int32_t input1_offset;
|
||||
int32_t input2_offset;
|
||||
int32_t output_offset;
|
||||
int32_t output_multiplier;
|
||||
int output_shift;
|
||||
// Add / Sub, not Mul, uint8 inference params.
|
||||
// Add / Sub, not Mul, uint8_t inference params.
|
||||
int left_shift;
|
||||
int32 input1_multiplier;
|
||||
int32_t input1_multiplier;
|
||||
int input1_shift;
|
||||
int32 input2_multiplier;
|
||||
int32_t input2_multiplier;
|
||||
int input2_shift;
|
||||
// uint8, etc, activation params.
|
||||
int32 quantized_activation_min;
|
||||
int32 quantized_activation_max;
|
||||
|
||||
// TODO(b/158622529): Union the following activation params.
|
||||
// uint8_t, etc, activation params.
|
||||
int32_t quantized_activation_min;
|
||||
int32_t quantized_activation_max;
|
||||
// float activation params.
|
||||
float float_activation_min;
|
||||
float float_activation_max;
|
||||
// int64_t activation params.
|
||||
int64_t int64_activation_min;
|
||||
int64_t int64_activation_max;
|
||||
|
||||
// Processed output dimensions.
|
||||
// Let input "a" be the one that broadcasts in the faster-changing dimension.
|
||||
@@ -785,22 +796,22 @@ struct ArithmeticParams {
|
||||
};
|
||||
|
||||
struct ConcatenationParams {
|
||||
int8 axis;
|
||||
const int32* input_zeropoint;
|
||||
int8_t axis;
|
||||
const int32_t* input_zeropoint;
|
||||
const float* input_scale;
|
||||
uint16 inputs_count;
|
||||
int32 output_zeropoint;
|
||||
uint16_t inputs_count;
|
||||
int32_t output_zeropoint;
|
||||
float output_scale;
|
||||
};
|
||||
|
||||
struct ComparisonParams {
|
||||
// uint8 inference params.
|
||||
// uint8_t inference params.
|
||||
int left_shift;
|
||||
int32 input1_offset;
|
||||
int32 input1_multiplier;
|
||||
int32_t input1_offset;
|
||||
int32_t input1_multiplier;
|
||||
int input1_shift;
|
||||
int32 input2_offset;
|
||||
int32 input2_multiplier;
|
||||
int32_t input2_offset;
|
||||
int32_t input2_multiplier;
|
||||
int input2_shift;
|
||||
// Shape dependent / common to inference types.
|
||||
bool is_broadcast;
|
||||
@@ -810,81 +821,81 @@ struct ConvParams {
|
||||
PaddingType padding_type;
|
||||
PaddingValues padding_values;
|
||||
// TODO(starka): This was just "stride", so check that width+height is OK.
|
||||
int16 stride_width;
|
||||
int16 stride_height;
|
||||
int16 dilation_width_factor;
|
||||
int16 dilation_height_factor;
|
||||
// uint8 inference params.
|
||||
int16_t stride_width;
|
||||
int16_t stride_height;
|
||||
int16_t dilation_width_factor;
|
||||
int16_t dilation_height_factor;
|
||||
// uint8_t inference params.
|
||||
// TODO(b/65838351): Use smaller types if appropriate.
|
||||
int32 input_offset;
|
||||
int32 weights_offset;
|
||||
int32 output_offset;
|
||||
int32 output_multiplier;
|
||||
int32_t input_offset;
|
||||
int32_t weights_offset;
|
||||
int32_t output_offset;
|
||||
int32_t output_multiplier;
|
||||
int output_shift;
|
||||
// uint8, etc, activation params.
|
||||
int32 quantized_activation_min;
|
||||
int32 quantized_activation_max;
|
||||
// uint8_t, etc, activation params.
|
||||
int32_t quantized_activation_min;
|
||||
int32_t quantized_activation_max;
|
||||
// float activation params.
|
||||
float float_activation_min;
|
||||
float float_activation_max;
|
||||
};
|
||||
|
||||
struct DepthToSpaceParams {
|
||||
int32 block_size;
|
||||
int32_t block_size;
|
||||
};
|
||||
|
||||
struct DepthwiseParams {
|
||||
PaddingType padding_type;
|
||||
PaddingValues padding_values;
|
||||
int16 stride_width;
|
||||
int16 stride_height;
|
||||
int16 dilation_width_factor;
|
||||
int16 dilation_height_factor;
|
||||
int16 depth_multiplier;
|
||||
// uint8 inference params.
|
||||
int16_t stride_width;
|
||||
int16_t stride_height;
|
||||
int16_t dilation_width_factor;
|
||||
int16_t dilation_height_factor;
|
||||
int16_t depth_multiplier;
|
||||
// uint8_t inference params.
|
||||
// TODO(b/65838351): Use smaller types if appropriate.
|
||||
int32 input_offset;
|
||||
int32 weights_offset;
|
||||
int32 output_offset;
|
||||
int32 output_multiplier;
|
||||
int32_t input_offset;
|
||||
int32_t weights_offset;
|
||||
int32_t output_offset;
|
||||
int32_t output_multiplier;
|
||||
int output_shift;
|
||||
// uint8, etc, activation params.
|
||||
int32 quantized_activation_min;
|
||||
int32 quantized_activation_max;
|
||||
// uint8_t, etc, activation params.
|
||||
int32_t quantized_activation_min;
|
||||
int32_t quantized_activation_max;
|
||||
// float activation params.
|
||||
float float_activation_min;
|
||||
float float_activation_max;
|
||||
const int32* output_multiplier_per_channel;
|
||||
const int32* output_shift_per_channel;
|
||||
const int32_t* output_multiplier_per_channel;
|
||||
const int32_t* output_shift_per_channel;
|
||||
};
|
||||
|
||||
struct DequantizationParams {
|
||||
double scale;
|
||||
int32 zero_point;
|
||||
int32_t zero_point;
|
||||
};
|
||||
|
||||
struct PerChannelDequantizationParams {
|
||||
const float* scale;
|
||||
const int32* zero_point;
|
||||
int32 quantized_dimension;
|
||||
const int32_t* zero_point;
|
||||
int32_t quantized_dimension;
|
||||
};
|
||||
|
||||
struct FakeQuantParams {
|
||||
MinMax minmax;
|
||||
int32 num_bits;
|
||||
int32_t num_bits;
|
||||
};
|
||||
|
||||
struct FullyConnectedParams {
|
||||
// uint8 inference params.
|
||||
// uint8_t inference params.
|
||||
// TODO(b/65838351): Use smaller types if appropriate.
|
||||
int32 input_offset;
|
||||
int32 weights_offset;
|
||||
int32 output_offset;
|
||||
int32 output_multiplier;
|
||||
int32_t input_offset;
|
||||
int32_t weights_offset;
|
||||
int32_t output_offset;
|
||||
int32_t output_multiplier;
|
||||
int output_shift;
|
||||
// uint8, etc, activation params.
|
||||
int32 quantized_activation_min;
|
||||
int32 quantized_activation_max;
|
||||
// uint8_t, etc, activation params.
|
||||
int32_t quantized_activation_min;
|
||||
int32_t quantized_activation_max;
|
||||
// float activation params.
|
||||
float float_activation_min;
|
||||
float float_activation_max;
|
||||
@@ -895,16 +906,16 @@ struct FullyConnectedParams {
|
||||
};
|
||||
|
||||
struct GatherParams {
|
||||
int16 axis;
|
||||
int16_t axis;
|
||||
};
|
||||
|
||||
struct L2NormalizationParams {
|
||||
// uint8 inference params.
|
||||
int32 input_zero_point;
|
||||
// uint8_t inference params.
|
||||
int32_t input_zero_point;
|
||||
};
|
||||
|
||||
struct LocalResponseNormalizationParams {
|
||||
int32 range;
|
||||
int32_t range;
|
||||
double bias;
|
||||
double alpha;
|
||||
double beta;
|
||||
@@ -932,48 +943,50 @@ struct HardSwishParams {
|
||||
};
|
||||
|
||||
struct LogisticParams {
|
||||
// uint8 inference params.
|
||||
int32 input_zero_point;
|
||||
int32 input_range_radius;
|
||||
int32 input_multiplier;
|
||||
// uint8_t inference params.
|
||||
int32_t input_zero_point;
|
||||
int32_t input_range_radius;
|
||||
int32_t input_multiplier;
|
||||
int input_left_shift;
|
||||
};
|
||||
|
||||
struct LstmCellParams {
|
||||
int32 weights_zero_point;
|
||||
int32 accum_multiplier;
|
||||
int32_t weights_zero_point;
|
||||
int32_t accum_multiplier;
|
||||
int accum_shift;
|
||||
int state_integer_bits;
|
||||
};
|
||||
|
||||
struct MeanParams {
|
||||
int8 axis_count;
|
||||
int16 axis[4];
|
||||
int8_t axis_count;
|
||||
int16_t axis[4];
|
||||
};
|
||||
|
||||
struct PackParams {
|
||||
int8 axis;
|
||||
const int32* input_zeropoint;
|
||||
int8_t axis;
|
||||
const int32_t* input_zeropoint;
|
||||
const float* input_scale;
|
||||
uint16 inputs_count;
|
||||
int32 output_zeropoint;
|
||||
uint16_t inputs_count;
|
||||
int32_t output_zeropoint;
|
||||
float output_scale;
|
||||
};
|
||||
|
||||
struct PadParams {
|
||||
int8 left_padding_count;
|
||||
int32 left_padding[4];
|
||||
int8 right_padding_count;
|
||||
int32 right_padding[4];
|
||||
int8_t left_padding_count;
|
||||
int32_t left_padding[4];
|
||||
int8_t right_padding_count;
|
||||
int32_t right_padding[4];
|
||||
ResizingCategory resizing_category;
|
||||
};
|
||||
|
||||
struct PreluParams {
|
||||
int32 input_offset;
|
||||
int32 alpha_offset;
|
||||
int32 output_offset;
|
||||
int32 output_multiplier;
|
||||
int output_shift;
|
||||
int32_t input_offset;
|
||||
int32_t alpha_offset;
|
||||
int32_t output_offset;
|
||||
int32_t output_multiplier_1;
|
||||
int output_shift_1;
|
||||
int32_t output_multiplier_2;
|
||||
int output_shift_2;
|
||||
};
|
||||
|
||||
struct PoolParams {
|
||||
@@ -984,17 +997,17 @@ struct PoolParams {
|
||||
int stride_width;
|
||||
int filter_height;
|
||||
int filter_width;
|
||||
// uint8, etc, activation params.
|
||||
int32 quantized_activation_min;
|
||||
int32 quantized_activation_max;
|
||||
// uint8_t, etc, activation params.
|
||||
int32_t quantized_activation_min;
|
||||
int32_t quantized_activation_max;
|
||||
// float activation params.
|
||||
float float_activation_min;
|
||||
float float_activation_max;
|
||||
};
|
||||
|
||||
struct ReshapeParams {
|
||||
int8 shape_count;
|
||||
int32 shape[4];
|
||||
int8_t shape_count;
|
||||
int32_t shape[4];
|
||||
};
|
||||
|
||||
struct ResizeBilinearParams {
|
||||
@@ -1011,91 +1024,95 @@ struct ResizeNearestNeighborParams {
|
||||
};
|
||||
|
||||
struct SliceParams {
|
||||
int8 begin_count;
|
||||
int32 begin[4];
|
||||
int8 size_count;
|
||||
int32 size[4];
|
||||
int8_t begin_count;
|
||||
int32_t begin[4];
|
||||
int8_t size_count;
|
||||
int32_t size[4];
|
||||
};
|
||||
|
||||
struct SoftmaxParams {
|
||||
// beta is not really used (not a Tensorflow parameter) and not implemented
|
||||
// for LogSoftmax.
|
||||
double beta;
|
||||
// uint8 inference params. Used even when beta defaults to 1.0.
|
||||
int32 input_multiplier;
|
||||
int32 input_left_shift;
|
||||
// uint8_t inference params. Used even when beta defaults to 1.0.
|
||||
int32_t input_multiplier;
|
||||
int32_t input_left_shift;
|
||||
// Reverse scaling is only used by LogSoftmax.
|
||||
int32 reverse_scaling_divisor;
|
||||
int32 reverse_scaling_right_shift;
|
||||
int32_t reverse_scaling_divisor;
|
||||
int32_t reverse_scaling_right_shift;
|
||||
int diff_min;
|
||||
int32_t zero_point;
|
||||
float scale;
|
||||
float* table;
|
||||
// int16 LUT for exp(x), where x uniform distributed between [-10.0 , 0.0]
|
||||
int16_t* exp_lut;
|
||||
// int16 LUT for 1 / (1 + x), where x uniform distributed between [0.0 , 1.0]
|
||||
int16_t* one_over_one_plus_x_lut;
|
||||
uint8_t* uint8_table1;
|
||||
uint8_t* uint8_table2;
|
||||
};

struct SpaceToBatchParams {
  // "Zero" padding for uint8 means padding with the output offset.
  int32 output_offset;
  // "Zero" padding for uint8_t means padding with the output offset.
  int32_t output_offset;
};

struct SpaceToDepthParams {
  int32 block_size;
  int32_t block_size;
};

struct SplitParams {
  // Graphs that split into, say, 2000 nodes are encountered. The indices in
  // OperatorEdges are of type uint16.
  uint16 num_split;
  int16 axis;
  // OperatorEdges are of type uint16_t.
  uint16_t num_split;
  int16_t axis;
};

struct SqueezeParams {
  int8 squeeze_dims_count;
  int32 squeeze_dims[4];
  int8_t squeeze_dims_count;
  int32_t squeeze_dims[4];
};

struct StridedSliceParams {
  int8 start_indices_count;
  int32 start_indices[5];
  int8 stop_indices_count;
  int32 stop_indices[5];
  int8 strides_count;
  int32 strides[5];
  int8_t start_indices_count;
  int32_t start_indices[5];
  int8_t stop_indices_count;
  int32_t stop_indices[5];
  int8_t strides_count;
  int32_t strides[5];

  int16 begin_mask;
  int16 ellipsis_mask;
  int16 end_mask;
  int16 new_axis_mask;
  int16 shrink_axis_mask;
  int16_t begin_mask;
  int16_t ellipsis_mask;
  int16_t end_mask;
  int16_t new_axis_mask;
  int16_t shrink_axis_mask;
};

struct TanhParams {
  int32 input_zero_point;
  int32 input_range_radius;
  int32 input_multiplier;
  int32_t input_zero_point;
  int32_t input_range_radius;
  int32_t input_multiplier;
  int input_left_shift;
};

struct TransposeParams {
  int8 perm_count;
  int32 perm[5];
  int8_t perm_count;
  int32_t perm[5];
};

struct UnpackParams {
  uint16 num_split;
  int16 axis;
  uint16_t num_split;
  int16_t axis;
};

struct LeakyReluParams {
  float alpha;
  int32 input_offset;
  int32 output_offset;
  int32 output_multiplier_alpha;
  int32 output_shift_alpha;
  int32 output_multiplier_identity;
  int32 output_shift_identity;
  int32_t input_offset;
  int32_t output_offset;
  int32_t output_multiplier_alpha;
  int32_t output_shift_alpha;
  int32_t output_multiplier_identity;
  int32_t output_shift_identity;
};

template <typename P>
@@ -1105,13 +1122,19 @@ inline void SetActivationParams(float min, float max, P* params) {
}

template <typename P>
inline void SetActivationParams(int32 min, int32 max, P* params) {
inline void SetActivationParams(int32_t min, int32_t max, P* params) {
  params->quantized_activation_min = min;
  params->quantized_activation_max = max;
}

template <typename P>
inline void GetActivationParams(const P& params, int32* min, int32* max) {
inline void SetActivationParams(int64_t min, int64_t max, P* params) {
  params->int64_activation_min = min;
  params->int64_activation_max = max;
}

template <typename P>
inline void GetActivationParams(const P& params, int32_t* min, int32_t* max) {
  *min = params.quantized_activation_min;
  *max = params.quantized_activation_max;
}
@@ -1122,6 +1145,11 @@ inline void GetActivationParams(const P& params, float* min, float* max) {
  *max = params.float_activation_max;
}

template <typename P>
inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) {
  *min = params.int64_activation_min;
  *max = params.int64_activation_max;
}
} // namespace tflite

#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_
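For context on the templated Set/GetActivationParams helpers above, a minimal usage sketch (PoolParams and the specific range are chosen only for illustration; the int32_t overload is the one quantized kernels resolve to):

```cpp
#include <cstdint>

#include "tensorflow/lite/kernels/internal/types.h"

void ActivationParamsExample() {
  tflite::PoolParams op_params;
  // Full int8 range, i.e. no fused activation clamping beyond the type limits.
  tflite::SetActivationParams(int32_t{-128}, int32_t{127}, &op_params);

  int32_t act_min = 0;
  int32_t act_max = 0;
  tflite::GetActivationParams(op_params, &act_min, &act_max);
  // act_min == -128, act_max == 127 at this point.
}
```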

@@ -14,15 +14,176 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/kernel_util.h"

#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <cmath>
#include <complex>
#include <limits>
#include <memory>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"

namespace tflite {

namespace {

// Assumes tensor_index is a valid index (in bounds)
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
                                      int tensor_index) {
  if (context->tensors != nullptr) {
    return &context->tensors[tensor_index];
  } else {
    return context->GetTensor(context, tensor_index);
  }
}

// Validate in a single place to reduce binary size
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
                                               int index, int max_size,
                                               const int* tensor_indices,
                                               int* tensor_index) {
  if (index < 0 || index >= max_size) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid tensor index %d (not in [0, %d))\n", index,
                       max_size);
    return kTfLiteError;
  }
  if (tensor_indices[index] == kTfLiteOptionalTensor) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Tensor at index %d was optional but was expected\n",
                       index);
    return kTfLiteError;
  }

  *tensor_index = tensor_indices[index];
  return kTfLiteOk;
}

// Same as above but returns -1 for invalid inputs instead of status + logging
// error.
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
                                  int max_size, const int* tensor_indices) {
  if (index >= 0 && index < max_size) {
    const int tensor_index = tensor_indices[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      return tensor_index;
    }
  }
  return -1;
}

inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->inputs->size, node->inputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
                                        const TfLiteNode* node, int index,
                                        const TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
                                          node->inputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

} // anonymous namespace.

const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index) {
  return GetMutableInput(context, node, index);
}

TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor) {
  return GetMutableInputSafe(context, node, index, tensor);
}

TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  return tensor->is_variable ? tensor : nullptr;
}

TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->outputs->size, node->outputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
                           int index, TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
                                          node->outputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index) {
  return GetInput(context, node, index);
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->temporaries->size, node->temporaries->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->temporaries->size,
                                 node->temporaries->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}

const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->intermediates->size, node->intermediates->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}

TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->intermediates->size,
                                 node->intermediates->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
#endif // TF_LITE_STATIC_MEMORY
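A minimal sketch of how the new *Safe accessors above are meant to be used in a kernel's Prepare step; the single-input/single-output kernel and its resize logic are assumptions for illustration, not part of this diff:

```cpp
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

// Hypothetical kernel Prepare(): the *Safe getters report a bad or missing
// tensor index as a TfLiteStatus instead of returning a pointer that the
// caller must remember to null-check.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = nullptr;
  TF_LITE_ENSURE_OK(context,
                    tflite::GetInputSafe(context, node, /*index=*/0, &input));
  TfLiteTensor* output = nullptr;
  TF_LITE_ENSURE_OK(context,
                    tflite::GetOutputSafe(context, node, /*index=*/0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  // Illustrative: make the output the same shape as the input.
  return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
}
```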

// Per-axis
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
@@ -126,11 +287,27 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
  // pipeline.
  if (bias) {
    const double bias_scale = static_cast<double>(bias->params.scale);
    // Here we're making sure the input_product_scale & bias_scale the same.
    // Normally this should be guaranteed by the training pipeline, we are
    // setting the threshold to be 2e-6 to allow some numeric stability
    // difference.
    TF_LITE_ENSURE(context, std::abs(input_product_scale - bias_scale) <= 2e-6);
    // Here we're making sure the input_product_scale & bias_scale are about the
    // same. Since we have:
    // (output - output_zp) * output_scale =
    // input_product_scale * input_product + bias * bias_scale ---- (0)
    //
    // (0) equals:
    // (input_product + bias) * input_product_scale ----- (1)
    //            +
    // bias * (bias_scale - input_product_scale) ------ (2)
    //
    // For the real kernel computation, we're doing (1), so we really need to
    // make sure (2) has minimum impact on the output, so:
    // bias * (bias_scale - input_product_scale) / output_scale should be
    // a small number for an integer.
    // Since normally bias should be within a small range.
    // We should expect (bias_scale - input_product_scale) / output_scale to
    // be a small number like 0.02.
    const double scale_diff = std::abs(input_product_scale - bias_scale);
    const double output_scale = static_cast<double>(output->params.scale);

    TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
  }
  return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                          multiplier);
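A worked example of why the check above was relaxed, with invented numbers (not taken from this diff): the old absolute tolerance rejects a model whose bias scale drifted slightly, while the new relative check accepts it because the error stays well under one output quantization step.

```cpp
#include <cmath>

// Invented values: a conv layer whose bias scale drifted about 1.2% away from
// input_scale * filter_scale.
bool BiasScaleCheckExample() {
  const double input_product_scale = 0.25 * 0.02;  // input_scale * filter_scale = 0.005
  const double bias_scale = 0.00506;               // slightly off
  const double output_scale = 0.1;

  const double scale_diff = std::abs(input_product_scale - bias_scale);  // 6e-5
  const bool old_check = scale_diff <= 2e-6;                 // fails: 6e-5 > 2e-6
  const bool new_check = scale_diff / output_scale <= 0.02;  // passes: 6e-4 <= 0.02
  return !old_check && new_check;                            // true for these numbers
}
```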
@@ -167,7 +344,7 @@ void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation,
  } else if (activation == kTfLiteActRelu6) {
    *act_min = std::max(qmin, quantize(0.0));
    *act_max = std::min(qmax, quantize(6.0));
  } else if (activation == kTfLiteActRelu1) {
  } else if (activation == kTfLiteActReluN1To1) {
    *act_min = std::max(qmin, quantize(-1.0));
    *act_max = std::min(qmax, quantize(1.0));
  } else {
@@ -258,4 +435,44 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
}
#endif // TF_LITE_STATIC_MEMORY

// Size of string is not constant, return 0 in such case.
int TfLiteTypeGetSize(TfLiteType type) {
  switch (type) {
    case kTfLiteUInt8:
      TF_LITE_ASSERT_EQ(sizeof(uint8_t), 1);
      return 1;
    case kTfLiteInt8:
      TF_LITE_ASSERT_EQ(sizeof(int8_t), 1);
      return 1;
    case kTfLiteBool:
      return sizeof(bool);
    case kTfLiteInt16:
      TF_LITE_ASSERT_EQ(sizeof(int16_t), 2);
      return 2;
    case kTfLiteFloat16:
      TF_LITE_ASSERT_EQ(sizeof(int16_t), 2);
      return 2;
    case kTfLiteFloat32:
      TF_LITE_ASSERT_EQ(sizeof(float), 4);
      return 4;
    case kTfLiteInt32:
      TF_LITE_ASSERT_EQ(sizeof(int32_t), 4);
      return 4;
    case kTfLiteInt64:
      TF_LITE_ASSERT_EQ(sizeof(int64_t), 8);
      return 8;
    case kTfLiteFloat64:
      TF_LITE_ASSERT_EQ(sizeof(double), 8);
      return 8;
    case kTfLiteComplex64:
      TF_LITE_ASSERT_EQ(sizeof(std::complex<float>), 8);
      return 8;
    case kTfLiteComplex128:
      TF_LITE_ASSERT_EQ(sizeof(std::complex<double>), 16);
      return 16;
    default:
      return 0;
  }
}

} // namespace tflite
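A small usage sketch for the new TfLiteTypeGetSize() helper shown above; the TensorBytes wrapper is a hypothetical example, not part of the diff:

```cpp
#include <cstddef>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

// Hypothetical helper: payload size of a dense tensor from its element type
// and element count. TfLiteTypeGetSize() returns 0 for types without a fixed
// element size (e.g. strings), which is passed through as "unknown".
size_t TensorBytes(TfLiteType type, size_t num_elements) {
  const int element_size = tflite::TfLiteTypeGetSize(type);
  return element_size > 0 ? num_elements * static_cast<size_t>(element_size) : 0;
}
// TensorBytes(kTfLiteFloat32, 100) == 400; TensorBytes(kTfLiteString, 100) == 0.
```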
Some files were not shown because too many files have changed in this diff.