Mirror of https://github.com/jomjol/AI-on-the-edge-device.git (synced 2025-12-07 12:06:58 +03:00)

Compare commits (76 commits)
* b290099d5b
* f6b1a41a0b
* e529af04cf
* 6c365dd949
* 32f15fc557
* 6f06af1d5f
* a91f99faab
* 17a87b23a1
* d4b5ec2ae2
* 1bcaf09855
* fa3842b2b4
* ea72256e56
* be5828cb3e
* 104b72505c
* 23728a0686
* eaaa856b13
* 01e81d02b5
* 9ae8d0a512
* da16322fb8
* a6d39afc26
* 1b6a124f54
* 8aff6bf8f3
* 21115752aa
* 025c2b88b9
* 655f9d7c97
* 03b5e36114
* 9c78c1e9ca
* 136186a526
* 1f6b02a671
* 76a0518d52
* a688a69af6
* b890ffc5f3
* b98558107e
* 3e360ad0fb
* a7ced407f8
* 45a1e137d1
* a44e0d81cc
* 749fc6699c
* d7bb147a23
* 08b0b254f2
* 5414a4c3f1
* 7944ab329d
* 8ca14a434c
* e24ba68fec
* b205326782
* daa1960dff
* 894c7f6972
* 737dcc76b8
* b42f17916b
* 2c6ce6fd07
* f243f4b8ea
* 02e881ebc0
* 7b8f10a14e
* d995c31b7b
* 45154cb55c
* 48067b10cd
* f24c40d780
* f4edd36744
* a202a6abdc
* c25adfe28a
* 822c6cc45c
* c48b44d06a
* 21a59fbd35
* cdcf940d12
* 6cefc44fb6
* 8308f159ad
* e5ff8f2164
* a000252c8a
* 9a42c580cf
* 6e0a7a742e
* 026bac121f
* 8a26b817f7
* 528a4435a9
* 9b791bb7a7
* 58eb0b1292
* 39eda4a4be
```diff
@@ -11,13 +11,41 @@
 ____
 
-#### #6 Check for double ROI names
+#### #10 Improve and bug fix logging of images
 
-Check during configuration, that ROI names are unique.
+* https://github.com/jomjol/AI-on-the-edge-device/issues/307
+
+#### #9 Basic auth for the UI
+
+* https://github.com/jomjol/AI-on-the-edge-device/issues/283
+
+* Implementation of an authentication mechanism.
+
+#### #8 MQTT configurable readout interval
+
+Make the readout interval configurable via MQTT.
+
+* Change the MQTT part to receive and process input, not only to send
+
+#### #7 Extended Error Handling
+
+Check different types of error (e.g. tflite not available) and generate an error on the html page.
 
 To do:
 
-* Implementation of ROI name checking in html code before saving analog or digital ROIs
+* Make a list of "important" errors
+* Implement a checking algorithm
+* Extend the firmware and html page for the error handling
+
+#### ~~#6 Check for double ROI names~~ - implemented v8.0.0
+
+~~Check during configuration, that ROI names are unique.~~
+
+~~To do:~~
+
+* ~~Implementation of ROI name checking in html code before saving analog or digital ROIs~~
```
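Roadmap item #8 above implies a direction change for the MQTT integration: the client must consume messages, not just publish them. The `MQTTregisterSubscribeFunction()` hook that appears later in this diff (in server_GPIO.cpp) already supports that direction; below is a minimal sketch of a readout-interval topic built on it. The topic name and the `setReadoutInterval()` helper are hypothetical, not part of this diff:

```cpp
#include <cstdlib>
#include <string>
#include "interface_mqtt.h"

// Hypothetical setter on the flow controller (not part of this diff).
void setReadoutInterval(int minutes);

// Same callback signature that GpioPin::handleMQTT uses further down in this diff.
bool handleIntervalMessage(std::string topic, char* data, int data_len)
{
    std::string payload(data, data_len); // payload is a plain number of minutes, e.g. "5"
    int minutes = atoi(payload.c_str());
    if (minutes <= 0)
        return false;                    // reject malformed payloads
    setReadoutInterval(minutes);
    return true;
}

// Called once during setup; the topic name is illustrative.
void registerIntervalTopic()
{
    MQTTregisterSubscribeFunction("watermeter/ctrl/interval", handleIntervalMessage);
}
```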
```diff
@@ -33,31 +61,31 @@ To do:
 
-#### #4 Initial Shifting and Rotation
+#### ~~#4 Initial Shifting and Rotation~~ - implemented v7.0.0
 
-* https://github.com/jomjol/AI-on-the-edge-device/issues/123
+* ~~https://github.com/jomjol/AI-on-the-edge-device/issues/123~~
 
-Implementation of a shifting additional to the initial rotation of the raw camera input
+~~Implementation of a shifting additional to the initial rotation of the raw camera input~~
 
-To do:
+~~To do:~~
 
-* Implementation of shifting
-* Extension of configuration
-* Adaptation of the html configuration to implement shifting
+* ~~Implementation of shifting~~
+* ~~Extension of configuration~~
+* ~~Adaptation of the html configuration to implement shifting~~
 
-#### #3 Allow grouping of digits to multiple reading values
+#### ~~#3 Allow grouping of digits to multiple reading values~~ - implemented v8.0.0
 
-* https://github.com/jomjol/AI-on-the-edge-device/issues/123
+* ~~https://github.com/jomjol/AI-on-the-edge-device/issues/123~~
 
-Implementation of two different independent readouts in one setup
+~~Implementation of two different independent readouts in one setup~~
 
-To do:
+~~To do:~~
 
-* Extend the configuration, setting and processing flow for two independent readouts
+* ~~Extend the configuration, setting and processing flow for two independent readouts~~
 
 https://github.com/jomjol/AI-on-the-edge-device/issues/123
```
```diff
@@ -80,15 +108,16 @@ To do:
 
 ____
 
-#### #1 Optional GPIO for external flash/lighting
+#### ~~#1 Optional GPIO for external flash/lighting~~ - implemented (v8.0.0)
 
-* https://github.com/jomjol/AI-on-the-edge-device/issues/133
+* ~~https://github.com/jomjol/AI-on-the-edge-device/issues/133~~
 
-Implementation of an external flash / lighting through GPIOs.
-* available GPIOs: 12 & 13 (currently in use for html switching)
+~~Implementation of an external flash / lighting through GPIOs.~~
 
-To do:
+* ~~available GPIOs: 12 & 13 (currently in use for html switching)~~
 
-* Implementation of a software module for external light source (e.g. WS8132 LED controller, ...)
-* Update of the camera module to use the external light instead of the internal flash light
-* Adopt the configuration algorithm with a configurable light source
+~~To do:~~
+
+* ~~Implementation of a software module for external light source (e.g. WS8132 LED controller, ...)~~
+* ~~Update of the camera module to use the external light instead of the internal flash light~~
+* ~~Adopt the configuration algorithm with a configurable light source~~
```
README.md (37 lines changed)
```diff
@@ -12,6 +12,8 @@ respectively ESP32-Cam housing only: https://www.thingiverse.com/thing:4571627
 
 <img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/watermeter.jpg" width="600">
 
+<img src="https://raw.githubusercontent.com/jomjol/AI-on-the-edge-device/master/images/powermeter.jpg" width="600">
+
```
```diff
@@ -45,6 +47,41 @@ In other cases you can contact the developer via email: <img src="https://raw.gi
 
+##### 8.1.0 - Multi Meter Support (2021-08-12)
+
+* GPIO: using the general MQTT main topic for GPIO
+* Upgrade digital CNN to v12.0.0 (added new images)
+* Update tfmicro to new master (2021-08-07)
+* Bug fix: remove text in MQTT value, remove connect limit in WLAN reconnect
+
+##### 8.0.5 - Multi Meter Support (2021-08-01)
+
+* NEW 8.0.5: bug fix: saving prevalue
+* NEW 8.0.4: bug fix: load config.ini after upgrade
+* NEW 8.0.3: bug fix: reboot during `config.ini` handling, html error
+* NEW 8.0.2: saving rounded prevalue, bug fix html server
+* NEW 8.0.1: bug fix: html handling of parameters `FixedExposure` and `ImageSize`
+* Dual / multi meter support (more than one number to be recognized);
+  this is implemented with the feature "number" on the ROI definition as well as selected options
+* MQTT: standardization of the naming - including new topics (`json`, `freeMem`, `uptime`)
+* Preparation for extended GPIO support (thanks to Zwerk2k) - not tested and fully functional yet
+* Bug fixing: html server, memory leak, MQTT connect, hostname, turn off flash LED
+
+<span style="color: red;">**ATTENTION: the configuration and prevalue files are modified automatically and will not be backward compatible!**</span>
+
 ##### 7.1.2 MQTT-Update - (2021-06-17)
 
 * NEW: 7.1.2: bug fix setting hostname, Flash-LED not off during reboot
 * NEW: 7.1.1: bug fix wlan password with "=" (again)
 * MQTT error message: changes "no error", send retain flag
 * Update wlan handling to esp-idf 4.1
 * Upgrade digital CNN to v8.7.0 (added new images)
 * Bug fix: MQTT, WLAN, LED-Control, GPIO usage, fixed IP, calculation flow rate
 
 ##### 7.0.1 MQTT-Update - (2021-05-13)
 
 * NEW: 7.0.1: bug fix wlan password with "="
```
```diff
@@ -1,3 +1,3 @@
-copy "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\code\.pio\build\esp32cam\firmware.bin" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\firmware.bin"
-copy "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\code\.pio\build\esp32cam\bootloader.bin" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\bootloader.bin"
-copy "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\code\.pio\build\esp32cam\partitions.bin" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\partitions.bin"
+copy "..\..\code\.pio\build\esp32cam\firmware.bin" "..\..\firmware\firmware.bin"
+copy "..\..\code\.pio\build\esp32cam\bootloader.bin" "..\..\firmware\bootloader.bin"
+copy "..\..\code\.pio\build\esp32cam\partitions.bin" "..\..\firmware\partitions.bin"
```
```diff
@@ -1 +1 @@
-powershell Compress-Archive "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\sd-card\html\*.*" "C:\Users\Muell\Documents\Programmieren\GitHub\AI-on-the-edge-device\firmware\html.zip"
+powershell Compress-Archive "..\..\sd-card\html\*.*" "..\..\firmware\html.zip"
```
@@ -1,492 +0,0 @@

The removed connect_wlan.cpp:

```cpp
#include "connect_wlan.h"

#include <string.h>
#include "esp_wifi.h"
#include "esp_event_loop.h"
#include "freertos/event_groups.h"
#include "esp_log.h"

#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <iostream>

#include <arpa/inet.h>

#include "Helper.h"

static const char *MAIN_TAG = "connect_wlan";

std::string ssid = "";
std::string passphrase = "";
std::string hostname = "";
std::string ipaddress = "";
std::string gw = "";
std::string netmask = "";
std::string dns = "";
std::string std_hostname = "watermeter";

static EventGroupHandle_t wifi_event_group;

#define BLINK_GPIO GPIO_NUM_33

// Split a line into tokens; default delimiters are space, '=' and ','.
std::vector<string> ZerlegeZeile(std::string input, std::string _delimiter = "")
{
    std::vector<string> Output;
    std::string delimiter = " =,";
    if (_delimiter.length() > 0){
        delimiter = _delimiter;
    }

    input = trim(input, delimiter);
    size_t pos = findDelimiterPos(input, delimiter);
    std::string token;
    while (pos != std::string::npos) {
        token = input.substr(0, pos);
        token = trim(token, delimiter);
        Output.push_back(token);
        input.erase(0, pos + 1);
        input = trim(input, delimiter);
        pos = findDelimiterPos(input, delimiter);
    }
    Output.push_back(input);

    return Output;
}

void wifi_connect(){
    wifi_config_t cfg = { };
    strcpy((char*)cfg.sta.ssid, (const char*)ssid.c_str());
    strcpy((char*)cfg.sta.password, (const char*)passphrase.c_str());

    ESP_ERROR_CHECK( esp_wifi_disconnect() );
    ESP_ERROR_CHECK( esp_wifi_set_config(ESP_IF_WIFI_STA, &cfg) );
    ESP_ERROR_CHECK( esp_wifi_connect() );
}

// Blink the status LED _anzahl times, each phase lasting dauer milliseconds.
void blinkstatus(int dauer, int _anzahl)
{
    gpio_reset_pin(BLINK_GPIO);
    gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT);
    for (int i = 0; i < _anzahl; ++i)
    {
        gpio_set_level(BLINK_GPIO, 0);
        vTaskDelay(dauer / portTICK_PERIOD_MS);
        gpio_set_level(BLINK_GPIO, 1);
        vTaskDelay(dauer / portTICK_PERIOD_MS);
    }
}

static esp_err_t event_handler(void *ctx, system_event_t *event)
{
    switch(event->event_id) {
    case SYSTEM_EVENT_STA_START:
        blinkstatus(200, 1);
        wifi_connect();
        break;
    case SYSTEM_EVENT_STA_GOT_IP:
        xEventGroupSetBits(wifi_event_group, CONNECTED_BIT);
        blinkstatus(1000, 3);
        break;
    case SYSTEM_EVENT_STA_DISCONNECTED:
        blinkstatus(200, 5);
        esp_wifi_connect();
        xEventGroupClearBits(wifi_event_group, CONNECTED_BIT);
        break;
    default:
        break;
    }
    return ESP_OK;
}

void initialise_wifi()
{
    ESP_ERROR_CHECK(esp_event_loop_init(event_handler, NULL) );
    wifi_event_group = xEventGroupCreate();

    esp_log_level_set("wifi", ESP_LOG_NONE); // disable wifi driver logging
    tcpip_adapter_init();
    wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
    ESP_ERROR_CHECK( esp_wifi_init(&cfg) );
    ESP_ERROR_CHECK( esp_wifi_set_mode(WIFI_MODE_STA) );
    ESP_ERROR_CHECK( esp_wifi_start() );
    esp_err_t ret = tcpip_adapter_set_hostname(TCPIP_ADAPTER_IF_STA , hostname.c_str());
    if(ret != ESP_OK ){
        ESP_LOGE(MAIN_TAG,"failed to set hostname:%d",ret);
    }
    xEventGroupWaitBits(wifi_event_group,CONNECTED_BIT,true,true,portMAX_DELAY);
    tcpip_adapter_ip_info_t ip_info;
    ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info));
    ipaddress = std::string(ip4addr_ntoa(&ip_info.ip));
    netmask = std::string(ip4addr_ntoa(&ip_info.netmask));
    gw = std::string(ip4addr_ntoa(&ip_info.gw));
    printf("IPv4 : %s\n", ip4addr_ntoa(&ip_info.ip));
    printf("HostName : %s\n", hostname.c_str());
}

///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////

// Parse a dotted IPv4 string ("a.b.c.d") into its four octets.
void strinttoip4(std::string ip, int &a, int &b, int &c, int &d) {
    std::stringstream s(ip);
    char ch; // to temporarily store the '.'
    s >> a >> ch >> b >> ch >> c >> ch >> d;
}

void initialise_wifi_fixed_ip()
{
    wifi_event_group = xEventGroupCreate();

    ESP_ERROR_CHECK(esp_netif_init());
    ESP_ERROR_CHECK(esp_event_loop_create_default());

    esp_netif_t *my_sta = esp_netif_create_default_wifi_sta();

    esp_netif_dhcpc_stop(my_sta);

    esp_netif_ip_info_t ip_info;

    int a, b, c, d;

    strinttoip4(ipaddress, a, b, c, d);
    IP4_ADDR(&ip_info.ip, a, b, c, d);

    strinttoip4(gw, a, b, c, d);
    IP4_ADDR(&ip_info.gw, a, b, c, d);

    strinttoip4(netmask, a, b, c, d);
    IP4_ADDR(&ip_info.netmask, a, b, c, d);

    esp_netif_set_ip_info(my_sta, &ip_info);

    wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
    ESP_ERROR_CHECK(esp_wifi_init(&cfg));

    if (dns.length() > 0) {
        esp_netif_dns_info_t dns_info;
        ip4_addr_t ip;
        ip.addr = esp_ip4addr_aton(dns.c_str());
        ip_addr_set_ip4_u32(&dns_info.ip, ip.addr);
        ESP_ERROR_CHECK(esp_netif_set_dns_info(my_sta, ESP_NETIF_DNS_MAIN, &dns_info));
    }

    ESP_ERROR_CHECK(esp_event_loop_init(event_handler, NULL) );

    wifi_config_t wifi_config = { };
    strcpy((char*)wifi_config.sta.ssid, (const char*)ssid.c_str());
    strcpy((char*)wifi_config.sta.password, (const char*)passphrase.c_str());

    ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) );
    ESP_ERROR_CHECK(esp_wifi_set_config(ESP_IF_WIFI_STA, &wifi_config) );
    ESP_ERROR_CHECK(esp_wifi_start() );

    ESP_LOGI(MAIN_TAG, "wifi_init_sta finished.");

    EventBits_t bits = xEventGroupWaitBits(wifi_event_group,CONNECTED_BIT,true,true,portMAX_DELAY);

    if (bits & CONNECTED_BIT) {
        ESP_LOGI(MAIN_TAG, "connected to ap SSID:%s password:%s",
                 ssid.c_str(), passphrase.c_str());
    } else {
        ESP_LOGI(MAIN_TAG, "Failed to connect to SSID:%s, password:%s",
                 ssid.c_str(), passphrase.c_str());
    }
    tcpip_adapter_ip_info_t ip_info2;
    ESP_ERROR_CHECK(tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &ip_info2));
    ipaddress = std::string(ip4addr_ntoa(&ip_info2.ip));
    netmask = std::string(ip4addr_ntoa(&ip_info2.netmask));
    gw = std::string(ip4addr_ntoa(&ip_info2.gw));

    // vEventGroupDelete(wifi_event_group);
}

void ConnectToWLAN()
{
    if (ipaddress.length() == 0 || gw.length() == 0 || netmask.length() == 0)
    {
        printf("Connect to WLAN with dyn. IP\n");
        initialise_wifi();
    }
    else
    {
        printf("Connect to WLAN with fixed IP\n");
        initialise_wifi_fixed_ip();
    }
}

// Rewrite the "hostname" entry in the given ini file; returns true if the file was changed.
bool ChangeHostName(std::string fn, std::string _newhostname)
{
    if (_newhostname == hostname)
        return false;

    string line = "";
    std::vector<string> zerlegt;

    bool found = false;

    std::vector<string> neuesfile;

    FILE* pFile;
    fn = FormatFileName(fn);
    pFile = OpenFileAndWait(fn.c_str(), "r");

    printf("file loaded\n");

    if (pFile == NULL)
        return false;

    char zw[1024];
    fgets(zw, 1024, pFile);
    line = std::string(zw);

    while ((line.size() > 0) || !(feof(pFile)))
    {
        printf("%s", line.c_str());
        zerlegt = ZerlegeZeile(line, "=");
        zerlegt[0] = trim(zerlegt[0], " ");

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
            line = "hostname = \"" + _newhostname + "\"\n";
            found = true;
        }

        neuesfile.push_back(line);

        if (fgets(zw, 1024, pFile) == NULL)
        {
            line = "";
        }
        else
        {
            line = std::string(zw);
        }
    }

    if (!found)
    {
        line = "\nhostname = \"" + _newhostname + "\"\n";
        neuesfile.push_back(line);
    }

    fclose(pFile);

    pFile = OpenFileAndWait(fn.c_str(), "w+");

    for (int i = 0; i < neuesfile.size(); ++i)
    {
        printf(neuesfile[i].c_str());
        fputs(neuesfile[i].c_str(), pFile);
    }

    fclose(pFile);

    printf("*** Update hostname done ***\n");

    return true;
}

// Read SSID, password, hostname and optional static-IP settings from an ini file.
void LoadWlanFromFile(std::string fn)
{
    string line = "";
    std::vector<string> zerlegt;
    hostname = std_hostname;

    FILE* pFile;
    fn = FormatFileName(fn);

    pFile = OpenFileAndWait(fn.c_str(), "r");
    printf("file loaded\n");

    if (pFile == NULL)
        return;

    char zw[1024];
    fgets(zw, 1024, pFile);
    line = std::string(zw);

    while ((line.size() > 0) || !(feof(pFile)))
    {
        printf("%s", line.c_str());
        zerlegt = ZerlegeZeile(line, "=");
        zerlegt[0] = trim(zerlegt[0], " ");
        // Re-join the remaining tokens so that values containing '=' survive.
        for (int i = 2; i < zerlegt.size(); ++i)
            zerlegt[1] = zerlegt[1] + "=" + zerlegt[i];

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
            hostname = trim(zerlegt[1]);
            if ((hostname[0] == '"') && (hostname[hostname.length()-1] == '"')){
                hostname = hostname.substr(1, hostname.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){
            ssid = trim(zerlegt[1]);
            if ((ssid[0] == '"') && (ssid[ssid.length()-1] == '"')){
                ssid = ssid.substr(1, ssid.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "PASSWORD")){
            passphrase = zerlegt[1];
            if ((passphrase[0] == '"') && (passphrase[passphrase.length()-1] == '"')){
                passphrase = passphrase.substr(1, passphrase.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
            ipaddress = zerlegt[1];
            if ((ipaddress[0] == '"') && (ipaddress[ipaddress.length()-1] == '"')){
                ipaddress = ipaddress.substr(1, ipaddress.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
            gw = zerlegt[1];
            if ((gw[0] == '"') && (gw[gw.length()-1] == '"')){
                gw = gw.substr(1, gw.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
            netmask = zerlegt[1];
            if ((netmask[0] == '"') && (netmask[netmask.length()-1] == '"')){
                netmask = netmask.substr(1, netmask.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
            dns = zerlegt[1];
            if ((dns[0] == '"') && (dns[dns.length()-1] == '"')){
                dns = dns.substr(1, dns.length()-2);
            }
        }

        if (fgets(zw, 1024, pFile) == NULL)
        {
            line = "";
        }
        else
        {
            line = std::string(zw);
        }
    }

    fclose(pFile);

    // Check if hostname was empty in the .ini; if yes, fall back to std_hostname.
    if(hostname.length() <= 0){
        hostname = std_hostname;
    }

    printf("\nWLan: %s, %s\n", ssid.c_str(), passphrase.c_str());
    printf("Hostname: %s\n", hostname.c_str());
    printf("Fixed IP: %s, Gateway %s, Netmask %s, DNS %s\n", ipaddress.c_str(), gw.c_str(), netmask.c_str(), dns.c_str());
}

void LoadNetConfigFromFile(std::string fn, std::string &_ip, std::string &_gw, std::string &_netmask, std::string &_dns)
{
    string line = "";
    std::vector<string> zerlegt;

    FILE* pFile;
    fn = FormatFileName(fn);
    pFile = OpenFileAndWait(fn.c_str(), "r");

    printf("file loaded\n");

    if (pFile == NULL)
        return;

    char zw[1024];
    fgets(zw, 1024, pFile);
    line = std::string(zw);

    while ((line.size() > 0) || !(feof(pFile)))
    {
        printf("%s", line.c_str());
        zerlegt = ZerlegeZeile(line, "=");
        zerlegt[0] = trim(zerlegt[0], " ");

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
            _ip = zerlegt[1];
            if ((_ip[0] == '"') && (_ip[_ip.length()-1] == '"')){
                _ip = _ip.substr(1, _ip.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
            _gw = zerlegt[1];
            if ((_gw[0] == '"') && (_gw[_gw.length()-1] == '"')){
                _gw = _gw.substr(1, _gw.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
            _netmask = zerlegt[1];
            if ((_netmask[0] == '"') && (_netmask[_netmask.length()-1] == '"')){
                _netmask = _netmask.substr(1, _netmask.length()-2);
            }
        }

        if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
            _dns = zerlegt[1];
            if ((_dns[0] == '"') && (_dns[_dns.length()-1] == '"')){
                _dns = _dns.substr(1, _dns.length()-2);
            }
        }

        if (fgets(zw, 1024, pFile) == NULL)
        {
            line = "";
        }
        else
        {
            line = std::string(zw);
        }
    }

    fclose(pFile);
}

std::string getHostname(){
    return hostname;
}

std::string getIPAddress(){
    return ipaddress;
}

std::string getSSID(){
    return ssid;
}

std::string getNetMask(){
    return netmask;
}

std::string getGW(){
    return gw;
}
```
@@ -1,20 +0,0 @@

The removed connect_wlan.h:

```cpp
#ifndef CONNECT_WLAN_H
#define CONNECT_WLAN_H

#include <string>
#include "driver/gpio.h"

const int CONNECTED_BIT = BIT0;
void ConnectToWLAN();

void LoadWlanFromFile(std::string fn);

bool ChangeHostName(std::string fn, std::string _newhostname);

std::string getHostname();
std::string getIPAddress();
std::string getSSID();
std::string getNetMask();
std::string getGW();

#endif
```
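For reference, the wlan.ini shape that the removed `LoadWlanFromFile()` parses: keys are case-insensitive, values may be quoted, a '=' inside the password is re-joined by the parser, and IP/GATEWAY/NETMASK/DNS are only needed for a fixed-IP setup. All values below are placeholders:

```ini
hostname = "watermeter"
ssid = "MyAccessPoint"
password = "my=secret"
ip = "192.168.1.50"
gateway = "192.168.1.1"
netmask = "255.255.255.0"
dns = "192.168.1.1"
```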
```diff
@@ -29,7 +29,7 @@
 
 // ================================ CODE ======================================
 
-#include <esp_event_loop.h>
+#include <esp_event.h>
 #include <esp_log.h>
 #include <esp_system.h>
 #include <nvs_flash.h>
```
```diff
@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
 
 idf_component_register(SRCS ${app_sources}
                     INCLUDE_DIRS "."
-                    REQUIRES jomjol_helper)
+                    REQUIRES jomjol_logfile)
```
code/components/jomjol_configfile/configFile.cpp (new file, 105 lines)
@@ -0,0 +1,105 @@

```cpp
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

#include "Helper.h"
#include "configFile.h"

//static const char *TAGCONFIGFILE = "configFile";

ConfigFile::ConfigFile(std::string filePath)
{
    std::string config = FormatFileName(filePath);
    pFile = OpenFileAndWait(config.c_str(), "r");
}

ConfigFile::~ConfigFile()
{
    fclose(pFile);
}

// A new paragraph starts with '[' or with a commented-out paragraph ";[".
bool ConfigFile::isNewParagraph(std::string input)
{
    if ((input[0] == '[') || ((input[0] == ';') && (input[1] == '[')))
    {
        return true;
    }
    return false;
}

bool ConfigFile::GetNextParagraph(std::string& aktparamgraph, bool &disabled, bool &eof)
{
    while (getNextLine(&aktparamgraph, disabled, eof) && !isNewParagraph(aktparamgraph));

    if (isNewParagraph(aktparamgraph))
        return true;
    return false;
}

bool ConfigFile::getNextLine(std::string *rt, bool &disabled, bool &eof)
{
    eof = false;
    char zw[1024] = "";
    if (pFile == NULL)
    {
        *rt = "";
        return false;
    }

    if (fgets(zw, 1024, pFile))
    {
        printf("%s", zw);
        if ((strlen(zw) == 0) && feof(pFile))
        {
            *rt = "";
            eof = true;
            return false;
        }
    }
    else
    {
        *rt = "";
        eof = true;
        return false;
    }
    *rt = zw;
    *rt = trim(*rt);
    // Skip comment lines (';' or '#') and empty lines, unless it is a new commented-out paragraph.
    while ((zw[0] == ';' || zw[0] == '#' || (rt->size() == 0)) && !(zw[1] == '['))
    {
        fgets(zw, 1024, pFile);
        printf("%s", zw);
        if (feof(pFile))
        {
            *rt = "";
            eof = true;
            return false;
        }
        *rt = zw;
        *rt = trim(*rt);
    }

    disabled = ((*rt)[0] == ';');
    return true;
}

// Split a line into tokens separated by any character of `delimiter`.
std::vector<string> ConfigFile::ZerlegeZeile(std::string input, std::string delimiter)
{
    std::vector<string> Output;

    input = trim(input, delimiter);
    size_t pos = findDelimiterPos(input, delimiter);
    std::string token;
    while (pos != std::string::npos) {
        token = input.substr(0, pos);
        token = trim(token, delimiter);
        Output.push_back(token);
        input.erase(0, pos + 1);
        input = trim(input, delimiter);
        pos = findDelimiterPos(input, delimiter);
    }
    Output.push_back(input);

    return Output;
}
```
code/components/jomjol_configfile/configFile.h (new file, 16 lines)
@@ -0,0 +1,16 @@

```cpp
#include <string>
#include <vector>
#include <cstdio> // FILE* needs this include

class ConfigFile {
public:
    ConfigFile(std::string filePath);
    ~ConfigFile();

    bool isNewParagraph(std::string input);
    bool GetNextParagraph(std::string& aktparamgraph, bool &disabled, bool &eof);
    bool getNextLine(std::string* rt, bool &disabled, bool &eof);
    std::vector<std::string> ZerlegeZeile(std::string input, std::string delimiter = " =, \t");

private:
    FILE* pFile;
};
```
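A minimal usage sketch for the new ConfigFile class, assuming a config.ini with a `[GPIO]` section; the file path and section name are illustrative:

```cpp
#include <string>
#include <vector>
#include "configFile.h"

void readGpioSection()
{
    ConfigFile configFile("/sdcard/config.ini"); // illustrative path
    std::string line = "";
    bool disabled = false;
    bool eof = false;

    // Seek to the [GPIO] paragraph; a disabled section shows up as ";[GPIO]".
    while (configFile.GetNextParagraph(line, disabled, eof) && (line != "[GPIO]") && !eof);
    if (eof || disabled)
        return;

    // Read "key = value" lines until the next paragraph starts.
    while (configFile.getNextLine(&line, disabled, eof) && !configFile.isNewParagraph(line)) {
        std::vector<std::string> fields = configFile.ZerlegeZeile(line);
        // fields[0] is the key, fields[1..] are the value tokens.
    }
}
```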
```diff
@@ -3,7 +3,7 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)
 list(APPEND EXTRA_COMPONENT_DIRS $ENV{IDF_PATH}/examples/common_components/protocol_examples_common)
 
 idf_component_register(SRCS ${app_sources}
-                    INCLUDE_DIRS "."
-                    REQUIRES esp_http_server jomjol_logfile)
+                    INCLUDE_DIRS "." "../../include"
+                    REQUIRES esp_http_server jomjol_logfile jomjol_configfile jomjol_mqtt jomjol_flowcontroll)
```
```diff
@@ -1,4 +1,5 @@
 #include <string>
+#include <functional>
 
-#include "string.h"
+#include <string.h>
```
@@ -6,22 +7,395 @@

This hunk of server_GPIO.cpp drops the old free-standing `handler_switch_GPIO()` declaration and the commented-out `// #define DEBUG_DETAIL_ON`, and introduces the `GpioPin` / `GpioHandler` classes in their place. The new file section reads:

```cpp
#include "freertos/task.h"
#include "esp_system.h"
#include "esp_event.h"

#include "server_tflite.h"

//#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#include "esp_log.h"
#include "driver/gpio.h"
//#include "errno.h"

#include <sys/stat.h>
#include <vector>
//#include <regex>

#include "defines.h"
#include "server_GPIO.h"
#include "ClassLogFile.h"
#include "configFile.h"
#include "Helper.h"
#include "interface_mqtt.h"

#define DEBUG_DETAIL_ON

static const char *TAG_SERVERGPIO = "server_GPIO";
QueueHandle_t gpio_queue_handle = NULL;

GpioPin::GpioPin(gpio_num_t gpio, const char* name, gpio_pin_mode_t mode, gpio_int_type_t interruptType, uint8_t dutyResolution, std::string mqttTopic, bool httpEnable)
{
    _gpio = gpio;
    _name = name;
    _mode = mode;
    _interruptType = interruptType;
    _mqttTopic = mqttTopic;
}

GpioPin::~GpioPin()
{
    ESP_LOGD(TAG_SERVERGPIO,"reset GPIO pin %d", _gpio);
    if (_interruptType != GPIO_INTR_DISABLE) {
        // unhook isr handler for specific gpio pin
        gpio_isr_handler_remove(_gpio);
    }
    gpio_reset_pin(_gpio);
}

// Runs in interrupt context: push the pin state into the queue and yield if requested.
static void IRAM_ATTR gpio_isr_handler(void* arg)
{
    GpioResult gpioResult;
    gpioResult.gpio = *(gpio_num_t*) arg;
    gpioResult.value = gpio_get_level(gpioResult.gpio);
    BaseType_t ContextSwitchRequest = pdFALSE;

    xQueueSendToBackFromISR(gpio_queue_handle,(void*)&gpioResult,&ContextSwitchRequest);

    if(ContextSwitchRequest){
        taskYIELD();
    }
}

// Background task: drain the ISR queue and poll the non-interrupt pins once per second.
static void gpioHandlerTask(void *arg) {
    ESP_LOGD(TAG_SERVERGPIO,"start interrupt task");
    while(1){
        if(uxQueueMessagesWaiting(gpio_queue_handle)){
            while(uxQueueMessagesWaiting(gpio_queue_handle)){
                GpioResult gpioResult;
                xQueueReceive(gpio_queue_handle,(void*)&gpioResult,10);
                ESP_LOGD(TAG_SERVERGPIO,"gpio: %d state: %d", gpioResult.gpio, gpioResult.value);
                ((GpioHandler*)arg)->gpioInterrupt(&gpioResult);
            }
        }

        ((GpioHandler*)arg)->taskHandler();
        vTaskDelay(pdMS_TO_TICKS(1000));
    }
}

void GpioPin::gpioInterrupt(int value) {
    if (_mqttTopic != "") {
        ESP_LOGD(TAG_SERVERGPIO, "gpioInterrupt %s %d", _mqttTopic.c_str(), value);

        MQTTPublish(_mqttTopic, value ? "true" : "false");
        currentState = value;
    }
}

void GpioPin::init()
{
    gpio_config_t io_conf;
    // set interrupt
    io_conf.intr_type = _interruptType;
    // set as output mode
    io_conf.mode = (_mode == GPIO_PIN_MODE_OUTPUT) || (_mode == GPIO_PIN_MODE_BUILT_IN_FLASH_LED) ? gpio_mode_t::GPIO_MODE_OUTPUT : gpio_mode_t::GPIO_MODE_INPUT;
    // bit mask of the pins to set, e.g. GPIO18/19
    io_conf.pin_bit_mask = (1ULL << _gpio);
    // set pull-down mode
    io_conf.pull_down_en = _mode == GPIO_PIN_MODE_INPUT_PULLDOWN ? gpio_pulldown_t::GPIO_PULLDOWN_ENABLE : gpio_pulldown_t::GPIO_PULLDOWN_DISABLE;
    // set pull-up mode
    io_conf.pull_up_en = _mode == GPIO_PIN_MODE_INPUT_PULLDOWN ? gpio_pullup_t::GPIO_PULLUP_ENABLE : gpio_pullup_t::GPIO_PULLUP_DISABLE;
    // configure GPIO with the given settings
    gpio_config(&io_conf);

    if (_interruptType != GPIO_INTR_DISABLE) {
        // hook isr handler for specific gpio pin
        ESP_LOGD(TAG_SERVERGPIO, "GpioPin::init add isr handler for GPIO %d\r\n", _gpio);
        gpio_isr_handler_add(_gpio, gpio_isr_handler, (void*)&_gpio);
    }

    if ((_mqttTopic != "") && ((_mode == GPIO_PIN_MODE_OUTPUT) || (_mode == GPIO_PIN_MODE_OUTPUT_PWM) || (_mode == GPIO_PIN_MODE_BUILT_IN_FLASH_LED))) {
        std::function<bool(std::string, char*, int)> f = std::bind(&GpioPin::handleMQTT, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
        MQTTregisterSubscribeFunction(_mqttTopic, f);
    }
}

bool GpioPin::getValue(std::string* errorText)
{
    if ((_mode != GPIO_PIN_MODE_INPUT) && (_mode != GPIO_PIN_MODE_INPUT_PULLUP) && (_mode != GPIO_PIN_MODE_INPUT_PULLDOWN)) {
        (*errorText) = "GPIO is not in input mode";
    }

    return gpio_get_level(_gpio) == 1;
}

void GpioPin::setValue(bool value, gpio_set_source setSource, std::string* errorText)
{
    ESP_LOGD(TAG_SERVERGPIO, "GpioPin::setValue %d\r\n", value);

    if ((_mode != GPIO_PIN_MODE_OUTPUT) && (_mode != GPIO_PIN_MODE_OUTPUT_PWM) && (_mode != GPIO_PIN_MODE_BUILT_IN_FLASH_LED)) {
        (*errorText) = "GPIO is not in output mode";
    } else {
        gpio_set_level(_gpio, value);

        if ((_mqttTopic != "") && (setSource != GPIO_SET_SOURCE_MQTT)) {
            MQTTPublish(_mqttTopic, value ? "true" : "false");
        }
    }
}

void GpioPin::publishState() {
    int newState = gpio_get_level(_gpio);
    if (newState != currentState) {
        ESP_LOGD(TAG_SERVERGPIO,"publish state of GPIO %d new state %d", _gpio, newState);
        MQTTPublish(_mqttTopic, newState ? "true" : "false");
        currentState = newState;
    }
}

bool GpioPin::handleMQTT(std::string, char* data, int data_len) {
    ESP_LOGD(TAG_SERVERGPIO, "GpioPin::handleMQTT data %.*s\r\n", data_len, data);

    std::string dataStr(data, data_len);
    dataStr = toLower(dataStr);
    std::string errorText = "";
    if ((dataStr == "true") || (dataStr == "1")) {
        setValue(true, GPIO_SET_SOURCE_MQTT, &errorText);
    } else if ((dataStr == "false") || (dataStr == "0")) {
        setValue(false, GPIO_SET_SOURCE_MQTT, &errorText);
    } else {
        errorText = "wrong value ";
        errorText.append(data, data_len);
    }

    if (errorText != "") {
        ESP_LOGE(TAG_SERVERGPIO, "%s", errorText.c_str());
    }

    return (errorText == "");
}

esp_err_t callHandleHttpRequest(httpd_req_t *req)
{
    ESP_LOGD(TAG_SERVERGPIO,"callHandleHttpRequest");

    GpioHandler *gpioHandler = (GpioHandler*)req->user_ctx;
    return gpioHandler->handleHttpRequest(req);
}

void taskGpioHandler(void *pvParameter)
{
    ESP_LOGD(TAG_SERVERGPIO,"taskGpioHandler");
    ((GpioHandler*)pvParameter)->init();
}

GpioHandler::GpioHandler(std::string configFile, httpd_handle_t httpServer)
{
    ESP_LOGI(TAG_SERVERGPIO,"start GpioHandler");
    _configFile = configFile;
    _httpServer = httpServer;

    ESP_LOGI(TAG_SERVERGPIO, "register GPIO Uri");
    registerGpioUri();
}

GpioHandler::~GpioHandler() {
    if (gpioMap != NULL) {
        clear();
        delete gpioMap;
    }
}

void GpioHandler::init()
{
    // TickType_t xDelay = 60000 / portTICK_PERIOD_MS;
    // printf("wait before start %ldms\r\n", (long) xDelay);
    // vTaskDelay( xDelay );

    if (gpioMap == NULL) {
        gpioMap = new std::map<gpio_num_t, GpioPin*>();
    } else {
        clear();
    }

    ESP_LOGI(TAG_SERVERGPIO, "read GPIO config and init GPIO");
    if (!readConfig()) {
        clear();
        delete gpioMap;
        gpioMap = NULL;
        ESP_LOGI(TAG_SERVERGPIO, "GPIO init completed, handler is disabled");
        return;
    }

    for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
        it->second->init();
    }

    std::function<void()> f = std::bind(&GpioHandler::handleMQTTconnect, this);
    MQTTregisterConnectFunction("gpio-handler", f);

    if (xHandleTaskGpio == NULL) {
        gpio_queue_handle = xQueueCreate(10,sizeof(GpioResult));
        BaseType_t xReturned = xTaskCreate(&gpioHandlerTask, "gpio_int", configMINIMAL_STACK_SIZE * 8, (void *)this, tskIDLE_PRIORITY + 2, &xHandleTaskGpio);
        if(xReturned == pdPASS ) {
            ESP_LOGD(TAG_SERVERGPIO, "xHandletaskGpioHandler started");
        } else {
            ESP_LOGD(TAG_SERVERGPIO, "xHandletaskGpioHandler not started %d ", (int)xHandleTaskGpio);
        }
    }

    ESP_LOGI(TAG_SERVERGPIO, "GPIO init completed, is enabled");
}

void GpioHandler::taskHandler() {
    if (gpioMap != NULL) {
        for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
            if ((it->second->getInterruptType() == GPIO_INTR_DISABLE))
                it->second->publishState();
        }
    }
}

void GpioHandler::handleMQTTconnect()
{
    if (gpioMap != NULL) {
        for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
            if ((it->second->getMode() == GPIO_PIN_MODE_INPUT) || (it->second->getMode() == GPIO_PIN_MODE_INPUT_PULLDOWN) || (it->second->getMode() == GPIO_PIN_MODE_INPUT_PULLUP))
                it->second->publishState();
        }
    }
}

void GpioHandler::deinit() {
    MQTTunregisterConnectFunction("gpio-handler");
    clear();
    if (xHandleTaskGpio != NULL) {
        vTaskDelete(xHandleTaskGpio);
        xHandleTaskGpio = NULL;
    }
}

void GpioHandler::gpioInterrupt(GpioResult* gpioResult) {
    if ((gpioMap != NULL) && (gpioMap->find(gpioResult->gpio) != gpioMap->end())) {
        (*gpioMap)[gpioResult->gpio]->gpioInterrupt(gpioResult->value);
    }
}

bool GpioHandler::readConfig()
{
    if (!gpioMap->empty())
        clear();

    ConfigFile configFile = ConfigFile(_configFile);

    std::vector<std::string> zerlegt;
    std::string line = "";
    bool disabledLine = false;
    bool eof = false;

    while ((!configFile.GetNextParagraph(line, disabledLine, eof) || (line.compare("[GPIO]") != 0)) && !disabledLine && !eof) {}
    if (eof)
        return false;

    _isEnabled = !disabledLine;

    if (!_isEnabled)
        return false;

    // std::string mainTopicMQTT = "";
    std::string mainTopicMQTT = GetMQTTMainTopic();
    if (mainTopicMQTT.length() > 0)
    {
        mainTopicMQTT = mainTopicMQTT + "/GPIO";
        ESP_LOGD(TAG_SERVERGPIO, "MAINTOPICMQTT found\r\n");
    }

    bool registerISR = false;
    while (configFile.getNextLine(&line, disabledLine, eof) && !configFile.isNewParagraph(line))
    {
        zerlegt = configFile.ZerlegeZeile(line);
        // const std::regex pieces_regex("IO([0-9]{1,2})");
        // std::smatch pieces_match;
        // if (std::regex_match(zerlegt[0], pieces_match, pieces_regex) && (pieces_match.size() == 2))
        // {
        //     std::string gpioStr = pieces_match[1];
        ESP_LOGD(TAG_SERVERGPIO, "conf param %s\r\n", toUpper(zerlegt[0]).c_str());
        if (toUpper(zerlegt[0]) == "MAINTOPICMQTT") {
            // ESP_LOGD(TAG_SERVERGPIO, "MAINTOPICMQTT found\r\n");
            // mainTopicMQTT = zerlegt[1];
        } else if ((zerlegt[0].rfind("IO", 0) == 0) && (zerlegt.size() >= 6))
        {
            ESP_LOGI(TAG_SERVERGPIO,"Enable GP%s in %s mode", zerlegt[0].c_str(), zerlegt[1].c_str());
            std::string gpioStr = zerlegt[0].substr(2, 2);
            gpio_num_t gpioNr = (gpio_num_t)atoi(gpioStr.c_str());
            gpio_pin_mode_t pinMode = resolvePinMode(toLower(zerlegt[1]));
            gpio_int_type_t intType = resolveIntType(toLower(zerlegt[2]));
            uint16_t dutyResolution = (uint8_t)atoi(zerlegt[3].c_str());
            bool mqttEnabled = toLower(zerlegt[4]) == "true";
            bool httpEnabled = toLower(zerlegt[5]) == "true";
            char gpioName[100];
            if (zerlegt.size() >= 7) {
                strcpy(gpioName, trim(zerlegt[6]).c_str());
            } else {
                sprintf(gpioName, "GPIO%d", gpioNr);
            }
            std::string mqttTopic = mqttEnabled ? (mainTopicMQTT + "/" + gpioName) : "";
            GpioPin* gpioPin = new GpioPin(gpioNr, gpioName, pinMode, intType, dutyResolution, mqttTopic, httpEnabled);
            (*gpioMap)[gpioNr] = gpioPin;

            if (intType != GPIO_INTR_DISABLE) {
                registerISR = true;
            }
        }
    }

    if (registerISR) {
        // install gpio isr service
        gpio_install_isr_service(ESP_INTR_FLAG_LOWMED | ESP_INTR_FLAG_IRAM);
    }

    return true;
}

void GpioHandler::clear()
{
    ESP_LOGD(TAG_SERVERGPIO, "GpioHandler::clear\r\n");

    if (gpioMap != NULL) {
        for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it) {
            delete it->second;
        }
        gpioMap->clear();
    }

    // gpio_uninstall_isr_service(); can't uninstall, isr service is used by camera
}

void GpioHandler::registerGpioUri()
{
    ESP_LOGI(TAG_SERVERGPIO, "server_GPIO - Registering URI handlers");

    httpd_uri_t camuri = { };
    camuri.method = HTTP_GET;
    camuri.uri = "/GPIO";
    camuri.handler = callHandleHttpRequest;
    camuri.user_ctx = (void*)this;
    httpd_register_uri_handler(_httpServer, &camuri);
}

esp_err_t GpioHandler::handleHttpRequest(httpd_req_t *req)
{
    ESP_LOGD(TAG_SERVERGPIO, "handleHttpRequest");

    if (gpioMap == NULL) {
        std::string resp_str = "GPIO handler not initialized";
        httpd_resp_send(req, resp_str.c_str(), resp_str.length());
        return ESP_OK;
    }

#ifdef DEBUG_DETAIL_ON
    LogFile.WriteHeapInfo("handler_switch_GPIO - Start");
#endif
```
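`readConfig()` above expects one line per pin inside the `[GPIO]` paragraph of config.ini, with the fields `<mode> <interrupt> <duty-resolution> <mqtt> <http> [name]` drawn from `resolvePinMode()` and `resolveIntType()` in the next hunk. An illustrative paragraph; the pin roles and the names are made up:

```ini
[GPIO]
IO12 = output disabled 10 true true relay1
IO13 = input-pullup rising-edge 10 true false impulse
```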
```diff
@@ -30,95 +404,183 @@ esp_err_t handler_switch_GPIO(httpd_req_t *req)
     char _query[200];
     char _valueGPIO[30];
     char _valueStatus[30];
-    std::string gpio, status, zw;
-    int gpionum = 0;
-    gpio_num_t gpio_num;
+    std::string gpio, status;
 
-    if (httpd_req_get_url_query_str(req, _query, 200) == ESP_OK)
-    {
-        printf("Query: "); printf(_query); printf("\n");
+    if (httpd_req_get_url_query_str(req, _query, 200) == ESP_OK) {
+        ESP_LOGD(TAG_SERVERGPIO, "Query: %s", _query);
 
         if (httpd_query_key_value(_query, "GPIO", _valueGPIO, 30) == ESP_OK)
         {
-            printf("GPIO is found"); printf(_valueGPIO); printf("\n");
+            ESP_LOGD(TAG_SERVERGPIO, "GPIO is found %s", _valueGPIO);
             gpio = std::string(_valueGPIO);
+        } else {
+            std::string resp_str = "GPIO No is not defined";
+            httpd_resp_send(req, resp_str.c_str(), resp_str.length());
+            return ESP_OK;
         }
         if (httpd_query_key_value(_query, "Status", _valueStatus, 30) == ESP_OK)
         {
-            printf("Status is found"); printf(_valueStatus); printf("\n");
+            ESP_LOGD(TAG_SERVERGPIO, "Status is found %s", _valueStatus);
             status = std::string(_valueStatus);
         }
-    };
+    } else {
+        const char* resp_str = "Error in call. Use /GPIO?GPIO=12&Status=high";
+        httpd_resp_send(req, resp_str, strlen(resp_str));
+        return ESP_OK;
+    }
 
     status = toUpper(status);
-    if (!(status == "HIGH") && !(status == "LOW"))
+    if ((status != "HIGH") && (status != "LOW") && (status != "TRUE") && (status != "FALSE") && (status != "0") && (status != "1") && (status != ""))
     {
-        zw = "Status not valid: " + status;
+        std::string zw = "Status not valid: " + status;
         httpd_resp_sendstr_chunk(req, zw.c_str());
         httpd_resp_sendstr_chunk(req, NULL);
         return ESP_OK;
     }
 
-    gpionum = stoi(gpio);
+    int gpionum = stoi(gpio);
 
-    // free: 16; 12-15; 2; 4 // only 12 and 13 work; 2: reboot, 4: flash LED, 14/15: DMA for SD card ???
-    switch (gpionum) {
-        case 12:
-            gpio_num = GPIO_NUM_12;
-            break;
-        case 13:
-            gpio_num = GPIO_NUM_13;
-            break;
-        default:
-            zw = "GPIO" + std::to_string(gpionum) + " not supported - only 12 & 13 free";
+    // free: 16; 12-15; 2; 4 // only 12 and 13 work; 2: reboot, 4: flash LED, 15: PSRAM, 14/15: DMA for SD card ???
+    gpio_num_t gpio_num = resolvePinNr(gpionum);
+    if (gpio_num == GPIO_NUM_NC)
+    {
+        std::string zw = "GPIO" + std::to_string(gpionum) + " not supported - only 12 & 13 free";
         httpd_resp_sendstr_chunk(req, zw.c_str());
         httpd_resp_sendstr_chunk(req, NULL);
         return ESP_OK;
     }
 
-    if (status == "HIGH")
-        gpio_set_level(gpio_num, 1);
-    else
-        gpio_set_level(gpio_num, 0);
+    if (gpioMap->count(gpio_num) == 0) {
+        char resp_str [30];
+        sprintf(resp_str, "GPIO%d is not registered", gpio_num);
+        httpd_resp_send(req, resp_str, strlen(resp_str));
+        return ESP_OK;
+    }
+
+    if (status == "")
+    {
+        std::string resp_str = "";
+        status = (*gpioMap)[gpio_num]->getValue(&resp_str) ? "HIGH" : "LOW";
+        if (resp_str == "") {
+            resp_str = status;
+        }
+        httpd_resp_sendstr_chunk(req, resp_str.c_str());
+        httpd_resp_sendstr_chunk(req, NULL);
+    }
+    else
+    {
+        std::string resp_str = "";
+        (*gpioMap)[gpio_num]->setValue((status == "HIGH") || (status == "TRUE") || (status == "1"), GPIO_SET_SOURCE_HTTP, &resp_str);
+        if (resp_str == "") {
+            resp_str = "GPIO" + std::to_string(gpionum) + " switched to " + status;
+        }
+        httpd_resp_sendstr_chunk(req, resp_str.c_str());
+        httpd_resp_sendstr_chunk(req, NULL);
+    }
 
-    zw = "GPIO" + std::to_string(gpionum) + " switched to " + status;
-    httpd_resp_sendstr_chunk(req, zw.c_str());
-    httpd_resp_sendstr_chunk(req, NULL);
     return ESP_OK;
 };
 
-void initGPIO()
+void GpioHandler::flashLightEnable(bool value)
 {
-    gpio_config_t io_conf;
-    //disable interrupt
-    io_conf.intr_type = GPIO_INTR_DISABLE;
-    //set as output mode
-    io_conf.mode = GPIO_MODE_OUTPUT;
-    //bit mask of the pins to set, e.g. GPIO18/19
-    // io_conf.pin_bit_mask = ((1ULL<<GPIO_OUTPUT_IO_0) | (1ULL<<GPIO_OUTPUT_IO_1));
-    // io_conf.pin_bit_mask = ((1ULL << GPIO_NUM_12) | (1ULL << GPIO_NUM_2) | (1ULL << GPIO_NUM_4) | (1ULL << GPIO_NUM_12) | (1ULL << GPIO_NUM_13) | (1ULL << GPIO_NUM_14) | (1ULL << GPIO_NUM_15));
-    io_conf.pin_bit_mask = ((1ULL << GPIO_NUM_12) | (1ULL << GPIO_NUM_13));
-    //disable pull-down mode
-    io_conf.pull_down_en = (gpio_pulldown_t) 0;
-    //disable pull-up mode
-    io_conf.pull_up_en = (gpio_pullup_t) 0;
-    //configure GPIO with the given settings
-    gpio_config(&io_conf);
+    ESP_LOGD(TAG_SERVERGPIO, "GpioHandler::flashLightEnable %s\r\n", value ? "true" : "false");
+
+    if (gpioMap != NULL) {
+        for(std::map<gpio_num_t, GpioPin*>::iterator it = gpioMap->begin(); it != gpioMap->end(); ++it)
+        {
+            if (it->second->getMode() == GPIO_PIN_MODE_BUILT_IN_FLASH_LED) //|| (it->second->getMode() == GPIO_PIN_MODE_EXTERNAL_FLASH_PWM) || (it->second->getMode() == GPIO_PIN_MODE_EXTERNAL_FLASH_WS281X))
+            {
+                std::string resp_str = "";
+                it->second->setValue(value, GPIO_SET_SOURCE_INTERNAL, &resp_str);
+
+                if (resp_str == "") {
+                    ESP_LOGD(TAG_SERVERGPIO, "Flash light pin GPIO %d switched to %s\r\n", (int)it->first, (value ? "on" : "off"));
+                } else {
+                    ESP_LOGE(TAG_SERVERGPIO, "Can't set flash light pin GPIO %d. Error: %s\r\n", (int)it->first, resp_str.c_str());
+                }
+            }
+        }
+    }
 }
 
-void register_server_GPIO_uri(httpd_handle_t server)
+gpio_num_t GpioHandler::resolvePinNr(uint8_t pinNr)
 {
-    ESP_LOGI(TAGPARTGPIO, "server_GPIO - Registering URI handlers");
-
-    httpd_uri_t camuri = { };
-    camuri.method = HTTP_GET;
-    camuri.uri = "/GPIO";
-    camuri.handler = handler_switch_GPIO;
-    camuri.user_ctx = (void*) "switch GPIO";
-    httpd_register_uri_handler(server, &camuri);
-
-    initGPIO();
+    switch(pinNr) {
+        case 0:
+            return GPIO_NUM_0;
+        case 1:
+            return GPIO_NUM_1;
+        case 3:
+            return GPIO_NUM_3;
+        case 4:
+            return GPIO_NUM_4;
+        case 12:
+            return GPIO_NUM_12;
+        case 13:
+            return GPIO_NUM_13;
+        default:
+            return GPIO_NUM_NC;
+    }
 }
+
+gpio_pin_mode_t GpioHandler::resolvePinMode(std::string input)
+{
+    if( input == "disabled" ) return GPIO_PIN_MODE_DISABLED;
+    if( input == "input" ) return GPIO_PIN_MODE_INPUT;
+    if( input == "input-pullup" ) return GPIO_PIN_MODE_INPUT_PULLUP;
+    if( input == "input-pulldown" ) return GPIO_PIN_MODE_INPUT_PULLDOWN;
+    if( input == "output" ) return GPIO_PIN_MODE_OUTPUT;
+    if( input == "built-in-led" ) return GPIO_PIN_MODE_BUILT_IN_FLASH_LED;
+    if( input == "output-pwm" ) return GPIO_PIN_MODE_OUTPUT_PWM;
+    if( input == "external-flash-pwm" ) return GPIO_PIN_MODE_EXTERNAL_FLASH_PWM;
+    if( input == "external-flash-ws281x" ) return GPIO_PIN_MODE_EXTERNAL_FLASH_WS281X;
+
+    return GPIO_PIN_MODE_DISABLED;
+}
+
+gpio_int_type_t GpioHandler::resolveIntType(std::string input)
+{
+    if( input == "disabled" ) return GPIO_INTR_DISABLE;
+    if( input == "rising-edge" ) return GPIO_INTR_POSEDGE;
+    if( input == "falling-edge" ) return GPIO_INTR_NEGEDGE;
+    if( input == "rising-and-falling" ) return GPIO_INTR_ANYEDGE;
+    if( input == "low-level-trigger" ) return GPIO_INTR_LOW_LEVEL;
+    if( input == "high-level-trigger" ) return GPIO_INTR_HIGH_LEVEL;
+
+    return GPIO_INTR_DISABLE;
+}
+
+static GpioHandler *gpioHandler = NULL;
+
+void gpio_handler_create(httpd_handle_t server)
+{
+    if (gpioHandler == NULL)
+        gpioHandler = new GpioHandler(CONFIG_FILE, server);
+}
+
+void gpio_handler_init()
+{
+    if (gpioHandler != NULL) {
+        gpioHandler->init();
+    }
+}
+
+void gpio_handler_deinit() {
+    if (gpioHandler != NULL) {
+        gpioHandler->deinit();
+    }
+}
+
+void gpio_handler_destroy()
+{
+    if (gpioHandler != NULL) {
+        delete gpioHandler;
+        gpioHandler = NULL;
+    }
+}
+
+GpioHandler* gpio_handler_get()
+{
+    return gpioHandler;
+}
```
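The rewritten handler accepts HIGH/LOW, TRUE/FALSE and 1/0, and an empty `Status` reads the pin level back. Illustrative calls against the example pins configured above; the hostname, broker and pin name depend on the individual setup:

```sh
# switch the configured output pin via HTTP
curl "http://watermeter/GPIO?GPIO=12&Status=high"
# read the current level back
curl "http://watermeter/GPIO?GPIO=12&Status="
# drive the same pin via MQTT: <main topic>/GPIO/<configured pin name>
mosquitto_pub -h broker.local -t "watermeter/GPIO/relay1" -m "true"
```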
```diff
@@ -1,10 +1,99 @@
 #ifndef SERVER_GPIO_H
 #define SERVER_GPIO_H
 
 #include <esp_log.h>
 
 #include <esp_http_server.h>
+#include <map>
 #include "driver/gpio.h"
 
 //#include "ClassControllCamera.h"
 
-static const char *TAGPARTGPIO = "server_GPIO";
+typedef enum {
+    GPIO_PIN_MODE_DISABLED = 0x0,
+    GPIO_PIN_MODE_INPUT = 0x1,
+    GPIO_PIN_MODE_INPUT_PULLUP = 0x2,
+    GPIO_PIN_MODE_INPUT_PULLDOWN = 0x3,
+    GPIO_PIN_MODE_OUTPUT = 0x4,
+    GPIO_PIN_MODE_BUILT_IN_FLASH_LED = 0x5,
+    GPIO_PIN_MODE_OUTPUT_PWM = 0x6,
+    GPIO_PIN_MODE_EXTERNAL_FLASH_PWM = 0x7,
+    GPIO_PIN_MODE_EXTERNAL_FLASH_WS281X = 0x8,
+} gpio_pin_mode_t;
 
-void register_server_GPIO_uri(httpd_handle_t server);
+struct GpioResult {
+    gpio_num_t gpio;
+    int value;
+};
+
+typedef enum {
+    GPIO_SET_SOURCE_INTERNAL = 0,
+    GPIO_SET_SOURCE_MQTT = 1,
+    GPIO_SET_SOURCE_HTTP = 2,
+} gpio_set_source;
+
+class GpioPin {
+public:
+    GpioPin(gpio_num_t gpio, const char* name, gpio_pin_mode_t mode, gpio_int_type_t interruptType, uint8_t dutyResolution, std::string mqttTopic, bool httpEnable);
+    ~GpioPin();
+
+    void init();
+    bool getValue(std::string* errorText);
+    void setValue(bool value, gpio_set_source setSource, std::string* errorText);
+    bool handleMQTT(std::string, char* data, int data_len);
+    void publishState();
+    void gpioInterrupt(int value);
+    gpio_int_type_t getInterruptType() { return _interruptType; }
+    gpio_pin_mode_t getMode() { return _mode; }
+
+private:
+    gpio_num_t _gpio;
+    const char* _name;
+    gpio_pin_mode_t _mode;
+    gpio_int_type_t _interruptType;
+    std::string _mqttTopic;
+    int currentState = -1;
+};
+
+esp_err_t callHandleHttpRequest(httpd_req_t *req);
+void taskGpioHandler(void *pvParameter);
+
+class GpioHandler {
+public:
+    GpioHandler(std::string configFile, httpd_handle_t httpServer);
+    ~GpioHandler();
+
+    void init();
+    void deinit();
+    void registerGpioUri();
+    esp_err_t handleHttpRequest(httpd_req_t *req);
+    void taskHandler();
+    void gpioInterrupt(GpioResult* gpioResult);
+    void flashLightEnable(bool value);
+    bool isEnabled() { return _isEnabled; }
+    void handleMQTTconnect();
+
+private:
+    std::string _configFile;
+    httpd_handle_t _httpServer;
+    std::map<gpio_num_t, GpioPin*> *gpioMap = NULL;
+    TaskHandle_t xHandleTaskGpio = NULL;
+    bool _isEnabled = false;
+
+    bool readConfig();
+    void clear();
+
+    gpio_num_t resolvePinNr(uint8_t pinNr);
+    gpio_pin_mode_t resolvePinMode(std::string input);
+    gpio_int_type_t resolveIntType(std::string input);
+};
+
+void gpio_handler_create(httpd_handle_t server);
+void gpio_handler_init();
+void gpio_handler_deinit();
+void gpio_handler_destroy();
+GpioHandler* gpio_handler_get();
 
 #endif //SERVER_GPIO_H
```
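Judging from the declarations above, the intended lifecycle wiring looks roughly like this; the surrounding server setup is abbreviated and illustrative:

```cpp
#include <esp_http_server.h>
#include "server_GPIO.h"

void setupGpioHandling(httpd_handle_t server)
{
    gpio_handler_create(server); // constructs the handler and registers the /GPIO URI
    gpio_handler_init();         // reads the [GPIO] config, sets up pins, ISRs and MQTT hooks

    // Before tearing the web server down (e.g. for an OTA update):
    // gpio_handler_deinit();
    // gpio_handler_destroy();
}
```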
```diff
@@ -4,6 +4,6 @@ list(APPEND EXTRA_COMPONENT_DIRS $ENV{IDF_PATH}/examples/common_components/proto
 
 idf_component_register(SRCS ${app_sources}
                     INCLUDE_DIRS "."
-                    REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc nvs_flash)
+                    REQUIRES esp32-camera-master esp_http_server jomjol_logfile jomjol_image_proc nvs_flash jomjol_fileserver_ota jomjol_controlGPIO)
```
```diff
@@ -9,11 +9,14 @@
 #include "Helper.h"
 #include "CImageBasis.h"
 
+#include "server_ota.h"
+#include "server_GPIO.h"
+
 #define BOARD_ESP32CAM_AITHINKER
 
-#include <esp_event_loop.h>
+#include <esp_event.h>
 #include <esp_log.h>
 #include <esp_system.h>
 #include <nvs_flash.h>
```
```diff
@@ -48,7 +51,7 @@
 #define CAM_PIN_HREF 23
 #define CAM_PIN_PCLK 22
 
-static const char *TAG = "example:take_picture";
+static const char *TAGCAMERACLASS = "server_part_camera";
 
 static camera_config_t camera_config = {
     .pin_pwdn = CAM_PIN_PWDN,
```
```diff
@@ -220,13 +223,17 @@ void CCamera::SetQualitySize(int qual, framesize_t resol)
 void CCamera::EnableAutoExposure(int flashdauer)
 {
     LEDOnOff(true);
-    LightOnOff(true);
+    if (flashdauer > 0)
+        LightOnOff(true);
+
     const TickType_t xDelay = flashdauer / portTICK_PERIOD_MS;
     vTaskDelay( xDelay );
 
     camera_fb_t * fb = esp_camera_fb_get();
     if (!fb) {
         ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
+        LEDOnOff(false);
+        LightOnOff(false);
         doReboot();
     }
     esp_camera_fb_return(fb);
```
@@ -269,8 +276,11 @@ esp_err_t CCamera::CaptureToBasisImage(CImageBasis *_Image, int delay)
|
||||
|
||||
camera_fb_t * fb = esp_camera_fb_get();
|
||||
if (!fb) {
|
||||
ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
|
||||
ESP_LOGE(TAGCAMERACLASS, "CaptureToBasisImage: Camera Capture Failed");
|
||||
LEDOnOff(false);
|
||||
LightOnOff(false);
|
||||
doReboot();
|
||||
|
||||
return ESP_FAIL;
|
||||
}
|
||||
|
||||
@@ -353,8 +363,11 @@ esp_err_t CCamera::CaptureToFile(std::string nm, int delay)
|
||||
|
||||
camera_fb_t * fb = esp_camera_fb_get();
|
||||
if (!fb) {
|
||||
ESP_LOGE(TAGCAMERACLASS, "Camera Capture Failed");
|
||||
ESP_LOGE(TAGCAMERACLASS, "CaptureToFile: Camera Capture Failed");
|
||||
LEDOnOff(false);
|
||||
LightOnOff(false);
|
||||
doReboot();
|
||||
|
||||
return ESP_FAIL;
|
||||
}
|
||||
LEDOnOff(false);
|
||||
@@ -443,7 +456,11 @@ esp_err_t CCamera::CaptureToHTTP(httpd_req_t *req, int delay)
|
||||
fb = esp_camera_fb_get();
|
||||
if (!fb) {
|
||||
ESP_LOGE(TAGCAMERACLASS, "Camera capture failed");
|
||||
LEDOnOff(false);
|
||||
LightOnOff(false);
|
||||
httpd_resp_send_500(req);
|
||||
// doReboot();
|
||||
|
||||
return ESP_FAIL;
|
||||
}
|
||||
|
||||
@@ -480,15 +497,20 @@ esp_err_t CCamera::CaptureToHTTP(httpd_req_t *req, int delay)
|
||||
|
||||
void CCamera::LightOnOff(bool status)
|
||||
{
|
||||
// Init the GPIO
|
||||
gpio_pad_select_gpio(FLASH_GPIO);
|
||||
/* Set the GPIO as a push/pull output */
|
||||
gpio_set_direction(FLASH_GPIO, GPIO_MODE_OUTPUT);
|
||||
GpioHandler* gpioHandler = gpio_handler_get();
|
||||
if ((gpioHandler != NULL) && (gpioHandler->isEnabled())) {
|
||||
gpioHandler->flashLightEnable(status);
|
||||
} else {
|
||||
// Init the GPIO
|
||||
gpio_pad_select_gpio(FLASH_GPIO);
|
||||
/* Set the GPIO as a push/pull output */
|
||||
gpio_set_direction(FLASH_GPIO, GPIO_MODE_OUTPUT);
|
||||
|
||||
if (status)
|
||||
gpio_set_level(FLASH_GPIO, 1);
|
||||
else
|
||||
gpio_set_level(FLASH_GPIO, 0);
|
||||
if (status)
|
||||
gpio_set_level(FLASH_GPIO, 1);
|
||||
else
|
||||
gpio_set_level(FLASH_GPIO, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void CCamera::LEDOnOff(bool status)
|
||||
|
||||
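// Illustrative sketch, not from the commits above: the LightOnOff() change
// uses a delegate-or-fallback pattern. If a GpioHandler is configured it owns
// the flash pin; otherwise the camera class drives FLASH_GPIO directly.
// Reduced to its core:
void set_flash(bool status)   // hypothetical helper, same logic as LightOnOff()
{
    GpioHandler* handler = gpio_handler_get();
    if (handler != NULL && handler->isEnabled()) {
        handler->flashLightEnable(status);           // handler owns the pin
    } else {
        gpio_pad_select_gpio(FLASH_GPIO);            // fall back to plain GPIO
        gpio_set_direction(FLASH_GPIO, GPIO_MODE_OUTPUT);
        gpio_set_level(FLASH_GPIO, status ? 1 : 0);
    }
}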
@@ -16,9 +16,6 @@
#define CAMERA_MODEL_AI_THINKER


static const char *TAGCAMERACLASS = "server_part_camera";


class CCamera {
protected:
    int ActualQuality;
@@ -12,14 +12,17 @@
char scratch2[SCRATCH_BUFSIZE2];

//#define DEBUG_DETAIL_ON

static const char *TAGPARTCAMERA = "server_camera";


void PowerResetCamera(){
    ESP_LOGD(TAGPARTCAMERA, "Resetting camera by power down line");
    gpio_config_t conf = { 0 };
    gpio_config_t conf;
    conf.intr_type = GPIO_INTR_DISABLE;
    conf.pin_bit_mask = 1LL << GPIO_NUM_32;
    conf.mode = GPIO_MODE_OUTPUT;
    conf.pull_down_en = GPIO_PULLDOWN_DISABLE;
    conf.pull_up_en = GPIO_PULLUP_DISABLE;
    gpio_config(&conf);

    // careful, logic is inverted compared to reset pin
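// Illustrative note, not from the commits above: the hunk swaps the
// zero-initialized `gpio_config_t conf = { 0 };` for a plain declaration and
// then assigns every field by hand. That is safe only because all five
// members of gpio_config_t are set before gpio_config(&conf) is called; if a
// field were ever left out, the struct would hold garbage. A defensive sketch
// that keeps the zero-init as a belt-and-braces default:
#include "driver/gpio.h"

gpio_config_t make_powerdown_conf(void)   // hypothetical helper
{
    gpio_config_t conf = {};                 // value-initialize all fields
    conf.intr_type = GPIO_INTR_DISABLE;
    conf.pin_bit_mask = 1LL << GPIO_NUM_32;  // the power-down line per the log message above
    conf.mode = GPIO_MODE_OUTPUT;
    conf.pull_down_en = GPIO_PULLDOWN_DISABLE;
    conf.pull_up_en = GPIO_PULLUP_DISABLE;
    return conf;
}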
@@ -7,8 +7,6 @@

//#include "ClassControllCamera.h"

static const char *TAGPARTCAMERA = "server_camera";

void register_server_camera_uri(httpd_handle_t server);

void PowerResetCamera();
@@ -1,7 +1,7 @@
FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
    INCLUDE_DIRS "."
    REQUIRES tfmicro esp_http_server app_update esp_http_client nvs_flash jomjol_tfliteclass jomjol_flowcontroll spiffs jomjol_helper)
    INCLUDE_DIRS "." "../../include"
    REQUIRES tfmicro esp_http_server app_update esp_http_client nvs_flash jomjol_tfliteclass jomjol_flowcontroll spiffs jomjol_helper jomjol_controlGPIO)

@@ -26,9 +26,12 @@
#include <esp_spiffs.h>
#include "esp_http_server.h"

#include "defines.h"
#include "ClassLogFile.h"

#include "server_help.h"
#include "interface_mqtt.h"
#include "server_GPIO.h"

#include "Helper.h"
#include "miniz.h"
@@ -53,17 +56,17 @@ struct file_server_data {
    char scratch[SCRATCH_BUFSIZE];
};

static const char *TAG = "file_server";
static const char *TAG_FILESERVER = "file_server";

/* Handler to redirect incoming GET request for /index.html to /
 * This can be overridden by uploading file with same name */
static esp_err_t index_html_get_handler(httpd_req_t *req)
{
    httpd_resp_set_status(req, "307 Temporary Redirect");
    httpd_resp_set_hdr(req, "Location", "/");
    httpd_resp_send(req, NULL, 0); // Response body can be empty
    return ESP_OK;
}
// static esp_err_t index_html_get_handler(httpd_req_t *req)
// {
//     httpd_resp_set_status(req, "307 Temporary Redirect");
//     httpd_resp_set_hdr(req, "Location", "/");
//     httpd_resp_send(req, NULL, 0); // Response body can be empty
//     return ESP_OK;
// }

/* Send HTTP response with a run-time generated html consisting of
 * a list of all files and folders under the requested path.
@@ -95,7 +98,7 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const
    printf("entrypath: <%s>\n", entrypath);

    if (!dir) {
        ESP_LOGE(TAG, "Failed to stat dir : %s", dirpath);
        ESP_LOGE(TAG_FILESERVER, "Failed to stat dir : %s", dirpath);
        /* Respond with 404 Not Found */
        httpd_resp_send_err(req, HTTPD_404_NOT_FOUND, "Directory does not exist");
        return ESP_FAIL;
@@ -115,7 +118,7 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const
    if (chunksize > 0){
        if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
            fclose(fd);
            ESP_LOGE(TAG, "File sending failed!");
            ESP_LOGE(TAG_FILESERVER, "File sending failed!");
            return ESP_FAIL;
        }
    }
@@ -154,11 +157,11 @@ static esp_err_t http_resp_dir_html(httpd_req_t *req, const char *dirpath, const
    strlcpy(entrypath + dirpath_len, entry->d_name, sizeof(entrypath) - dirpath_len);
    printf("Entrypath: %s\n", entrypath);
    if (stat(entrypath, &entry_stat) == -1) {
        ESP_LOGE(TAG, "Failed to stat %s : %s", entrytype, entry->d_name);
        ESP_LOGE(TAG_FILESERVER, "Failed to stat %s : %s", entrytype, entry->d_name);
        continue;
    }
    sprintf(entrysize, "%ld", entry_stat.st_size);
    ESP_LOGI(TAG, "Found %s : %s (%s bytes)", entrytype, entry->d_name, entrysize);
    ESP_LOGI(TAG_FILESERVER, "Found %s : %s (%s bytes)", entrytype, entry->d_name, entrysize);

    /* Send chunk of HTML file containing table entries with file name and size */
    httpd_resp_sendstr_chunk(req, "<tr><td><a href=\"");
@@ -206,19 +209,19 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)
    LogFile.WriteToFile("logfileact_get_handler");
    char filepath[FILE_PATH_MAX];
    FILE *fd = NULL;
    struct stat file_stat;
    //struct stat file_stat;
    printf("uri: %s\n", req->uri);

    const char filename = 'log_current.txt';
    const char* filename = "log_current.txt";

    printf("uri: %s, filename: %s, filepath: %s\n", req->uri, &filename, filepath);
    printf("uri: %s, filename: %s, filepath: %s\n", req->uri, filename, filepath);

    std::string currentfilename = LogFile.GetCurrentFileName();


    fd = OpenFileAndWait(currentfilename.c_str(), "r");
    if (!fd) {
        ESP_LOGE(TAG, "Failed to read existing file : %s", filepath);
        ESP_LOGE(TAG_FILESERVER, "Failed to read existing file : %s", filepath);
        /* Respond with 500 Internal Server Error */
        httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to read existing file");
        return ESP_FAIL;
@@ -226,8 +229,8 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)

    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");

    // ESP_LOGI(TAG, "Sending file : %s (%ld bytes)...", &filename, file_stat.st_size);
    set_content_type_from_file(req, &filename);
    // ESP_LOGI(TAG_FILESERVER, "Sending file : %s (%ld bytes)...", &filename, file_stat.st_size);
    set_content_type_from_file(req, filename);

    /* Retrieve the pointer to scratch buffer for temporary storage */
    char *chunk = ((struct file_server_data *)req->user_ctx)->scratch;
@@ -239,7 +242,7 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)
    /* Send the buffer contents as HTTP response chunk */
    if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
        fclose(fd);
        ESP_LOGE(TAG, "File sending failed!");
        ESP_LOGE(TAG_FILESERVER, "File sending failed!");
        /* Abort sending file */
        httpd_resp_sendstr_chunk(req, NULL);
        /* Respond with 500 Internal Server Error */
@@ -252,7 +255,7 @@ static esp_err_t logfileact_get_handler(httpd_req_t *req)

    /* Close file after sending complete */
    fclose(fd);
    ESP_LOGI(TAG, "File sending complete");
    ESP_LOGI(TAG_FILESERVER, "File sending complete");

    /* Respond with an empty chunk to signal HTTP response completion */
    httpd_resp_send_chunk(req, NULL, 0);
@@ -284,7 +287,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)


    if (!filename) {
        ESP_LOGE(TAG, "Filename is too long");
        ESP_LOGE(TAG_FILESERVER, "Filename is too long");
        /* Respond with 500 Internal Server Error */
        httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Filename too long");
        return ESP_FAIL;
@@ -297,11 +300,11 @@ static esp_err_t download_get_handler(httpd_req_t *req)
    if (buf_len > 1) {
        char buf[buf_len];
        if (httpd_req_get_url_query_str(req, buf, buf_len) == ESP_OK) {
            ESP_LOGI(TAG, "Found URL query => %s", buf);
            ESP_LOGI(TAG_FILESERVER, "Found URL query => %s", buf);
            char param[32];
            /* Get value of expected key from query string */
            if (httpd_query_key_value(buf, "readonly", param, sizeof(param)) == ESP_OK) {
                ESP_LOGI(TAG, "Found URL query parameter => readonly=%s", param);
                ESP_LOGI(TAG_FILESERVER, "Found URL query parameter => readonly=%s", param);
                readonly = param && strcmp(param,"true")==0;
            }
        }
@@ -316,7 +319,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)

    /* If file not present on SPIFFS check if URI
     * corresponds to one of the hardcoded paths */
    ESP_LOGE(TAG, "Failed to stat file : %s", filepath);
    ESP_LOGE(TAG_FILESERVER, "Failed to stat file : %s", filepath);
    /* Respond with 404 Not Found */
    httpd_resp_send_err(req, HTTPD_404_NOT_FOUND, "File does not exist");
    return ESP_FAIL;
@@ -324,7 +327,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)

    fd = OpenFileAndWait(filepath, "r");
    if (!fd) {
        ESP_LOGE(TAG, "Failed to read existing file : %s", filepath);
        ESP_LOGE(TAG_FILESERVER, "Failed to read existing file : %s", filepath);
        /* Respond with 500 Internal Server Error */
        httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to read existing file");
        return ESP_FAIL;
@@ -332,7 +335,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)

    httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");

    ESP_LOGI(TAG, "Sending file : %s (%ld bytes)...", filename, file_stat.st_size);
    ESP_LOGI(TAG_FILESERVER, "Sending file : %s (%ld bytes)...", filename, file_stat.st_size);
    set_content_type_from_file(req, filename);

    /* Retrieve the pointer to scratch buffer for temporary storage */
@@ -345,7 +348,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)
    /* Send the buffer contents as HTTP response chunk */
    if (httpd_resp_send_chunk(req, chunk, chunksize) != ESP_OK) {
        fclose(fd);
        ESP_LOGE(TAG, "File sending failed!");
        ESP_LOGE(TAG_FILESERVER, "File sending failed!");
        /* Abort sending file */
        httpd_resp_sendstr_chunk(req, NULL);
        /* Respond with 500 Internal Server Error */
@@ -358,7 +361,7 @@ static esp_err_t download_get_handler(httpd_req_t *req)

    /* Close file after sending complete */
    fclose(fd);
    ESP_LOGI(TAG, "File sending complete");
    ESP_LOGI(TAG_FILESERVER, "File sending complete");

    /* Respond with an empty chunk to signal HTTP response completion */
    httpd_resp_send_chunk(req, NULL, 0);
@@ -385,13 +388,13 @@ static esp_err_t upload_post_handler(httpd_req_t *req)

    /* Filename cannot have a trailing '/' */
    if (filename[strlen(filename) - 1] == '/') {
        ESP_LOGE(TAG, "Invalid filename : %s", filename);
        ESP_LOGE(TAG_FILESERVER, "Invalid filename : %s", filename);
        httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Invalid filename");
        return ESP_FAIL;
    }

    if (stat(filepath, &file_stat) == 0) {
        ESP_LOGE(TAG, "File already exists : %s", filepath);
        ESP_LOGE(TAG_FILESERVER, "File already exists : %s", filepath);
        /* Respond with 400 Bad Request */
        httpd_resp_send_err(req, HTTPD_400_BAD_REQUEST, "File already exists");
        return ESP_FAIL;
@@ -399,7 +402,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)

    /* File cannot be larger than a limit */
    if (req->content_len > MAX_FILE_SIZE) {
        ESP_LOGE(TAG, "File too large : %d bytes", req->content_len);
        ESP_LOGE(TAG_FILESERVER, "File too large : %d bytes", req->content_len);
        /* Respond with 400 Bad Request */
        httpd_resp_send_err(req, HTTPD_400_BAD_REQUEST,
                            "File size must be less than "
@@ -411,13 +414,13 @@ static esp_err_t upload_post_handler(httpd_req_t *req)

    fd = OpenFileAndWait(filepath, "w");
    if (!fd) {
        ESP_LOGE(TAG, "Failed to create file : %s", filepath);
        ESP_LOGE(TAG_FILESERVER, "Failed to create file : %s", filepath);
        /* Respond with 500 Internal Server Error */
        httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to create file");
        return ESP_FAIL;
    }

    ESP_LOGI(TAG, "Receiving file : %s...", filename);
    ESP_LOGI(TAG_FILESERVER, "Receiving file : %s...", filename);

    /* Retrieve the pointer to scratch buffer for temporary storage */
    char *buf = ((struct file_server_data *)req->user_ctx)->scratch;
@@ -429,7 +432,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)

    while (remaining > 0) {

        ESP_LOGI(TAG, "Remaining size : %d", remaining);
        ESP_LOGI(TAG_FILESERVER, "Remaining size : %d", remaining);
        /* Receive the file part by part into a buffer */
        if ((received = httpd_req_recv(req, buf, MIN(remaining, SCRATCH_BUFSIZE))) <= 0) {
            if (received == HTTPD_SOCK_ERR_TIMEOUT) {
@@ -442,7 +445,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
            fclose(fd);
            unlink(filepath);

            ESP_LOGE(TAG, "File reception failed!");
            ESP_LOGE(TAG_FILESERVER, "File reception failed!");
            /* Respond with 500 Internal Server Error */
            httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to receive file");
            return ESP_FAIL;
@@ -455,7 +458,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
            fclose(fd);
            unlink(filepath);

            ESP_LOGE(TAG, "File write failed!");
            ESP_LOGE(TAG_FILESERVER, "File write failed!");
            /* Respond with 500 Internal Server Error */
            httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Failed to write file to storage");
            return ESP_FAIL;
@@ -468,7 +471,7 @@ static esp_err_t upload_post_handler(httpd_req_t *req)

    /* Close file upon upload completion */
    fclose(fd);
    ESP_LOGI(TAG, "File reception complete");
    ESP_LOGI(TAG_FILESERVER, "File reception complete");

    std::string directory = std::string(filepath);
    size_t zw = directory.find("/");
@@ -483,10 +486,10 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
    int start_fn = strlen(((struct file_server_data *)req->user_ctx)->base_path);
    printf("Directory: %s, start_fn: %d, found: %d\n", directory.c_str(), start_fn, found);
    directory = directory.substr(start_fn, found - start_fn + 1);
    printf("Directory danach: %s\n", directory.c_str());
    printf("Directory danach 1: %s\n", directory.c_str());

    directory = "/fileserver" + directory;
    printf("Directory danach: %s\n", directory.c_str());
    printf("Directory danach 2: %s\n", directory.c_str());

    /* Redirect onto root to see the updated file list */
    httpd_resp_set_status(req, "303 See Other");
@@ -496,6 +499,15 @@ static esp_err_t upload_post_handler(httpd_req_t *req)
    httpd_resp_set_status(req, "303 See Other");
    httpd_resp_set_hdr(req, "Location", directory.c_str());
    httpd_resp_sendstr(req, "File uploaded successfully");

    /*
    if (strcmp(filepath, CONFIG_FILE) == 0) {
        printf("New config found. Reload handler.");
        gpio_handler_deinit();
        MQTTdestroy();
    }
    */

    return ESP_OK;
}

@@ -567,19 +579,19 @@ static esp_err_t delete_post_handler(httpd_req_t *req)

    /* Filename cannot have a trailing '/' */
    if (filename[strlen(filename) - 1] == '/') {
        ESP_LOGE(TAG, "Invalid filename : %s", filename);
        ESP_LOGE(TAG_FILESERVER, "Invalid filename : %s", filename);
        httpd_resp_send_err(req, HTTPD_500_INTERNAL_SERVER_ERROR, "Invalid filename");
        return ESP_FAIL;
    }

    if (stat(filepath, &file_stat) == -1) {
        ESP_LOGE(TAG, "File does not exist : %s", filename);
        ESP_LOGE(TAG_FILESERVER, "File does not exist : %s", filename);
        /* Respond with 400 Bad Request */
        httpd_resp_send_err(req, HTTPD_400_BAD_REQUEST, "File does not exist");
        return ESP_FAIL;
    }

    ESP_LOGI(TAG, "Deleting file : %s", filename);
    ESP_LOGI(TAG_FILESERVER, "Deleting file : %s", filename);
    /* Delete file */
    unlink(filepath);

@@ -596,10 +608,10 @@ static esp_err_t delete_post_handler(httpd_req_t *req)
    int start_fn = strlen(((struct file_server_data *)req->user_ctx)->base_path);
    printf("Directory: %s, start_fn: %d, found: %d\n", directory.c_str(), start_fn, found);
    directory = directory.substr(start_fn, found - start_fn + 1);
    printf("Directory danach: %s\n", directory.c_str());
    printf("Directory danach 3: %s\n", directory.c_str());

    directory = "/fileserver" + directory;
    printf("Directory danach: %s\n", directory.c_str());
    printf("Directory danach 4: %s\n", directory.c_str());
}


@@ -623,7 +635,7 @@ void delete_all_in_directory(std::string _directory)
    std::string filename;

    if (!dir) {
        ESP_LOGE(TAG, "Failed to stat dir : %s", _directory.c_str());
        ESP_LOGE(TAG_FILESERVER, "Failed to stat dir : %s", _directory.c_str());
        return;
    }

@@ -632,7 +644,7 @@ void delete_all_in_directory(std::string _directory)
    if (!(entry->d_type == DT_DIR)){
        if (strcmp("wlan.ini", entry->d_name) != 0){ // wlan.ini must not be accessed !!!
            filename = _directory + "/" + std::string(entry->d_name);
            ESP_LOGI(TAG, "Deleting file : %s", filename.c_str());
            ESP_LOGI(TAG_FILESERVER, "Deleting file : %s", filename.c_str());
            /* Delete file */
            unlink(filename.c_str());
        }
@@ -722,19 +734,19 @@ void register_server_file_uri(httpd_handle_t server, const char *base_path)
    /* Validate file storage base path */
    if (!base_path) {
    // if (!base_path || strcmp(base_path, "/spiffs") != 0) {
        ESP_LOGE(TAG, "File server base_path not set");
        ESP_LOGE(TAG_FILESERVER, "File server base_path not set");
        // return ESP_ERR_INVALID_ARG;
    }

    if (server_data) {
        ESP_LOGE(TAG, "File server already started");
        ESP_LOGE(TAG_FILESERVER, "File server already started");
        // return ESP_ERR_INVALID_STATE;
    }

    /* Allocate memory for server data */
    server_data = (file_server_data *) calloc(1, sizeof(struct file_server_data));
    if (!server_data) {
        ESP_LOGE(TAG, "Failed to allocate memory for server data");
        ESP_LOGE(TAG_FILESERVER, "Failed to allocate memory for server data");
        // return ESP_ERR_NO_MEM;
    }
    strlcpy(server_data->base_path, base_path,
@@ -107,6 +107,8 @@ esp_err_t set_content_type_from_file(httpd_req_t *req, const char *filename)
        return httpd_resp_set_type(req, "text/html");
    } else if (IS_FILE_EXT(filename, ".jpeg")) {
        return httpd_resp_set_type(req, "image/jpeg");
    } else if (IS_FILE_EXT(filename, ".jpg")) {
        return httpd_resp_set_type(req, "image/jpeg");
    } else if (IS_FILE_EXT(filename, ".ico")) {
        return httpd_resp_set_type(req, "image/x-icon");
    }

@@ -12,7 +12,7 @@
#include "freertos/task.h"
#include "esp_system.h"
#include "esp_event.h"
#include "esp_event_loop.h"
#include "esp_event.h"
#include "esp_log.h"
#include <esp_ota_ops.h>
#include "esp_http_client.h"
@@ -28,6 +28,7 @@

#include "server_tflite.h"
#include "server_file.h"
#include "server_GPIO.h"

#include "ClassLogFile.h"

@@ -46,6 +47,7 @@ static char ota_write_data[BUFFSIZE + 1] = { 0 };


#define OTA_URL_SIZE 256
static const char *TAGPARTOTA = "server_ota";


static void infinite_loop(void)
@@ -60,14 +62,14 @@ static void infinite_loop(void)



static bool ota_example_task(std::string fn)
static bool ota_update_task(std::string fn)
{
    esp_err_t err;
    /* update handle : set by esp_ota_begin(), must be freed via esp_ota_end() */
    esp_ota_handle_t update_handle = 0 ;
    const esp_partition_t *update_partition = NULL;

    ESP_LOGI(TAGPARTOTA, "Starting OTA example");
    ESP_LOGI(TAGPARTOTA, "Starting OTA update");

    const esp_partition_t *configured = esp_ota_get_boot_partition();
    const esp_partition_t *running = esp_ota_get_running_partition();
@@ -374,7 +376,9 @@ esp_err_t handler_ota_update(httpd_req_t *req)

    const char* resp_str;

    if (ota_example_task(fn))
    KillTFliteTasks();
    gpio_handler_deinit();
    if (ota_update_task(fn))
    {
        resp_str = "Firmware Update Successfull!<br><br>You can restart now.";
    }
@@ -400,8 +404,6 @@ void hard_restart() {

void task_reboot(void *pvParameter)
{


    while(1)
    {
        vTaskDelay(5000 / portTICK_PERIOD_MS);
@@ -413,9 +415,11 @@ void task_reboot(void *pvParameter)
}

void doReboot(){
    LogFile.WriteToFile("Reboot - now");
    KillTFliteTasks();
    ESP_LOGI(TAGPARTOTA, "Reboot in 5sec");
    LogFile.WriteToFile("Reboot in 5sec");
    xTaskCreate(&task_reboot, "reboot", configMINIMAL_STACK_SIZE * 64, NULL, 10, NULL);
    // KillTFliteTasks(); // kills itself
    gpio_handler_destroy();
    vTaskDelay(5000 / portTICK_PERIOD_MS);
    esp_restart();
    hard_restart();

@@ -4,8 +4,6 @@

//#include "ClassControllCamera.h"

static const char *TAGPARTOTA = "server_ota";

void register_server_ota_sdcard_uri(httpd_handle_t server);
void CheckOTAUpdate();
void doReboot();

@@ -2,6 +2,6 @@ FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
    INCLUDE_DIRS "."
    REQUIRES jomjol_tfliteclass jomjol_helper jomjol_controlcamera jomjol_mqtt jomjol_fileserver_ota jomjol_image_proc connect_wlan)
    REQUIRES jomjol_tfliteclass jomjol_helper jomjol_controlcamera jomjol_mqtt jomjol_fileserver_ota jomjol_image_proc jomjol_wlan)

@@ -94,6 +94,23 @@ string ClassFlow::getReadout()
    return string();
}

std::string ClassFlow::GetParameterName(std::string _input)
{
    string _param;
    int _pospunkt = _input.find_first_of(".");
    if (_pospunkt > -1)
    {
        _param = _input.substr(_pospunkt+1, _input.length() - _pospunkt - 1);
    }
    else
    {
        _param = _input;
    }
    // printf("Parameter: %s, Pospunkt: %d\n", _param.c_str(), _pospunkt);
    return _param;
}

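// Illustrative sketch, not from the commits above: GetParameterName() strips
// an optional "<number>." prefix from a config key, so per-number parameters
// and plain parameters share one code path. The key names below are made up
// for illustration; any "a.b" string behaves the same way.
//
//   GetParameterName("main.MaxRateValue")  -> "MaxRateValue"
//   GetParameterName("MaxRateValue")       -> "MaxRateValue"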
bool ClassFlow::getNextLine(FILE* pfile, string *rt)
{
    char zw[1024];
@@ -102,24 +119,21 @@ bool ClassFlow::getNextLine(FILE* pfile, string *rt)
        *rt = "";
        return false;
    }
    fgets(zw, 1024, pfile);
    printf("%s", zw);
    if ((strlen(zw) == 0) && feof(pfile))
    if (!fgets(zw, 1024, pfile))
    {
        *rt = "";
        printf("END OF FILE\n");
        return false;
    }
    printf("%s", zw);
    *rt = zw;
    *rt = trim(*rt);
    while ((zw[0] == ';' || zw[0] == '#' || (rt->size() == 0)) && !(zw[1] == '[')) // skip comment lines (; or #) and blank lines, unless it is a new commented-out paragraph
    {
        fgets(zw, 1024, pfile);
        printf("%s", zw);
        if (feof(pfile))
        {
            *rt = "";
        *rt = "";
        if (!fgets(zw, 1024, pfile))
            return false;
        }
        printf("%s", zw);
        *rt = zw;
        *rt = trim(*rt);
    }

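// Illustrative sketch, not from the commits above: what the reworked
// getNextLine() skips when reading a config file. Given a hypothetical file
//
//   ; just a comment          <- skipped (starts with ';')
//   # also a comment          <- skipped (starts with '#')
//                             <- skipped (blank line)
//   ;[Analog]                 <- NOT skipped: ";[" marks a disabled paragraph
//   [Digits]                  <- returned to the caller
//
// the fgets() return value is now checked on every read, so a file that ends
// in comments no longer re-processes stale buffer contents at EOF.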
@@ -37,6 +37,8 @@

    virtual void SetInitialParameter(void);

    std::string GetParameterName(std::string _input);

    bool disabled;

public:

@@ -46,9 +46,9 @@ ClassFlowAnalog::ClassFlowAnalog(std::vector<ClassFlow*>* lfc) : ClassFlowImage(
}


int ClassFlowAnalog::AnzahlROIs()
int ClassFlowAnalog::AnzahlROIs(int _analog = 0)
{
    int zw = ROI.size();
    int zw = ANALOG[_analog]->ROI.size();
    if (extendedResolution)
        zw++;

@@ -56,27 +56,27 @@ int ClassFlowAnalog::AnzahlROIs()
}


string ClassFlowAnalog::getReadout()
string ClassFlowAnalog::getReadout(int _analog = 0)
{
    string result = "";
    if (ROI.size() == 0)
    if (ANALOG[_analog]->ROI.size() == 0)
        return result;


    float zahl = ROI[ROI.size() - 1]->result;
    float zahl = ANALOG[_analog]->ROI[ANALOG[_analog]->ROI.size() - 1]->result;
    int ergebnis_nachkomma = ((int) floor(zahl * 10)) % 10;

    int prev = -1;

    prev = ZeigerEval(ROI[ROI.size() - 1]->result, prev);
    prev = ZeigerEval(ANALOG[_analog]->ROI[ANALOG[_analog]->ROI.size() - 1]->result, prev);
    result = std::to_string(prev);

    if (extendedResolution)
        result = result + std::to_string(ergebnis_nachkomma);

    for (int i = ROI.size() - 2; i >= 0; --i)
    for (int i = ANALOG[_analog]->ROI.size() - 2; i >= 0; --i)
    {
        prev = ZeigerEval(ROI[i]->result, prev);
        prev = ZeigerEval(ANALOG[_analog]->ROI[i]->result, prev);
        result = std::to_string(prev) + result;
    }

@@ -153,8 +153,8 @@ bool ClassFlowAnalog::ReadParameter(FILE* pfile, string& aktparamgraph)
    }
    if (zerlegt.size() >= 5)
    {
        roianalog* neuroi = new roianalog;
        neuroi->name = zerlegt[0];
        analog* _analog = GetANALOG(zerlegt[0], true);
        roianalog* neuroi = _analog->ROI[_analog->ROI.size()-1];
        neuroi->posx = std::stoi(zerlegt[1]);
        neuroi->posy = std::stoi(zerlegt[2]);
        neuroi->deltax = std::stoi(zerlegt[3]);
@@ -162,7 +162,7 @@ bool ClassFlowAnalog::ReadParameter(FILE* pfile, string& aktparamgraph)
        neuroi->result = -1;
        neuroi->image = NULL;
        neuroi->image_org = NULL;
        ROI.push_back(neuroi);
        // ROI.push_back(neuroi);
    }

    if ((toUpper(zerlegt[0]) == "SAVEALLFILES") && (zerlegt.size() > 1))
@@ -178,15 +178,75 @@ bool ClassFlowAnalog::ReadParameter(FILE* pfile, string& aktparamgraph)
    }
    }

    for (int i = 0; i < ROI.size(); ++i)
    {
        ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
        ROI[i]->image_org = new CImageBasis(ROI[i]->deltax, ROI[i]->deltay, 3);
    }
    for (int _ana = 0; _ana < ANALOG.size(); ++_ana)
        for (int i = 0; i < ANALOG[_ana]->ROI.size(); ++i)
        {
            ANALOG[_ana]->ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
            ANALOG[_ana]->ROI[i]->image_org = new CImageBasis(ANALOG[_ana]->ROI[i]->deltax, ANALOG[_ana]->ROI[i]->deltay, 3);
        }

    return true;
}

analog* ClassFlowAnalog::FindANALOG(string _name_number)
{

    for (int i = 0; i < ANALOG.size(); ++i)
    {
        if (ANALOG[i]->name == _name_number)
            return ANALOG[i];
    }

    return NULL;
}



analog* ClassFlowAnalog::GetANALOG(string _name, bool _create = true)
{
    string _analog, _roi;
    int _pospunkt = _name.find_first_of(".");
    // printf("Name: %s, Pospunkt: %d\n", _name.c_str(), _pospunkt);
    if (_pospunkt > -1)
    {
        _analog = _name.substr(0, _pospunkt);
        _roi = _name.substr(_pospunkt+1, _name.length() - _pospunkt - 1);
    }
    else
    {
        _analog = "default";
        _roi = _name;
    }

    analog *_ret = NULL;

    for (int i = 0; i < ANALOG.size(); ++i)
    {
        if (ANALOG[i]->name == _analog)
            _ret = ANALOG[i];
    }

    if (!_create) // not found, and it should not be created either
        return _ret;


    if (_ret == NULL)
    {
        _ret = new analog;
        _ret->name = _analog;
        ANALOG.push_back(_ret);
    }

    roianalog* neuroi = new roianalog;
    neuroi->name = _roi;
    _ret->ROI.push_back(neuroi);

    printf("GetANALOG - ANALOG %s - roi %s\n", _analog.c_str(), _roi.c_str());

    return _ret;
}
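// Illustrative sketch, not from the commits above: GetANALOG() introduces
// two-level ROI naming of the form "container.roi". A hypothetical config line
//
//   ana1.pointer1 100 200 60 60
//
// files ROI "pointer1" under container "ana1", while a bare name like
//
//   pointer1 100 200 60 60
//
// lands in the implicit "default" container, which keeps old single-meter
// configurations working unchanged. With _create == true, the container and a
// fresh roianalog entry are allocated on first sight, so ReadParameter() can
// fill the returned ROI's coordinates directly.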
string ClassFlowAnalog::getHTMLSingleStep(string host)
{
@@ -238,16 +298,29 @@ bool ClassFlowAnalog::doAlignAndCut(string time)

    CAlignAndCutImage *caic = flowpostalignment->GetAlignAndCutImage();

    for (int i = 0; i < ROI.size(); ++i)
    {
        printf("Analog %d - Align&Cut\n", i);
    for (int _ana = 0; _ana < ANALOG.size(); ++_ana)
        for (int i = 0; i < ANALOG[_ana]->ROI.size(); ++i)
        {
            printf("Analog %d - Align&Cut\n", i);

        caic->CutAndSave(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, ROI[i]->image_org);
        if (SaveAllFiles) ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".jpg"));
            caic->CutAndSave(ANALOG[_ana]->ROI[i]->posx, ANALOG[_ana]->ROI[i]->posy, ANALOG[_ana]->ROI[i]->deltax, ANALOG[_ana]->ROI[i]->deltay, ANALOG[_ana]->ROI[i]->image_org);
            if (SaveAllFiles)
            {
                if (ANALOG[_ana]->name == "default")
                    ANALOG[_ana]->ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ANALOG[_ana]->ROI[i]->name + ".jpg"));
                else
                    ANALOG[_ana]->ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ANALOG[_ana]->name + "_" + ANALOG[_ana]->ROI[i]->name + ".jpg"));
            }

        ROI[i]->image_org->Resize(modelxsize, modelysize, ROI[i]->image);
        if (SaveAllFiles) ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".bmp"));
    }
            ANALOG[_ana]->ROI[i]->image_org->Resize(modelxsize, modelysize, ANALOG[_ana]->ROI[i]->image);
            if (SaveAllFiles)
            {
                if (ANALOG[_ana]->name == "default")
                    ANALOG[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ANALOG[_ana]->ROI[i]->name + ".bmp"));
                else
                    ANALOG[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ANALOG[_ana]->name + "_" + ANALOG[_ana]->ROI[i]->name + ".bmp"));
            }
        }

    return true;
}
@@ -258,13 +331,14 @@ void ClassFlowAnalog::DrawROI(CImageBasis *_zw)
    int g = 255;
    int b = 0;

    for (int i = 0; i < ROI.size(); ++i)
    {
        _zw->drawRect(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, r, g, b, 1);
        _zw->drawCircle((int) (ROI[i]->posx + ROI[i]->deltax/2), (int) (ROI[i]->posy + ROI[i]->deltay/2), (int) (ROI[i]->deltax/2), r, g, b, 2);
        _zw->drawLine((int) (ROI[i]->posx + ROI[i]->deltax/2), (int) ROI[i]->posy, (int) (ROI[i]->posx + ROI[i]->deltax/2), (int) (ROI[i]->posy + ROI[i]->deltay), r, g, b, 2);
        _zw->drawLine((int) ROI[i]->posx, (int) (ROI[i]->posy + ROI[i]->deltay/2), (int) ROI[i]->posx + ROI[i]->deltax, (int) (ROI[i]->posy + ROI[i]->deltay/2), r, g, b, 2);
    }
    for (int _ana = 0; _ana < ANALOG.size(); ++_ana)
        for (int i = 0; i < ANALOG[_ana]->ROI.size(); ++i)
        {
            _zw->drawRect(ANALOG[_ana]->ROI[i]->posx, ANALOG[_ana]->ROI[i]->posy, ANALOG[_ana]->ROI[i]->deltax, ANALOG[_ana]->ROI[i]->deltay, r, g, b, 1);
            _zw->drawCircle((int) (ANALOG[_ana]->ROI[i]->posx + ANALOG[_ana]->ROI[i]->deltax/2), (int) (ANALOG[_ana]->ROI[i]->posy + ANALOG[_ana]->ROI[i]->deltay/2), (int) (ANALOG[_ana]->ROI[i]->deltax/2), r, g, b, 2);
            _zw->drawLine((int) (ANALOG[_ana]->ROI[i]->posx + ANALOG[_ana]->ROI[i]->deltax/2), (int) ANALOG[_ana]->ROI[i]->posy, (int) (ANALOG[_ana]->ROI[i]->posx + ANALOG[_ana]->ROI[i]->deltax/2), (int) (ANALOG[_ana]->ROI[i]->posy + ANALOG[_ana]->ROI[i]->deltay), r, g, b, 2);
            _zw->drawLine((int) ANALOG[_ana]->ROI[i]->posx, (int) (ANALOG[_ana]->ROI[i]->posy + ANALOG[_ana]->ROI[i]->deltay/2), (int) ANALOG[_ana]->ROI[i]->posx + ANALOG[_ana]->ROI[i]->deltax, (int) (ANALOG[_ana]->ROI[i]->posy + ANALOG[_ana]->ROI[i]->deltay/2), r, g, b, 2);
        }
}

bool ClassFlowAnalog::doNeuralNetwork(string time)
@@ -284,43 +358,46 @@ bool ClassFlowAnalog::doNeuralNetwork(string time)
    string zwcnn = "/sdcard" + cnnmodelfile;
    zwcnn = FormatFileName(zwcnn);
    printf(zwcnn.c_str());printf("\n");
    tflite->LoadModel(zwcnn);
    if (!tflite->LoadModel(zwcnn)) {
        printf("Can't read model file /sdcard%s\n", cnnmodelfile.c_str());
        delete tflite;
        return false;
    }
    tflite->MakeAllocate();
#endif

    for (int i = 0; i < ROI.size(); ++i)
    for (int _ana = 0; _ana < ANALOG.size(); ++_ana)
    {
        printf("Analog %d - TfLite\n", i);
        ioresize = "/sdcard/img_tmp/ra" + std::to_string(i) + ".bmp";
        ioresize = FormatFileName(ioresize);


        float f1, f2;
        f1 = 0; f2 = 0;

#ifndef OHNETFLITE
        // LogFile.WriteToFile("ClassFlowAnalog::doNeuralNetwork vor CNN tflite->LoadInputImage(ioresize)");
        // tflite->LoadInputImage(ioresize);
        tflite->LoadInputImageBasis(ROI[i]->image);
        tflite->Invoke();
        if (debugdetailanalog) LogFile.WriteToFile("Nach Invoke");


        f1 = tflite->GetOutputValue(0);
        f2 = tflite->GetOutputValue(1);
#endif

        float result = fmod(atan2(f1, f2) / (M_PI * 2) + 2, 1);
        // printf("Result sin, cos, ziffer: %f, %f, %f\n", f1, f2, result);
        ROI[i]->result = result * 10;

        printf("Result Analog%i: %f\n", i, ROI[i]->result);

        if (isLogImage)
        for (int i = 0; i < ANALOG[_ana]->ROI.size(); ++i)
        {
            LogImage(logPath, ROI[i]->name, &ROI[i]->result, NULL, time, ROI[i]->image_org);
            printf("Analog %d - TfLite\n", i);

            float f1, f2;
            f1 = 0; f2 = 0;

#ifndef OHNETFLITE
            tflite->LoadInputImageBasis(ANALOG[_ana]->ROI[i]->image);
            tflite->Invoke();
            if (debugdetailanalog) LogFile.WriteToFile("Nach Invoke");


            f1 = tflite->GetOutputValue(0);
            f2 = tflite->GetOutputValue(1);
#endif

            float result = fmod(atan2(f1, f2) / (M_PI * 2) + 2, 1);
            // printf("Result sin, cos, ziffer: %f, %f, %f\n", f1, f2, result);
            ANALOG[_ana]->ROI[i]->result = result * 10;

            printf("Result Analog%i: %f\n", i, ANALOG[_ana]->ROI[i]->result);

            if (isLogImage)
            {
                LogImage(logPath, ANALOG[_ana]->ROI[i]->name, &ANALOG[_ana]->ROI[i]->result, NULL, time, ANALOG[_ana]->ROI[i]->image_org);
            }
        }
    }

#ifndef OHNETFLITE
    delete tflite;
#endif
@@ -333,18 +410,78 @@ std::vector<HTMLInfo*> ClassFlowAnalog::GetHTMLInfo()
{
    std::vector<HTMLInfo*> result;

    for (int i = 0; i < ROI.size(); ++i)
    {
        HTMLInfo *zw = new HTMLInfo;
        zw->filename = ROI[i]->name + ".bmp";
        zw->filename_org = ROI[i]->name + ".jpg";
        zw->val = ROI[i]->result;
        zw->image = ROI[i]->image;
        zw->image_org = ROI[i]->image_org;
        result.push_back(zw);
    }
    for (int _ana = 0; _ana < ANALOG.size(); ++_ana)
        for (int i = 0; i < ANALOG[_ana]->ROI.size(); ++i)
        {
            if (ANALOG[_ana]->name == "default")
                ANALOG[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ANALOG[_ana]->ROI[i]->name + ".bmp"));
            else
                ANALOG[_ana]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ANALOG[_ana]->name + "_" + ANALOG[_ana]->ROI[i]->name + ".bmp"));


            HTMLInfo *zw = new HTMLInfo;
            if (ANALOG[_ana]->name == "default")
            {
                zw->filename = ANALOG[_ana]->ROI[i]->name + ".bmp";
                zw->filename_org = ANALOG[_ana]->ROI[i]->name + ".jpg";
            }
            else
            {
                zw->filename = ANALOG[_ana]->name + "_" + ANALOG[_ana]->ROI[i]->name + ".bmp";
                zw->filename_org = ANALOG[_ana]->name + "_" + ANALOG[_ana]->ROI[i]->name + ".jpg";
            }

            zw->val = ANALOG[_ana]->ROI[i]->result;
            zw->image = ANALOG[_ana]->ROI[i]->image;
            zw->image_org = ANALOG[_ana]->ROI[i]->image_org;

            result.push_back(zw);
        }

    return result;
}



int ClassFlowAnalog::getAnzahlANALOG()
{
    return ANALOG.size();
}

string ClassFlowAnalog::getNameANALOG(int _analog)
{
    if (_analog < ANALOG.size())
        return ANALOG[_analog]->name;

    return "ANALOG DOES NOT EXIST";
}

analog* ClassFlowAnalog::GetANALOG(int _analog)
{
    if (_analog < ANALOG.size())
        return ANALOG[_analog];

    return NULL;
}



void ClassFlowAnalog::UpdateNameNumbers(std::vector<std::string> *_name_numbers)
{
    for (int _dig = 0; _dig < ANALOG.size(); _dig++)
    {
        std::string _name = ANALOG[_dig]->name;
        bool found = false;
        for (int i = 0; i < (*_name_numbers).size(); ++i)
        {
            if ((*_name_numbers)[i] == _name)
                found = true;
        }
        if (!found)
            (*_name_numbers).push_back(_name);
    }
}

@@ -10,12 +10,19 @@ struct roianalog {
    string name;
};

struct analog {
    string name;
    std::vector<roianalog*> ROI;
};


class ClassFlowAnalog :
    public ClassFlowImage
{
protected:
    std::vector<roianalog*> ROI;
    // std::vector<roianalog*> ROI;
    std::vector<analog*> ANALOG;

    string cnnmodelfile;
    int modelxsize, modelysize;
    int ZeigerEval(float zahl, int ziffer_vorgaenger);
@@ -26,6 +33,7 @@ protected:

    void SetInitialParameter(void);


public:
    bool extendedResolution;

@@ -34,14 +42,23 @@ public:
    bool ReadParameter(FILE* pfile, string& aktparamgraph);
    bool doFlow(string time);
    string getHTMLSingleStep(string host);
    string getReadout();
    string getReadout(int _analog);

    void DrawROI(CImageBasis *_zw);

    bool doNeuralNetwork(string time);
    bool doAlignAndCut(string time);
    std::vector<HTMLInfo*> GetHTMLInfo();
    int AnzahlROIs();
    int AnzahlROIs(int _analog);

    int getAnzahlANALOG();
    analog* GetANALOG(int _analog);
    analog* GetANALOG(string _name, bool _create);
    analog* FindANALOG(string _name_number);
    string getNameANALOG(int _analog);

    void UpdateNameNumbers(std::vector<std::string> *_name_numbers);


    string name(){return "ClassFlowAnalog";};
};
@@ -1,6 +1,7 @@
#include "ClassFlowControll.h"

#include "connect_wlan.h"
#include "read_wlanini.h"

#include "freertos/task.h"

@@ -21,13 +22,16 @@ static const char* TAG = "flow_controll";
std::string ClassFlowControll::doSingleStep(std::string _stepname, std::string _host){
    std::string _classname = "";
    std::string result = "";
    // printf("_stepname: %s\n", _stepname.c_str());
    if ((_stepname.compare("[MakeImage]") == 0) || (_stepname.compare(";[MakeImage]") == 0)){
        _classname = "ClassFlowMakeImage";
    }
    if ((_stepname.compare("[Alignment]") == 0) || (_stepname.compare(";[Alignment]") == 0)){
        _classname = "ClassFlowAlignment";
    }
    if ((_stepname.compare("[Digits]") == 0) || (_stepname.compare(";[Digits]") == 0)){
    if ((_stepname.compare(0, 7, "[Digits") == 0) || (_stepname.compare(0, 8, ";[Digits") == 0)) {
    // if ((_stepname.compare("[Digits]") == 0) || (_stepname.compare(";[Digits]") == 0)){
        // printf("Digits!!!\n");
        _classname = "ClassFlowDigit";
    }
    if ((_stepname.compare("[Analog]") == 0) || (_stepname.compare(";[Analog]") == 0)){
@@ -68,6 +72,19 @@ std::vector<HTMLInfo*> ClassFlowControll::GetAllAnalog()
}



string ClassFlowControll::GetMQTTMainTopic()
{
    for (int i = 0; i < FlowControll.size(); ++i)
        if (FlowControll[i]->name().compare("ClassFlowMQTT") == 0)
            return ((ClassFlowMQTT*) (FlowControll[i]))->GetMQTTMainTopic();


    return "";
}



void ClassFlowControll::SetInitialParameter(void)
{
    AutoStart = false;
@@ -109,13 +126,14 @@ ClassFlow* ClassFlowControll::CreateClassFlow(std::string _type)
        cfc = new ClassFlowAnalog(&FlowControll);
        flowanalog = (ClassFlowAnalog*) cfc;
    }
    if (toUpper(_type).compare("[DIGITS]") == 0)
    if (toUpper(_type).compare(0, 7, "[DIGITS") == 0)
    {
        cfc = new ClassFlowDigit(&FlowControll);
        flowdigit = (ClassFlowDigit*) cfc;
    }
    if (toUpper(_type).compare("[MQTT]") == 0)
        cfc = new ClassFlowMQTT(&FlowControll);

    if (toUpper(_type).compare("[POSTPROCESSING]") == 0)
    {
        cfc = new ClassFlowPostProcessing(&FlowControll);
@@ -168,9 +186,12 @@ void ClassFlowControll::InitFlow(std::string config)
    }
    else
    {
        fgets(zw, 1024, pFile);
        printf("%s", zw);
        line = std::string(zw);
        line = "";
        if (fgets(zw, 1024, pFile) && !feof(pFile))
        {
            printf("Read: %s", zw);
            line = std::string(zw);
        }
    }
}

@@ -205,7 +226,7 @@ bool ClassFlowControll::doFlow(string time)
    int repeat = 0;

#ifdef DEBUG_DETAIL_ON
    LogFile.WriteHeapInfo("ClassFlowAnalog::doFlow - Start");
    LogFile.WriteHeapInfo("ClassFlowControll::doFlow - Start");
#endif

    for (int i = 0; i < FlowControll.size(); ++i)
@@ -234,7 +255,7 @@ bool ClassFlowControll::doFlow(string time)
    }

#ifdef DEBUG_DETAIL_ON
    LogFile.WriteHeapInfo("ClassFlowAnalog::doFlow");
    LogFile.WriteHeapInfo("ClassFlowControll::doFlow");
#endif

}
@@ -258,6 +279,38 @@ void ClassFlowControll::UpdateAktStatus(std::string _flow)
}


string ClassFlowControll::getReadoutAll(int _type)
{
    std::vector<NumberPost*> numbers = flowpostprocessing->GetNumbers();
    std::string out = "";

    for (int i = 0; i < numbers.size(); ++i)
    {
        out = out + numbers[i]->name + "\t";
        switch (_type) {
            case READOUT_TYPE_VALUE:
                out = out + numbers[i]->ReturnValue;
                break;
            case READOUT_TYPE_PREVALUE:
                out = out + numbers[i]->ReturnPreValue;
                break;
            case READOUT_TYPE_RAWVALUE:
                out = out + numbers[i]->ReturnRawValue;
                break;
            case READOUT_TYPE_ERROR:
                out = out + numbers[i]->ErrorMessageText;
                break;
        }
        if (i < numbers.size()-1)
            out = out + "\r\n";
    }

    // printf("OUT: %s", out.c_str());

    return out;
}
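// Illustrative sketch, not from the commits above: getReadoutAll() returns one
// "<name>\t<value>" line per configured number, selected by the READOUT_TYPE_*
// constants from ClassFlowControll.h. A hypothetical request handler could
// expose all four views like this (the function and parameter names are made
// up; only getReadoutAll() and the constants come from the code above):
std::string readout_for_uri_param(ClassFlowControll &flow, const std::string &type)
{
    if (type == "prevalue")  return flow.getReadoutAll(READOUT_TYPE_PREVALUE);
    if (type == "raw")       return flow.getReadoutAll(READOUT_TYPE_RAWVALUE);
    if (type == "error")     return flow.getReadoutAll(READOUT_TYPE_ERROR);
    return flow.getReadoutAll(READOUT_TYPE_VALUE);   // default: the checked value
}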
string ClassFlowControll::getReadout(bool _rawvalue = false, bool _noerror = false)
{
    if (flowpostprocessing)
@@ -281,17 +334,17 @@ string ClassFlowControll::getReadout(bool _rawvalue = false, bool _noerror = fal
    return result;
}

string ClassFlowControll::GetPrevalue()
string ClassFlowControll::GetPrevalue(std::string _number)
{
    if (flowpostprocessing)
    {
        return flowpostprocessing->GetPreValue();
        return flowpostprocessing->GetPreValue(_number);
    }

    return std::string();
}

std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue)
std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue, std::string _numbers, bool _extern)
{
    float zw;
    char* p;
@@ -313,7 +366,7 @@ std::string ClassFlowControll::UpdatePrevalue(std::string _newvalue)

    if (flowpostprocessing)
    {
        flowpostprocessing->SavePreValue(zw);
        flowpostprocessing->SetPreValue(zw, _numbers, _extern);
        return _newvalue;
    }

@@ -405,6 +458,7 @@ bool ClassFlowControll::ReadParameter(FILE* pfile, string& aktparamgraph)
    return true;
}


int ClassFlowControll::CleanTempFolder() {
    const char* folderPath = "/sdcard/img_tmp";

@@ -438,7 +492,7 @@ int ClassFlowControll::CleanTempFolder() {

esp_err_t ClassFlowControll::SendRawJPG(httpd_req_t *req)
{
    return flowmakeimage->SendRawJPG(req);
    return flowmakeimage != NULL ? flowmakeimage->SendRawJPG(req) : ESP_FAIL;
}


@@ -450,6 +504,12 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
    esp_err_t result = ESP_FAIL;
    bool Dodelete = false;

    if (flowalignment == NULL)
    {
        printf("Can't continue, flowalignment is NULL\n");
        return ESP_FAIL;
    }

    if (_fn == "alg.jpg")
    {
        _send = flowalignment->ImageBasis;
@@ -481,7 +541,9 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
            if (htmlinfo[i]->image_org)
                _send = htmlinfo[i]->image_org;
        }
        delete htmlinfo[i];
    }
    htmlinfo.clear();

    htmlinfo = GetAllAnalog();
    for (int i = 0; i < htmlinfo.size(); ++i)
@@ -496,7 +558,9 @@ esp_err_t ClassFlowControll::GetJPGStream(std::string _fn, httpd_req_t *req)
            if (htmlinfo[i]->image_org)
                _send = htmlinfo[i]->image_org;
        }
        delete htmlinfo[i];
    }
    htmlinfo.clear();

    if (_send)
    {

@@ -11,6 +11,12 @@
#include "ClassFlowMQTT.h"


#define READOUT_TYPE_VALUE 0
#define READOUT_TYPE_PREVALUE 1
#define READOUT_TYPE_RAWVALUE 2
#define READOUT_TYPE_ERROR 3


class ClassFlowControll :
    public ClassFlow
{
@@ -38,10 +44,13 @@ public:
    void doFlowMakeImageOnly(string time);
    bool getStatusSetupModus(){return SetupModeActive;};
    string getReadout(bool _rawvalue, bool _noerror);
    string UpdatePrevalue(std::string _newvalue);
    string GetPrevalue();
    string getReadoutAll(int _type);
    string UpdatePrevalue(std::string _newvalue, std::string _numbers, bool _extern);
    string GetPrevalue(std::string _number = "");
    bool ReadParameter(FILE* pfile, string& aktparamgraph);

    string GetMQTTMainTopic();

    esp_err_t GetJPGStream(std::string _fn, httpd_req_t *req);
    esp_err_t SendRawJPG(httpd_req_t *req);

@@ -26,7 +26,8 @@ void ClassFlowDigit::SetInitialParameter(void)
|
||||
previousElement = NULL;
|
||||
SaveAllFiles = false;
|
||||
disabled = false;
|
||||
|
||||
DecimalShift = 0;
|
||||
DecimalShiftEnabled = false;
|
||||
}
|
||||
|
||||
ClassFlowDigit::ClassFlowDigit() : ClassFlowImage(TAG)
|
||||
@@ -63,16 +64,16 @@ ClassFlowDigit::ClassFlowDigit(std::vector<ClassFlow*>* lfc, ClassFlow *_prev) :
|
||||
}
|
||||
}
|
||||
|
||||
string ClassFlowDigit::getReadout()
|
||||
string ClassFlowDigit::getReadout(int _digit = 0)
|
||||
{
|
||||
string rst = "";
|
||||
|
||||
for (int i = 0; i < ROI.size(); ++i)
|
||||
for (int i = 0; i < DIGIT[_digit]->ROI.size(); ++i)
|
||||
{
|
||||
if (ROI[i]->resultklasse == 10)
|
||||
if (DIGIT[_digit]->ROI[i]->resultklasse == 10)
|
||||
rst = rst + "N";
|
||||
else
|
||||
rst = rst + std::to_string(ROI[i]->resultklasse);
|
||||
rst = rst + std::to_string(DIGIT[_digit]->ROI[i]->resultklasse);
|
||||
}
|
||||
|
||||
return rst;
|
||||
@@ -88,9 +89,19 @@ bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
if (!this->GetNextParagraph(pfile, aktparamgraph))
|
||||
return false;
|
||||
|
||||
if ((aktparamgraph.compare("[Digits]") != 0) && (aktparamgraph.compare(";[Digits]") != 0)) // Paragraph passt nich zu MakeImage
|
||||
printf("aktparamgraph: %s\n", aktparamgraph.c_str());
|
||||
|
||||
if ((aktparamgraph.compare(0, 7, "[Digits") != 0) && (aktparamgraph.compare(0, 8, ";[Digits") != 0)) // Paragraph passt nich zu MakeImage
|
||||
return false;
|
||||
|
||||
int _pospkt = aktparamgraph.find_first_of(".");
|
||||
int _posklammerzu = aktparamgraph.find_first_of("]");
|
||||
if (_pospkt > -1)
|
||||
NameDigit = aktparamgraph.substr(_pospkt+1, _posklammerzu - _pospkt-1);
|
||||
else
|
||||
NameDigit = "";
|
||||
printf("Name Digit: %s\n", NameDigit.c_str());
|
||||
|
||||
if (aktparamgraph[0] == ';')
|
||||
{
|
||||
disabled = true;
|
||||
@@ -119,8 +130,8 @@ bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
}
|
||||
if (zerlegt.size() >= 5)
|
||||
{
|
||||
roi* neuroi = new roi;
|
||||
neuroi->name = zerlegt[0];
|
||||
digit* _digit = GetDIGIT(zerlegt[0], true);
|
||||
roi* neuroi = _digit->ROI[_digit->ROI.size()-1];
|
||||
neuroi->posx = std::stoi(zerlegt[1]);
|
||||
neuroi->posy = std::stoi(zerlegt[2]);
|
||||
neuroi->deltax = std::stoi(zerlegt[3]);
|
||||
@@ -128,7 +139,6 @@ bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
neuroi->resultklasse = -1;
|
||||
neuroi->image = NULL;
|
||||
neuroi->image_org = NULL;
|
||||
ROI.push_back(neuroi);
|
||||
}
|
||||
|
||||
if ((toUpper(zerlegt[0]) == "SAVEALLFILES") && (zerlegt.size() > 1))
|
||||
@@ -139,15 +149,72 @@ bool ClassFlowDigit::ReadParameter(FILE* pfile, string& aktparamgraph)
|
||||
|
||||
}
|
||||
|
||||
for (int i = 0; i < ROI.size(); ++i)
|
||||
{
|
||||
ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
|
||||
ROI[i]->image_org = new CImageBasis(ROI[i]->deltax, ROI[i]->deltay, 3);
|
||||
}
|
||||
for (int _dig = 0; _dig < DIGIT.size(); ++_dig)
|
||||
for (int i = 0; i < DIGIT[_dig]->ROI.size(); ++i)
|
||||
{
|
||||
DIGIT[_dig]->ROI[i]->image = new CImageBasis(modelxsize, modelysize, 3);
|
||||
DIGIT[_dig]->ROI[i]->image_org = new CImageBasis(DIGIT[_dig]->ROI[i]->deltax, DIGIT[_dig]->ROI[i]->deltay, 3);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
digit* ClassFlowDigit::FindDIGIT(string _name_number)
|
||||
{
|
||||
for (int i = 0; i < DIGIT.size(); ++i)
|
||||
{
|
||||
if (DIGIT[i]->name == _name_number)
|
||||
return DIGIT[i];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
digit* ClassFlowDigit::GetDIGIT(string _name, bool _create = true)
|
||||
{
|
||||
string _digit, _roi;
|
||||
int _pospunkt = _name.find_first_of(".");
|
||||
// printf("Name: %s, Pospunkt: %d\n", _name.c_str(), _pospunkt);
|
||||
if (_pospunkt > -1)
|
||||
{
|
||||
_digit = _name.substr(0, _pospunkt);
|
||||
_roi = _name.substr(_pospunkt+1, _name.length() - _pospunkt - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
_digit = "default";
|
||||
_roi = _name;
|
||||
}
|
||||
|
||||
digit *_ret = NULL;
|
||||
|
||||
for (int i = 0; i < DIGIT.size(); ++i)
|
||||
{
|
||||
if (DIGIT[i]->name == _digit)
|
||||
_ret = DIGIT[i];
|
||||
}
|
||||
|
||||
if (!_create) // nicht gefunden und soll auch nicht erzeugt werden, ggf. geht eine NULL zurück
|
||||
return _ret;
|
||||
|
||||
if (_ret == NULL)
|
||||
{
|
||||
_ret = new digit;
|
||||
_ret->name = _digit;
|
||||
DIGIT.push_back(_ret);
|
||||
}
|
||||
|
||||
roi* neuroi = new roi;
|
||||
neuroi->name = _roi;
|
||||
_ret->ROI.push_back(neuroi);
|
||||
|
||||
printf("GetDIGIT - digit %s - roi %s\n", _digit.c_str(), _roi.c_str());
|
||||
|
||||
return _ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
string ClassFlowDigit::getHTMLSingleStep(string host)
|
||||
{
|
||||
@@ -198,17 +265,32 @@ bool ClassFlowDigit::doAlignAndCut(string time)
|
||||
|
||||
CAlignAndCutImage *caic = flowpostalignment->GetAlignAndCutImage();
|
||||
|
||||
for (int i = 0; i < ROI.size(); ++i)
|
||||
for (int _dig = 0; _dig < DIGIT.size(); ++_dig)
|
||||
{
|
||||
printf("DigitalDigit %d - Align&Cut\n", i);
|
||||
printf("DIGIT[_dig]->ROI.size() %d\n", DIGIT[_dig]->ROI.size());
|
||||
for (int i = 0; i < DIGIT[_dig]->ROI.size(); ++i)
|
||||
{
|
||||
printf("DigitalDigit %d - Align&Cut\n", i);
|
||||
|
||||
caic->CutAndSave(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, ROI[i]->image_org);
|
||||
if (SaveAllFiles) ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".jpg"));
|
||||
caic->CutAndSave(DIGIT[_dig]->ROI[i]->posx, DIGIT[_dig]->ROI[i]->posy, DIGIT[_dig]->ROI[i]->deltax, DIGIT[_dig]->ROI[i]->deltay, DIGIT[_dig]->ROI[i]->image_org);
|
||||
if (SaveAllFiles)
|
||||
{
|
||||
if (DIGIT[_dig]->name == "default")
|
||||
DIGIT[_dig]->ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + DIGIT[_dig]->ROI[i]->name + ".jpg"));
|
||||
else
|
||||
DIGIT[_dig]->ROI[i]->image_org->SaveToFile(FormatFileName("/sdcard/img_tmp/" + DIGIT[_dig]->name + "_" + DIGIT[_dig]->ROI[i]->name + ".jpg"));
|
||||
}
|
||||
|
||||
ROI[i]->image_org->Resize(modelxsize, modelysize, ROI[i]->image);
|
||||
if (SaveAllFiles) ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + ROI[i]->name + ".bmp"));
|
||||
DIGIT[_dig]->ROI[i]->image_org->Resize(modelxsize, modelysize, DIGIT[_dig]->ROI[i]->image);
|
||||
if (SaveAllFiles)
|
||||
{
|
||||
if (DIGIT[_dig]->name == "default")
|
||||
DIGIT[_dig]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + DIGIT[_dig]->ROI[i]->name + ".bmp"));
|
||||
else
|
||||
DIGIT[_dig]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + DIGIT[_dig]->name + "_" + DIGIT[_dig]->ROI[i]->name + ".bmp"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -223,26 +305,32 @@ bool ClassFlowDigit::doNeuralNetwork(string time)

    CTfLiteClass *tflite = new CTfLiteClass;
    string zwcnn = FormatFileName("/sdcard" + cnnmodelfile);
    printf(zwcnn.c_str());printf("\n");
    tflite->LoadModel(zwcnn);
    if (!tflite->LoadModel(zwcnn)) {
        printf("Can't read model file /sdcard%s\n", cnnmodelfile.c_str());
        delete tflite;
        return false;
    }

    tflite->MakeAllocate();
#endif

    for (int i = 0; i < ROI.size(); ++i)
    {
        printf("DigitalDigit %d - TfLite\n", i);

        ROI[i]->resultklasse = 0;
#ifndef OHNETFLITE
        ROI[i]->resultklasse = tflite->GetClassFromImageBasis(ROI[i]->image);

#endif
        printf("Result Digit%i: %d\n", i, ROI[i]->resultklasse);

        if (isLogImage)
    for (int _dig = 0; _dig < DIGIT.size(); ++_dig)
        for (int i = 0; i < DIGIT[_dig]->ROI.size(); ++i)
        {
            LogImage(logPath, ROI[i]->name, NULL, &ROI[i]->resultklasse, time, ROI[i]->image_org);
            printf("DigitalDigit %d - TfLite\n", i);

            DIGIT[_dig]->ROI[i]->resultklasse = 0;
#ifndef OHNETFLITE
            DIGIT[_dig]->ROI[i]->resultklasse = tflite->GetClassFromImageBasis(DIGIT[_dig]->ROI[i]->image);

#endif
            printf("Result Digit%i: %d\n", i, DIGIT[_dig]->ROI[i]->resultklasse);

            if (isLogImage)
            {
                LogImage(logPath, DIGIT[_dig]->ROI[i]->name, NULL, &DIGIT[_dig]->ROI[i]->resultklasse, time, DIGIT[_dig]->ROI[i]->image_org);
            }
        }
    }
#ifndef OHNETFLITE
    delete tflite;
#endif
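The new guard around LoadModel() aborts the flow step cleanly when the .tflite file cannot be read, instead of running inference on an unloaded model. A hedged sketch of the resulting call pattern (the model path is illustrative):

    CTfLiteClass *tflite = new CTfLiteClass;
    if (!tflite->LoadModel(FormatFileName("/sdcard/config/model.tflite"))) {
        delete tflite;      // missing or unreadable model: clean up ...
        return false;       // ... and let doFlow() report the failure
    }
    tflite->MakeAllocate();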
@@ -251,25 +339,82 @@ bool ClassFlowDigit::doNeuralNetwork(string time)

void ClassFlowDigit::DrawROI(CImageBasis *_zw)
{
    for (int i = 0; i < ROI.size(); ++i)
        _zw->drawRect(ROI[i]->posx, ROI[i]->posy, ROI[i]->deltax, ROI[i]->deltay, 0, 0, 255, 2);
    for (int _dig = 0; _dig < DIGIT.size(); ++_dig)
        for (int i = 0; i < DIGIT[_dig]->ROI.size(); ++i)
            _zw->drawRect(DIGIT[_dig]->ROI[i]->posx, DIGIT[_dig]->ROI[i]->posy, DIGIT[_dig]->ROI[i]->deltax, DIGIT[_dig]->ROI[i]->deltay, 0, 0, (255 - _dig*100), 2);
}

std::vector<HTMLInfo*> ClassFlowDigit::GetHTMLInfo()
{
    std::vector<HTMLInfo*> result;

    for (int i = 0; i < ROI.size(); ++i)
    {
        HTMLInfo *zw = new HTMLInfo;
        zw->filename = ROI[i]->name + ".bmp";
        zw->filename_org = ROI[i]->name + ".jpg";
        zw->val = ROI[i]->resultklasse;
        zw->image = ROI[i]->image;
        zw->image_org = ROI[i]->image_org;
        result.push_back(zw);
    }
    for (int _dig = 0; _dig < DIGIT.size(); ++_dig)
        for (int i = 0; i < DIGIT[_dig]->ROI.size(); ++i)
        {
            if (DIGIT[_dig]->name == "default")
                DIGIT[_dig]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + DIGIT[_dig]->ROI[i]->name + ".bmp"));
            else
                DIGIT[_dig]->ROI[i]->image->SaveToFile(FormatFileName("/sdcard/img_tmp/" + DIGIT[_dig]->name + "_" + DIGIT[_dig]->ROI[i]->name + ".bmp"));


            HTMLInfo *zw = new HTMLInfo;
            if (DIGIT[_dig]->name == "default")
            {
                zw->filename = DIGIT[_dig]->ROI[i]->name + ".bmp";
                zw->filename_org = DIGIT[_dig]->ROI[i]->name + ".jpg";
            }
            else
            {
                zw->filename = DIGIT[_dig]->name + "_" + DIGIT[_dig]->ROI[i]->name + ".bmp";
                zw->filename_org = DIGIT[_dig]->name + "_" + DIGIT[_dig]->ROI[i]->name + ".jpg";
            }

            zw->val = DIGIT[_dig]->ROI[i]->resultklasse;
            zw->image = DIGIT[_dig]->ROI[i]->image;
            zw->image_org = DIGIT[_dig]->ROI[i]->image_org;
            result.push_back(zw);
        }

    return result;
}

int ClassFlowDigit::getAnzahlDIGIT()
{
    return DIGIT.size();
}

string ClassFlowDigit::getNameDIGIT(int _digit)
{
    if (_digit < DIGIT.size())
        return DIGIT[_digit]->name;

    return "DIGIT DOES NOT EXIST";
}

digit* ClassFlowDigit::GetDIGIT(int _digit)
{
    if (_digit < DIGIT.size())
        return DIGIT[_digit];

    return NULL;
}

void ClassFlowDigit::UpdateNameNumbers(std::vector<std::string> *_name_numbers)
{
    for (int _dig = 0; _dig < DIGIT.size(); _dig++)
    {
        std::string _name = DIGIT[_dig]->name;
        bool found = false;
        for (int i = 0; i < (*_name_numbers).size(); ++i)
        {
            if ((*_name_numbers)[i] == _name)
                found = true;
        }
        if (!found)
            (*_name_numbers).push_back(_name);
    }
}

@@ -5,6 +5,8 @@

#include <string>


struct roi {
    int posx, posy, deltax, deltay;
    int resultklasse;
@@ -13,20 +15,31 @@ struct roi {
    roi* next;
};

struct digit {
    string name;
    std::vector<roi*> ROI;
};

class ClassFlowDigit :
    public ClassFlowImage
{
protected:
    std::vector<roi*> ROI;
//    std::vector<roi*> ROI;
    std::vector<digit*> DIGIT;
    string cnnmodelfile;
    int modelxsize, modelysize;
    bool SaveAllFiles;
    string NameDigit;
    int DecimalShift;
    bool DecimalShiftEnabled;


    ClassFlowAlignment* flowpostalignment;

    bool doNeuralNetwork(string time);
    bool doAlignAndCut(string time);


    void SetInitialParameter(void);

public:
@@ -36,9 +49,18 @@ public:
    bool ReadParameter(FILE* pfile, string& aktparamgraph);
    bool doFlow(string time);
    string getHTMLSingleStep(string host);
    string getReadout();
    string getReadout(int _digit);
    std::vector<HTMLInfo*> GetHTMLInfo();

    int getAnzahlDIGIT();
    digit* GetDIGIT(int _digit);
    digit* GetDIGIT(string _name, bool _create);
    digit* FindDIGIT(string _name_number);

    string getNameDIGIT(int _digit);

    void UpdateNameNumbers(std::vector<std::string> *_name_numbers);

    void DrawROI(CImageBasis *_zw);

    string name(){return "ClassFlowDigit";};

@@ -1,6 +1,8 @@
#include <sstream>
#include "ClassFlowMQTT.h"
#include "Helper.h"

#include "time_sntp.h"
#include "interface_mqtt.h"
#include "ClassFlowPostProcessing.h"

@@ -13,6 +15,11 @@ void ClassFlowMQTT::SetInitialParameter(void)
    topicError = "";
    topicRate = "";
    topicTimeStamp = "";
    maintopic = "";
    mainerrortopic = "";

    topicUptime = "";
    topicFreeMem = "";
    clientname = "watermeter";
    OldValue = "";
    flowpostprocessing = NULL;
@@ -21,6 +28,9 @@ void ClassFlowMQTT::SetInitialParameter(void)
    previousElement = NULL;
    ListFlowControll = NULL;
    disabled = false;
    MQTTenable = false;



}

@@ -88,53 +98,98 @@ bool ClassFlowMQTT::ReadParameter(FILE* pfile, string& aktparamgraph)
        {
            this->uri = zerlegt[1];
        }
        if ((toUpper(zerlegt[0]) == "TOPIC") && (zerlegt.size() > 1))
        {
            this->topic = zerlegt[1];
        }
        if ((toUpper(zerlegt[0]) == "TOPICERROR") && (zerlegt.size() > 1))
        {
            this->topicError = zerlegt[1];
        }
        if ((toUpper(zerlegt[0]) == "TOPICRATE") && (zerlegt.size() > 1))
        {
            this->topicRate = zerlegt[1];
        }
        if ((toUpper(zerlegt[0]) == "TOPICTIMESTAMP") && (zerlegt.size() > 1))
        {
            this->topicTimeStamp = zerlegt[1];
        }

        if ((toUpper(zerlegt[0]) == "CLIENTID") && (zerlegt.size() > 1))
        {
            this->clientname = zerlegt[1];
        }

        if (((toUpper(zerlegt[0]) == "TOPIC") || (toUpper(zerlegt[0]) == "MAINTOPIC")) && (zerlegt.size() > 1))
        {
            maintopic = zerlegt[1];
        }
    }

    if ((uri.length() > 0) && (topic.length() > 0))
    if (!MQTTisConnected() && (uri.length() > 0) && (maintopic.length() > 0))
    {
        MQTTInit(uri, clientname, user, password, topicError, 60);
        mainerrortopic = maintopic + "/connection";
        MQTTInit(uri, clientname, user, password, mainerrortopic, 60);
        MQTTPublish(mainerrortopic, "connected");
        MQTTenable = true;
    }

    return true;
}

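With this change the MQTT setup only needs a broker URI and a main topic; the client then announces itself under <maintopic>/connection, which apparently also serves as the LWT topic passed to MQTTInit(). A hedged config sketch (the exact key spelling for the URI line is assumed here, values are placeholders):

    [MQTT]
    Uri = mqtt://192.168.1.2:1883
    MainTopic = watermeter
    ClientID = watermeter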
string ClassFlowMQTT::GetMQTTMainTopic()
{
    return maintopic;
}


bool ClassFlowMQTT::doFlow(string zwtime)
{
    if (!MQTTenable)
        return true;

    std::string result;
    std::string resulterror = "";
    std::string resultrate = "";
    std::string resulttimestamp = "";
    string zw = "";
    string namenumber = "";

    MQTTPublish(mainerrortopic, "connected");

    zw = maintopic + "/" + "uptime";
    char uptimeStr[11];
    sprintf(uptimeStr, "%ld", (long)getUpTime());
    MQTTPublish(zw, uptimeStr);

    zw = maintopic + "/" + "freeMem";
    char freeheapmem[11];
    sprintf(freeheapmem, "%zu", esp_get_free_heap_size());
    MQTTPublish(zw, freeheapmem);

    if (flowpostprocessing)
    {
        result = flowpostprocessing->getReadoutParam(false, true);
        resulterror = flowpostprocessing->getReadoutError();
        resultrate = flowpostprocessing->getReadoutRate();
        resulttimestamp = flowpostprocessing->getReadoutTimeStamp();
        std::vector<NumberPost*> NUMBERS = flowpostprocessing->GetNumbers();

        for (int i = 0; i < NUMBERS.size(); ++i)
        {
            result = NUMBERS[i]->ReturnValueNoError;
            resulterror = NUMBERS[i]->ErrorMessageText;
            resultrate = std::to_string(NUMBERS[i]->FlowRateAct);
            resulttimestamp = NUMBERS[i]->timeStamp;

            namenumber = NUMBERS[i]->name;
            if (namenumber == "default")
                namenumber = maintopic + "/";
            else
                namenumber = maintopic + "/" + namenumber + "/";

            zw = namenumber + "value";
            MQTTPublish(zw, result);

            zw = namenumber + "error";
            MQTTPublish(zw, resulterror, 1);

            zw = namenumber + "rate";
            MQTTPublish(zw, resultrate);

            zw = namenumber + "timestamp";
            MQTTPublish(zw, resulttimestamp);


            std::string json="{\"value\":"+result;
            json += ",\"error\":\""+resulterror;
            json += "\",\"rate\":"+resultrate;
            json += ",\"timestamp\":\""+resulttimestamp+"\"}";

            zw = namenumber + "json";
            MQTTPublish(zw, json);
        }
    }
    else
    {
@@ -149,24 +204,7 @@ bool ClassFlowMQTT::doFlow(string zwtime)
                result = result + "\t" + zw;
            }
        }
    }

    MQTTPublish(topic, result);

    if (topicError.length() > 0) {
        if (resulterror.length() == 0)
        {
            resulterror = " ";
        }
        MQTTPublish(topicError, resulterror);
    }

    if (topicRate.length() > 0) {
        MQTTPublish(topicRate, resultrate);
    }

    if (topicRate.length() > 0) {
        MQTTPublish(topicTimeStamp, resulttimestamp);
        MQTTPublish(topic, result);
    }

    OldValue = result;

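The per-number publishing above yields a topic tree like the following (assuming MainTopic = watermeter and a number named "main"; all values are examples):

    watermeter/uptime          4711
    watermeter/freeMem         123456
    watermeter/main/value      123.456
    watermeter/main/error      no error          (published retained)
    watermeter/main/rate       0.000000
    watermeter/main/timestamp  2021-01-01T12:00:00
    watermeter/main/json       {"value":123.456,"error":"no error","rate":0.000000,"timestamp":"2021-01-01T12:00:00"}

For the "default" number the name segment is dropped, i.e. watermeter/value and so on.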
@@ -9,10 +9,13 @@ class ClassFlowMQTT :
    public ClassFlow
{
protected:
    std::string uri, topic, topicError, clientname, topicRate, topicTimeStamp;
    std::string uri, topic, topicError, clientname, topicRate, topicTimeStamp, topicUptime, topicFreeMem;
    std::string OldValue;
    ClassFlowPostProcessing* flowpostprocessing;
    std::string user, password;
    bool MQTTenable;

    std::string maintopic, mainerrortopic;
    void SetInitialParameter(void);

public:
@@ -20,6 +23,8 @@ public:
    ClassFlowMQTT(std::vector<ClassFlow*>* lfc);
    ClassFlowMQTT(std::vector<ClassFlow*>* lfc, ClassFlow *_prev);

    string GetMQTTMainTopic();

    bool ReadParameter(FILE* pfile, string& aktparamgraph);
    bool doFlow(string time);
    string name(){return "ClassFlowMQTT";};

@@ -14,6 +14,9 @@ static const char* TAG = "flow_make_image";
esp_err_t ClassFlowMakeImage::camera_capture(){
    string nm = namerawimage;
    Camera.CaptureToFile(nm);
    time(&TimeImageTaken);
    localtime(&TimeImageTaken);

    return ESP_OK;
}

@@ -23,7 +26,11 @@ void ClassFlowMakeImage::takePictureWithFlash(int flashdauer)
    rawImage->width = image_width;
    rawImage->height = image_height;
    /////////////////////////////////////////////////////////////////////////////////////
    printf("Flashdauer: %d\n", flashdauer);
    Camera.CaptureToBasisImage(rawImage, flashdauer);
    time(&TimeImageTaken);
    localtime(&TimeImageTaken);

    if (SaveAllFiles) rawImage->SaveToFile(namerawimage);
}

@@ -87,6 +94,12 @@ bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
        SaveAllFiles = true;
    }

    if ((toUpper(zerlegt[0]) == "WAITBEFORETAKINGPICTURE") && (zerlegt.size() > 1))
    {
        waitbeforepicture = stoi(zerlegt[1]);
    }


    if ((toUpper(zerlegt[0]) == "BRIGHTNESS") && (zerlegt.size() > 1))
    {
        _brightness = stoi(zerlegt[1]);
@@ -118,9 +131,9 @@ bool ClassFlowMakeImage::ReadParameter(FILE* pfile, string& aktparamgraph)
    rawImage->CreateEmptyImage(image_width, image_height, 3);

    waitbeforepicture_store = waitbeforepicture;
    if (FixedExposure)
    if (FixedExposure && (waitbeforepicture > 0))
    {
        printf("Fixed Exposure enabled!\n");
//        printf("Fixed Exposure enabled!\n");
        int flashdauer = (int) (waitbeforepicture * 1000);
        Camera.EnableAutoExposure(flashdauer);
        waitbeforepicture = 0.2;
@@ -169,6 +182,9 @@ bool ClassFlowMakeImage::doFlow(string zwtime)
esp_err_t ClassFlowMakeImage::SendRawJPG(httpd_req_t *req)
{
    int flashdauer = (int) (waitbeforepicture * 1000);
    time(&TimeImageTaken);
    localtime(&TimeImageTaken);

    return Camera.CaptureToHTTP(req, flashdauer);
}

@@ -179,6 +195,9 @@ ImageData* ClassFlowMakeImage::SendRawImage()
    ImageData *id;
    int flashdauer = (int) (waitbeforepicture * 1000);
    Camera.CaptureToBasisImage(zw, flashdauer);
    time(&TimeImageTaken);
    localtime(&TimeImageTaken);

    id = zw->writeToMemoryAsJPG();
    delete zw;
    return id;

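A note on the fixed-exposure change: the configured wait time doubles as the flash burn time used once to lock the exposure, after which the per-image wait drops to a fixed 0.2 s. A sketch of the arithmetic (the value 5 is only an example):

    int flashdauer = (int) (waitbeforepicture * 1000);   // 5 s -> 5000 ms calibration flash
    Camera.EnableAutoExposure(flashdauer);
    waitbeforepicture = 0.2;                             // later captures only wait 0.2 s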
@@ -1,8 +1,6 @@
#include "ClassFlowPostProcessing.h"

#include "Helper.h"
#include "ClassFlowAnalog.h"
#include "ClassFlowDigit.h"
#include "ClassFlowMakeImage.h"
#include "ClassLogFile.h"

@@ -11,130 +9,223 @@

#include <time.h>

string ClassFlowPostProcessing::GetPreValue()
#include "time_sntp.h"


#define PREVALUE_TIME_FORMAT_OUTPUT "%Y-%m-%dT%H:%M:%S"
#define PREVALUE_TIME_FORMAT_INPUT "%d-%d-%dT%d:%d:%d"


string ClassFlowPostProcessing::GetPreValue(std::string _number)
{
    std::string result;
    bool isAnalog = false;
    bool isDigit = false;
    int index = -1;

    int AnzahlAnalog = 0;
    result = RundeOutput(PreValue, -DecimalShift);
    if (_number == "")
        _number = "default";

    for (int i = 0; i < ListFlowControll->size(); ++i)
    {
        if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
        {
            isAnalog = true;
            AnzahlAnalog = ((ClassFlowAnalog*)(*ListFlowControll)[i])->AnzahlROIs();
        }
        if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
        {
            isDigit = true;
        }
    }
    for (int i = 0; i < NUMBERS.size(); ++i)
        if (NUMBERS[i]->name == _number)
            index = i;

    if (isDigit && isAnalog)
        result = RundeOutput(PreValue, AnzahlAnalog - DecimalShift);
//    result = RundeOutput(NUMBERS[index]->PreValue, -NUMBERS[index]->DecimalShift);
    result = RundeOutput(NUMBERS[index]->PreValue, NUMBERS[index]->Nachkomma);

//    if (NUMBERS[index]->digit_roi && NUMBERS[index]->analog_roi)
//        result = RundeOutput(NUMBERS[index]->PreValue, NUMBERS[index]->AnzahlAnalog - NUMBERS[index]->DecimalShift);

    return result;
}

void ClassFlowPostProcessing::SetPreValue(float zw, string _numbers, bool _extern)
{
    printf("SetPrevalue: %f, %s\n", zw, _numbers.c_str());
    for (int j = 0; j < NUMBERS.size(); ++j)
    {
//        printf("Number %d, %s\n", j, NUMBERS[j]->name.c_str());
        if (NUMBERS[j]->name == _numbers)
        {
            NUMBERS[j]->PreValue = zw;
            if (_extern)
            {
                time(&(NUMBERS[j]->lastvalue));
                localtime(&(NUMBERS[j]->lastvalue));
            }
//            printf("Found %d! - set to %f\n", j, NUMBERS[j]->PreValue);
        }
    }
    UpdatePreValueINI = true;
    SavePreValue();
}


bool ClassFlowPostProcessing::LoadPreValue(void)
{
    std::vector<string> zerlegt;
    FILE* pFile;
    char zw[1024];
    string zwtime, zwvalue;
    string zwtime, zwvalue, name;
    bool _done = false;

    UpdatePreValueINI = false;        // conversion to the new format


    pFile = fopen(FilePreValue.c_str(), "r");
    if (pFile == NULL)
        return false;

    fgets(zw, 1024, pFile);
    printf("%s", zw);
    printf("Read Zeile Prevalue.ini: %s", zw);
    zwtime = trim(std::string(zw));

    fgets(zw, 1024, pFile);
    fclose(pFile);
    printf("%s", zw);
    zwvalue = trim(std::string(zw));
    PreValue = stof(zwvalue.c_str());

    time_t tStart;
    int yy, month, dd, hh, mm, ss;
    struct tm whenStart;

    sscanf(zwtime.c_str(), "%d-%d-%dT%d:%d:%d", &yy, &month, &dd, &hh, &mm, &ss);
    whenStart.tm_year = yy - 1900;
    whenStart.tm_mon = month - 1;
    whenStart.tm_mday = dd;
    whenStart.tm_hour = hh;
    whenStart.tm_min = mm;
    whenStart.tm_sec = ss;
    whenStart.tm_isdst = -1;

    tStart = mktime(&whenStart);

    time(&lastvalue);
    localtime(&lastvalue);
    double difference = difftime(lastvalue, tStart);
    difference /= 60;
    if (difference > PreValueAgeStartup)
    if (zwtime.length() == 0)
        return false;

    Value = PreValue;
    ReturnValue = to_string(Value);
    ReturnValueNoError = ReturnValue;

    bool isAnalog = false;
    bool isDigit = false;
    int AnzahlAnalog = 0;

    for (int i = 0; i < ListFlowControll->size(); ++i)
    zerlegt = HelperZerlegeZeile(zwtime, "\t");
    if (zerlegt.size() > 1)           // new format
    {
        if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
            isAnalog = true;
        if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
            isDigit = true;
        while ((zerlegt.size() > 1) && !_done)
        {
            name = trim(zerlegt[0]);
            zwtime = trim(zerlegt[1]);
            zwvalue = trim(zerlegt[2]);

            for (int j = 0; j < NUMBERS.size(); ++j)
            {
                if (NUMBERS[j]->name == name)
                {
                    NUMBERS[j]->PreValue = stof(zwvalue.c_str());
                    NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);

                    time_t tStart;
                    int yy, month, dd, hh, mm, ss;
                    struct tm whenStart;

                    sscanf(zwtime.c_str(), PREVALUE_TIME_FORMAT_INPUT, &yy, &month, &dd, &hh, &mm, &ss);
                    whenStart.tm_year = yy - 1900;
                    whenStart.tm_mon = month - 1;
                    whenStart.tm_mday = dd;
                    whenStart.tm_hour = hh;
                    whenStart.tm_min = mm;
                    whenStart.tm_sec = ss;
                    whenStart.tm_isdst = -1;

                    NUMBERS[j]->lastvalue = mktime(&whenStart);

                    time(&tStart);
                    localtime(&tStart);
                    double difference = difftime(tStart, NUMBERS[j]->lastvalue);
                    difference /= 60;
                    if (difference > PreValueAgeStartup)
                    {
                        NUMBERS[j]->PreValueOkay = false;
                    }
                    else
                    {
                        NUMBERS[j]->PreValueOkay = true;
                        NUMBERS[j]->Value = NUMBERS[j]->PreValue;
                        NUMBERS[j]->ReturnValue = to_string(NUMBERS[j]->Value);
                        NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;

                        if (NUMBERS[j]->digit_roi || NUMBERS[j]->analog_roi)
                        {
                            NUMBERS[j]->ReturnValue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift);
                            NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;
                        }
                    }

                }
            }

            if (!fgets(zw, 1024, pFile))
                _done = true;
            else
            {
                printf("Read Zeile Prevalue.ini: %s", zw);
                zerlegt = HelperZerlegeZeile(trim(std::string(zw)), "\t");
                if (zerlegt.size() > 1)
                {
                    name = trim(zerlegt[0]);
                    zwtime = trim(zerlegt[1]);
                    zwvalue = trim(zerlegt[2]);
                }
            }
        }
        fclose(pFile);
    }

    if (isDigit || isAnalog)
    else                              // old format
    {
        ReturnValue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
        ReturnValueNoError = ReturnValue;
        fgets(zw, 1024, pFile);
        fclose(pFile);
        printf("%s", zw);
        zwvalue = trim(std::string(zw));
        NUMBERS[0]->PreValue = stof(zwvalue.c_str());

        time_t tStart;
        int yy, month, dd, hh, mm, ss;
        struct tm whenStart;

        sscanf(zwtime.c_str(), PREVALUE_TIME_FORMAT_INPUT, &yy, &month, &dd, &hh, &mm, &ss);
        whenStart.tm_year = yy - 1900;
        whenStart.tm_mon = month - 1;
        whenStart.tm_mday = dd;
        whenStart.tm_hour = hh;
        whenStart.tm_min = mm;
        whenStart.tm_sec = ss;
        whenStart.tm_isdst = -1;

        printf("TIME: %d, %d, %d, %d, %d, %d\n", whenStart.tm_year, whenStart.tm_mon, whenStart.tm_wday, whenStart.tm_hour, whenStart.tm_min, whenStart.tm_sec);

        NUMBERS[0]->lastvalue = mktime(&whenStart);

        time(&tStart);
        localtime(&tStart);
        double difference = difftime(tStart, NUMBERS[0]->lastvalue);
        difference /= 60;
        if (difference > PreValueAgeStartup)
            return false;

        NUMBERS[0]->Value = NUMBERS[0]->PreValue;
        NUMBERS[0]->ReturnValue = to_string(NUMBERS[0]->Value);
        NUMBERS[0]->ReturnValueNoError = NUMBERS[0]->ReturnValue;

        if (NUMBERS[0]->digit_roi || NUMBERS[0]->analog_roi)
        {
            NUMBERS[0]->ReturnValue = RundeOutput(NUMBERS[0]->Value, NUMBERS[0]->AnzahlAnalog - NUMBERS[0]->DecimalShift);
            NUMBERS[0]->ReturnValueNoError = NUMBERS[0]->ReturnValue;
        }

        UpdatePreValueINI = true;     // conversion to the new format
        SavePreValue();
    }

    return true;
}

void ClassFlowPostProcessing::SavePreValue(float value, string zwtime)
void ClassFlowPostProcessing::SavePreValue()
{
    FILE* pFile;
    string _zw;

    if (!UpdatePreValueINI)           // PreValues unchanged --> file does not need to be rewritten
        return;

    pFile = fopen(FilePreValue.c_str(), "w");

    if (strlen(zwtime.c_str()) == 0)
    for (int j = 0; j < NUMBERS.size(); ++j)
    {
        time_t rawtime;
        struct tm* timeinfo;
        char buffer[80];
        struct tm* timeinfo = localtime(&NUMBERS[j]->lastvalue);
        strftime(buffer, 80, PREVALUE_TIME_FORMAT_OUTPUT, timeinfo);
        NUMBERS[j]->timeStamp = std::string(buffer);
//        printf("SaverPreValue %d, Value: %f, Nachkomma %d\n", j, NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);

        time(&rawtime);
        timeinfo = localtime(&rawtime);
        _zw = NUMBERS[j]->name + "\t" + NUMBERS[j]->timeStamp + "\t" + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma) + "\n";
        printf("Write PreValue Zeile: %s\n", _zw.c_str());

        strftime(buffer, 80, "%Y-%m-%dT%H:%M:%S", timeinfo);
        timeStamp = std::string(buffer);
    }
    else
    {
        timeStamp = zwtime;
        fputs(_zw.c_str(), pFile);
    }

    PreValue = value;

    fputs(timeStamp.c_str(), pFile);
    fputs("\n", pFile);
    fputs(to_string(value).c_str(), pFile);
    fputs("\n", pFile);
    UpdatePreValueINI = false;

    fclose(pFile);
}
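prevalue.ini therefore changes from a two-line file to one tab-separated line per number; LoadPreValue() still accepts the old layout and converts it on the next save. Illustrative contents (names and values are made up):

    old format:
        2021-01-01T12:00:00
        123.456

    new format (name <TAB> timestamp <TAB> value):
        default	2021-01-01T12:00:00	123.456
        main	2021-01-01T12:00:00	77.001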
@@ -142,27 +233,105 @@ void ClassFlowPostProcessing::SavePreValue(float value, string zwtime)

ClassFlowPostProcessing::ClassFlowPostProcessing(std::vector<ClassFlow*>* lfc)
{
    FlowRateAct = 0;
//    FlowRateAct = 0;
    PreValueUse = false;
    PreValueAgeStartup = 30;
    AllowNegativeRates = false;
    MaxRateValue = 0.1;
    ErrorMessage = false;
    ListFlowControll = NULL;
    PreValueOkay = false;
    useMaxRateValue = false;
    checkDigitIncreaseConsistency = false;
    DecimalShift = 0;
    ErrorMessageText = "";
    timeStamp = "";
//    PreValueOkay = false;
//    DecimalShift = 0;
//    ErrorMessageText = "";
//    timeStamp = "";
    FilePreValue = FormatFileName("/sdcard/config/prevalue.ini");
    ListFlowControll = lfc;
    flowMakeImage = NULL;
    UpdatePreValueINI = false;

    for (int i = 0; i < ListFlowControll->size(); ++i)
    {
        if (((*ListFlowControll)[i])->name().compare("ClassFlowMakeImage") == 0)
        {
            flowMakeImage = (ClassFlowMakeImage*) (*ListFlowControll)[i];
        }
    }
}

void ClassFlowPostProcessing::handleDecimalSeparator(string _decsep, string _value)
{
    string _digit, _decpos;
    int _pospunkt = _decsep.find_first_of(".");
//    printf("Name: %s, Pospunkt: %d\n", _decsep.c_str(), _pospunkt);
    if (_pospunkt > -1)
        _digit = _decsep.substr(0, _pospunkt);
    else
        _digit = "default";

    for (int j = 0; j < NUMBERS.size(); ++j)
    {
        int _zwdc = 0;

        try
        {
            _zwdc = stoi(_value);
        }
        catch(const std::exception& e)
        {
            printf("ERROR - Decimalshift is not a number: %s\n", _value.c_str());
        }

        if (_digit == "default")      // set as the default first (in case nothing else is configured)
            NUMBERS[j]->DecimalShift = _zwdc;

        if (NUMBERS[j]->name == _digit)
            NUMBERS[j]->DecimalShift = _zwdc;

        NUMBERS[j]->Nachkomma = NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift;
    }
}

void ClassFlowPostProcessing::handleMaxRateValue(string _decsep, string _value)
{
    string _digit, _decpos;
    int _pospunkt = _decsep.find_first_of(".");
//    printf("Name: %s, Pospunkt: %d\n", _decsep.c_str(), _pospunkt);
    if (_pospunkt > -1)
        _digit = _decsep.substr(0, _pospunkt);
    else
        _digit = "default";

    for (int j = 0; j < NUMBERS.size(); ++j)
    {
        float _zwdc = 1;

        try
        {
            _zwdc = stof(_value);
        }
        catch(const std::exception& e)
        {
            printf("ERROR - MaxRateValue is not a number: %s\n", _value.c_str());
        }


        if (_digit == "default")      // set as the default first (in case nothing else is configured)
        {
            NUMBERS[j]->useMaxRateValue = true;
            NUMBERS[j]->MaxRateValue = _zwdc;
        }

        if (NUMBERS[j]->name == _digit)
        {
            NUMBERS[j]->useMaxRateValue = true;
            NUMBERS[j]->MaxRateValue = _zwdc;
        }
    }
}

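Both handlers accept an optional number prefix in the parameter name, split at the first dot; without a prefix the value becomes the default for every number. A hedged config sketch (the number name "main" and the exact line syntax are assumed for illustration):

    DecimalShift = 1          (applies to all numbers)
    DecimalShift.main = 2     (overrides number "main" only)
    MaxRateValue.main = 0.05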
bool ClassFlowPostProcessing::ReadParameter(FILE* pfile, string& aktparamgraph)
{
    std::vector<string> zerlegt;
    int _n;

    aktparamgraph = trim(aktparamgraph);

@@ -174,53 +343,148 @@ bool ClassFlowPostProcessing::ReadParameter(FILE* pfile, string& aktparamgraph)
    if (aktparamgraph.compare("[PostProcessing]") != 0)        // paragraph does not belong to MakeImage
        return false;

    InitNUMBERS();


    while (this->getNextLine(pfile, &aktparamgraph) && !this->isNewParagraph(aktparamgraph))
    {
        zerlegt = this->ZerlegeZeile(aktparamgraph);
        if ((toUpper(zerlegt[0]) == "DECIMALSHIFT") && (zerlegt.size() > 1))
        std::string _param = GetParameterName(zerlegt[0]);

        if ((toUpper(_param) == "DECIMALSHIFT") && (zerlegt.size() > 1))
        {
            DecimalShift = stoi(zerlegt[1]);
            handleDecimalSeparator(zerlegt[0], zerlegt[1]);
        }
        if ((toUpper(_param) == "MAXRATEVALUE") && (zerlegt.size() > 1))
        {
            handleMaxRateValue(zerlegt[0], zerlegt[1]);
        }

        if ((toUpper(zerlegt[0]) == "PREVALUEUSE") && (zerlegt.size() > 1))
        if ((toUpper(_param) == "PREVALUEUSE") && (zerlegt.size() > 1))
        {
            if (toUpper(zerlegt[1]) == "TRUE")
            {
                PreValueUse = true;
            }
        }
        if ((toUpper(zerlegt[0]) == "CHECKDIGITINCREASECONSISTENCY") && (zerlegt.size() > 1))
        if ((toUpper(_param) == "CHECKDIGITINCREASECONSISTENCY") && (zerlegt.size() > 1))
        {
            if (toUpper(zerlegt[1]) == "TRUE")
                checkDigitIncreaseConsistency = true;
                for (_n = 0; _n < NUMBERS.size(); ++_n)
                    NUMBERS[_n]->checkDigitIncreaseConsistency = true;
        }
        if ((toUpper(zerlegt[0]) == "ALLOWNEGATIVERATES") && (zerlegt.size() > 1))
        if ((toUpper(_param) == "ALLOWNEGATIVERATES") && (zerlegt.size() > 1))
        {
            if (toUpper(zerlegt[1]) == "TRUE")
                AllowNegativeRates = true;
                for (_n = 0; _n < NUMBERS.size(); ++_n)
                    NUMBERS[_n]->AllowNegativeRates = true;
        }
        if ((toUpper(zerlegt[0]) == "ERRORMESSAGE") && (zerlegt.size() > 1))
        if ((toUpper(_param) == "ERRORMESSAGE") && (zerlegt.size() > 1))
        {
            if (toUpper(zerlegt[1]) == "TRUE")
                ErrorMessage = true;
        }
        if ((toUpper(zerlegt[0]) == "PREVALUEAGESTARTUP") && (zerlegt.size() > 1))
        if ((toUpper(_param) == "PREVALUEAGESTARTUP") && (zerlegt.size() > 1))
        {
            PreValueAgeStartup = std::stoi(zerlegt[1]);
        }
        if ((toUpper(zerlegt[0]) == "MAXRATEVALUE") && (zerlegt.size() > 1))
        {
            useMaxRateValue = true;
            MaxRateValue = std::stof(zerlegt[1]);
        }
    }

    if (PreValueUse) {
        PreValueOkay = LoadPreValue();
        LoadPreValue();
    }

    return true;
}

void ClassFlowPostProcessing::InitNUMBERS()
{
//    ClassFlowDigit* _cdigit = NULL;
//    ClassFlowAnalog* _canalog = NULL;
    int anzDIGIT = 0;
    int anzANALOG = 0;
    std::vector<std::string> name_numbers;

    flowAnalog = NULL;
    flowDigit = NULL;

    for (int i = 0; i < ListFlowControll->size(); ++i)
    {
        if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
        {
            flowDigit = (ClassFlowDigit*) (*ListFlowControll)[i];
            anzDIGIT = flowDigit->getAnzahlDIGIT();
        }
        if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
        {
            flowAnalog = (ClassFlowAnalog*)(*ListFlowControll)[i];
            anzANALOG = flowAnalog->getAnzahlANALOG();
        }
    }

    if (flowDigit)
        flowDigit->UpdateNameNumbers(&name_numbers);
    if (flowAnalog)
        flowAnalog->UpdateNameNumbers(&name_numbers);

    printf("Anzahl NUMBERS: %d - DIGITS: %d, ANALOG: %d\n", name_numbers.size(), anzDIGIT, anzANALOG);

    for (int _num = 0; _num < name_numbers.size(); ++_num)
    {
        NumberPost *_number = new NumberPost;

        _number->name = name_numbers[_num];

        _number->digit_roi = NULL;
        if (flowDigit)
            _number->digit_roi = flowDigit->FindDIGIT(name_numbers[_num]);

        if (_number->digit_roi)
            _number->AnzahlDigital = _number->digit_roi->ROI.size();
        else
            _number->AnzahlDigital = 0;

        _number->analog_roi = NULL;
        if (flowAnalog)
            _number->analog_roi = flowAnalog->FindANALOG(name_numbers[_num]);


        if (_number->analog_roi)
            _number->AnzahlAnalog = _number->analog_roi->ROI.size();
        else
            _number->AnzahlAnalog = 0;

        _number->ReturnRawValue = "";        // raw value (with N & leading zeros)
        _number->ReturnValue = "";           // corrected return value, possibly with error message
        _number->ReturnValueNoError = "";    // corrected return value without error message
        _number->ErrorMessageText = "";      // error message from the consistency check
        _number->ReturnPreValue = "";
        _number->PreValueOkay = false;
        _number->AllowNegativeRates = false;
        _number->MaxRateValue = 0.1;
        _number->useMaxRateValue = false;
        _number->checkDigitIncreaseConsistency = false;
        _number->PreValueOkay = false;
        _number->useMaxRateValue = false;
        _number->DecimalShift = 0;

        _number->FlowRateAct = 0;            // m3 / min
        _number->PreValue = 0;               // last value that was read reliably
        _number->Value = 0;                  // last value read, incl. corrections
        _number->ReturnRawValue = "";        // raw value (with N & leading zeros)
        _number->ReturnValue = "";           // corrected return value, possibly with error message
        _number->ReturnValueNoError = "";    // corrected return value without error message
        _number->ErrorMessageText = "";      // error message from the consistency check

        _number->Nachkomma = _number->AnzahlAnalog;

        NUMBERS.push_back(_number);
    }

    for (int i = 0; i < NUMBERS.size(); ++i)
        printf("Number %s, Anz DIG: %d, Anz ANA %d\n", NUMBERS[i]->name.c_str(), NUMBERS[i]->AnzahlDigital, NUMBERS[i]->AnzahlAnalog);
}

string ClassFlowPostProcessing::ShiftDecimal(string in, int _decShift){

    if (_decShift == 0){
@@ -269,151 +533,127 @@ bool ClassFlowPostProcessing::doFlow(string zwtime)
    string digit = "";
    string analog = "";
    string zwvalue;
    bool isdigit = false;
    bool isanalog = false;
    int AnzahlAnalog = 0;
    string zw;
    time_t imagetime = 0;
    string rohwert;

    ErrorMessageText = "";


    for (int i = 0; i < ListFlowControll->size(); ++i)
    {
        if (((*ListFlowControll)[i])->name().compare("ClassFlowMakeImage") == 0)
        {
            imagetime = ((ClassFlowMakeImage*)(*ListFlowControll)[i])->getTimeImageTaken();
        }
        if (((*ListFlowControll)[i])->name().compare("ClassFlowDigit") == 0)
        {
            isdigit = true;
            digit = (*ListFlowControll)[i]->getReadout();
        }
        if (((*ListFlowControll)[i])->name().compare("ClassFlowAnalog") == 0)
        {
            isanalog = true;
            analog = (*ListFlowControll)[i]->getReadout();
            AnzahlAnalog = ((ClassFlowAnalog*)(*ListFlowControll)[i])->AnzahlROIs();
        }
    }
//    ErrorMessageText = "";

    imagetime = flowMakeImage->getTimeImageTaken();
    if (imagetime == 0)
        time(&imagetime);

    struct tm* timeinfo;
    timeinfo = localtime(&imagetime);

    char strftime_buf[64];
    strftime(strftime_buf, sizeof(strftime_buf), "%Y-%m-%dT%H:%M:%S", timeinfo);
    zwtime = std::string(strftime_buf);

    printf("Anzahl NUMBERS: %d\n", NUMBERS.size());

//    // TESTING ONLY////////////////////
//    isdigit = true; digit = "12N";
//    isanalog = true; analog = "456";

    ReturnRawValue = "";

    if (isdigit)
        ReturnRawValue = digit;
    if (isdigit && isanalog)
        ReturnRawValue = ReturnRawValue + ".";
    if (isanalog)
        ReturnRawValue = ReturnRawValue + analog;


    if (!isdigit)
    for (int j = 0; j < NUMBERS.size(); ++j)
    {
        AnzahlAnalog = 0;
    }
        NUMBERS[j]->ReturnRawValue = "";
        NUMBERS[j]->ErrorMessageText = "";

    ReturnRawValue = ShiftDecimal(ReturnRawValue, DecimalShift);
        if (NUMBERS[j]->digit_roi)
            NUMBERS[j]->ReturnRawValue = flowDigit->getReadout(j);
        if (NUMBERS[j]->digit_roi && NUMBERS[j]->analog_roi)
            NUMBERS[j]->ReturnRawValue = NUMBERS[j]->ReturnRawValue + ".";
        if (NUMBERS[j]->analog_roi)
            NUMBERS[j]->ReturnRawValue = NUMBERS[j]->ReturnRawValue + flowAnalog->getReadout(j);

    rohwert = ReturnRawValue;
        NUMBERS[j]->ReturnRawValue = ShiftDecimal(NUMBERS[j]->ReturnRawValue, NUMBERS[j]->DecimalShift);

    if (!PreValueUse || !PreValueOkay)
    {
        ReturnValue = ReturnRawValue;
        ReturnValueNoError = ReturnRawValue;
        rohwert = NUMBERS[j]->ReturnRawValue;

        if ((findDelimiterPos(ReturnValue, "N") == std::string::npos) && (ReturnValue.length() > 0))
        if (!PreValueUse || !NUMBERS[j]->PreValueOkay)
        {
            while ((ReturnValue.length() > 1) && (ReturnValue[0] == '0'))
            NUMBERS[j]->ReturnValue = NUMBERS[j]->ReturnRawValue;
            NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnRawValue;

            if ((findDelimiterPos(NUMBERS[j]->ReturnValue, "N") == std::string::npos) && (NUMBERS[j]->ReturnValue.length() > 0))
            {
                ReturnValue.erase(0, 1);
                while ((NUMBERS[j]->ReturnValue.length() > 1) && (NUMBERS[j]->ReturnValue[0] == '0'))
                {
                    NUMBERS[j]->ReturnValue.erase(0, 1);
                }
                NUMBERS[j]->Value = std::stof(NUMBERS[j]->ReturnValue);
                NUMBERS[j]->ReturnValueNoError = NUMBERS[j]->ReturnValue;

                NUMBERS[j]->PreValueOkay = true;
                NUMBERS[j]->PreValue = NUMBERS[j]->Value;
                NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
                NUMBERS[j]->lastvalue = flowMakeImage->getTimeImageTaken();
                zwtime = ConvertTimeToString(NUMBERS[j]->lastvalue, PREVALUE_TIME_FORMAT_OUTPUT);

                UpdatePreValueINI = true;
                SavePreValue();
            }
        }
        else
        {
            zw = ErsetzteN(NUMBERS[j]->ReturnRawValue, NUMBERS[j]->PreValue);

            NUMBERS[j]->Value = std::stof(zw);
            if (NUMBERS[j]->checkDigitIncreaseConsistency)
            {
                NUMBERS[j]->Value = checkDigitConsistency(NUMBERS[j]->Value, NUMBERS[j]->DecimalShift, NUMBERS[j]->analog_roi != NULL, NUMBERS[j]->PreValue);
            }

            zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift);

            if ((!NUMBERS[j]->AllowNegativeRates) && (NUMBERS[j]->Value < NUMBERS[j]->PreValue))
            {
                NUMBERS[j]->ErrorMessageText = NUMBERS[j]->ErrorMessageText + "Neg. Rate - Read: " + zwvalue + " - Raw: " + NUMBERS[j]->ReturnRawValue + " - Pre: " + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma) + " ";
                NUMBERS[j]->Value = NUMBERS[j]->PreValue;
                zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->AnzahlAnalog - NUMBERS[j]->DecimalShift);
            }

            if (NUMBERS[j]->useMaxRateValue && (abs(NUMBERS[j]->Value - NUMBERS[j]->PreValue) > NUMBERS[j]->MaxRateValue))
            {
                NUMBERS[j]->ErrorMessageText = NUMBERS[j]->ErrorMessageText + "Rate too high - Read: " + RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma) + " - Pre: " + RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
                NUMBERS[j]->Value = NUMBERS[j]->PreValue;
                zwvalue = RundeOutput(NUMBERS[j]->Value, NUMBERS[j]->Nachkomma);
            }

            NUMBERS[j]->ReturnValueNoError = zwvalue;
            NUMBERS[j]->ReturnValue = zwvalue;
            if (NUMBERS[j]->ErrorMessage && (NUMBERS[j]->ErrorMessageText.length() > 0))
                NUMBERS[j]->ReturnValue = NUMBERS[j]->ReturnValue + "\t" + NUMBERS[j]->ErrorMessageText;


            double difference = difftime(imagetime, NUMBERS[j]->lastvalue);        // in seconds
            difference /= 60;                                                      // in minutes
            NUMBERS[j]->FlowRateAct = (NUMBERS[j]->Value - NUMBERS[j]->PreValue) / difference;
            NUMBERS[j]->lastvalue = imagetime;

            if (NUMBERS[j]->ErrorMessageText.length() == 0)
            {
                NUMBERS[j]->PreValue = NUMBERS[j]->Value;
                NUMBERS[j]->ReturnPreValue = RundeOutput(NUMBERS[j]->PreValue, NUMBERS[j]->Nachkomma);
                NUMBERS[j]->ErrorMessageText = "no error";
                UpdatePreValueINI = true;
            }
            Value = std::stof(ReturnValue);
            ReturnValueNoError = ReturnValue;

            PreValueOkay = true;
            PreValue = Value;
            time(&lastvalue);
            localtime(&lastvalue);

            SavePreValue(Value, zwtime);
        }
        return true;
    }


    zw = ErsetzteN(ReturnRawValue);

    Value = std::stof(zw);
    if (checkDigitIncreaseConsistency)
    {
        Value = checkDigitConsistency(Value, DecimalShift, isanalog);
    }

    zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);

    if ((!AllowNegativeRates) && (Value < PreValue))
    {
        ErrorMessageText = ErrorMessageText + "Negative Rate - Returned old value - read value: " + zwvalue + " - raw value: " + ReturnRawValue + " - checked value: " + std::to_string(Value) + " ";
        Value = PreValue;
        zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
    }

    if (useMaxRateValue && (abs(Value - PreValue) > MaxRateValue))
    {
        ErrorMessageText = ErrorMessageText + "Rate too high - Returned old value - read value: " + zwvalue + " - checked value: " + RundeOutput(Value, AnzahlAnalog - DecimalShift) + " ";
        Value = PreValue;
        zwvalue = RundeOutput(Value, AnzahlAnalog - DecimalShift);
    }


    ReturnValueNoError = zwvalue;
    ReturnValue = zwvalue;
    if (ErrorMessage && (ErrorMessageText.length() > 0))
        ReturnValue = ReturnValue + "\t" + ErrorMessageText;

    if (ErrorMessageText.length() == 0)
    {
        time_t currenttime;
        time(&currenttime);
        localtime(&currenttime);
        double difference = difftime(currenttime, lastvalue);        // in seconds
        difference /= 60;                                            // in minutes
        FlowRateAct = (Value - PreValue) / difference;

        PreValue = Value;
        SavePreValue(Value, zwtime);
    }
    SavePreValue();
    return true;
}
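FlowRateAct is simply the value delta divided by the elapsed time between the current and the previous image, converted to minutes: two readings 5 minutes apart that differ by 0.010 give FlowRateAct = 0.010 / 5 = 0.002 (m3/min in the case of a water meter; the numbers are illustrative only).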

string ClassFlowPostProcessing::getReadout()
string ClassFlowPostProcessing::getReadout(int _number)
{
    return ReturnValue;
    return NUMBERS[_number]->ReturnValue;
}

string ClassFlowPostProcessing::getReadoutParam(bool _rawValue, bool _noerror)
string ClassFlowPostProcessing::getReadoutParam(bool _rawValue, bool _noerror, int _number)
{
    if (_rawValue)
        return ReturnRawValue;
        return NUMBERS[_number]->ReturnRawValue;
    if (_noerror)
        return ReturnValueNoError;
    return ReturnValue;
        return NUMBERS[_number]->ReturnValueNoError;
    return NUMBERS[_number]->ReturnValue;
}

string ClassFlowPostProcessing::RundeOutput(float _in, int _anzNachkomma){
@@ -440,7 +680,7 @@ string ClassFlowPostProcessing::RundeOutput(float _in, int _anzNachkomma){
}


string ClassFlowPostProcessing::ErsetzteN(string input)
string ClassFlowPostProcessing::ErsetzteN(string input, float _prevalue)
{
    int posN, posPunkt;
    int pot, ziffer;
@@ -461,7 +701,7 @@ string ClassFlowPostProcessing::ErsetzteN(string input)
        pot = posPunkt - posN;
    }

    zw = PreValue / pow(10, pot);
    zw = _prevalue / pow(10, pot);
    ziffer = ((int) zw) % 10;
    input[posN] = ziffer + 48;

@@ -471,7 +711,7 @@ string ClassFlowPostProcessing::ErsetzteN(string input)
    return input;
}

float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamshift, bool _isanalog){
float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamshift, bool _isanalog, float _preValue){
    int aktdigit, olddigit;
    int aktdigit_before, olddigit_before;
    int pot, pot_max;
@@ -489,12 +729,12 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
    {
        zw = input / pow(10, pot-1);
        aktdigit_before = ((int) zw) % 10;
        zw = PreValue / pow(10, pot-1);
        zw = _preValue / pow(10, pot-1);
        olddigit_before = ((int) zw) % 10;

        zw = input / pow(10, pot);
        aktdigit = ((int) zw) % 10;
        zw = PreValue / pow(10, pot);
        zw = _preValue / pow(10, pot);
        olddigit = ((int) zw) % 10;

        no_nulldurchgang = (olddigit_before <= aktdigit_before);
@@ -520,18 +760,18 @@ float ClassFlowPostProcessing::checkDigitConsistency(float input, int _decilamsh
    return input;
}

string ClassFlowPostProcessing::getReadoutRate()
string ClassFlowPostProcessing::getReadoutRate(int _number)
{
    return std::to_string(FlowRateAct);
    return std::to_string(NUMBERS[_number]->FlowRateAct);
}

string ClassFlowPostProcessing::getReadoutTimeStamp()
string ClassFlowPostProcessing::getReadoutTimeStamp(int _number)
{
    return timeStamp;
    return NUMBERS[_number]->timeStamp;
}


string ClassFlowPostProcessing::getReadoutError()
string ClassFlowPostProcessing::getReadoutError(int _number)
{
    return ErrorMessageText;
    return NUMBERS[_number]->ErrorMessageText;
}

@@ -1,53 +1,94 @@
#pragma once
#include "ClassFlow.h"
#include "ClassFlowMakeImage.h"
#include "ClassFlowAnalog.h"
#include "ClassFlowDigit.h"


#include <string>


struct NumberPost {
//    int PreValueAgeStartup;
    float MaxRateValue;
    bool useMaxRateValue;
    bool ErrorMessage;
    bool PreValueOkay;
    bool AllowNegativeRates;
    bool checkDigitIncreaseConsistency;
    time_t lastvalue;
    string timeStamp;
    float FlowRateAct;               // m3 / min
    float PreValue;                  // last value that was read reliably
    float Value;                     // last value read, incl. corrections
    string ReturnRawValue;           // raw value (with N & leading zeros)
    string ReturnValue;              // corrected return value, possibly with error message
    string ReturnPreValue;           // corrected return value without error message
    string ReturnValueNoError;
    string ErrorMessageText;         // error message from the consistency check
    int AnzahlAnalog;
    int AnzahlDigital;
    int DecimalShift;
    int Nachkomma;
//    ClassFlowAnalog* ANALOG;
//    ClassFlowDigit* DIGIT;

    digit *digit_roi;
    analog *analog_roi;



    string name;
};




class ClassFlowPostProcessing :
    public ClassFlow
{
protected:
    std::vector<NumberPost*> NUMBERS;
    bool UpdatePreValueINI;

    bool PreValueUse;
    int PreValueAgeStartup;
    bool AllowNegativeRates;
    float MaxRateValue;
    bool useMaxRateValue;
    bool ErrorMessage;
    bool PreValueOkay;
    bool checkDigitIncreaseConsistency;
    int DecimalShift;
    time_t lastvalue;
    float FlowRateAct;               // m3 / min


    ClassFlowAnalog* flowAnalog;
    ClassFlowDigit* flowDigit;


    string FilePreValue;
    float PreValue;                  // last value that was read reliably
    float Value;                     // last value read, incl. corrections
    string ReturnRawValue;           // raw value (with N & leading zeros)
    string ReturnValue;              // corrected return value, possibly with error message
    string ReturnValueNoError;       // corrected return value without error message
    string ErrorMessageText;         // error message from the consistency check
    string timeStamp;

    ClassFlowMakeImage *flowMakeImage;

    bool LoadPreValue(void);
    string ShiftDecimal(string in, int _decShift);

    string ErsetzteN(string);
    float checkDigitConsistency(float input, int _decilamshift, bool _isanalog);
    string ErsetzteN(string, float _prevalue);
    float checkDigitConsistency(float input, int _decilamshift, bool _isanalog, float _preValue);
    string RundeOutput(float _in, int _anzNachkomma);

    void InitNUMBERS();
    void handleDecimalSeparator(string _decsep, string _value);
    void handleMaxRateValue(string _decsep, string _value);


public:
    ClassFlowPostProcessing(std::vector<ClassFlow*>* lfc);
    bool ReadParameter(FILE* pfile, string& aktparamgraph);
    bool doFlow(string time);
    string getReadout();
    string getReadoutParam(bool _rawValue, bool _noerror);
    string getReadoutError();
    string getReadoutRate();
    string getReadoutTimeStamp();
    void SavePreValue(float value, string time = "");
    string GetPreValue();
    string getReadout(int _number);
    string getReadoutParam(bool _rawValue, bool _noerror, int _number = 0);
    string getReadoutError(int _number = 0);
    string getReadoutRate(int _number = 0);
    string getReadoutTimeStamp(int _number = 0);
    void SavePreValue();
    string GetPreValue(std::string _number = "");
    void SetPreValue(float zw, string _numbers, bool _extern = false);
    std::vector<NumberPost*> GetNumbers(){return NUMBERS;};

    string name(){return "ClassFlowPostProcessing";};
};

@@ -10,6 +10,7 @@
#include <string.h>
#include <esp_log.h>


#include "ClassLogFile.h"
//#include "ClassLogFile.h"

@@ -77,8 +78,9 @@ void memCopyGen(uint8_t* _source, uint8_t* _target, int _size)



FILE* OpenFileAndWait(const char* nm, char* _mode, int _waitsec)
FILE* OpenFileAndWait(const char* nm, const char* _mode, int _waitsec)
{
    printf("open config file %s in mode %s\n", nm, _mode);
    FILE *pfile = fopen(nm, _mode);

    if (pfile == NULL)
@@ -313,6 +315,14 @@ string toUpper(string in)
    return in;
}

string toLower(string in)
{
    for (int i = 0; i < in.length(); ++i)
        in[i] = tolower(in[i]);

    return in;
}

// CPU Temp
extern "C" uint8_t temprature_sens_read();
float temperatureRead()
@@ -358,3 +368,30 @@ int removeFolder(const char* folderPath, const char* logTag) {

    return deleted;
}



std::vector<string> HelperZerlegeZeile(std::string input, std::string _delimiter = "")
{
    std::vector<string> Output;
    std::string delimiter = " =,";
    if (_delimiter.length() > 0){
        delimiter = _delimiter;
    }

    input = trim(input, delimiter);
    size_t pos = findDelimiterPos(input, delimiter);
    std::string token;
    while (pos != std::string::npos) {
        token = input.substr(0, pos);
        token = trim(token, delimiter);
        Output.push_back(token);
        input.erase(0, pos + 1);
        input = trim(input, delimiter);
        pos = findDelimiterPos(input, delimiter);
    }
    Output.push_back(input);

    return Output;
}

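HelperZerlegeZeile() is a small general tokenizer: it trims delimiter characters from both ends, then cuts the string at every delimiter hit. With the tab delimiter used by LoadPreValue() (the input line is an example):

    std::vector<string> parts = HelperZerlegeZeile("main\t2021-01-01T12:00:00\t123.456", "\t");
    // parts[0] == "main", parts[1] == "2021-01-01T12:00:00", parts[2] == "123.456"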
@@ -1,6 +1,7 @@
#pragma once
#include <string>
#include <fstream>
#include <vector>


using namespace std;
@@ -10,7 +11,7 @@ void FindReplace(std::string& line, std::string& oldString, std::string& newStri

void CopyFile(string input, string output);

FILE* OpenFileAndWait(const char* nm, char* _mode, int _waitsec = 1);
FILE* OpenFileAndWait(const char* nm, const char* _mode, int _waitsec = 1);

size_t findDelimiterPos(string input, string delimiter);
//string trim(string istring);
@@ -22,6 +23,7 @@ string getFileType(string filename);
int mkdir_r(const char *dir, const mode_t mode);
int removeFolder(const char* folderPath, const char* logTag);

string toLower(string in);
string toUpper(string in);

float temperatureRead();
@@ -30,6 +32,8 @@ time_t addDays(time_t startTime, int days);

void memCopyGen(uint8_t* _source, uint8_t* _target, int _size);

std::vector<string> HelperZerlegeZeile(std::string input, std::string _delimiter);

///////////////////////////
size_t getInternalESPHeapSize();
size_t getESPHeapSize();

@@ -354,12 +354,10 @@ CImageBasis::CImageBasis(CImageBasis *_copyfrom, int _anzrepeat)
    int memsize = width * height * channels;
    rgb_image = (unsigned char*)GET_MEMORY(memsize);

    TickType_t xDelay;
    int anz = 1;
    while (!rgb_image && (anz < _anzrepeat))
    {
        printf("Create Image from Copy - Speicher ist voll - Versuche es erneut: %d.\n", anz);
        xDelay = 1000 / portTICK_PERIOD_MS;
        printf("Create Image from Copy - Speicher ist voll - Versuche es erneut: %d.\n", anz);
        rgb_image = (unsigned char*) malloc(memsize);
        anz++;
    }

@@ -12,7 +12,7 @@ ClassLogFile LogFile("/sdcard/log/message", "log_%Y-%m-%d.txt");

void ClassLogFile::WriteHeapInfo(std::string _id)
{
    std::string _zw = "\t" + _id;
    std::string _zw = "\t" + _id;
    if (loglevel > 0)
        _zw = _zw + "\t" + getESPHeapInfo();


@@ -1,12 +1,14 @@
#include "interface_mqtt.h"


//#define LOG_LOCAL_LEVEL ESP_LOG_DEBUG
#include "esp_log.h"
#include "mqtt_client.h"
#include "ClassLogFile.h"

static const char *TAG = "interface_mqtt";
static const char *TAG_INTERFACEMQTT = "interface_mqtt";

std::map<std::string, std::function<void()>>* connectFunktionMap = NULL;
std::map<std::string, std::function<bool(std::string, char*, int)>>* subscribeFunktionMap = NULL;
bool debugdetail = true;

// #define CONFIG_BROKER_URL "mqtt://192.168.178.43:1883"
@@ -16,51 +18,74 @@ esp_mqtt_event_id_t esp_mmqtt_ID = MQTT_EVENT_ANY;
bool mqtt_connected = false;
esp_mqtt_client_handle_t client = NULL;

void MQTTPublish(std::string _key, std::string _content){
void MQTTPublish(std::string _key, std::string _content, int retained_flag){
    if (client && mqtt_connected) {
        int msg_id;
        std::string zw;
        msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, 0);
        msg_id = esp_mqtt_client_publish(client, _key.c_str(), _content.c_str(), 0, 1, retained_flag);
        zw = "sent publish successful in MQTTPublish, msg_id=" + std::to_string(msg_id) + ", " + _key + ", " + _content;
        if (debugdetail) LogFile.WriteToFile(zw);
        ESP_LOGI(TAG, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
        ESP_LOGD(TAG_INTERFACEMQTT, "sent publish successful in MQTTPublish, msg_id=%d, %s, %s", msg_id, _key.c_str(), _content.c_str());
    }
    else {
        ESP_LOGI(TAG, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
        ESP_LOGW(TAG_INTERFACEMQTT, "Problem with Publish, client=%d, mqtt_connected %d", (int) client, (int) mqtt_connected);
    }
}
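MQTTPublish() now forwards a retained flag straight to esp_mqtt_client_publish(); callers that want the broker to keep the last message for late subscribers pass 1, all other call sites keep the parameter's default. Examples taken from the flows above:

    MQTTPublish(zw, resulterror, 1);   // error topic: retained
    MQTTPublish(zw, result);           // value topic: default flag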


static esp_err_t mqtt_event_handler_cb(esp_mqtt_event_handle_t event)
{
    int msg_id;
    std::string topic = "";
    switch (event->event_id) {
        case MQTT_EVENT_BEFORE_CONNECT:
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_BEFORE_CONNECT");
            break;
        case MQTT_EVENT_CONNECTED:
            ESP_LOGI(TAG, "MQTT_EVENT_CONNECTED");
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_CONNECTED");
            mqtt_connected = true;
            MQTTconnected();
            break;
        case MQTT_EVENT_DISCONNECTED:
            ESP_LOGI(TAG, "MQTT_EVENT_DISCONNECTED");
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_DISCONNECTED");
            break;
        case MQTT_EVENT_SUBSCRIBED:
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_SUBSCRIBED, msg_id=%d", event->msg_id);
            msg_id = esp_mqtt_client_publish(client, "/topic/qos0", "data", 0, 0, 0);
            ESP_LOGI(TAG_INTERFACEMQTT, "sent publish successful, msg_id=%d", msg_id);
            break;
        case MQTT_EVENT_UNSUBSCRIBED:
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_UNSUBSCRIBED, msg_id=%d", event->msg_id);
            break;
        case MQTT_EVENT_PUBLISHED:
            ESP_LOGI(TAG, "MQTT_EVENT_PUBLISHED, msg_id=%d", event->msg_id);
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_PUBLISHED, msg_id=%d", event->msg_id);
            break;
        case MQTT_EVENT_DATA:
            ESP_LOGI(TAG, "MQTT_EVENT_DATA");
            printf("TOPIC=%.*s\r\n", event->topic_len, event->topic);
            printf("DATA=%.*s\r\n", event->data_len, event->data);
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_DATA");
            ESP_LOGI(TAG_INTERFACEMQTT, "TOPIC=%.*s\r\n", event->topic_len, event->topic);
            ESP_LOGI(TAG_INTERFACEMQTT, "DATA=%.*s\r\n", event->data_len, event->data);
            topic.assign(event->topic, event->topic_len);
            if (subscribeFunktionMap != NULL) {
                if (subscribeFunktionMap->find(topic) != subscribeFunktionMap->end()) {
                    ESP_LOGD(TAG_INTERFACEMQTT, "call handler function\r\n");
                    (*subscribeFunktionMap)[topic](topic, event->data, event->data_len);
                }
            } else {
                ESP_LOGW(TAG_INTERFACEMQTT, "no handler available\r\n");
            }
            break;
        case MQTT_EVENT_ERROR:
            ESP_LOGI(TAG, "MQTT_EVENT_ERROR");
            ESP_LOGI(TAG_INTERFACEMQTT, "MQTT_EVENT_ERROR");
            break;
        default:
            ESP_LOGI(TAG, "Other event id:%d", event->event_id);
            ESP_LOGI(TAG_INTERFACEMQTT, "Other event id:%d", event->event_id);
            break;
    }
    return ESP_OK;
}

static void mqtt_event_handler(void *handler_args, esp_event_base_t base, int32_t event_id, void *event_data) {
    ESP_LOGD(TAG, "Event dispatched from event loop base=%s, event_id=%d", base, event_id);
    ESP_LOGD(TAG_INTERFACEMQTT, "Event dispatched from event loop base=%s, event_id=%d", base, event_id);
    mqtt_event_handler_cb((esp_mqtt_event_handle_t) event_data);
}

@@ -74,6 +99,7 @@ void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, st
        .client_id = _clientid.c_str(),
        .lwt_topic = _LWTContext.c_str(),
        .lwt_msg = _zwmessage.c_str(),
        .lwt_retain = 1,
        .lwt_msg_len = _lzw,
        .keepalive = _keepalive
    };
@@ -81,12 +107,100 @@ void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, st
    if (_user.length() && _password.length()){
        mqtt_cfg.username = _user.c_str();
        mqtt_cfg.password = _password.c_str();
        printf("Connect to MQTT: %s, %s", mqtt_cfg.username, mqtt_cfg.password);
        ESP_LOGI(TAG_INTERFACEMQTT, "Connect to MQTT: %s, %s", mqtt_cfg.username, mqtt_cfg.password);
    };

    client = esp_mqtt_client_init(&mqtt_cfg);
    esp_mqtt_client_register_event(client, esp_mmqtt_ID, mqtt_event_handler, client);
    esp_mqtt_client_start(client);

    MQTTPublish(_LWTContext, "");
    MQTTPublish(_LWTContext, "", 1);
}

void MQTTdestroy() {
    if (client != NULL) {
        esp_mqtt_client_stop(client);
        esp_mqtt_client_destroy(client);
    }
}

bool MQTTisConnected() {
    return mqtt_connected;
}

void MQTTregisterConnectFunction(std::string name, std::function<void()> func){
    ESP_LOGD(TAG_INTERFACEMQTT, "MQTTregisteronnectFunction %s\r\n", name.c_str());
    if (connectFunktionMap == NULL) {
        connectFunktionMap = new std::map<std::string, std::function<void()>>();
    }

    if ((*connectFunktionMap)[name] != NULL) {
|
||||
ESP_LOGW(TAG_INTERFACEMQTT, "connect function %s already registred", name.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
(*connectFunktionMap)[name] = func;
|
||||
|
||||
if (mqtt_connected) {
|
||||
func();
|
||||
}
|
||||
}
|
||||
|
||||
void MQTTunregisterConnectFunction(std::string name){
|
||||
ESP_LOGD(TAG_INTERFACEMQTT, "MQTTregisteronnectFunction %s\r\n", name.c_str());
|
||||
if ((connectFunktionMap != NULL) && (connectFunktionMap->find(name) != connectFunktionMap->end())) {
|
||||
connectFunktionMap->erase(name);
|
||||
}
|
||||
}
|
||||
|
||||
void MQTTregisterSubscribeFunction(std::string topic, std::function<bool(std::string, char*, int)> func){
|
||||
ESP_LOGD(TAG_INTERFACEMQTT, "MQTTregisterSubscribeFunction %s\r\n", topic.c_str());
|
||||
if (subscribeFunktionMap == NULL) {
|
||||
subscribeFunktionMap = new std::map<std::string, std::function<bool(std::string, char*, int)>>();
|
||||
}
|
||||
|
||||
if ((*subscribeFunktionMap)[topic] != NULL) {
|
||||
ESP_LOGW(TAG_INTERFACEMQTT, "topic %s already registred for subscription", topic.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
(*subscribeFunktionMap)[topic] = func;
|
||||
|
||||
if (mqtt_connected) {
|
||||
int msg_id = esp_mqtt_client_subscribe(client, topic.c_str(), 0);
|
||||
ESP_LOGD(TAG_INTERFACEMQTT, "topic %s subscribe successful, msg_id=%d", topic.c_str(), msg_id);
|
||||
}
|
||||
}
|
||||
|
||||
void MQTTconnected(){
|
||||
if (mqtt_connected) {
|
||||
if (connectFunktionMap != NULL) {
|
||||
for(std::map<std::string, std::function<void()>>::iterator it = connectFunktionMap->begin(); it != connectFunktionMap->end(); ++it) {
|
||||
it->second();
|
||||
ESP_LOGD(TAG_INTERFACEMQTT, "call connect function %s", it->first.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
if (subscribeFunktionMap != NULL) {
|
||||
for(std::map<std::string, std::function<bool(std::string, char*, int)>>::iterator it = subscribeFunktionMap->begin(); it != subscribeFunktionMap->end(); ++it) {
|
||||
int msg_id = esp_mqtt_client_subscribe(client, it->first.c_str(), 0);
|
||||
ESP_LOGD(TAG_INTERFACEMQTT, "topic %s subscribe successful, msg_id=%d", it->first.c_str(), msg_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MQTTdestroySubscribeFunction(){
|
||||
if (subscribeFunktionMap != NULL) {
|
||||
if (mqtt_connected) {
|
||||
for(std::map<std::string, std::function<bool(std::string, char*, int)>>::iterator it = subscribeFunktionMap->begin(); it != subscribeFunktionMap->end(); ++it) {
|
||||
int msg_id = esp_mqtt_client_unsubscribe(client, it->first.c_str());
|
||||
ESP_LOGI(TAG_INTERFACEMQTT, "topic %s unsubscribe successful, msg_id=%d", it->first.c_str(), msg_id);
|
||||
}
|
||||
}
|
||||
|
||||
subscribeFunktionMap->clear();
|
||||
delete subscribeFunktionMap;
|
||||
subscribeFunktionMap = NULL;
|
||||
}
|
||||
}
|
||||
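For roadmap item #8 (receiving MQTT input, not only sending), the map-based registry above is the extension point: `MQTT_EVENT_DATA` dispatches incoming messages to whatever handler was registered for the topic. A hedged usage sketch; the topic string and handler body are made-up examples, not actual topics of the firmware:

```cpp
#include <cstdio>
#include <string>
#include "interface_mqtt.h"

// Illustrative handler: called with the topic, the raw payload and its
// length (the payload is not NUL-terminated, so copy it with the length).
static bool handleFlowStart(std::string topic, char* data, int data_len)
{
    std::string payload(data, data_len);
    printf("received '%s' on %s\n", payload.c_str(), topic.c_str());
    return true;
}

void setupMqttInput()
{
    // Handlers are subscribed immediately if the client is already connected,
    // and re-subscribed from MQTTconnected() after a reconnect.
    MQTTregisterSubscribeFunction("watermeter/ctrl/flow_start", handleFlowStart);
}
```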
@@ -1,7 +1,23 @@
#ifndef INTERFACE_MQTT_H
#define INTERFACE_MQTT_H

#include <string>
#include <map>
#include <functional>

void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user, std::string _password, std::string _LWTContext, int _keepalive);
void MQTTdestroy();

//void MQTTInit(std::string _mqttURI, std::string _clientid, std::string _user = "", std::string _password = "");

void MQTTPublish(std::string _key, std::string _content);
void MQTTPublish(std::string _key, std::string _content, int retained_flag = 0);

bool MQTTisConnected();

void MQTTregisterConnectFunction(std::string name, std::function<void()> func);
void MQTTunregisterConnectFunction(std::string name);
void MQTTregisterSubscribeFunction(std::string topic, std::function<bool(std::string, char*, int)> func);
void MQTTdestroySubscribeFunction();
void MQTTconnected();

#endif //INTERFACE_MQTT_H
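Because `retained_flag` defaults to 0, all existing `MQTTPublish` call sites compile unchanged; only callers that want the broker to keep the last message pass 1, as the LWT publish in `MQTTInit` now does. A small illustrative sketch (the topic names are examples, not fixed firmware topics):

```cpp
#include <string>
#include "interface_mqtt.h"

void publishStatus(const std::string& maintopic)
{
    // Default retained_flag = 0: plain, non-retained publish (old behaviour).
    MQTTPublish(maintopic + "/value", "123.456");

    // retained_flag = 1: the broker stores the last message and replays it to
    // new subscribers - useful for availability / LWT-style topics.
    MQTTPublish(maintopic + "/connection", "connected", 1);
}
```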
@@ -98,7 +98,8 @@ void CTfLiteClass::GetOutPut()

void CTfLiteClass::Invoke()
{
interpreter->Invoke();
if (interpreter != nullptr)
interpreter->Invoke();
}


@@ -155,9 +156,9 @@ void CTfLiteClass::MakeAllocate()
}

void CTfLiteClass::GetInputTensorSize(){
#ifdef DEBUG_DETAIL_ON
float *zw = this->input;
int test = sizeof(zw);
#ifdef DEBUG_DETAIL_ON
printf("Input Tensor Dimension: %d\n", test);
#endif
}
@@ -184,13 +185,11 @@ unsigned char* CTfLiteClass::ReadFileToCharArray(std::string _fn)

unsigned char *result = (unsigned char*) malloc(size);
int anz = 1;
TickType_t xDelay;
while (!result && (anz < 6)) // retry at most 5 times (= 5 s)
{
#ifdef DEBUG_DETAIL_ON
printf("Speicher ist voll - Versuche es erneut: %d.\n", anz);
#endif
xDelay = 1000 / portTICK_PERIOD_MS;
result = (unsigned char*) malloc(size);
anz++;
}
@@ -208,7 +207,7 @@ unsigned char* CTfLiteClass::ReadFileToCharArray(std::string _fn)
return result;
}

void CTfLiteClass::LoadModel(std::string _fn){
bool CTfLiteClass::LoadModel(std::string _fn){

#ifdef SUPRESS_TFLITE_ERRORS
this->error_reporter = new tflite::OwnMicroErrorReporter;
@@ -219,9 +218,14 @@ void CTfLiteClass::LoadModel(std::string _fn){
unsigned char *rd;
rd = ReadFileToCharArray(_fn.c_str());

if (rd == NULL)
return false;

this->model = tflite::GetModel(rd);
free(rd);
TFLITE_MINIMAL_CHECK(model != nullptr);

return true;
}


@@ -56,7 +56,7 @@ class CTfLiteClass
public:
CTfLiteClass();
~CTfLiteClass();
void LoadModel(std::string _fn);
bool LoadModel(std::string _fn);
void MakeAllocate();
void GetInputTensorSize();
bool LoadInputImageBasis(CImageBasis *rs);
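`LoadModel` returning `bool` lets the flow abort cleanly when the .tflite file is missing or memory runs out, instead of crashing later in `Invoke` (this feeds roadmap item #7, Extended Error Handling). A hedged sketch of a caller; the header name and the model path are assumptions for illustration only:

```cpp
#include <cstdio>
#include "CTfLiteClass.h"   // assumed header name for CTfLiteClass

bool runModel()
{
    CTfLiteClass tflite;
    // LoadModel() now reports failure (file missing, out of memory, ...)
    // instead of silently continuing with a null model.
    if (!tflite.LoadModel("/sdcard/config/dig0600.tflite"))   // example path
    {
        printf("tflite file not available - aborting this flow step\n");
        return false;
    }
    tflite.MakeAllocate();
    tflite.Invoke();   // Invoke() is now guarded against a null interpreter
    return true;
}
```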
@@ -8,6 +8,7 @@
#include <iomanip>
#include <sstream>

#include "defines.h"
#include "Helper.h"

#include "esp_camera.h"
@@ -17,8 +18,9 @@
#include "ClassFlowControll.h"

#include "ClassLogFile.h"
#include "server_GPIO.h"

//#define DEBUG_DETAIL_ON
// #define DEBUG_DETAIL_ON


ClassFlowControll tfliteflow;
@@ -37,6 +39,9 @@ bool auto_isrunning = false;

int countRounds = 0;

static const char *TAGTFLITE = "server_tflite";


int getCountFlowRounds() {
return countRounds;
}
@@ -64,9 +69,11 @@ void KillTFliteTasks()
#ifdef DEBUG_DETAIL_ON
printf("Handle: xHandleblink_task_doFlow: %ld\n", (long) xHandleblink_task_doFlow);
#endif
if (xHandleblink_task_doFlow)
if (xHandleblink_task_doFlow != NULL)
{
vTaskDelete(xHandleblink_task_doFlow);
TaskHandle_t xHandleblink_task_doFlowTmp = xHandleblink_task_doFlow;
xHandleblink_task_doFlow = NULL;
vTaskDelete(xHandleblink_task_doFlowTmp);
#ifdef DEBUG_DETAIL_ON
printf("Killed: xHandleblink_task_doFlow\n");
#endif
@@ -75,9 +82,11 @@ void KillTFliteTasks()
#ifdef DEBUG_DETAIL_ON
printf("Handle: xHandletask_autodoFlow: %ld\n", (long) xHandletask_autodoFlow);
#endif
if (xHandletask_autodoFlow)
if (xHandletask_autodoFlow != NULL)
{
vTaskDelete(xHandletask_autodoFlow);
TaskHandle_t xHandletask_autodoFlowTmp = xHandletask_autodoFlow;
xHandletask_autodoFlow = NULL;
vTaskDelete(xHandletask_autodoFlowTmp);
#ifdef DEBUG_DETAIL_ON
printf("Killed: xHandletask_autodoFlow\n");
#endif
@@ -87,11 +96,10 @@ void KillTFliteTasks()

void doInit(void)
{
string config = "/sdcard/config/config.ini";
#ifdef DEBUG_DETAIL_ON
printf("Start tfliteflow.InitFlow(config);\n");
#endif
tfliteflow.InitFlow(config);
tfliteflow.InitFlow(CONFIG_FILE);
#ifdef DEBUG_DETAIL_ON
printf("Finished tfliteflow.InitFlow(config);\n");
#endif
@@ -136,7 +144,7 @@ esp_err_t handler_init(httpd_req_t *req)
printf("handler_doinit uri:\n"); printf(req->uri); printf("\n");
#endif

char* resp_str = "Init started<br>";
const char* resp_str = "Init started<br>";
httpd_resp_send(req, resp_str, strlen(resp_str));

doInit();
@@ -159,8 +167,6 @@ esp_err_t handler_doflow(httpd_req_t *req)
LogFile.WriteHeapInfo("handler_doflow - Start");
#endif

char* resp_str;

printf("handler_doFlow uri: "); printf(req->uri); printf("\n");

if (flowisrunning)
@@ -173,7 +179,7 @@ esp_err_t handler_doflow(httpd_req_t *req)
{
xTaskCreate(&blink_task_doFlow, "blink_doFlow", configMINIMAL_STACK_SIZE * 64, NULL, tskIDLE_PRIORITY+1, &xHandleblink_task_doFlow);
}
resp_str = "doFlow gestartet - dauert ca. 60 Sekunden";
const char* resp_str = "doFlow gestartet - dauert ca. 60 Sekunden";
httpd_resp_send(req, resp_str, strlen(resp_str));
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
@@ -196,6 +202,8 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)

bool _rawValue = false;
bool _noerror = false;
bool _all = false;
std::string _type = "value";
string zw;

printf("handler_wasserzaehler uri:\n"); printf(req->uri); printf("\n");
@@ -206,6 +214,22 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)
if (httpd_req_get_url_query_str(req, _query, 100) == ESP_OK)
{
// printf("Query: "); printf(_query); printf("\n");
if (httpd_query_key_value(_query, "all", _size, 10) == ESP_OK)
{
#ifdef DEBUG_DETAIL_ON
printf("all is found"); printf(_size); printf("\n");
#endif
_all = true;
}

if (httpd_query_key_value(_query, "type", _size, 10) == ESP_OK)
{
#ifdef DEBUG_DETAIL_ON
printf("type is found"); printf(_size); printf("\n");
#endif
_type = std::string(_size);
}

if (httpd_query_key_value(_query, "rawvalue", _size, 10) == ESP_OK)
{
#ifdef DEBUG_DETAIL_ON
@@ -222,6 +246,29 @@ esp_err_t handler_wasserzaehler(httpd_req_t *req)
}
}

httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");

if (_all)
{
httpd_resp_set_type(req, "text/plain");
printf("TYPE: %s\n", _type.c_str());
int _intype = READOUT_TYPE_VALUE;
if (_type == "prevalue")
_intype = READOUT_TYPE_PREVALUE;
if (_type == "raw")
_intype = READOUT_TYPE_RAWVALUE;
if (_type == "error")
_intype = READOUT_TYPE_ERROR;


zw = tfliteflow.getReadoutAll(_intype);
printf("ZW: %s\n", zw.c_str());
if (zw.length() > 0)
httpd_resp_sendstr_chunk(req, zw.c_str());
httpd_resp_sendstr_chunk(req, NULL);
return ESP_OK;
}

zw = tfliteflow.getReadout(_rawValue, _noerror);
if (zw.length() > 0)
httpd_resp_sendstr_chunk(req, zw.c_str());
@@ -429,7 +476,7 @@ esp_err_t handler_editflow(httpd_req_t *req)

// printf("Parameter host: "); printf(_host.c_str()); printf("\n");
// string zwzw = "Do " + _task + " start\n"; printf(zwzw.c_str());
bool changed = Camera.SetBrightnessContrastSaturation(bri, con, sat);
Camera.SetBrightnessContrastSaturation(bri, con, sat);
std::string zw = tfliteflow.doSingleStep("[MakeImage]", _host);
httpd_resp_sendstr_chunk(req, zw.c_str());
}
@@ -498,6 +545,7 @@ esp_err_t handler_prevalue(httpd_req_t *req)

char _query[100];
char _size[10] = "";
char _numbers[50] = "default";

if (httpd_req_get_url_query_str(req, _query, 100) == ESP_OK)
{
@@ -511,15 +559,24 @@ esp_err_t handler_prevalue(httpd_req_t *req)
printf("Value: "); printf(_size); printf("\n");
#endif
}

httpd_query_key_value(_query, "numbers", _numbers, 50);
}

if (strlen(_size) == 0)
zw = tfliteflow.GetPrevalue();
{
zw = tfliteflow.GetPrevalue(std::string(_numbers));
}
else
zw = "SetPrevalue to " + tfliteflow.UpdatePrevalue(_size);
{
zw = "SetPrevalue to " + tfliteflow.UpdatePrevalue(_size, _numbers, true);
}

resp_str = zw.c_str();

httpd_resp_set_hdr(req, "Access-Control-Allow-Origin", "*");


httpd_resp_send(req, resp_str, strlen(resp_str));
/* Respond with an empty chunk to signal HTTP response completion */
httpd_resp_send_chunk(req, NULL, 0);
@@ -535,17 +592,17 @@ void task_autodoFlow(void *pvParameter)
{
int64_t fr_start, fr_delta_ms;

printf("task_autodoFlow: start\r\n");
doInit();
gpio_handler_init();

auto_isrunning = tfliteflow.isAutoStart(auto_intervall);

if (isSetupModusActive()) {
auto_isrunning = false;
std::string zw_time = gettimestring(LOGFILE_TIME_FORMAT);
tfliteflow.doFlowMakeImageOnly(zw_time);

}

while (auto_isrunning)
{
std::string _zw = "task_autodoFlow - next round - Round #" + std::to_string(++countRounds);
@@ -590,6 +647,7 @@ void task_autodoFlow(void *pvParameter)
}
vTaskDelete(NULL); //Delete this task if it exits from the loop above
xHandletask_autodoFlow = NULL;
printf("task_autodoFlow: end\r\n");
}

void TFliteDoAutoStart()
@@ -597,6 +655,11 @@ void TFliteDoAutoStart()
xTaskCreate(&task_autodoFlow, "task_autodoFlow", configMINIMAL_STACK_SIZE * 64, NULL, tskIDLE_PRIORITY+1, &xHandletask_autodoFlow);
}

std::string GetMQTTMainTopic()
{
return tfliteflow.GetMQTTMainTopic();
}



void register_server_tflite_uri(httpd_handle_t server)

@@ -1,12 +1,11 @@
#include <esp_log.h>
#include <string>

#include <esp_http_server.h>
#include "CImageBasis.h"

//#include "ClassControllCamera.h"

static const char *TAGTFLITE = "server_tflite";

void register_server_tflite_uri(httpd_handle_t server);

void KillTFliteTasks();
@@ -15,6 +14,8 @@ void TFliteDoAutoStart();

bool isSetupModusActive();

std::string GetMQTTMainTopic();

esp_err_t GetJPG(std::string _filename, httpd_req_t *req);

esp_err_t GetRawJPG(httpd_req_t *req);

@@ -18,6 +18,7 @@
static const char *TAG = "sntp";

bool setTimeAlwaysOnReboot = true;
time_t bootTime;

static void obtain_time(void);
static void initialize_sntp(void);
@@ -27,6 +28,17 @@ void time_sync_notification_cb(struct timeval *tv)
ESP_LOGI(TAG, "Notification of a time synchronization event");
}

std::string ConvertTimeToString(time_t _time, const char * frm)
{
struct tm timeinfo;
char strftime_buf[64];
localtime_r(&_time, &timeinfo);
strftime(strftime_buf, sizeof(strftime_buf), frm, &timeinfo);

std::string result(strftime_buf);
return result;
}

std::string gettimestring(const char * frm)
{
time_t now;
@@ -115,3 +127,16 @@ static void initialize_sntp(void)
// sntp_set_time_sync_notification_cb(time_sync_notification_cb);
sntp_init();
}

void setBootTime()
{
time(&bootTime);
}

time_t getUpTime()
{
time_t now;
time(&now);

return now - bootTime;
}
@@ -15,5 +15,10 @@
void setup_time(void);

std::string gettimestring(const char * frm);
std::string ConvertTimeToString(time_t _time, const char * frm);

void setTimeZone(std::string _tzstring);
void reset_servername(std::string _servername);

void setBootTime();
time_t getUpTime();

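`setBootTime` is meant to be called once after the first SNTP sync; `getUpTime` then yields the elapsed seconds since boot. A short hedged usage sketch (the header name `time_sntp.h` is an assumption):

```cpp
#include <cstdio>
#include <ctime>
#include <string>
#include "time_sntp.h"   // assumed header name for the helpers shown above

void logUptime()
{
    time_t up = getUpTime();   // seconds since setBootTime() was called
    // time(NULL) - up equals the stored boot time; format it for the log:
    std::string booted = ConvertTimeToString(time(NULL) - up, "%Y-%m-%d %H:%M:%S");
    printf("up for %ld s, booted at %s\n", (long) up, booted.c_str());
}
```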
code/components/jomjol_wlan/CMakeLists.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
FILE(GLOB_RECURSE app_sources ${CMAKE_CURRENT_SOURCE_DIR}/*.*)

idf_component_register(SRCS ${app_sources}
INCLUDE_DIRS "."
REQUIRES nvs_flash jomjol_helper)

code/components/jomjol_wlan/connect_wlan.cpp (new file, 289 lines)
@@ -0,0 +1,289 @@
#include "connect_wlan.h"

#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/event_groups.h"
#include "driver/gpio.h"
#include "esp_system.h"
#include "esp_wifi.h"
#include "esp_event.h"
#include "esp_log.h"
#include "nvs_flash.h"

#include "lwip/err.h"
#include "lwip/sys.h"

#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <iostream>


#define EXAMPLE_ESP_MAXIMUM_RETRY 1000

/* FreeRTOS event group to signal when we are connected*/
static EventGroupHandle_t s_wifi_event_group;

/* The event group allows multiple bits for each event, but we only care about two events:
* - we are connected to the AP with an IP
* - we failed to connect after the maximum amount of retries */
#define WIFI_CONNECTED_BIT BIT0
#define WIFI_FAIL_BIT BIT1

static const char *TAG = "wifi station";

static int s_retry_num = 0;

///////////////////////////////////////////////////////////
#define BLINK_GPIO GPIO_NUM_33

int BlinkDauer;
int BlinkAnzahl;
bool BlinkOff;
bool BlinkIsRunning = false;

std::string hostname = "";
std::string std_hostname = "watermeter";
std::string ipadress = "";
std::string ssid = "";

std::string getIPAddress()
{
return ipadress;
}

std::string getSSID()
{
return ssid;
}


void task_doBlink(void *pvParameter)
{
ESP_LOGI("BLINK", "Blinken - start");
while (BlinkIsRunning)
{
// ESP_LOGI("BLINK", "Blinken - wait");
vTaskDelay(100 / portTICK_PERIOD_MS);
}

BlinkIsRunning = true;

// Init the GPIO
gpio_pad_select_gpio(BLINK_GPIO);
/* Set the GPIO as a push/pull output */
gpio_set_direction(BLINK_GPIO, GPIO_MODE_OUTPUT);

for (int i = 0; i < BlinkAnzahl; ++i)
{
if (BlinkAnzahl > 1)
{
gpio_set_level(BLINK_GPIO, 1);
vTaskDelay(BlinkDauer / portTICK_PERIOD_MS);
}
gpio_set_level(BLINK_GPIO, 0);
vTaskDelay(BlinkDauer / portTICK_PERIOD_MS);
}

if (BlinkOff)
gpio_set_level(BLINK_GPIO, 1);

ESP_LOGI("BLINK", "Blinken - done");
BlinkIsRunning = false;

vTaskDelete(NULL); //Delete this task if it exits from the loop above
}

void LEDBlinkTask(int _dauer, int _anz, bool _off)
{
BlinkDauer = _dauer;
BlinkAnzahl = _anz;
BlinkOff = _off;

xTaskCreate(&task_doBlink, "task_doBlink", configMINIMAL_STACK_SIZE * 8, NULL, tskIDLE_PRIORITY+1, NULL);
}
/////////////////////////////////////////////////////////


static void event_handler(void* arg, esp_event_base_t event_base,
int32_t event_id, void* event_data)
{
if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_START) {
LEDBlinkTask(200, 1, true);
esp_wifi_connect();
} else if (event_base == WIFI_EVENT && event_id == WIFI_EVENT_STA_DISCONNECTED) {
// if (s_retry_num < EXAMPLE_ESP_MAXIMUM_RETRY){
esp_wifi_connect();
s_retry_num++;
ESP_LOGI(TAG, "retry to connect to the AP");
ESP_LOGI(TAG,"connect to the AP fail");
} else if (event_base == IP_EVENT && event_id == IP_EVENT_STA_GOT_IP) {
ip_event_got_ip_t* event = (ip_event_got_ip_t*) event_data;
ESP_LOGI(TAG, "got ip:" IPSTR, IP2STR(&event->ip_info.ip));
ipadress = std::string(ip4addr_ntoa((const ip4_addr*) &event->ip_info.ip));
s_retry_num = 0;
xEventGroupSetBits(s_wifi_event_group, WIFI_CONNECTED_BIT);
LEDBlinkTask(1000, 5, true);
}
}

void strinttoip4(const char *ip, int &a, int &b, int &c, int &d) {
std::string zw = std::string(ip);
std::stringstream s(zw);
char ch; //to temporarily store the '.'
s >> a >> ch >> b >> ch >> c >> ch >> d;
}


void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname, const char *_ipadr, const char *_gw, const char *_netmask, const char *_dns)
{
ssid = std::string(_ssid);

s_wifi_event_group = xEventGroupCreate();

ESP_ERROR_CHECK(esp_netif_init());
ESP_ERROR_CHECK(esp_event_loop_create_default());

/////////////////////////////////////////////////////////////////

esp_netif_t *my_sta = esp_netif_create_default_wifi_sta();

if ((_ipadr != NULL) && (_gw != NULL) && (_netmask != NULL))
{
ESP_LOGI(TAG, "set IP %s, GW %s, Netmask %s manual", _ipadr, _gw, _netmask);
esp_netif_dhcpc_stop(my_sta);

esp_netif_ip_info_t ip_info;
int a, b, c, d;
strinttoip4(_ipadr, a, b, c, d);
IP4_ADDR(&ip_info.ip, a, b, c, d);
strinttoip4(_gw, a, b, c, d);
IP4_ADDR(&ip_info.gw, a, b, c, d);
strinttoip4(_netmask, a, b, c, d);
IP4_ADDR(&ip_info.netmask, a, b, c, d);

esp_netif_set_ip_info(my_sta, &ip_info);
}

wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
ESP_ERROR_CHECK(esp_wifi_init(&cfg));

if ((_ipadr != NULL) && (_gw != NULL) && (_netmask != NULL))
{
if (_dns == NULL)
_dns = _gw;

ESP_LOGI(TAG, "set DNS manual");
esp_netif_dns_info_t dns_info;
ip4_addr_t ip;
ip.addr = esp_ip4addr_aton(_dns);
ip_addr_set_ip4_u32(&dns_info.ip, ip.addr);
ESP_ERROR_CHECK(esp_netif_set_dns_info(my_sta, ESP_NETIF_DNS_MAIN, &dns_info));
}
/////////////////////////////////////////////////////////////////

// esp_netif_create_default_wifi_sta();

// wifi_init_config_t cfg = WIFI_INIT_CONFIG_DEFAULT();
// ESP_ERROR_CHECK(esp_wifi_init(&cfg));


/*
////////////////////////////// esp-idf 4.2 //////////////////////////
esp_event_handler_instance_t instance_any_id;
esp_event_handler_instance_t instance_got_ip;
ESP_ERROR_CHECK(esp_event_handler_instance_register(WIFI_EVENT,
ESP_EVENT_ANY_ID,
&event_handler,
NULL,
&instance_any_id));
ESP_ERROR_CHECK(esp_event_handler_instance_register(IP_EVENT,
IP_EVENT_STA_GOT_IP,
&event_handler,
NULL,
&instance_got_ip));
////////////////////////// END esp-idf 4.2 ///////////////////////////
*/

ESP_ERROR_CHECK(esp_event_handler_register(WIFI_EVENT, ESP_EVENT_ANY_ID, &event_handler, NULL));
ESP_ERROR_CHECK(esp_event_handler_register(IP_EVENT, IP_EVENT_STA_GOT_IP, &event_handler, NULL));

wifi_config_t wifi_config = { };

strcpy((char*)wifi_config.sta.ssid, (const char*)_ssid);
strcpy((char*)wifi_config.sta.password, (const char*)_password);

ESP_ERROR_CHECK(esp_wifi_set_mode(WIFI_MODE_STA) );
ESP_ERROR_CHECK(esp_wifi_set_config(WIFI_IF_STA, &wifi_config) );
ESP_ERROR_CHECK(esp_wifi_start() );

if (_hostname != NULL)
{
esp_err_t ret = tcpip_adapter_set_hostname(TCPIP_ADAPTER_IF_STA , _hostname);
hostname = std::string(_hostname);
if(ret != ESP_OK ){
ESP_LOGE(TAG,"failed to set hostname:%d",ret);
}
}

ESP_LOGI(TAG, "wifi_init_sta finished.");

/* Waiting until either the connection is established (WIFI_CONNECTED_BIT) or connection failed for the maximum
* number of re-tries (WIFI_FAIL_BIT). The bits are set by event_handler() (see above) */
EventBits_t bits = xEventGroupWaitBits(s_wifi_event_group,
WIFI_CONNECTED_BIT | WIFI_FAIL_BIT,
pdFALSE,
pdFALSE,
portMAX_DELAY);

/* xEventGroupWaitBits() returns the bits before the call returned, hence we can test which event actually
* happened. */
if (bits & WIFI_CONNECTED_BIT) {
ESP_LOGI(TAG, "connected to ap SSID:%s password:%s",
_ssid, _password);
} else if (bits & WIFI_FAIL_BIT) {
ESP_LOGI(TAG, "Failed to connect to SSID:%s, password:%s",
_ssid, _password);
} else {
ESP_LOGE(TAG, "UNEXPECTED EVENT");
}

/* The event will not be processed after unregister */
/*
////////////////////////////// esp-idf 4.2 //////////////////////////
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, instance_got_ip));
ESP_ERROR_CHECK(esp_event_handler_instance_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, instance_any_id));
////////////////////////// END esp-idf 4.2 ///////////////////////////
*/

/* Disabled so that the connection is re-established after a disconnect
ESP_ERROR_CHECK(esp_event_handler_unregister(IP_EVENT, IP_EVENT_STA_GOT_IP, &event_handler));
ESP_ERROR_CHECK(esp_event_handler_unregister(WIFI_EVENT, ESP_EVENT_ANY_ID, &event_handler));
vEventGroupDelete(s_wifi_event_group);
*/

/*
while (BlinkIsRunning)
{
vTaskDelay(100 / portTICK_PERIOD_MS);
}
*/
}


void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname)
{
wifi_init_sta(_ssid, _password, _hostname, NULL, NULL, NULL, NULL);
}

void wifi_init_sta(const char *_ssid, const char *_password)
{
wifi_init_sta(_ssid, _password, NULL, NULL, NULL, NULL, NULL);
}

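A hedged sketch of how the three `wifi_init_sta` overloads are meant to be called; SSID, passphrase and all addresses below are placeholders:

```cpp
#include "connect_wlan.h"

void startNetwork()
{
    // DHCP variant: only SSID, passphrase and hostname are given.
    wifi_init_sta("MySSID", "MyPassphrase", "watermeter");

    // Static variant: when ip/gateway/netmask are set, DHCP is stopped and
    // the DNS server falls back to the gateway if none is given.
    // wifi_init_sta("MySSID", "MyPassphrase", "watermeter",
    //               "192.168.178.50", "192.168.178.1", "255.255.255.0", NULL);
}
```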
code/components/jomjol_wlan/connect_wlan.h (new file, 16 lines)
@@ -0,0 +1,16 @@
#ifndef CONNECT_WLAN_H
#define CONNECT_WLAN_H

#include <string>

void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname, const char *_ipadr, const char *_gw, const char *_netmask, const char *_dns);
void wifi_init_sta(const char *_ssid, const char *_password, const char *_hostname);
void wifi_init_sta(const char *_ssid, const char *_password);

std::string getIPAddress();
std::string getSSID();

extern std::string hostname;
extern std::string std_hostname;

#endif
code/components/jomjol_wlan/read_wlanini.cpp (new file, 257 lines)
@@ -0,0 +1,257 @@
#include "read_wlanini.h"

#include "Helper.h"

#include "connect_wlan.h"

#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <iostream>
#include <string.h>


std::vector<string> ZerlegeZeile(std::string input, std::string _delimiter = "")
{
std::vector<string> Output;
std::string delimiter = " =,";
if (_delimiter.length() > 0){
delimiter = _delimiter;
}

input = trim(input, delimiter);
size_t pos = findDelimiterPos(input, delimiter);
std::string token;
while (pos != std::string::npos) {
token = input.substr(0, pos);
token = trim(token, delimiter);
Output.push_back(token);
input.erase(0, pos + 1);
input = trim(input, delimiter);
pos = findDelimiterPos(input, delimiter);
}
Output.push_back(input);

return Output;
}

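`ZerlegeZeile` ("split line") is the ini tokenizer used throughout this file. A small worked example of what it returns for a typical wlan.ini line (values illustrative):

```cpp
// Splitting one wlan.ini line at '='. ZerlegeZeile only trims the delimiter
// characters themselves, so the tokens may keep surrounding spaces; the
// caller trims spaces and strips the quotes afterwards (see LoadWlanFromFile).
std::string line = "hostname = \"watermeter\"";
std::vector<string> zerlegt = ZerlegeZeile(line, "=");
// zerlegt[0] -> "hostname "        (trailing space removed by the caller)
// zerlegt[1] -> " \"watermeter\""  (space and quotes removed by the caller)
```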
void LoadWlanFromFile(std::string fn, char *&_ssid, char *&_password, char *&_hostname, char *&_ipadr, char *&_gw, char *&_netmask, char *&_dns)
{
std::string ssid = "";
std::string passphrase = "";
std::string ipaddress = "";
std::string gw = "";
std::string netmask = "";
std::string dns = "";

std::string line = "";
std::vector<string> zerlegt;
hostname = std_hostname;

FILE* pFile;
fn = FormatFileName(fn);

pFile = OpenFileAndWait(fn.c_str(), "r");
printf("file loaded\n");

if (pFile == NULL)
return;

char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);

while ((line.size() > 0) || !(feof(pFile)))
{
// printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");
for (int i = 2; i < zerlegt.size(); ++i)
zerlegt[1] = zerlegt[1] + "=" + zerlegt[i];

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
hostname = trim(zerlegt[1]);
if ((hostname[0] == '"') && (hostname[hostname.length()-1] == '"')){
hostname = hostname.substr(1, hostname.length()-2);
}
}

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "SSID")){
ssid = trim(zerlegt[1]);
if ((ssid[0] == '"') && (ssid[ssid.length()-1] == '"')){
ssid = ssid.substr(1, ssid.length()-2);
}
}

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "PASSWORD")){
passphrase = zerlegt[1];
if ((passphrase[0] == '"') && (passphrase[passphrase.length()-1] == '"')){
passphrase = passphrase.substr(1, passphrase.length()-2);
}
}

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "IP")){
ipaddress = zerlegt[1];
if ((ipaddress[0] == '"') && (ipaddress[ipaddress.length()-1] == '"')){
ipaddress = ipaddress.substr(1, ipaddress.length()-2);
}
}

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "GATEWAY")){
gw = zerlegt[1];
if ((gw[0] == '"') && (gw[gw.length()-1] == '"')){
gw = gw.substr(1, gw.length()-2);
}
}

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "NETMASK")){
netmask = zerlegt[1];
if ((netmask[0] == '"') && (netmask[netmask.length()-1] == '"')){
netmask = netmask.substr(1, netmask.length()-2);
}
}

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "DNS")){
dns = zerlegt[1];
if ((dns[0] == '"') && (dns[dns.length()-1] == '"')){
dns = dns.substr(1, dns.length()-2);
}
}


if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}

fclose(pFile);

// Check if Hostname was empty in .ini if yes set to std_hostname
if(hostname.length() == 0){
hostname = std_hostname;
}

_hostname = new char[hostname.length() + 1];
strcpy(_hostname, hostname.c_str());

_ssid = new char[ssid.length() + 1];
strcpy(_ssid, ssid.c_str());

_password = new char[passphrase.length() + 1];
strcpy(_password, passphrase.c_str());

if (ipaddress.length() > 0)
{
_ipadr = new char[ipaddress.length() + 1];
strcpy(_ipadr, ipaddress.c_str());
}
else
_ipadr = NULL;

if (gw.length() > 0)
{
_gw = new char[gw.length() + 1];
strcpy(_gw, gw.c_str());
}
else
_gw = NULL;

if (netmask.length() > 0)
{
_netmask = new char[netmask.length() + 1];
strcpy(_netmask, netmask.c_str());
}
else
_netmask = NULL;

if (dns.length() > 0)
{
_dns = new char[dns.length() + 1];
strcpy(_dns, dns.c_str());
}
else
_dns = NULL;
}


bool ChangeHostName(std::string fn, std::string _newhostname)
{
if (_newhostname == hostname)
return false;

string line = "";
std::vector<string> zerlegt;

bool found = false;

std::vector<string> neuesfile;

FILE* pFile;
fn = FormatFileName(fn);
pFile = OpenFileAndWait(fn.c_str(), "r");

printf("file loaded\n");

if (pFile == NULL)
return false;

char zw[1024];
fgets(zw, 1024, pFile);
line = std::string(zw);

while ((line.size() > 0) || !(feof(pFile)))
{
printf("%s", line.c_str());
zerlegt = ZerlegeZeile(line, "=");
zerlegt[0] = trim(zerlegt[0], " ");

if ((zerlegt.size() > 1) && (toUpper(zerlegt[0]) == "HOSTNAME")){
line = "hostname = \"" + _newhostname + "\"\n";
found = true;
}

neuesfile.push_back(line);

if (fgets(zw, 1024, pFile) == NULL)
{
line = "";
}
else
{
line = std::string(zw);
}
}

if (!found)
{
line = "\nhostname = \"" + _newhostname + "\"\n";
neuesfile.push_back(line);
}

fclose(pFile);

pFile = OpenFileAndWait(fn.c_str(), "w+");

for (int i = 0; i < neuesfile.size(); ++i)
{
printf(neuesfile[i].c_str());
fputs(neuesfile[i].c_str(), pFile);
}

fclose(pFile);

printf("*** Update hostname done ***\n");

return true;
}

code/components/jomjol_wlan/read_wlanini.h (new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef READ_WLANINI_H
#define READ_WLANINI_H

#include <string>

void LoadWlanFromFile(std::string fn, char *&_ssid, char *&_password, char *&_hostname, char *&_ipadr, char *&_gw, char *&_netmask, char *&_dns);

bool ChangeHostName(std::string fn, std::string _newhostname);


#endif
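Taken together, a hedged sketch of the intended startup sequence: parse wlan.ini, then bring up WiFi with whatever optional fields were present. The ini path is an example:

```cpp
#include "read_wlanini.h"
#include "connect_wlan.h"

void initWlanFromIni()
{
    char *ssid = NULL, *password = NULL, *host = NULL;
    char *ip = NULL, *gw = NULL, *netmask = NULL, *dns = NULL;

    // ssid, password and hostname always come back allocated (hostname falls
    // back to std_hostname); the optional network fields stay NULL when the
    // ini file does not define them, which keeps DHCP active.
    LoadWlanFromFile("/sdcard/wlan.ini", ssid, password, host, ip, gw, netmask, dns);
    wifi_init_sta(ssid, password, host, ip, gw, netmask, dns);
}
```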
code/components/tfmicro.zip (new binary file, not shown)
@@ -23,7 +23,7 @@ if(NOT DEFINED ENV{IDF_PATH})
endif()

idf_component_register(
SRCS tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/system_setup.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/micro/kernels/activations.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/add_n.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/batch_to_space_nd.cc tensorflow/lite/micro/kernels/cast.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/conv_common.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/depthwise_conv_common.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/detection_postprocess.cc tensorflow/lite/micro/kernels/div.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/elu.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/exp.cc tensorflow/lite/micro/kernels/expand_dims.cc tensorflow/lite/micro/kernels/fill.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/fully_connected_common.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/l2_pool_2d.cc tensorflow/lite/micro/kernels/leaky_relu.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/quantize_common.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/softmax_common.cc tensorflow/lite/micro/kernels/space_to_batch_nd.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/squeeze.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/svdf_common.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/transpose_conv.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/zeros_like.cc
SRCS tensorflow/lite/micro/simple_memory_allocator.cc tensorflow/lite/micro/debug_log.cc tensorflow/lite/micro/micro_error_reporter.cc tensorflow/lite/micro/memory_helpers.cc tensorflow/lite/micro/test_helpers.cc tensorflow/lite/micro/recording_micro_allocator.cc tensorflow/lite/micro/micro_time.cc tensorflow/lite/micro/recording_simple_memory_allocator.cc tensorflow/lite/micro/micro_string.cc tensorflow/lite/micro/micro_profiler.cc tensorflow/lite/micro/flatbuffer_utils.cc tensorflow/lite/micro/micro_graph.cc tensorflow/lite/micro/mock_micro_graph.cc tensorflow/lite/micro/all_ops_resolver.cc tensorflow/lite/micro/micro_utils.cc tensorflow/lite/micro/micro_interpreter.cc tensorflow/lite/micro/micro_allocator.cc tensorflow/lite/micro/system_setup.cc tensorflow/lite/micro/memory_planner/linear_memory_planner.cc tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc tensorflow/lite/schema/schema_utils.cc tensorflow/lite/c/common.c tensorflow/lite/core/api/tensor_utils.cc tensorflow/lite/core/api/error_reporter.cc tensorflow/lite/core/api/flatbuffer_conversions.cc tensorflow/lite/core/api/op_resolver.cc tensorflow/lite/kernels/kernel_util.cc tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc tensorflow/lite/micro/kernels/activations.cc tensorflow/lite/micro/kernels/activations_common.cc tensorflow/lite/micro/kernels/add.cc tensorflow/lite/micro/kernels/add_n.cc tensorflow/lite/micro/kernels/arg_min_max.cc tensorflow/lite/micro/kernels/batch_to_space_nd.cc tensorflow/lite/micro/kernels/cast.cc tensorflow/lite/micro/kernels/ceil.cc tensorflow/lite/micro/kernels/circular_buffer.cc tensorflow/lite/micro/kernels/comparisons.cc tensorflow/lite/micro/kernels/concatenation.cc tensorflow/lite/micro/kernels/conv.cc tensorflow/lite/micro/kernels/conv_common.cc tensorflow/lite/micro/kernels/cumsum.cc tensorflow/lite/micro/kernels/depth_to_space.cc tensorflow/lite/micro/kernels/depthwise_conv.cc tensorflow/lite/micro/kernels/depthwise_conv_common.cc tensorflow/lite/micro/kernels/dequantize.cc tensorflow/lite/micro/kernels/detection_postprocess.cc tensorflow/lite/micro/kernels/elementwise.cc tensorflow/lite/micro/kernels/elu.cc tensorflow/lite/micro/kernels/ethosu.cc tensorflow/lite/micro/kernels/exp.cc tensorflow/lite/micro/kernels/expand_dims.cc tensorflow/lite/micro/kernels/fill.cc tensorflow/lite/micro/kernels/floor.cc tensorflow/lite/micro/kernels/floor_div.cc tensorflow/lite/micro/kernels/floor_mod.cc tensorflow/lite/micro/kernels/fully_connected.cc tensorflow/lite/micro/kernels/fully_connected_common.cc tensorflow/lite/micro/kernels/gather.cc tensorflow/lite/micro/kernels/gather_nd.cc tensorflow/lite/micro/kernels/hard_swish.cc tensorflow/lite/micro/kernels/hard_swish_common.cc tensorflow/lite/micro/kernels/if.cc tensorflow/lite/micro/kernels/kernel_runner.cc tensorflow/lite/micro/kernels/kernel_util.cc tensorflow/lite/micro/kernels/l2norm.cc tensorflow/lite/micro/kernels/l2_pool_2d.cc tensorflow/lite/micro/kernels/leaky_relu.cc tensorflow/lite/micro/kernels/logical.cc tensorflow/lite/micro/kernels/logical_common.cc tensorflow/lite/micro/kernels/logistic.cc tensorflow/lite/micro/kernels/logistic_common.cc tensorflow/lite/micro/kernels/log_softmax.cc tensorflow/lite/micro/kernels/maximum_minimum.cc tensorflow/lite/micro/kernels/mul.cc tensorflow/lite/micro/kernels/neg.cc tensorflow/lite/micro/kernels/pack.cc tensorflow/lite/micro/kernels/pad.cc tensorflow/lite/micro/kernels/pooling.cc tensorflow/lite/micro/kernels/pooling_common.cc tensorflow/lite/micro/kernels/prelu.cc tensorflow/lite/micro/kernels/quantize.cc tensorflow/lite/micro/kernels/quantize_common.cc tensorflow/lite/micro/kernels/reduce.cc tensorflow/lite/micro/kernels/reshape.cc tensorflow/lite/micro/kernels/resize_bilinear.cc tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc tensorflow/lite/micro/kernels/round.cc tensorflow/lite/micro/kernels/shape.cc tensorflow/lite/micro/kernels/softmax.cc tensorflow/lite/micro/kernels/softmax_common.cc tensorflow/lite/micro/kernels/space_to_batch_nd.cc tensorflow/lite/micro/kernels/space_to_depth.cc tensorflow/lite/micro/kernels/split.cc tensorflow/lite/micro/kernels/split_v.cc tensorflow/lite/micro/kernels/squeeze.cc tensorflow/lite/micro/kernels/strided_slice.cc tensorflow/lite/micro/kernels/sub.cc tensorflow/lite/micro/kernels/svdf.cc tensorflow/lite/micro/kernels/svdf_common.cc tensorflow/lite/micro/kernels/tanh.cc tensorflow/lite/micro/kernels/transpose.cc tensorflow/lite/micro/kernels/transpose_conv.cc tensorflow/lite/micro/kernels/unpack.cc tensorflow/lite/micro/kernels/zeros_like.cc
INCLUDE_DIRS . third_party/gemmlowp third_party/flatbuffers/include third_party/ruy)

# Reduce the level of paranoia to be able to compile TF sources
@@ -32,7 +32,7 @@ target_compile_options(${COMPONENT_LIB} PRIVATE
-Wno-missing-field-initializers
-Wno-type-limits)

target_compile_options(${COMPONENT_LIB} PRIVATE -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP)
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -O3 -Werror -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP >)
target_compile_options(${COMPONENT_LIB} PRIVATE -Wimplicit-function-declaration -Werror -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP)
target_compile_options(${COMPONENT_LIB} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -std=c++11 -fno-rtti -fno-exceptions -fno-threadsafe-statics -Werror -fno-unwind-tables -ffunction-sections -fdata-sections -fmessage-length=0 -DTF_LITE_STATIC_MEMORY -DTF_LITE_DISABLE_X86_NEON -Wsign-compare -Wdouble-promotion -Wshadow -Wunused-variable -Wmissing-field-initializers -Wunused-function -Wswitch -Wvla -Wall -Wextra -Wstrict-aliasing -Wno-unused-parameter -DESP >)
target_compile_options(${COMPONENT_LIB} INTERFACE $<$<IN_LIST:-DTF_LITE_STATIC_MEMORY,$<TARGET_PROPERTY:${COMPONENT_LIB},COMPILE_OPTIONS>>:-DTF_LITE_STATIC_MEMORY>)
target_link_libraries(${COMPONENT_LIB} PRIVATE -lm)

@@ -63,7 +63,6 @@ typedef struct {
} TfLiteMirrorPaddingParams;

// Possible fused activation functions.
// TODO(aselle): rename to TfLiteActivation
typedef enum {
kTfLiteActNone = 0,
kTfLiteActRelu,
@@ -98,6 +97,8 @@ typedef struct {
TfLiteFusedActivation activation;
} TfLiteConv3DParams;

typedef TfLiteConv3DParams TfLiteConv3DTransposeParams;

typedef struct {
TfLitePadding padding;
int stride_width;
@@ -328,8 +329,9 @@ typedef struct {
} TfLitePadV2Params;

typedef struct {
// TODO(ahentz): We can't have dynamic data in this struct, at least not yet.
// For now we will fix the maximum possible number of dimensions.
// These fields are only used in old models for backward compatibility.
// In the current implementation, we use the 2nd input of the op as the shape,
// and these fields are unused.
int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT];
int num_dimensions;
} TfLiteReshapeParams;
@@ -495,6 +497,11 @@ typedef struct {
TfLiteType value_dtype;
} TfLiteHashtableParams;

typedef struct {
const char* container;
const char* shared_name;
} TfLiteVarHandleParams;

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

@@ -29,7 +29,9 @@ extern "C" {
// library.
#ifdef SWIG
#define TFL_CAPI_EXPORT
#else
#elif defined(TFL_STATIC_LIBRARY_BUILD)
#define TFL_CAPI_EXPORT
#else // not definded TFL_STATIC_LIBRARY_BUILD
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
@@ -54,7 +56,19 @@ typedef enum TfLiteStatus {
// incompatibility between runtime and delegate, e.g., this error is returned
// when trying to apply a TfLite delegate onto a model graph that's already
// immutable.
kTfLiteApplicationError = 3
kTfLiteApplicationError = 3,

// Generally referring to serialized delegate data not being found.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataNotFound = 4,

// Generally referring to data-writing issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataWriteError = 5,

// Generally referring to data-reading issues in delegate serialization.
// See tflite::delegates::Serialization.
kTfLiteDelegateDataReadError = 6,
} TfLiteStatus;
|
||||
|
||||
// Types supported by tensor
|
||||
|
||||
@@ -45,8 +45,10 @@ int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
|
||||
#ifndef TF_LITE_STATIC_MEMORY
|
||||
|
||||
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
|
||||
TfLiteIntArray* ret =
|
||||
(TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size));
|
||||
int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
|
||||
if (alloc_size <= 0) return NULL;
|
||||
TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size);
|
||||
if (!ret) return ret;
|
||||
ret->size = size;
|
||||
return ret;
|
||||
}
|
||||
@@ -181,9 +183,9 @@ void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
|
||||
}
|
||||
// TODO(b/145340303): Tensor data should be aligned.
|
||||
if (!tensor->data.raw) {
|
||||
tensor->data.raw = malloc(num_bytes);
|
||||
tensor->data.raw = (char*)malloc(num_bytes);
|
||||
} else if (num_bytes > tensor->bytes) {
|
||||
tensor->data.raw = realloc(tensor->data.raw, num_bytes);
|
||||
tensor->data.raw = (char*)realloc(tensor->data.raw, num_bytes);
|
||||
}
|
||||
tensor->bytes = num_bytes;
}

@@ -229,7 +231,7 @@ const char* TfLiteTypeGetName(TfLiteType type) {
return "Unknown type";
}

TfLiteDelegate TfLiteDelegateCreate() {
TfLiteDelegate TfLiteDelegateCreate(void) {
TfLiteDelegate d = {
.data_ = NULL,
.Prepare = NULL,

@@ -456,8 +456,8 @@ typedef struct TfLiteTensor {
} TfLiteTensor;

// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
// This structure only exhibits the inputs, outputs, user defined data and some
// node properties (like statefulness), not other features like the type.
typedef struct TfLiteNode {
// Inputs to this node expressed as indices into the simulator's tensors.
TfLiteIntArray* inputs;

@@ -490,6 +490,9 @@ typedef struct TfLiteNode {
// created by calling `interpreter.ModifyGraphWithDelegate`.
// WARNING: This is an experimental interface that is subject to change.
struct TfLiteDelegate* delegate;

// Whether this op might have side effect (e.g. stateful op).
bool might_have_side_effect;
} TfLiteNode;
#else // defined(TF_LITE_STATIC_MEMORY)?
// NOTE: This flag is opt-in only at compile time.

@@ -640,6 +643,7 @@ typedef struct TfLiteContext {
// TfLiteDelegates can traverse the current execution plan by iterating
// through each member of this array and using GetNodeAndRegistration() to
// access details about a node. i.e.
//
// TfLiteIntArray* execution_plan;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
// for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {

@@ -648,6 +652,28 @@ typedef struct TfLiteContext {
// TfLiteRegistration* reg;
// context->GetNodeAndRegistration(context, node_index, &node, &reg);
// }
// Note: the memory pointed to by `*execution_plan` is OWNED by the TfLite
// runtime. Future calls to GetExecutionPlan invalidate earlier outputs. The
// following code snippet shows the issue of such an invocation pattern. After
// calling CheckNode, subsequent access to `plan_1st` is undefined.
//
// void CheckNode(const TfLiteNode* node) {
// ...
// TfLiteIntArray* plan_2nd;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd));
// ...
// }
//
// TfLiteIntArray* plan_1st;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
// for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
// int node_index = plan_1st->data[exec_index];
// TfLiteNode* node;
// TfLiteRegistration* reg;
// context->GetNodeAndRegistration(context, node_index, &node, &reg);
// CheckNode(node);
// }
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
TfLiteIntArray** execution_plan);

@@ -777,6 +803,18 @@ typedef struct TfLiteContext {
// WARNING: This method may not be available on all platforms.
TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
int tensor_idx);

// Retrieves named metadata buffer from the TFLite model.
// Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer
// Model: that is, there exists a `metadata` entry with given `name` string.
// (see TFLite's schema.fbs).
// The corresponding `buffer` information is populated in `ptr` & `bytes`.
// The data from `ptr` is valid for the lifetime of the Interpreter.
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context,
const char* name, const char** ptr,
size_t* bytes);
} TfLiteContext;
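The new `GetModelMetadata` hook can be exercised from kernel code roughly as follows; a minimal sketch, assuming a valid `context` and a hypothetical metadata entry named `"my_metadata"`:

```cpp
// Sketch only: query a named metadata buffer from inside an op's Prepare().
// "my_metadata" is a made-up entry name; real models define their own.
const char* ptr = nullptr;
size_t bytes = 0;
if (context->GetModelMetadata(context, "my_metadata", &ptr, &bytes) ==
    kTfLiteOk) {
  // ptr/bytes describe the flatbuffer metadata buffer; per the comment above,
  // the data stays valid for the lifetime of the interpreter, so it can be
  // cached (e.g. in the node's user_data).
}
```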

typedef struct TfLiteRegistration {

@@ -918,7 +956,7 @@ typedef struct TfLiteDelegate {

// Build a 'null' delegate, with all the fields properly set to their default
// values.
TfLiteDelegate TfLiteDelegateCreate();
TfLiteDelegate TfLiteDelegateCreate(void);

#ifdef __cplusplus
}  // extern "C"

@@ -373,6 +373,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return ParseReducer(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_REDUCE_ALL: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}

case BuiltinOperator_REDUCE_MAX: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}

@@ -663,7 +667,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
return kTfLiteOk;
}
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
TF_LITE_REPORT_ERROR(error_reporter,
"DELEGATE op shouldn't exist in model.");
return kTfLiteError;

@@ -757,7 +760,8 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CONV_3D: {
case BuiltinOperator_CONV_3D:
case BuiltinOperator_CONV_3D_TRANSPOSE: {
auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {

@@ -789,6 +793,21 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_VAR_HANDLE: {
auto params = safe_allocator.Allocate<TfLiteVarHandleParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
params->container = nullptr;
params->shared_name = nullptr;
if (const auto* var_handle_params =
op->builtin_options_as_VarHandleOptions()) {
if (var_handle_params->container())
params->container = var_handle_params->container()->c_str();
if (var_handle_params->shared_name())
params->shared_name = var_handle_params->shared_name()->c_str();
}
*builtin_data = params.release();
return kTfLiteOk;
}
// Below are the ops with no builtin_data structure.
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
// ok for now, since there is no call implementation either.

@@ -825,6 +844,9 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
case BuiltinOperator_READ_VARIABLE:
case BuiltinOperator_ASSIGN_VARIABLE:
case BuiltinOperator_BROADCAST_ARGS:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
return kTfLiteError;

@@ -1372,6 +1394,30 @@ TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
return kTfLiteOk;
}

TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data) {
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

SafeBuiltinDataAllocator safe_allocator(allocator);
std::unique_ptr<TfLiteIfParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);

const IfOptions* schema_params = op->builtin_options_as_IfOptions();

if (schema_params != nullptr) {
params->then_subgraph_index = schema_params->then_subgraph_index();
params->else_subgraph_index = schema_params->else_subgraph_index();
} else {
// TODO(b/157480169): We should either return kTfLiteError or fill in some
// reasonable defaults in the params struct. We are not doing so until we
// better understand the ramifications of changing the legacy behavior.
}

*builtin_data = params.release();
return kTfLiteOk;
}

TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,

@@ -181,6 +181,9 @@ TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);

TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);

TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,

@@ -30,8 +30,7 @@ TfLiteStatus GetRegistrationFromOpCode(
auto builtin_code = GetBuiltinCode(opcode);
int version = opcode->version();

if (builtin_code > BuiltinOperator_MAX ||
builtin_code < BuiltinOperator_MIN) {
if (builtin_code > BuiltinOperator_MAX) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Op builtin_code out of range: %d. Are you using old TFLite binary "

@@ -46,6 +46,22 @@ class OpResolver {
}

virtual ~OpResolver() {}

private:
/// Returns true if this OpResolver may contain any "user defined" ops.
/// By "user defined" ops, we mean any op definitions other than those
/// contained in tflite::ops::builtin::BuiltinOpResolver.
///
/// If this method returns true, it doesn't necessarily mean that the
/// OpResolver contains a user-defined op, just that the absence of
/// user-defined ops can't be guaranteed.
///
/// Note that "user-defined" ops are not the same as "custom" ops;
/// BuiltinOpResolver may support certain "custom" ops, in addition to
/// "builtin" ops, and may not support all of the "builtin" op enum values.
virtual bool MayContainUserDefinedOps() const { return true; }

friend class OpResolverInternal;
};
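A minimal sketch of how a resolver might opt in to this hint, assuming the two pure-virtual `FindOp` overloads of the public interface (their exact signatures are taken on trust here):

```cpp
class BuiltinOnlyResolver : public tflite::OpResolver {
 public:
  const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                   int version) const override {
    return nullptr;  // a real resolver would consult its registration table
  }
  const TfLiteRegistration* FindOp(const char* op, int version) const override {
    return nullptr;  // serves no custom/user-defined ops at all
  }

 private:
  // Overriding a private virtual is legal in C++. Only builtin ops are
  // served here, so the absence of user-defined ops can be guaranteed.
  bool MayContainUserDefinedOps() const override { return false; }
};
```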

// Handles the logic for converting between an OperatorCode structure extracted

@@ -279,81 +279,125 @@ inline Integer FloorLog2(Integer n) {
}
}

// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
// softmax
// func - the function to build the LUT for (e.g exp(x))
// min,max - table limits
// table - pointer to buffer
// num - number of elements in the LUT
inline void gen_lut(double (*func)(double), double min, double max,
int16_t* table, const int num) {
// size of table should equal to num + 1
// last element only for slope calculation
double step = (max - min) / (num - 1);
double half_step = step / 2.0;
for (int i = 0; i < num - 1; i++) {
double sample_val = TfLiteRound(func(min + i * step) * 32768.0);
double midpoint_interp_val =
TfLiteRound((func(min + (i + 1) * step) * 32768.0 +
TfLiteRound(func(min + i * step) * 32768.0)) /
2.0);
double midpoint_val =
TfLiteRound(func(min + i * step + half_step) * 32768.0);
double midpoint_err = midpoint_interp_val - midpoint_val;
double bias = TfLiteRound(midpoint_err / 2.0);
table[i] = std::min<double>(std::max<double>(sample_val - bias, -32768.0),
32767.0);
}
table[num - 1] = std::min<double>(
std::max<double>(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
// The size of the LUT depends on the type of input. For int8 inputs a simple
// 256 entries LUT is used. For int16 inputs the high 9 bits are used for
// indexing and the 7 remaining bits are used for interpolation. We thus use a
// 513-entries LUT for int16 cases, 512 for the 9-bit indexing and 1 extra entry
// to interpolate the last value.
template <typename LutInT>
constexpr int lut_size() {
static_assert(std::is_same<LutInT, int8_t>::value ||
std::is_same<LutInT, int16_t>::value,
"Only LUTs with int8 or int16 inputs are supported.");
return std::is_same<LutInT, int8_t>::value ? 256 : 513;
}

// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
// softmax
// func - the function to build the LUT for (e.g exp(x))
// min,max - table limits
// table - pointer to buffer
// num - number of elements in the LUT
inline void gen_lut(float (*func)(float), float min, float max, int16_t* table,
const int num) {
// size of table should equal to num + 1
// last element only for slope calculation
float step = (max - min) / (num - 1);
float half_step = step / 2.0f;
for (int i = 0; i < num - 1; i++) {
float sample_val = TfLiteRound(func(min + i * step) * 32768.0f);
float midpoint_interp_val =
TfLiteRound((func(min + (i + 1) * step) * 32768.0f +
TfLiteRound(func(min + i * step) * 32768.0f)) /
2.0f);
float midpoint_val =
TfLiteRound(func(min + i * step + half_step) * 32768.0f);
float midpoint_err = midpoint_interp_val - midpoint_val;
float bias = TfLiteRound(midpoint_err / 2.0f);
table[i] = std::min<float>(std::max<float>(sample_val - bias, -32768.0f),
32767.0f);
// Generate a LUT for 'func' which can be used to approximate functions like
// exp, log, ...
//
// - func: the function to build the LUT for (e.g exp(x))
// - input_min, input_max: range of the func inputs
// - output_min, output_max: range of the func outputs
// - lut: pointer to the LUT table to fill, the table must be of size
//   lut_size<LutInT>()
template <typename FloatT, typename LutInT, typename LutOutT>
inline void gen_lut(FloatT (*func)(FloatT), FloatT input_min, FloatT input_max,
FloatT output_min, FloatT output_max, LutOutT* lut) {
static_assert(std::is_same<LutInT, int8_t>::value ||
std::is_same<LutInT, int16_t>::value,
"Only LUTs with int8 or int16 inputs are supported.");
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
static_assert(std::is_floating_point<FloatT>::value,
"FloatT must be a floating-point type.");

const int nb_steps = std::is_same<LutInT, int8_t>::value ? 256 : 512;
const FloatT step = (input_max - input_min) / nb_steps;
const FloatT half_step = step / 2;
const FloatT output_scaling_inv =
static_cast<FloatT>(std::numeric_limits<LutOutT>::max() -
std::numeric_limits<LutOutT>::min() + 1) /
(output_max - output_min);
const FloatT table_min =
static_cast<FloatT>(std::numeric_limits<LutOutT>::min());
const FloatT table_max =
static_cast<FloatT>(std::numeric_limits<LutOutT>::max());

for (int i = 0; i < nb_steps; i++) {
const FloatT val = func(input_min + i * step);
const FloatT val_midpoint = func(input_min + i * step + half_step);
const FloatT val_next = func(input_min + (i + 1) * step);

const FloatT sample_val = TfLiteRound(val * output_scaling_inv);
const FloatT midpoint_interp_val =
TfLiteRound((val_next * output_scaling_inv +
TfLiteRound(val * output_scaling_inv)) /
2);
const FloatT midpoint_val = TfLiteRound(val_midpoint * output_scaling_inv);
const FloatT midpoint_err = midpoint_interp_val - midpoint_val;
const FloatT bias = TfLiteRound(midpoint_err / 2);

lut[i] = static_cast<LutOutT>(std::min<FloatT>(
std::max<FloatT>(sample_val - bias, table_min), table_max));
}

const bool with_extra_interpolation_value =
std::is_same<LutInT, int16_t>::value;
if (with_extra_interpolation_value) {
lut[nb_steps] = static_cast<LutOutT>(std::min<FloatT>(
std::max<FloatT>(TfLiteRound(func(input_max) * output_scaling_inv),
table_min),
table_max));
}
table[num - 1] = std::min<float>(
std::max<float>(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f);
}

// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
// 512 base value, lut[513] only for calculate slope
uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
// LUT must have 513 values
template <typename LutOutT>
inline LutOutT lut_lookup_with_interpolation(int16_t value,
const LutOutT* lut) {
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
// 512 base values, lut[513] is only used to calculate the slope
const uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
assert(index < 512 && "LUT index out of range.");
int16_t offset = value & 0x7f;
const int16_t offset = value & 0x7f;

// base and slope are Q0.15
int16_t base = lut[index];
int16_t slope = lut[index + 1] - lut[index];
// Base and slope are Q0.x
const LutOutT base = lut[index];
const LutOutT slope = lut[index + 1] - lut[index];

// Q0.15 * Q0.7 = Q0.22
// Round and convert from Q0.22 to Q0.15
int32_t delta = (static_cast<int32_t>(slope) * offset + 64) >> 7;
// Q0.x * Q0.7 = Q0.(x + 7)
// Round and convert from Q0.(x + 7) to Q0.x
const int delta = (slope * offset + 64) >> 7;

// Q0.15 + Q0.15
return base + delta;
return static_cast<LutOutT>(base + delta);
}

// int16_t -> int16_t table lookup with interpolation
// LUT must have 513 values
inline int16_t lut_lookup(int16_t value, const int16_t* lut) {
return lut_lookup_with_interpolation(value, lut);
}

// int16_t -> int8_t table lookup with interpolation
// LUT must have 513 values
inline int8_t lut_lookup(int16_t value, const int8_t* lut) {
return lut_lookup_with_interpolation(value, lut);
}

// int8_t -> int8_t table lookup without interpolation
// LUT must have 256 values
inline int8_t lut_lookup(int8_t value, const int8_t* lut) {
return lut[128 + value];
}

// int8_t -> int16_t table lookup without interpolation
// LUT must have 256 values
inline int16_t lut_lookup(int8_t value, const int16_t* lut) {
return lut[128 + value];
}
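Putting the pieces together, a hedged usage sketch (table size and ranges chosen for illustration): build a 513-entry int16 LUT for exp(x) on [-10, 0] with outputs scaled into [-1, 1], then query it with the interpolating lookup:

```cpp
#include <cmath>
#include <cstdint>

int16_t exp_lut[513];  // lut_size<int16_t>() == 513

void BuildExpTable() {
  // Output range [-1, 1] maps the int16 output to Q0.15, matching the
  // 32768 scaling used by the legacy gen_lut above.
  tflite::gen_lut<float, int16_t, int16_t>(
      [](float x) { return std::exp(x); },
      /*input_min=*/-10.0f, /*input_max=*/0.0f,
      /*output_min=*/-1.0f, /*output_max=*/1.0f, exp_lut);
}

int16_t ApproxExp(int16_t q) {
  // The high 9 bits of q select the base entry, the low 7 bits interpolate.
  return tflite::lut_lookup(q, exp_lut);
}
```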

// Table of sigmoid(i/24) at 0.16 format - 256 elements.

@@ -575,7 +619,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
// InputIntegerBits - z_b_headroom - 0.25);
const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)),
static_cast<int32_t>(InputIntegerBits - z_a_headroom_plus_1),
31 - kAccumIntegerBits)),
shifted_quarter);

// z_b is treated like z_a, but premultiplying by sqrt(0.5).

@@ -585,7 +630,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl(
SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)),
static_cast<int32_t>(InputIntegerBits - z_b_headroom),
31 - kAccumIntegerBits)),
shifted_quarter);

const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));

@@ -19,9 +19,8 @@ limitations under the License.

namespace tflite {

#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
(defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \
defined(__ZEPHYR__)
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
(defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(__ZEPHYR__)
#define TF_LITE_GLOBAL_STD_PREFIX
#else
#define TF_LITE_GLOBAL_STD_PREFIX std
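For illustration (an assumed usage pattern, not the file's literal code), the prefix token is pasted in front of a `::`-qualified call, so the same line resolves to either the global or the `std` function:

```cpp
// With TF_LITE_GLOBAL_STD_PREFIX empty this expands to ::round(x);
// with TF_LITE_GLOBAL_STD_PREFIX defined as std it expands to std::round(x).
#define TF_LITE_EXAMPLE_ROUND(x) TF_LITE_GLOBAL_STD_PREFIX::round(x)
```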

@@ -15,26 +15,6 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_

#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#include <arm_neon.h>
#endif

#if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON
#define USE_NEON
#include "NEON_2_SSE.h"
#endif

// NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is
// defined, PortableSomeFunc(args) otherwise.
#ifdef USE_NEON
// Always use Neon code
#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)

#else
// No NEON available: Use Portable code
#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)

#endif  // defined(USE_NEON)
// TFLM does not need to utilize any Neon optimizations.

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_

@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_

#include <type_traits>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"

@@ -27,25 +29,14 @@ inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
T activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);

const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
input1_data[i] + input2_data[i], params.quantized_activation_min,
params.quantized_activation_max);
}
}

inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const float* input1_data,
const RuntimeShape& input2_shape, const float* input2_data,
const RuntimeShape& output_shape, float* output_data) {
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
auto x = input1_data[i] + input2_data[i];
output_data[i] = ActivationFunctionWithMinMax(
x, params.float_activation_min, params.float_activation_max);
input1_data[i] + input2_data[i], activation_min, activation_max);
}
}
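A hedged usage sketch of the templated reference `Add` (shapes and activation limits are illustrative):

```cpp
#include <limits>

void AddExample() {
  tflite::ArithmeticParams params;
  // Effectively "no activation clamp" for the float path.
  params.float_activation_min = std::numeric_limits<float>::lowest();
  params.float_activation_max = std::numeric_limits<float>::max();

  const tflite::RuntimeShape shape({1, 1, 1, 4});
  const float a[4] = {1.f, 2.f, 3.f, 4.f};
  const float b[4] = {10.f, 20.f, 30.f, 40.f};
  float out[4];  // becomes {11, 22, 33, 44}
  tflite::reference_ops::Add(params, shape, a, shape, b, shape, out);
}
```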

@@ -202,13 +193,12 @@ inline void Add(const ArithmeticParams& params,
}
}

inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const float* input1_data,
const RuntimeShape& input2_shape,
const float* input2_data,
const RuntimeShape& output_shape,
float* output_data) {
template <typename T>
inline typename std::enable_if<!is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,

@@ -216,6 +206,9 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape extended_output_shape =
RuntimeShape::ExtendedShape(4, output_shape);

T activation_min, activation_max;
GetActivationParams(params, &activation_min, &activation_max);

// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
// trailing dimension changing most rapidly (channels has the smallest stride,

@@ -232,51 +225,10 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
output_data[Offset(extended_output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
ActivationFunctionWithMinMax<T>(
input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
input2_data[SubscriptToIndex(desc2, b, y, x, c)],
params.float_activation_min, params.float_activation_max);
}
}
}
}
}
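The two overloads are selected with `std::enable_if` on an `is_small_integer` trait; its exact definition lives in `common.h`, but it behaves roughly like this sketch (the exact member list is an assumption):

```cpp
#include <cstdint>
#include <type_traits>

// Sketch: true for the narrow quantized types, false for float/int32 etc.,
// steering them to the quantized and generic BroadcastAdd4DSlow respectively.
template <typename T>
using is_small_integer_sketch =
    std::integral_constant<bool, std::is_same<T, int8_t>::value ||
                                     std::is_same<T, uint8_t>::value ||
                                     std::is_same<T, int16_t>::value>;

static_assert(is_small_integer_sketch<int8_t>::value, "quantized path");
static_assert(!is_small_integer_sketch<float>::value, "generic path");
```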

inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const int32_t* input1_data,
const RuntimeShape& input2_shape,
const int32_t* input2_data,
const RuntimeShape& output_shape,
int32_t* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
&desc2);
const RuntimeShape extended_output_shape =
RuntimeShape::ExtendedShape(4, output_shape);

// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
// trailing dimension changing most rapidly (channels has the smallest stride,
// typically 1 element).
//
// In generated C code, we store arrays with the dimensions reversed. The
// first dimension has smallest stride.
//
// We name our variables by their Tensorflow convention, but generate C code
// nesting loops such that the innermost loop has the smallest stride for the
// best cache behavior.
for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
output_data[Offset(extended_output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
input2_data[SubscriptToIndex(desc2, b, y, x, c)],
params.quantized_activation_min,
params.quantized_activation_max);
activation_min, activation_max);
}
}
}

@@ -287,10 +239,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
// is 32-bit for both cases. The overflow does not happen due to the
// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
template <typename T>
inline void BroadcastAdd4DSlow(
const ArithmeticParams& params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
inline typename std::enable_if<is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,

@@ -15,7 +15,10 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_

#include "tensorflow/lite/kernels/internal/types.h"
#include <algorithm>
#include <limits>

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_ops {

@@ -36,6 +39,47 @@ inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs,
}
}

inline void AddN(const ArithmeticParams& params,
const RuntimeShape& input_shape, const size_t num_inputs,
const int8_t* const* input_data, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// Input offset is negative input zero point. Activation tensors are
// asymmetric quantized so they span the full int8 range.
// All inputs should have same zero-point and scale, this is checked during
// Prepare stage.
TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());

// All inputs and output should have the same shape, this is checked during
// Prepare stage.
const size_t size = input_shape.FlatSize();
for (size_t i = 0; i < size; ++i) {
// accumulate in scaled_x before clamping to avoid overflow
const int32_t x = params.input1_offset;  // x = 0
const int32_t shifted_x = x * (1 << params.left_shift);
int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_x, params.input1_multiplier, params.input1_shift);

for (size_t j = 0; j < num_inputs; ++j) {
const int32_t y = params.input1_offset + input_data[j][i];
const int32_t shifted_y = y * (1 << params.left_shift);
int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_y, params.input1_multiplier, params.input1_shift);
scaled_x += scaled_y;
}

const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
scaled_x, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<int8_t>(clamped_output);
}
}
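In real-number terms the loop above computes the following (a sketch of the intent; s and z denote scale and zero point, and the ratio s_in/s_out is realized by the fixed-point multiplier/shift pairs):

```latex
q_{\text{out}}[i] \approx \operatorname{clamp}\Big(z_{\text{out}}
  + \frac{s_{\text{in}}}{s_{\text{out}}}
    \sum_{j} \big(q_j[i] - z_{\text{in}}\big)\Big)
```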

}  // namespace reference_ops
}  // namespace tflite


@@ -0,0 +1,275 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_

#include <algorithm>
#include <cstdint>

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_utils_common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {
namespace batch_matmul {

// Determine which dimension is the broadcast dimension.
inline int broadcast_dim(int lhs_dim, int rhs_dim) {
if (lhs_dim == rhs_dim) return lhs_dim;
if (lhs_dim == 1) return rhs_dim;
TFLITE_DCHECK_EQ(rhs_dim, 1);
return lhs_dim;
}

// Compute the "extent" for iterating on this dimension.
// If we are broadcasting, then don't advance (i.e. return 0).
inline int extent(const RuntimeShape& shape, int x) {
if (shape.Dims(x) == 1) {
return 0;
}
int prod = 1;
for (int i = x + 1; i < shape.DimensionsCount(); ++i) {
prod *= shape.Dims(i);
}
return prod;
}

}  // namespace batch_matmul
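A quick hedged check of the broadcast helpers, with the values following directly from the definitions above:

```cpp
#include <cassert>

void BroadcastDimExample() {
  using tflite::reference_ops::batch_matmul::broadcast_dim;
  assert(broadcast_dim(2, 1) == 2);  // RHS batch dim broadcasts
  assert(broadcast_dim(1, 6) == 6);  // LHS batch dim broadcasts
  assert(broadcast_dim(3, 3) == 3);  // equal dims pass through
  // extent() returns 0 for a size-1 (broadcast) dimension, so the data
  // pointer is not advanced while iterating over that batch dimension.
}
```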

template <typename Ta, typename Tb, typename Tout>
inline void BatchMatMul(const RuntimeShape& lhs_shape, const Ta* lhs_data,
const RuntimeShape& rhs_shape, const Tb* rhs_data,
const RuntimeShape& output_shape, Tout* output_data) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);

const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));

const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);

// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);

for (int b0 = 0; b0 < batch_dim0; ++b0) {
const Ta* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const Tb* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const Ta* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const Tb* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const Ta* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const Tb* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
Tout* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
for (int i = 0; i < lhs_rows; ++i) {
Tout total = 0;
for (int k = 0; k < accum_depth; ++k) {
total += static_cast<Tout>(lhs_ptr2[accum_depth * i + k]) *
static_cast<Tout>(rhs_ptr2[j * accum_depth + k]);
}
int idx = lhs_rows * j + i;
out_ptr[idx] = total;
}
}
}
}
}
}

inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data,
const RuntimeShape& rhs_shape, const int8_t* rhs_data,
const float* scaling_factors,
const int32_t* input_offset, int32_t* row_sums,
const RuntimeShape& output_shape, float* output_data,
bool* compute_row_sums) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);

const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));

const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);

// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);

const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols;
const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols;
const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols;
const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows;
const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows;
const int woff_ext2 = lhs_ext2 == 0 ? 0 : lhs_rows;

if (!compute_row_sums || *compute_row_sums) {
int num_weights_matrices = 1;
for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) {
num_weights_matrices *= extended_lhs_shape.Dims(i);
}
tensor_utils::ReductionSumVector(
lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);
if (compute_row_sums) {
*compute_row_sums = false;
}
}

for (int b0 = 0; b0 < batch_dim0; ++b0) {
const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0);
const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0);
const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1);
const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1);
const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1);
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2);
const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2);
const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2);
float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
for (int j = 0; j < rhs_cols; ++j) {
const float batch_scaling_factor = scale_ptr2[j];
const float batch_offset = static_cast<float>(ioff_ptr2[j]);
for (int i = 0; i < lhs_rows; ++i) {
int32_t total = 0;
for (int k = 0; k < accum_depth; ++k) {
total +=
lhs_ptr2[accum_depth * i + k] * rhs_ptr2[j * accum_depth + k];
}
int32_t row_sum = woff_ptr2[i];
total -= row_sum * batch_offset;
int idx = lhs_rows * j + i;
out_ptr[idx] += batch_scaling_factor * total;
}
}
}
}
}
}

template <typename T, typename AccumT>
inline void BatchMatMul(const FullyConnectedParams& params,
const RuntimeShape& lhs_shape, const T* lhs_data,
const RuntimeShape& rhs_shape, const T* rhs_data,
const RuntimeShape& output_shape, T* output_data) {
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);

const int batch_dim0 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 = batch_matmul::broadcast_dim(
extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));

const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);

// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);

const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);

for (int b0 = 0; b0 < batch_dim0; ++b0) {
const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const T* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const T* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const T* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const T* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const T* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
T* out_ptr = output_data +
((b0 * batch_dim1 * batch_dim2) + b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;

for (int j = 0; j < rhs_cols; ++j) {
for (int i = 0; i < lhs_rows; ++i) {
AccumT total = 0;
for (int k = 0; k < accum_depth; ++k) {
AccumT lhs_val = lhs_ptr2[accum_depth * i + k];
AccumT rhs_val = rhs_ptr2[accum_depth * j + k];
total += (lhs_val + filter_offset) * (rhs_val + input_offset);
}
int32_t total_scaled = MultiplyByQuantizedMultiplier(
total, output_multiplier, output_shift);
total_scaled += output_offset;
total_scaled = std::max(total_scaled, output_activation_min);
total_scaled = std::min(total_scaled, output_activation_max);
const int idx = lhs_rows * j + i;
out_ptr[idx] = static_cast<T>(total_scaled);
}
}
}
}
}
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
@@ -0,0 +1,175 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_

#include <algorithm>
#include <cstdint>
#include <limits>

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"

namespace tflite {
namespace reference_ops {

template <typename T>
inline void CumSum(const T* input_data, const RuntimeShape& shape, int32_t axis,
bool exclusive, bool reverse, T* output_data) {
const int32_t rank = shape.DimensionsCount();
TFLITE_DCHECK_GE(rank, 1);
TFLITE_DCHECK_GE(axis, 0);
TFLITE_DCHECK_LT(axis, rank);

size_t inner = 1;
size_t outer = 1;
size_t depth = 1;
for (int32_t i = 0; i < rank; i++) {
if (i < axis)
inner *= shape.Dims(i);
else if (i > axis)
outer *= shape.Dims(i);
else
depth = shape.Dims(i);
}

for (size_t outer_index = 0; outer_index < outer; outer_index++) {
size_t outer_index_adj;
if (reverse)
outer_index_adj = (outer - 1) - outer_index;
else
outer_index_adj = outer_index;
for (size_t inner_index = 0; inner_index < inner; inner_index++) {
T accumulator = 0;
size_t inner_index_adj;
if (reverse)
inner_index_adj = (inner - 1) - inner_index;
else
inner_index_adj = inner_index;
for (size_t depth_index = 0; depth_index < depth; depth_index++) {
size_t depth_index_adj;
if (reverse)
depth_index_adj = (depth - 1) - depth_index;
else
depth_index_adj = depth_index;

size_t index = outer_index_adj;
index += inner_index_adj * depth * outer;
index += depth_index_adj * outer;

if (exclusive) {
output_data[index] = accumulator;
accumulator += input_data[index];
} else {
accumulator += input_data[index];
output_data[index] = accumulator;
}
}
}
}
}
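A hedged usage sketch on a rank-1 tensor, showing the inclusive and exclusive variants:

```cpp
#include <cstdint>

void CumSumExample() {
  const int32_t in[4] = {1, 2, 3, 4};
  int32_t out[4];
  const tflite::RuntimeShape shape({4});

  tflite::reference_ops::CumSum(in, shape, /*axis=*/0,
                                /*exclusive=*/false, /*reverse=*/false, out);
  // out == {1, 3, 6, 10}

  tflite::reference_ops::CumSum(in, shape, /*axis=*/0,
                                /*exclusive=*/true, /*reverse=*/false, out);
  // out == {0, 1, 3, 6}: each output precedes its own input element.
}
```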

//
// Quantized INT8 CUMSUM
//
inline void CumSum(const ArithmeticParams& params, const int8_t* input_data,
const RuntimeShape& shape, int32_t axis, bool exclusive,
bool reverse, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// Input offset is negative input zero point. Activation tensors are
// asymmetric quantized so they span the full int8 range.
// All inputs should have same zero-point and scale, this is checked during
// Prepare stage.
TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());

const int32_t rank = shape.DimensionsCount();
TFLITE_DCHECK_GE(rank, 1);
TFLITE_DCHECK_GE(axis, 0);
TFLITE_DCHECK_LT(axis, rank);

size_t inner = 1;
size_t outer = 1;
size_t depth = 1;
for (int32_t i = 0; i < rank; i++) {
if (i < axis)
inner *= shape.Dims(i);
else if (i > axis)
outer *= shape.Dims(i);
else
depth = shape.Dims(i);
}

for (size_t outer_index = 0; outer_index < outer; outer_index++) {
size_t outer_index_adj;
if (reverse)
outer_index_adj = (outer - 1) - outer_index;
else
outer_index_adj = outer_index;
for (size_t inner_index = 0; inner_index < inner; inner_index++) {
int32_t accumulator = params.input1_offset;  // accumulator = 0
accumulator *= (1 << params.left_shift);
accumulator = MultiplyByQuantizedMultiplierSmallerThanOneExp(
accumulator, params.input1_multiplier, params.input1_shift);

size_t inner_index_adj;
if (reverse)
inner_index_adj = (inner - 1) - inner_index;
else
inner_index_adj = inner_index;

for (size_t depth_index = 0; depth_index < depth; depth_index++) {
size_t depth_index_adj;
if (reverse)
depth_index_adj = (depth - 1) - depth_index;
else
depth_index_adj = depth_index;

size_t index = outer_index_adj;
index += inner_index_adj * depth * outer;
index += depth_index_adj * outer;

const int32_t y = params.input1_offset + input_data[index];
const int32_t shifted_y = y * (1 << params.left_shift);
const int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
shifted_y, params.input1_multiplier, params.input1_shift);

int32_t scaled_output;
if (exclusive) {
scaled_output = accumulator;
accumulator += scaled_y;
} else {
accumulator += scaled_y;
scaled_output = accumulator;
}

const int32_t raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
scaled_output, params.output_multiplier, params.output_shift) +
params.output_offset;
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, raw_output));
output_data[index] = static_cast<int8_t>(clamped_output);
}
}
}
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
@@ -0,0 +1,79 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

template <typename T>
inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
const RuntimeShape& unextended_input_shape,
const T* input_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);

const int input_depth = input_shape.Dims(3);
const int input_width = input_shape.Dims(2);
const int input_height = input_shape.Dims(1);
const int input_batch = input_shape.Dims(0);

const int output_depth = output_shape.Dims(3);
const int output_width = output_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_batch = output_shape.Dims(0);

const int32_t block_size = op_params.block_size;

TFLITE_DCHECK_EQ(input_width * block_size, output_width);
TFLITE_DCHECK_EQ(input_height * block_size, output_height);
TFLITE_DCHECK_EQ(input_depth, output_depth * block_size * block_size);
TFLITE_DCHECK_EQ(input_batch, output_batch);

for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_h = 0; out_h < output_height; ++out_h) {
for (int out_w = 0; out_w < output_width; ++out_w) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
const int in_d =
out_d + ((out_h % block_size) * block_size + out_w % block_size) *
output_depth;

const int in_w = out_w / block_size;
const int in_h = out_h / block_size;
const int in_b = out_b;

const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d);
const int output_index =
Offset(output_shape, out_b, out_h, out_w, out_d);

output_data[output_index] = input_data[input_index];
}
}
}
}
}
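A hedged sketch of the index mapping with `block_size = 2`: the four channels of a single input pixel are rearranged into a 2x2 spatial block:

```cpp
#include <cstdint>

void DepthToSpaceExample() {
  tflite::DepthToSpaceParams op_params;
  op_params.block_size = 2;

  const int8_t in[4] = {1, 2, 3, 4};  // shape (1, 1, 1, 4)
  int8_t out[4];                      // shape (1, 2, 2, 1)
  tflite::reference_ops::DepthToSpace(
      op_params, tflite::RuntimeShape({1, 1, 1, 4}), in,
      tflite::RuntimeShape({1, 2, 2, 1}), out);
  // out == {1, 2, 3, 4}, read row by row as the 2x2 block.
}
```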

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
@@ -1,239 +0,0 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_

#include <algorithm>

#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {

namespace reference_ops {

template <typename T>
inline void DivCheckArithmeticParams(const ArithmeticParams& params) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// Input offset is negative input zero point. Activation tensors are
// asymmetric quantized so they span the full int8 range.
constexpr int32_t max_value =
static_cast<int32_t>(std::numeric_limits<T>::max());
TFLITE_DCHECK_GE(params.input1_offset, -max_value);
TFLITE_DCHECK_LE(params.input1_offset, max_value);
TFLITE_DCHECK_GE(params.input2_offset, -max_value);
TFLITE_DCHECK_LE(params.input2_offset, max_value);
TFLITE_DCHECK_GE(params.output_offset, -max_value);
TFLITE_DCHECK_LE(params.output_offset, max_value);
}

// Element-wise div that can often be used for inner loop of broadcast Div as
// well as the non-broadcast Div.
template <typename T>
inline void DivElementwise(int size, const ArithmeticParams& params,
const T* input1_data, const T* input2_data,
T* output_data) {
DivCheckArithmeticParams<T>(params);

for (int i = 0; i < size; ++i) {
const int32_t input1_val = params.input1_offset + input1_data[i];
const int32_t input2_val = params.input2_offset + input2_data[i];
TFLITE_DCHECK_NE(input2_val, 0);
int recip_shift;
const int32_t input2_inv =
(input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift)
: -GetReciprocal(-input2_val, 31, &recip_shift);
const int headroom = CountLeadingSignBits(input1_val);
const int32_t unscaled_quotient =
MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
headroom);
const int total_shift = params.output_shift - recip_shift - headroom;
const int32_t unclamped_result =
params.output_offset +
MultiplyByQuantizedMultiplierSmallerThanOneExp(
unscaled_quotient, params.output_multiplier, total_shift);
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, unclamped_result));
output_data[i] = static_cast<T>(clamped_output);
}
}
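At the real-number level the reciprocal-based loop realizes the following (a sketch of the intent; the factor s1/(s2*s_out) is folded into `output_multiplier`/`output_shift`, and the division itself is carried out via the Q31 reciprocal from `GetReciprocal`):

```latex
q_{\text{out}} \approx \operatorname{clamp}\Big(z_{\text{out}}
  + \frac{s_1}{s_2\, s_{\text{out}}}
    \cdot \frac{q_1 - z_1}{q_2 - z_2}\Big)
```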

inline void Div(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const uint8_t* input1_data,
const RuntimeShape& input2_shape, const uint8_t* input2_data,
const RuntimeShape& output_shape, uint8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);

DivElementwise(flat_size, params, input1_data, input2_data, output_data);
}

inline void Div(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int8_t* input1_data,
const RuntimeShape& input2_shape, const int8_t* input2_data,
const RuntimeShape& output_shape, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);

DivElementwise(flat_size, params, input1_data, input2_data, output_data);
}

template <typename T, int N = 5>
inline void BroadcastDivSlowQuantized(
const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape,
const T* input1_data, const RuntimeShape& unextended_input2_shape,
const T* input2_data, const RuntimeShape& unextended_output_shape,
T* output_data) {
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);

NdArrayDesc<N> desc1;
NdArrayDesc<N> desc2;
NdArrayDesc<N> output_desc;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
&output_desc);

DivCheckArithmeticParams<T>(params);

auto div_func = [&](int indexes[N]) {
const int32_t input1_val =
params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
const int32_t input2_val =
params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
TFLITE_DCHECK_NE(input2_val, 0);
int recip_shift;
const int32_t input2_inv =
(input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift)
: -GetReciprocal(-input2_val, 31, &recip_shift);
const int headroom = CountLeadingSignBits(input1_val);
const int32_t unscaled_quotient =
MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
headroom);
const int total_shift = params.output_shift - recip_shift - headroom;
const int32_t unclamped_result =
params.output_offset +
MultiplyByQuantizedMultiplierSmallerThanOneExp(
unscaled_quotient, params.output_multiplier, total_shift);
const int32_t clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, unclamped_result));
output_data[SubscriptToIndex(output_desc, indexes)] =
static_cast<T>(clamped_output);
};
NDOpsHelper<N>(output_desc, div_func);
}

template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const uint8_t* input1_data,
const RuntimeShape& unextended_input2_shape,
const uint8_t* input2_data,
const RuntimeShape& unextended_output_shape,
uint8_t* output_data) {
BroadcastDivSlowQuantized<uint8_t, N>(
params, unextended_input1_shape, input1_data, unextended_input2_shape,
input2_data, unextended_output_shape, output_data);
}

template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const int8_t* input1_data,
const RuntimeShape& unextended_input2_shape,
const int8_t* input2_data,
const RuntimeShape& unextended_output_shape,
int8_t* output_data) {
BroadcastDivSlowQuantized<int8_t, N>(
params, unextended_input1_shape, input1_data, unextended_input2_shape,
input2_data, unextended_output_shape, output_data);
}

// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
template <typename T, int N = 5>
void BroadcastDivSlow(const ArithmeticParams& params,
const RuntimeShape& unextended_input1_shape,
const T* input1_data,
const RuntimeShape& unextended_input2_shape,
const T* input2_data,
const RuntimeShape& unextended_output_shape,
T* output_data) {
T output_activation_min;
T output_activation_max;
GetActivationParams(params, &output_activation_min, &output_activation_max);

TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);

NdArrayDesc<N> desc1;
NdArrayDesc<N> desc2;
NdArrayDesc<N> output_desc;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
&output_desc);

// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
// trailing dimension changing most rapidly (channels has the smallest
// stride, typically 1 element).
//
// In generated C code, we store arrays with the dimensions reversed. The
// first dimension has smallest stride.

auto div_func = [&](int indexes[N]) {
output_data[SubscriptToIndex(output_desc, indexes)] =
ActivationFunctionWithMinMax(
input1_data[SubscriptToIndex(desc1, indexes)] /
input2_data[SubscriptToIndex(desc2, indexes)],
output_activation_min, output_activation_max);
};
NDOpsHelper<N>(output_desc, div_func);
}

template <typename T>
inline void Div(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const T* input1_data,
const RuntimeShape& input2_shape, const T* input2_data,
const RuntimeShape& output_shape, T* output_data) {
T output_activation_min;
T output_activation_max;
GetActivationParams(params, &output_activation_min, &output_activation_max);

const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
|
||||
for (int i = 0; i < flat_size; ++i) {
|
||||
output_data[i] = ActivationFunctionWithMinMax(
|
||||
input1_data[i] / input2_data[i], output_activation_min,
|
||||
output_activation_max);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace reference_ops
|
||||
} // namespace tflite
|
||||
|
||||
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
|
||||
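To make the generic `Div` path above concrete: it is an elementwise division followed by an activation clamp. Below is a minimal standalone sketch of those semantics (our own toy code with hypothetical names, not part of the diff):

```cpp
#include <algorithm>
#include <cstdio>

// Elementwise division followed by an activation clamp, mirroring what the
// float Div template does via ActivationFunctionWithMinMax.
void ElementwiseDiv(const float* in1, const float* in2, float* out, int size,
                    float act_min, float act_max) {
  for (int i = 0; i < size; ++i) {
    out[i] = std::min(act_max, std::max(act_min, in1[i] / in2[i]));
  }
}

int main() {
  const float a[4] = {1.f, -4.f, 9.f, 2.f};
  const float b[4] = {2.f, 2.f, 3.f, -1.f};
  float out[4];
  ElementwiseDiv(a, b, out, 4, -2.f, 2.f);    // clamp results to [-2, 2]
  for (float v : out) std::printf("%g ", v);  // prints: 0.5 -2 2 -2
}
```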
@@ -0,0 +1,35 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_

#include <cmath>
#include <functional>

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

template <typename T>
T FloorDiv(T input1, T input2) {
  return std::floor(std::divides<double>()(static_cast<double>(input1),
                                           static_cast<double>(input2)));
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
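The point of `FloorDiv` is that it rounds toward negative infinity, whereas C++ integer `/` truncates toward zero; the two only differ for negative quotients. A small self-contained comparison (toy code, not from the diff):

```cpp
#include <cmath>
#include <cstdio>

// Same semantics as the FloorDiv template above, restated standalone.
int FloorDivInt(int a, int b) {
  return static_cast<int>(std::floor(static_cast<double>(a) / b));
}

int main() {
  std::printf("%d %d\n", 7 / 2, FloorDivInt(7, 2));    // 3 3   (agree)
  std::printf("%d %d\n", -7 / 2, FloorDivInt(-7, 2));  // -3 -4 (truncate vs floor)
}
```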
@@ -0,0 +1,44 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_

#include <cmath>
#include <functional>

namespace tflite {

namespace reference_ops {

template <typename T>
T FloorMod(T input1, T input2) {
  struct FloatMod {
    float operator()(const float lhs, const float rhs) const {
      return std::fmod(lhs, rhs);
    }
  };
  using ModFunc = typename std::conditional<std::is_integral<T>::value,
                                            std::modulus<T>, FloatMod>::type;
  ModFunc mod_func;
  T trunc_mod = mod_func(input1, input2);
  return (trunc_mod != 0) && ((input2 < 0) != (trunc_mod < 0))
             ? (trunc_mod + input2)
             : trunc_mod;
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
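`FloorMod` produces a remainder with the sign of the divisor (Python-style modulo), built from the truncated remainder by adding the divisor when the signs disagree — exactly the branch in the template above. A runnable restatement for the integer case (toy code):

```cpp
#include <cstdio>

// Start from the truncated remainder (%) and shift by the divisor when the
// remainder and divisor have opposite signs, as FloorMod does above.
int FloorModInt(int a, int b) {
  int trunc_mod = a % b;
  return (trunc_mod != 0 && (b < 0) != (trunc_mod < 0)) ? trunc_mod + b
                                                        : trunc_mod;
}

int main() {
  std::printf("%d %d\n", -7 % 3, FloorModInt(-7, 3));  // -1 2
  std::printf("%d %d\n", 7 % -3, FloorModInt(7, -3));  // 1 -2
}
```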
@@ -21,7 +21,7 @@ limitations under the License.
 namespace tflite {
 namespace reference_integer_ops {

-inline void AveragePool(const PoolParams& params,
+inline bool AveragePool(const PoolParams& params,
                         const RuntimeShape& input_shape,
                         const int8_t* input_data,
                         const RuntimeShape& output_shape, int8_t* output_data) {
@@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params,
             filter_count++;
           }
         }
+        if (filter_count == 0) return false;
         // Round to the closest integer value.
         acc = acc > 0 ? (acc + filter_count / 2) / filter_count
                       : (acc - filter_count / 2) / filter_count;
@@ -77,6 +78,7 @@ inline void AveragePool(const PoolParams& params,
       }
     }
   }
+  return true;
 }

 inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
@@ -136,7 +138,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
   }
 }

-inline void AveragePool(const PoolParams& params,
+inline bool AveragePool(const PoolParams& params,
                         const RuntimeShape& input_shape,
                         const int16_t* input_data,
                         const RuntimeShape& output_shape,
@@ -182,6 +184,7 @@ inline void AveragePool(const PoolParams& params,
             filter_count++;
           }
         }
+        if (filter_count == 0) return false;
         // Round to the closest integer value.
         acc = acc > 0 ? (acc + filter_count / 2) / filter_count
                       : (acc - filter_count / 2) / filter_count;
@@ -193,6 +196,7 @@ inline void AveragePool(const PoolParams& params,
       }
     }
   }
+  return true;
 }

 inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
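The reason `AveragePool` now returns `bool`: with sufficiently aggressive padding, a pooling window can contain no valid input elements, so `filter_count` is zero and the average would divide by zero. The new early-out lets the caller surface that as an error. A deliberately tiny 1-D toy model of the failure mode (hypothetical names, not the TFLite kernel glue):

```cpp
#include <cstdio>

// Returns false when the (single) pooling window over the padded input
// contains no real elements, mirroring the "filter_count == 0" early-out.
bool AveragePool1D(const float* in, int in_size, int pad, float* out) {
  int filter_count = 0;
  float total = 0.f;
  for (int i = -pad; i < in_size; ++i) {
    if (i >= 0) { total += in[i]; ++filter_count; }  // skip padding positions
  }
  if (filter_count == 0) return false;  // window was all padding
  *out = total / filter_count;
  return true;
}

int main() {
  float out;
  const float x[2] = {1.f, 3.f};
  if (AveragePool1D(x, 2, 1, &out)) std::printf("avg = %g\n", out);  // avg = 2
  if (!AveragePool1D(nullptr, 0, 1, &out)) std::printf("empty window\n");
}
```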
@@ -0,0 +1,256 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_

#include <algorithm>
#include <cstddef>
#include <limits>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_ops {

inline void LogSoftmax(const SoftmaxParams& params,
                       const RuntimeShape& input_shape, const float* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);

  for (int i = 0; i < outer_size; ++i) {
    // Find max element value which we'll use to ensure numerical stability
    // taking advantage of the following equality:
    // log(exp(x[i])/sum(exp(x[i]))) == log(exp(x[i]+C)/sum(exp(x[i]+C)))
    float max = std::numeric_limits<float>::lowest();
    for (int c = 0; c < depth; ++c) {
      max = std::max(max, input_data[i * depth + c]);
    }

    // Compute sum.
    float sum = 0.f;
    for (int c = 0; c < depth; ++c) {
      sum += std::exp(input_data[i * depth + c] - max);
    }

    // Compute result.
    const float log_sum = std::log(sum);
    for (int c = 0; c < depth; ++c) {
      output_data[i * depth + c] = input_data[i * depth + c] - max - log_sum;
    }
  }
}

inline void LogSoftmax(const SoftmaxParams& params,
                       const RuntimeShape& input_shape,
                       const uint8_t* input_data,
                       const RuntimeShape& output_shape, uint8_t* output_data) {
  const int32_t input_multiplier = params.input_multiplier;
  const int32_t input_left_shift = params.input_left_shift;
  const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor;
  const int32_t reverse_scaling_right_shift =
      params.reverse_scaling_right_shift;
  const int diff_min = params.diff_min;
  // The representation chosen for the input to the exp() function is Q5.26.
  // We need to leave extra space since values that we skip might be as large
  // as -32 before multiplying by input_beta_multiplier, and therefore as
  // large as -16 afterwards. Note that exp(-8) is definitely not
  // insignificant to accumulation, but exp(-16) definitely is.
  static constexpr int kScaledDiffIntegerBits = 5;
  static constexpr int kAccumulationIntegerBits = 12;
  static constexpr int kOutputIntegerBits = 4;
  using FixedPointScaledDiff =
      gemmlowp::FixedPoint<int32_t, kScaledDiffIntegerBits>;
  using FixedPointAccum =
      gemmlowp::FixedPoint<int32_t, kAccumulationIntegerBits>;

  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);

  for (int i = 0; i < outer_size; ++i) {
    uint8_t max_in_row = 0;
    for (int c = 0; c < depth; ++c) {
      max_in_row = std::max(max_in_row, input_data[i * depth + c]);
    }

    FixedPointAccum sum_of_exps = FixedPointAccum::Zero();
    for (int c = 0; c < depth; ++c) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
      if (input_diff >= diff_min) {
        const int32_t input_diff_rescaled =
            MultiplyByQuantizedMultiplierGreaterThanOne(
                input_diff, input_multiplier, input_left_shift);
        const FixedPointScaledDiff scaled_diff_f8 =
            FixedPointScaledDiff::FromRaw(input_diff_rescaled);
        sum_of_exps = sum_of_exps + gemmlowp::Rescale<kAccumulationIntegerBits>(
                                        exp_on_negative_values(scaled_diff_f8));
      }
    }

    const int32_t fixed_log_sum_of_exps =
        log_x_for_x_greater_than_or_equal_to_1<kScaledDiffIntegerBits>(
            sum_of_exps)
            .raw();

    // rescaled_diff_min is smallest representable in
    // Q(kScaledDiffIntegerBits).(31-kScaledDiffIntegerBits) plus the
    // log-sub-exps that will be subtracted in the loop.
    //
    // The thresholds diff_min, etc are negative.
    const int rescaled_diff_min =
        fixed_log_sum_of_exps + std::numeric_limits<int32_t>::lowest();
    const int adjusted_diff_min =
        std::max(static_cast<int32_t>(
                     diff_min - 1),  // Note use of > below instead of >= above.
                 MultiplyByQuantizedMultiplierSmallerThanOneExp(
                     rescaled_diff_min, reverse_scaling_divisor,
                     -reverse_scaling_right_shift));

    for (int c = 0; c < depth; ++c) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
      if (input_diff > adjusted_diff_min) {
        const int32_t input_diff_rescaled =
            MultiplyByQuantizedMultiplierGreaterThanOne(
                input_diff, input_multiplier, input_left_shift);
        int32_t unsat_output =
            gemmlowp::RoundingDivideByPOT(
                (input_diff_rescaled - fixed_log_sum_of_exps),
                31 - kScaledDiffIntegerBits - kOutputIntegerBits) +
            255;

        output_data[i * depth + c] = static_cast<uint8_t>(
            std::max(std::min(unsat_output, static_cast<int32_t>(255)),
                     static_cast<int32_t>(0)));
      } else {
        // Set output to smallest value.
        output_data[i * depth + c] = 0;
      }
    }
  }
}

template <typename T>
inline void LogSoftmaxQuantized(const SoftmaxParams& params,
                                const size_t outer_size, const size_t depth,
                                const RuntimeShape& input_shape,
                                const T* input_data,
                                const RuntimeShape& output_shape,
                                T* output_data) {
  const int32_t input_multiplier = params.input_multiplier;
  const int32_t input_left_shift = params.input_left_shift;
  const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor;
  const int32_t reverse_scaling_right_shift =
      params.reverse_scaling_right_shift;
  const int diff_min = params.diff_min;

  static constexpr T kMinT8 = std::numeric_limits<T>::min();
  static constexpr T kMaxT8 = std::numeric_limits<T>::max();
  static constexpr int32_t kMinInt32 = std::numeric_limits<int32_t>::min();

  // All IntegerBits must agree with Prepare function.
  // Input is chosen as Q5.26 so exp(-1 * 2^5 * 2^-1) = exp(-16) is negligible.
  static constexpr int kInputIntegerBits = 5;
  static constexpr int kAccumulationIntegerBits = 12;
  static constexpr int kOutputIntegerBits = 4;
  using F5 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;
  using F12 = gemmlowp::FixedPoint<int32_t, kAccumulationIntegerBits>;

  for (size_t outer_index = 0; outer_index < outer_size; ++outer_index) {
    T max_in_row = kMinT8;
    for (size_t inner_index = 0; inner_index < depth; ++inner_index) {
      max_in_row =
          std::max(max_in_row, input_data[outer_index * depth + inner_index]);
    }

    // Accumulator "sum_of_exps_in_q12" is safe from overflowing in 2^12 steps.
    F12 sum_of_exps_in_q12 = F12::FromRaw(0);
    for (size_t inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
          max_in_row;
      if (input_diff >= diff_min) {
        const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
            input_diff, input_multiplier, input_left_shift);
        sum_of_exps_in_q12 =
            sum_of_exps_in_q12 +
            gemmlowp::Rescale<kAccumulationIntegerBits>(
                exp_on_negative_values(F5::FromRaw(input_diff_in_q5)));
      }
    }

    const int32_t log_sum_of_exps_in_q5 =
        log_x_for_x_greater_than_or_equal_to_1<kInputIntegerBits>(
            sum_of_exps_in_q12)
            .raw();

    // Potentially reduced the valid range. shifted_log_sum_of_exps_in_q5 is
    // smallest representable in Q5.26 plus the log_sum_of_exps.
    const int32_t shifted_log_sum_of_exps_in_q5 =
        log_sum_of_exps_in_q5 + kMinInt32;
    const int32_t adjusted_diff_min =
        std::max(static_cast<int32_t>(diff_min - 1),
                 MultiplyByQuantizedMultiplier(shifted_log_sum_of_exps_in_q5,
                                               reverse_scaling_divisor,
                                               -reverse_scaling_right_shift));

    for (size_t inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
          max_in_row;
      // Note use of > below instead of >= above.
      if (input_diff > adjusted_diff_min) {
        const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
            input_diff, input_multiplier, input_left_shift);

        // Rescale and downcast.
        int32_t output_in_q27 =
            gemmlowp::RoundingDivideByPOT(
                (input_diff_in_q5 - log_sum_of_exps_in_q5),
                31 - kInputIntegerBits - kOutputIntegerBits) +
            kMaxT8;

        output_in_q27 =
            std::max(std::min(output_in_q27, static_cast<int32_t>(kMaxT8)),
                     static_cast<int32_t>(kMinT8));
        output_data[outer_index * depth + inner_index] =
            static_cast<T>(output_in_q27);
      } else {
        output_data[outer_index * depth + inner_index] = kMinT8;
      }
    }
  }
}

inline void LogSoftmax(const SoftmaxParams& params, const size_t outer_size,
                       const size_t depth, const RuntimeShape& input_shape,
                       const int8_t* input_data,
                       const RuntimeShape& output_shape, int8_t* output_data) {
  LogSoftmaxQuantized(params, outer_size, depth, input_shape, input_data,
                      output_shape, output_data);
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_
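All three LogSoftmax variants above rest on the same shift-invariance identity: subtracting the row maximum before exponentiating changes nothing mathematically but keeps every `exp()` argument at or below zero. A minimal float restatement showing why this matters (toy code, same math as the float path above):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// log(exp(x_i)/sum_j exp(x_j)) == log(exp(x_i - C)/sum_j exp(x_j - C));
// choosing C = max(x) makes every exponent <= 0, so exp() cannot overflow.
void LogSoftmax1D(const float* x, int n, float* y) {
  float max = x[0];
  for (int i = 1; i < n; ++i) max = std::max(max, x[i]);
  float sum = 0.f;
  for (int i = 0; i < n; ++i) sum += std::exp(x[i] - max);
  const float log_sum = std::log(sum);
  for (int i = 0; i < n; ++i) y[i] = x[i] - max - log_sum;
}

int main() {
  const float x[3] = {1000.f, 1001.f, 1002.f};  // naive exp() would overflow
  float y[3];
  LogSoftmax1D(x, 3, y);
  for (float v : y) std::printf("%f ", v);  // ~ -2.408 -1.408 -0.408
}
```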
@@ -51,7 +51,7 @@ inline void Mul(const ArithmeticParams& params,
   GetActivationParams(params, &output_activation_min, &output_activation_max);

   const int flat_size =
-      MatchingFlatSize(input1_shape, input2_shape, output_shape);
+      MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape);
   for (int i = 0; i < flat_size; ++i) {
     output_data[i] = ActivationFunctionWithMinMax(
         input1_data[i] * input2_data[i], output_activation_min,
@@ -66,7 +66,7 @@ inline void Mul(const ArithmeticParams& params,
   TFLITE_DCHECK_LE(params.quantized_activation_min,
                    params.quantized_activation_max);
   const int flat_size =
-      MatchingFlatSize(input1_shape, input2_shape, output_shape);
+      MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape);

   MulElementwise(flat_size, params, input1_data, input2_data, output_data);
 }
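As we read this hunk, the switch to `MatchingExtendedShapeFlatSize` lets `Mul` accept operands whose shapes match only after the lower-rank one is left-padded with 1s. The following is our own hypothetical helper illustrating that interpretation, not the TFLite implementation:

```cpp
#include <cstdio>
#include <vector>

// Left-pad the lower-rank shape with 1s, then require elementwise equality.
// Returns the shared flat size, or -1 if the extended shapes still differ.
int ExtendedFlatSize(std::vector<int> a, std::vector<int> b) {
  while (a.size() < b.size()) a.insert(a.begin(), 1);
  while (b.size() < a.size()) b.insert(b.begin(), 1);
  int flat = 1;
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] != b[i]) return -1;
    flat *= a[i];
  }
  return flat;
}

int main() {
  std::printf("%d\n", ExtendedFlatSize({2, 3}, {1, 2, 3}));  // 6: now accepted
  std::printf("%d\n", ExtendedFlatSize({2, 3}, {3, 2}));     // -1: still rejected
}
```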
@@ -24,8 +24,8 @@ namespace tflite {

 namespace reference_ops {

-// TFLite Pad supports activation tensors with up to 4 dimensions.
-constexpr int PadKernelMaxDimensionCount() { return 4; }
+// TFLite Pad supports activation tensors with up to 5 dimensions.
+constexpr int PadKernelMaxDimensionCount() { return 5; }

 // There are two versions of pad: Pad and PadV2. In PadV2 there is a second
 // scalar input that provides the padding value. Therefore pad_value_ptr can be
@@ -46,8 +46,8 @@ inline void PadImpl(const tflite::PadParams& op_params,
   TFLITE_DCHECK_LE(op_params.left_padding_count, PadKernelMaxDimensionCount());
   TFLITE_DCHECK_LE(op_params.right_padding_count, PadKernelMaxDimensionCount());

-  // Runtime calls are currently fixed at 4 dimensions. Copy inputs so we can
-  // pad them to 4 dims (yes, we are "padding the padding").
+  // Runtime calls are currently fixed at 5 dimensions. Copy inputs so we can
+  // pad them to 5 dims (yes, we are "padding the padding").
   int left_padding_copy[PadKernelMaxDimensionCount()];
   for (int i = 0; i < PadKernelMaxDimensionCount(); i++) {
     left_padding_copy[i] = 0;
@@ -67,39 +67,46 @@ inline void PadImpl(const tflite::PadParams& op_params,
   }

   const int output_batch = ext_output_shape.Dims(0);
-  const int output_height = ext_output_shape.Dims(1);
-  const int output_width = ext_output_shape.Dims(2);
-  const int output_depth = ext_output_shape.Dims(3);
+  const int output_plane = ext_output_shape.Dims(1);
+  const int output_height = ext_output_shape.Dims(2);
+  const int output_width = ext_output_shape.Dims(3);
+  const int output_depth = ext_output_shape.Dims(4);

   const int left_b_padding = left_padding_copy[0];
-  const int left_h_padding = left_padding_copy[1];
-  const int left_w_padding = left_padding_copy[2];
-  const int left_d_padding = left_padding_copy[3];
+  const int left_p_padding = left_padding_copy[1];
+  const int left_h_padding = left_padding_copy[2];
+  const int left_w_padding = left_padding_copy[3];
+  const int left_d_padding = left_padding_copy[4];

   const int right_b_padding = right_padding_copy[0];
-  const int right_h_padding = right_padding_copy[1];
-  const int right_w_padding = right_padding_copy[2];
-  const int right_d_padding = right_padding_copy[3];
+  const int right_p_padding = right_padding_copy[1];
+  const int right_h_padding = right_padding_copy[2];
+  const int right_w_padding = right_padding_copy[3];
+  const int right_d_padding = right_padding_copy[4];

   const T pad_value = *pad_value_ptr;

   const T* in_ptr = input_data;
   T* out_ptr = output_data;
   for (int out_b = 0; out_b < output_batch; ++out_b) {
-    for (int out_h = 0; out_h < output_height; ++out_h) {
-      for (int out_w = 0; out_w < output_width; ++out_w) {
-        for (int out_d = 0; out_d < output_depth; ++out_d) {
-          if (out_b < left_b_padding ||
-              out_b >= output_batch - right_b_padding ||
-              out_h < left_h_padding ||
-              out_h >= output_height - right_h_padding ||
-              out_w < left_w_padding ||
-              out_w >= output_width - right_w_padding ||
-              out_d < left_d_padding ||
-              out_d >= output_depth - right_d_padding) {
-            *out_ptr++ = pad_value;
-          } else {
-            *out_ptr++ = *in_ptr++;
+    for (int out_p = 0; out_p < output_plane; ++out_p) {
+      for (int out_h = 0; out_h < output_height; ++out_h) {
+        for (int out_w = 0; out_w < output_width; ++out_w) {
+          for (int out_d = 0; out_d < output_depth; ++out_d) {
+            if (out_b < left_b_padding ||
+                out_b >= output_batch - right_b_padding ||
+                out_p < left_p_padding ||
+                out_p >= output_plane - right_p_padding ||
+                out_h < left_h_padding ||
+                out_h >= output_height - right_h_padding ||
+                out_w < left_w_padding ||
+                out_w >= output_width - right_w_padding ||
+                out_d < left_d_padding ||
+                out_d >= output_depth - right_d_padding) {
+              *out_ptr++ = pad_value;
+            } else {
+              *out_ptr++ = *in_ptr++;
+            }
           }
         }
       }
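The "padding the padding" comment in the hunk above refers to extending a caller's lower-rank padding spec to the kernel's fixed dimension count: the copy arrays are zero-filled and the real entries land right-aligned, so leading (added) dimensions get zero padding. A small sketch of that idea under those assumptions (toy code, hypothetical names):

```cpp
#include <cstdio>

constexpr int kMaxDims = 5;  // the kernel now always loops over 5 dimensions

// Zero-fill a fixed 5-entry array and right-align the caller's spec, so the
// synthetic leading dimensions receive zero padding.
void ExtendPadding(const int* padding, int count, int out[kMaxDims]) {
  for (int i = 0; i < kMaxDims; ++i) out[i] = 0;
  const int offset = kMaxDims - count;
  for (int i = 0; i < count; ++i) out[offset + i] = padding[i];
}

int main() {
  const int pad_hw[2] = {1, 2};  // e.g. a rank-2 spec padding only H and W
  int full[kMaxDims];
  ExtendPadding(pad_hw, 2, full);
  for (int p : full) std::printf("%d ", p);  // 0 0 0 1 2
}
```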
@@ -23,7 +23,7 @@ limitations under the License.
 namespace tflite {
 namespace reference_ops {

-inline void AveragePool(const PoolParams& params,
+inline bool AveragePool(const PoolParams& params,
                         const RuntimeShape& input_shape,
                         const float* input_data,
                         const RuntimeShape& output_shape, float* output_data) {
@@ -66,6 +66,7 @@ inline void AveragePool(const PoolParams& params,
             filter_count++;
           }
         }
+        if (filter_count == 0) return false;
         const float average = total / filter_count;
         output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
             ActivationFunctionWithMinMax(average, params.float_activation_min,
@@ -74,9 +75,10 @@ inline void AveragePool(const PoolParams& params,
       }
     }
   }
+  return true;
 }

-inline void AveragePool(const PoolParams& params,
+inline bool AveragePool(const PoolParams& params,
                         const RuntimeShape& input_shape,
                         const uint8_t* input_data,
                         const RuntimeShape& output_shape,
@@ -122,6 +124,7 @@ inline void AveragePool(const PoolParams& params,
             filter_count++;
           }
         }
+        if (filter_count == 0) return false;
         acc = (acc + filter_count / 2) / filter_count;
         acc = std::max(acc, params.quantized_activation_min);
         acc = std::min(acc, params.quantized_activation_max);
@@ -131,6 +134,7 @@ inline void AveragePool(const PoolParams& params,
       }
     }
   }
+  return true;
 }

 inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape,
@@ -0,0 +1,774 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"

#if defined(_MSC_VER)
#define __restrict__ __restrict
#endif

namespace tflite {
namespace tensor_utils {

namespace {
const int32_t kInt16Max = std::numeric_limits<int16_t>::max();
const int32_t kInt16Min = std::numeric_limits<int16_t>::min();
}  // namespace

void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                     int8_t* quantized_values, float* min_value,
                                     float* max_value, float* scaling_factor) {
  auto minmax = std::minmax_element(values, values + size);
  *min_value = *minmax.first;
  *max_value = *minmax.second;

  PortableSymmetricQuantizeFloats(values, size, quantized_values, *min_value,
                                  *max_value, scaling_factor);
}

void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                     int8_t* quantized_values, float min_value,
                                     float max_value, float* scaling_factor) {
  const int32_t kScale = 127;
  const float range = std::max(std::abs(min_value), std::abs(max_value));
  if (range == 0) {
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    return;
  }
  *scaling_factor = range / kScale;
  const float scaling_factor_inv = kScale / range;
  for (int i = 0; i < size; ++i) {
    const int32_t quantized_value =
        static_cast<int32_t>(TfLiteRound(values[i] * scaling_factor_inv));
    // Clamp: just in case some odd numeric offset.
    quantized_values[i] = static_cast<int8_t>(
        std::min(kScale, std::max(-kScale, quantized_value)));
  }
}
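Symmetric quantization, as implemented above, picks one positive scale from the largest absolute value, keeps the zero point at 0, and maps everything into [-127, 127]. The same arithmetic in a self-contained sketch (toy code, not the library routine):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// scale = max(|x|) / 127; q = clamp(round(x / scale), -127, 127).
void SymmetricQuantize(const float* x, int n, signed char* q, float* scale) {
  float range = 0.f;
  for (int i = 0; i < n; ++i) range = std::max(range, std::fabs(x[i]));
  if (range == 0.f) {  // all-zero input: any scale works, pick 1
    *scale = 1.f;
    for (int i = 0; i < n; ++i) q[i] = 0;
    return;
  }
  *scale = range / 127.f;
  for (int i = 0; i < n; ++i) {
    int v = static_cast<int>(std::round(x[i] / *scale));
    q[i] = static_cast<signed char>(std::min(127, std::max(-127, v)));
  }
}

int main() {
  const float x[3] = {-0.5f, 0.25f, 1.0f};
  signed char q[3];
  float scale;
  SymmetricQuantize(x, 3, q, &scale);
  // q[i] * scale approximates x[i]; prints: scale=0.007874 q=-64 32 127
  std::printf("scale=%f q=%d %d %d\n", scale, q[0], q[1], q[2]);
}
```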
void PortableAsymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values,
                                      float* scaling_factor, int32_t* offset) {
  const int32_t kMinScale = -128;
  const int32_t kMaxScale = 127;
  const double qmin_double = kMinScale;
  const double qmax_double = kMaxScale;
  const auto minmax = std::minmax_element(values, values + size);
  const double rmin = std::fmin(0, *minmax.first);
  const double rmax = std::fmax(0, *minmax.second);
  if (rmin == rmax) {
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    *offset = 0;
    return;
  } else {
    double scale = (rmax - rmin) / (qmax_double - qmin_double);
    const double zero_point_from_min = qmin_double - rmin / scale;
    const double zero_point_from_max = qmax_double - rmax / scale;
    const double zero_point_from_min_error =
        std::abs(qmin_double) + std::abs(rmin / scale);
    const double zero_point_from_max_error =
        std::abs(qmax_double) + std::abs(rmax / scale);
    const double zero_point_double =
        zero_point_from_min_error < zero_point_from_max_error
            ? zero_point_from_min
            : zero_point_from_max;
    int8_t nudged_zero_point = 0;
    if (zero_point_double <= qmin_double) {
      nudged_zero_point = kMinScale;
    } else if (zero_point_double >= qmax_double) {
      nudged_zero_point = kMaxScale;
    } else {
      nudged_zero_point = static_cast<int8_t>(round(zero_point_double));
    }
    *scaling_factor = scale;
    *offset = nudged_zero_point;
  }
  const float scaling_factor_inv = 1.0f / *scaling_factor;
  for (int i = 0; i < size; ++i) {
    const int32_t quantized_value = static_cast<int32_t>(
        TfLiteRound(*offset + values[i] * scaling_factor_inv));
    quantized_values[i] =
        std::min(kMaxScale, std::max(kMinScale, quantized_value));
  }
}
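The asymmetric variant differs in two ways: the real range is forced to include 0, and the zero point is "nudged" to an exact integer in [-128, 127] so that real 0.0 is representable without error. A stripped-down sketch of the parameter derivation (toy code; the real routine above also weighs which endpoint yields the smaller nudging error):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// Derive (scale, zero_point) for int8 asymmetric quantization from a real
// range that is first widened to include 0.
void AsymmetricParams(float rmin, float rmax, float* scale, int* zero_point) {
  rmin = std::min(rmin, 0.f);
  rmax = std::max(rmax, 0.f);
  *scale = (rmax - rmin) / 255.f;                // 255 = 127 - (-128)
  const float zp = -128.f - rmin / *scale;       // real rmin maps to -128
  *zero_point =
      static_cast<int>(std::round(std::min(127.f, std::max(-128.f, zp))));
}

int main() {
  float scale;
  int zp;
  AsymmetricParams(-1.f, 3.f, &scale, &zp);
  std::printf("scale=%f zero_point=%d\n", scale, zp);  // scale~0.0157, zp=-64
  // Real 0.0 quantizes to exactly zp, so it round-trips with no error.
}
```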
void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
                                                 int m_rows, int m_cols,
                                                 const float* vector,
                                                 int n_batch, float* result) {
  float* result_in_batch = result;
  for (int b = 0; b < n_batch; b++) {
    const float* matrix_ptr = matrix;
    for (int r = 0; r < m_rows; r++) {
      float dot_prod = 0.0f;
      const float* vector_in_batch = vector + b * m_cols;
      for (int c = 0; c < m_cols; c++) {
        dot_prod += *matrix_ptr++ * *vector_in_batch++;
      }
      *result_in_batch += dot_prod;
      ++result_in_batch;
    }
  }
}
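The layout contract of this family of routines is worth spelling out: for each batch `b` and matrix row `r`, the dot product of row `r` with batch-vector `b` is *accumulated* into `result[b * m_rows + r]` (outputs are not overwritten). A tiny usage sketch (toy code):

```cpp
#include <cstdio>

// result[b * rows + r] += dot(matrix row r, vector batch b)
void MatBatchVecAccum(const float* m, int rows, int cols, const float* v,
                      int n_batch, float* result) {
  for (int b = 0; b < n_batch; ++b)
    for (int r = 0; r < rows; ++r) {
      float dot = 0.f;
      for (int c = 0; c < cols; ++c) dot += m[r * cols + c] * v[b * cols + c];
      result[b * rows + r] += dot;
    }
}

int main() {
  const float m[4] = {1, 0, 0, 1};  // 2x2 identity matrix
  const float v[4] = {3, 4, 5, 6};  // two batch vectors of size 2
  float result[4] = {0, 0, 0, 0};   // accumulated into, so start at zero
  MatBatchVecAccum(m, 2, 2, v, 2, result);
  std::printf("%g %g %g %g\n", result[0], result[1], result[2], result[3]);
  // prints: 3 4 5 6
}
```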
void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result) {
  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
    const float batch_scaling_factor = scaling_factors[batch];
    // Get the address of the first row.
    const int8_t* row_ptr = matrix;
    for (int row = 0; row < m_rows; ++row) {
      // Initialize the dot product sum for the row to 0.
      int32_t dotprod = 0;
#if defined(__GNUC__)
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
#endif
      for (int col = 0; col < m_cols; ++col, ++row_ptr) {
        dotprod += (*row_ptr) * (vectors[col]);
      }  // for col
      *result += dotprod * batch_scaling_factor;
      ++result;
    }  // for row
  }  // for batch
}

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context) {
  if (input_offset == nullptr) {
    PortableMatrixBatchVectorMultiplyAccumulate(
        matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);
    return;
  }
  if (!compute_row_sums || *compute_row_sums) {
    PortableReductionSumVector(matrix, row_sums, m_rows, m_cols);
    if (compute_row_sums) {
      *compute_row_sums = false;
    }
  }

  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
    const float batch_scaling_factor = scaling_factors[batch];
    const int32_t batch_offset = input_offset[batch];
    const int8_t* row_ptr = matrix;
    for (int row = 0; row < m_rows; ++row) {
      int32_t dotprod = 0;
      float scale = batch_scaling_factor;
      if (per_channel_scale) {
        scale *= per_channel_scale[row];
      }
#if defined(__GNUC__)
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
#endif
      for (int col = 0; col < m_cols; ++col, ++row_ptr) {
        dotprod += (*row_ptr) * vectors[col];
      }  // for col
      dotprod -= row_sums[row] * batch_offset;
      *result += dotprod * scale;
      ++result;
    }  // for row
  }  // for batch
}

void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result) {
  const int kBlockSize = 4;
  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; batch++) {
    const float* matrix_ptr = matrix;
    for (int row = 0; row < m_rows; row++) {
      float dot_prod = 0.0f;
      const float* vector_in_batch = vector + batch * m_cols;
      for (int i = segments[row]; i < segments[row + 1]; i++) {
        const int block_start_index = indices[i] * kBlockSize;
        const float* vector_block_in_batch_ptr =
            vector_in_batch + block_start_index;
        for (int c = 0; c < kBlockSize; c++) {
          dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++;
        }
      }
      result[batch * m_rows + row] += dot_prod;
    }
  }
}

void PortableSparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result) {
  const int kBlockSize = 16;
  TFLITE_DCHECK_EQ(  // NOLINT
      m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; batch++) {
    const float* matrix_ptr = matrix;
    const uint8_t* ledger_ptr = ledger;
    for (int row = 0; row < m_rows; row++) {
      float dot_prod = 0.0f;
      int num_nonzero_blocks = *ledger_ptr++;
      if (num_nonzero_blocks > 0) {
        const float* vector_in_batch = vector + batch * m_cols;
        for (int i = 0; i < num_nonzero_blocks; i++) {
          const int block_start_index = *ledger_ptr++ * kBlockSize;
          const float* vector_block_in_batch_ptr =
              vector_in_batch + block_start_index;
          for (int c = 0; c < kBlockSize; c++) {
            dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++;
          }
        }
      }
      result[batch * m_rows + row] += dot_prod;
    }
  }
}

void PortableSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result) {
  static const int kBlockSize = 16;
  TFLITE_DCHECK_EQ(  // NOLINT
      m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
    const float batch_scaling_factor = scaling_factors[batch];
    const uint8_t* ledger_ptr = ledger;
    // Get the address of the first row.
    const int8_t* row_ptr = matrix;
    for (int row = 0; row < m_rows; ++row) {
      // Initialize the dot product sum for the row to 0.
      int32_t dotprod = 0;
#if defined(__GNUC__)
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
#endif
      int num_nonzero_blocks = *ledger_ptr++;
      for (int i = 0; i < num_nonzero_blocks; i++) {
        const int block_start_index = *ledger_ptr++ * kBlockSize;
        const int8_t* vector_block_ptr = vectors + block_start_index;
        for (int c = 0; c < kBlockSize; c++) {
          dotprod += (*row_ptr++) * (*vector_block_ptr++);
        }  // for block
      }  // for num_nonzero_blocks
      result[batch * m_rows + row] += dotprod * batch_scaling_factor;
    }  // for row
  }  // for batch
}

template <typename T>
void PortableMatrixBatchVectorMultiplyAccumulateImpl(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    T* output) {
  const int16_t output_max = std::numeric_limits<T>::max();
  const int16_t output_min = std::numeric_limits<T>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int row = 0; row < n_output; ++row) {
      int32_t acc = bias[row];
      for (int col = 0; col < n_input; ++col) {
        int8_t input_val = input[batch * n_input + col];
        int8_t weights_val = input_to_gate_weights[row * n_input + col];
        acc += input_val * weights_val;
      }
      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
      acc += output_zp;
      acc += output[batch * n_output + row];
      if (acc > output_max) {
        acc = output_max;
      }
      if (acc < output_min) {
        acc = output_min;
      }
      output[batch * n_output + row] = static_cast<T>(acc);
    }
  }
}

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context) {
  PortableMatrixBatchVectorMultiplyAccumulateImpl(
      input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input,
      n_output, output_zp, output);
}

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context) {
  PortableMatrixBatchVectorMultiplyAccumulateImpl(
      input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input,
      n_output, output_zp, output);
}

void PortableMatrixBatchVectorMultiply(const int8_t* input,
                                       int32_t input_zeropoint,
                                       const int8_t* input_to_gate_weights,
                                       int32_t input_to_gate_effective_scale_a,
                                       int32_t input_to_gate_effective_scale_b,
                                       int32_t n_batch, int32_t n_input,
                                       int32_t n_cell, int8_t* gate_output,
                                       int8_t gate_output_zp) {
  const int32_t int8_max = std::numeric_limits<int8_t>::max();
  const int32_t int8_min = std::numeric_limits<int8_t>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int row = 0; row < n_cell; ++row) {
      int32_t acc = 0;
      for (int col = 0; col < n_input; ++col) {
        int32_t input_val = input[batch * n_input + col];
        int8_t weights_val = input_to_gate_weights[row * n_input + col];
        acc += (input_val - input_zeropoint) * weights_val;
      }
      acc = MultiplyByQuantizedMultiplier(acc, input_to_gate_effective_scale_a,
                                          input_to_gate_effective_scale_b);
      acc += gate_output_zp;
      if (acc > int8_max) {
        acc = int8_max;
      }
      if (acc < int8_min) {
        acc = int8_min;
      }
      gate_output[batch * n_cell + row] = static_cast<int8_t>(acc);
    }
  }
}

void PortableMatrixBatchVectorMultiply(
    const int16_t* hidden, const int8_t* hidden_to_output_weights,
    int32_t proj_effective_scale_a, int32_t proj_effective_scale_b,
    const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden,
    int32_t n_output, int32_t output_zp, int8_t* proj_output) {
  const int16_t int8_max = std::numeric_limits<int8_t>::max();
  const int16_t int8_min = std::numeric_limits<int8_t>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int row = 0; row < n_output; ++row) {
      int64_t acc = gate_bias[row];
      for (int col = 0; col < n_hidden; ++col) {
        int16_t input_val = hidden[batch * n_hidden + col];
        int8_t weights_val = hidden_to_output_weights[row * n_hidden + col];
        int64_t curr = acc;
        acc += input_val * weights_val;
        if (input_val * weights_val > 0 && acc < curr) {
          acc = std::numeric_limits<int32_t>::max();
        }
        if (input_val * weights_val < 0 && acc > curr) {
          acc = std::numeric_limits<int32_t>::min();
        }
      }
      acc = MultiplyByQuantizedMultiplier(acc, proj_effective_scale_a,
                                          proj_effective_scale_b);
      acc += output_zp;
      if (acc > int8_max) {
        acc = int8_max;
      }
      if (acc < int8_min) {
        acc = int8_min;
      }
      proj_output[batch * n_output + row] = acc;
    }
  }
}

void PortableApplyLayerNorm(const int16_t* input,
                            const int16_t* layer_norm_weights,
                            const int32_t* bias, int32_t layer_norm_scale_a,
                            int32_t layer_norm_scale_b, int32_t variance_limit,
                            int n_batch, int n_input, int16_t* output) {
  // The square of std::pow(2, 10), which is the extra factor that makes sure
  // normalized values has enough resolution.
  static const int kTwoToPower20 = 1 << 20;
  for (int i = 0; i < n_batch; ++i) {
    int64_t sum = 0;
    int64_t sum_sq = 0;
    for (int j = 0; j < n_input; ++j) {
      const int32_t index = i * n_input + j;
      int32_t val = static_cast<int32_t>(input[index]);
      sum += val;
      sum_sq += val * val;
    }
    int32_t mean =
        static_cast<int32_t>(static_cast<int64_t>(sum) * 1024 / n_input);
    // TODO(b/173994730): Avoids overflow but only works for POT n_input.
    int32_t temp = kTwoToPower20 / n_input;
    int64_t variance =
        sum_sq * temp - static_cast<int64_t>(mean) * static_cast<int64_t>(mean);
    int32_t variance2 = static_cast<int32_t>(variance / kTwoToPower20);
    if (variance2 < 1) {
      variance2 = variance_limit;
    }
    int32_t stddev_inverse_a;
    int stddev_inverse_b;
    GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1,
                                     &stddev_inverse_a, &stddev_inverse_b);

    for (int j = 0; j < n_input; ++j) {
      const int32_t index = i * n_input + j;
      int32_t val = static_cast<int32_t>(input[index]);
      int32_t shifted = 1024 * val - mean;
      int32_t rescaled = MultiplyByQuantizedMultiplier(
          shifted, stddev_inverse_a, stddev_inverse_b);
      // TODO(jianlijianli): Saturate this.
      int64_t val3 = rescaled * layer_norm_weights[j] + bias[j];
      int32_t val4 =
          static_cast<int32_t>((val3 > 0 ? val3 + 512 : val3 - 512) / 1024);
      int32_t val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a,
                                                   layer_norm_scale_b + 12);
      val5 = std::min(std::max(kInt16Min, val5), kInt16Max);
      output[index] = static_cast<int16_t>(val5);
    }
  }
}

void PortableApplyLayerNormFloat(const int16_t* input,
                                 const int16_t* layer_norm_weights,
                                 int32_t layer_norm_scale_a,
                                 int32_t layer_norm_scale_b,
                                 const int32_t* bias, int n_batch, int n_input,
                                 int16_t* output) {
  const int32_t int16_max = std::numeric_limits<int16_t>::max();
  const int32_t int16_min = std::numeric_limits<int16_t>::min();
  const float layer_norm_scale =
      layer_norm_scale_a *
      std::pow(2.0, static_cast<double>(layer_norm_scale_b - 31));
  const float bias_scale =
      static_cast<float>(std::pow(2.0, -10)) * layer_norm_scale;

  for (int batch = 0; batch < n_batch; ++batch) {
    float sum = 0.0f;
    float sum_sq = 0.0f;
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const float value = static_cast<float>(input[index]);
      sum += value;
      sum_sq += value * value;
    }
    const float mean = sum / n_input;
    float stddev_inv = 0.0f;
    const float variance = sum_sq / n_input - mean * mean;
    if (variance == 0) {
      stddev_inv = 1.0f / std::sqrt(1e-8f);
    } else {
      stddev_inv = 1.0f / std::sqrt(variance);
    }
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const float normalized_value =
          (static_cast<float>(input[index]) - mean) * stddev_inv;
      const float weighted_normalized_value =
          normalized_value * layer_norm_weights[i] * layer_norm_scale +
          bias[i] * bias_scale;
      const int32_t quant_output = static_cast<int32_t>(std::round(
          weighted_normalized_value * static_cast<float>(std::pow(2, 12))));
      output[index] = std::min(int16_max, std::max(int16_min, quant_output));
    }
  }
}
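Underneath the fixed-point bookkeeping, both layer-norm variants above compute the same thing: normalize each batch row to zero mean and unit variance, then apply a per-element weight and bias. (The integer version additionally carries a 2^10 pre-scale so normalized values keep enough resolution in int16.) The core math in plain float (toy code):

```cpp
#include <cmath>
#include <cstdio>

// y[i] = (x[i] - mean) / stddev * gamma[i] + beta[i]
void LayerNorm(const float* x, const float* gamma, const float* beta, int n,
               float* y) {
  float sum = 0.f, sum_sq = 0.f;
  for (int i = 0; i < n; ++i) { sum += x[i]; sum_sq += x[i] * x[i]; }
  const float mean = sum / n;
  const float variance = sum_sq / n - mean * mean;
  const float stddev_inv = 1.0f / std::sqrt(variance + 1e-8f);
  for (int i = 0; i < n; ++i)
    y[i] = (x[i] - mean) * stddev_inv * gamma[i] + beta[i];
}

int main() {
  const float x[4] = {1, 2, 3, 4};
  const float gamma[4] = {1, 1, 1, 1}, beta[4] = {0, 0, 0, 0};
  float y[4];
  LayerNorm(x, gamma, beta, 4, y);
  for (float v : y) std::printf("%.3f ", v);  // -1.342 -0.447 0.447 1.342
}
```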
void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix,
                                            int32_t scalar, int32_t n_row,
                                            int32_t n_col, int32_t* output) {
  for (int i = 0; i < n_row; ++i) {
    int32_t row_sum = 0;
    for (int j = 0; j < n_col; ++j) {
      row_sum += *matrix++;
    }
    output[i] += row_sum * scalar;
  }
}

void PortableApplySigmoid(const int16_t* input, int32_t n_batch,
                          int32_t n_input, int16_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int c = 0; c < n_input; c++) {
      using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
      using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
      const int index = batch * n_input + c;
      F3 sigmoid_input = F3::FromRaw(input[index]);
      F0 sigmoid_output = gemmlowp::logistic(sigmoid_input);
      output[index] = sigmoid_output.raw();
    }
  }
}
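The sigmoid routines above follow a fixed Q-format contract: the int16 input is read as Q3.12 (value = raw * 2^-12) and the output is written as Q0.15 (raw = value * 2^15) — which is exactly the scaling the float fallback below makes explicit. A standalone mirror of that contract (toy code, same scaling as `PortableApplySigmoidFloat`):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Interpret raw_in as Q3.12, apply the logistic function, emit Q0.15.
int16_t SigmoidQ3ToQ0(int16_t raw_in) {
  const float x = raw_in * std::pow(2.0f, -12);             // Q3.12 -> real
  const float s = 1.0f / (1.0f + std::exp(-x));             // in (0, 1)
  const int32_t q = static_cast<int32_t>(s * (1 << 15));    // real -> Q0.15
  return static_cast<int16_t>(std::min(32767, std::max(-32768, q)));
}

int main() {
  std::printf("%d\n", SigmoidQ3ToQ0(0));     // 16384 == 0.5 in Q0.15
  std::printf("%d\n", SigmoidQ3ToQ0(4096));  // ~23955 == sigmoid(1.0)
}
```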
void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch,
                               int32_t n_input, int16_t* output) {
  const int32_t int16_max = std::numeric_limits<int16_t>::max();
  const int32_t int16_min = std::numeric_limits<int16_t>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const float float_input =
          input[index] * static_cast<float>(std::pow(2, -12));
      const float float_output = 1.0f / (1.0f + std::exp(-float_input));
      const int32_t quant_output = static_cast<int32_t>(
          float_output * static_cast<float>(std::pow(2, 15)));
      const int32_t quant_output_clamped =
          std::min(int16_max, std::max(int16_min, quant_output));
      output[index] = static_cast<int16_t>(quant_output_clamped);
    }
  }
}

template <int IntegerBits>
void PortableApplyTanhImpl(const int16_t* input, int32_t n_batch,
                           int32_t n_input, int16_t* output) {
  using FX = gemmlowp::FixedPoint<std::int16_t, IntegerBits>;
  using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      FX tanh_input = FX::FromRaw(input[index]);
      F0 tanh_output = gemmlowp::tanh(tanh_input);
      output[index] = tanh_output.raw();
    }
  }
}

void PortableApplyTanh(int32_t integer_bits, const int16_t* input,
                       int32_t n_batch, int32_t n_input, int16_t* output) {
  assert(integer_bits <= 6);
#define DISPATCH_TANH(i)                                       \
  case i:                                                      \
    PortableApplyTanhImpl<i>(input, n_batch, n_input, output); \
    break;
  switch (integer_bits) {
    DISPATCH_TANH(0);
    DISPATCH_TANH(1);
    DISPATCH_TANH(2);
    DISPATCH_TANH(3);
    DISPATCH_TANH(4);
    DISPATCH_TANH(5);
    DISPATCH_TANH(6);
    default:
      return;
  }
#undef DISPATCH_TANH
}

void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch,
                            int32_t n_input, int32_t integer_bits,
                            int16_t* output) {
  const int32_t int16_max = std::numeric_limits<int16_t>::max();
  const int32_t int16_min = std::numeric_limits<int16_t>::min();
  const double two = 2.0;
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const float float_input =
          input[index] * std::pow(two, static_cast<double>(integer_bits));
      const float float_output = std::tanh(float_input);
      const int32_t quant_output = static_cast<int32_t>(
          float_output * static_cast<float>(std::pow(2, 15)));
      const int32_t quant_output_clamped =
          std::min(int16_max, std::max(int16_min, quant_output));
      output[index] = static_cast<int16_t>(quant_output_clamped);
    }
  }
}

void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
                      int n_batch, int n_input, int shift, int16_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const int16_t a = input_1[index];
      const int16_t b = input_2[index];
      const int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
      output[index] =
          static_cast<int16_t>(gemmlowp::RoundingDivideByPOT(value, shift));
    }
  }
}

void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
                      int32_t multiplier, int32_t shift, int32_t n_batch,
                      int32_t n_input, int32_t output_zp, int8_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const int16_t a = input_1[index];
      const int16_t b = input_2[index];
      int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
      value = MultiplyByQuantizedMultiplier(value, multiplier, shift);
      value -= output_zp;
      value = std::min(std::max(static_cast<int32_t>(-128), value),
                       static_cast<int32_t>(127));

      output[index] = static_cast<int8_t>(value);
    }
  }
}

void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2,
                      int n_batch, int n_input, int16_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      int32_t sum = input_1[index] + input_2[index];
      const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum));
      output[index] = static_cast<int16_t>(sum_clamped);
    }
  }
}

float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
                                     int v_size) {
  float result = 0.0;
  for (int v = 0; v < v_size; v++) {
    result += *vector1++ * *vector2++;
  }
  return result;
}

namespace {
inline int32_t VectorVectorDotProduct(const int16_t* vector1,
                                      const int16_t* vector2, int v_size) {
  int32_t result = 0;
  for (int v = 0; v < v_size; v++) {
    result += *vector1++ * *vector2++;
  }
  return result;
}
}  // namespace

void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1,
                                              const int16_t* vector2,
                                              int v_size, int n_batch,
                                              int32_t* result) {
  for (int b = 0; b < n_batch; b++) {
    result[b] = VectorVectorDotProduct(vector1, vector2, v_size);
    vector1 += v_size;
    vector2 += v_size;
  }
}

void PortableVectorBatchVectorCwiseProductAccumulate(
    const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
    int32_t multiplier, int shift, int16_t* result) {
  for (int b = 0; b < n_batch; b++) {
    for (int v = 0; v < v_size; v++) {
      int32_t prod = vector[v] * *batch_vector++;
      prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
      int32_t output = prod + *result;
      output = std::max(std::min(static_cast<int32_t>(32767), output),
                        static_cast<int32_t>(-32768));
      *result++ = output;
    }
  }
}

void PortableSub1Vector(const float* vector, int v_size, float* result) {
  for (int v = 0; v < v_size; v++) {
    *result++ = 1.0f - *vector++;
  }
}

void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result) {
  static const int16_t kOne = 32767;
  for (int v = 0; v < v_size; v++) {
    *result++ = kOne - *vector++;
  }
}

void PortableVectorScalarMultiply(const int8_t* vector, const int v_size,
                                  const float scale, float* result) {
  for (int v = 0; v < v_size; ++v) {
    *result++ = scale * *vector++;
  }
}

void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
                                     float* __restrict__ output_vector,
                                     int v_size, int n_batch) {
  for (int batch = 0; batch < n_batch; ++batch) {
    float sum = 0.0f;
    for (int i = 0; i < v_size; ++i) {
      sum += input_vector[i];
    }
    const float mean = sum / v_size;
    float sum_diff_sq = 0.0f;
    for (int i = 0; i < v_size; ++i) {
      const float diff = input_vector[i] - mean;
      sum_diff_sq += diff * diff;
    }
    const float variance = sum_diff_sq / v_size;
    constexpr float kNormalizationConstant = 1e-8f;
    const float stddev_inv =
        1.0f / std::sqrt(variance + kNormalizationConstant);
    for (int i = 0; i < v_size; ++i) {
      output_vector[i] = (input_vector[i] - mean) * stddev_inv;
    }
    input_vector += v_size;
    output_vector += v_size;
  }
}

void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
                                  const int8_t* recurrent, int8_t recurrent_zp,
                                  int32_t input_effective_scale_a,
                                  int32_t input_effective_scale_b,
                                  int32_t recurrent_effective_scale_a,
                                  int32_t recurrent_effective_scale_b,
                                  int32_t n_batch, int32_t n_cell,
                                  int16_t* output) {
  const int32_t int16_max = std::numeric_limits<int16_t>::max();
  const int32_t int16_min = std::numeric_limits<int16_t>::min();
  for (int i = 0; i < n_batch * n_cell; ++i) {
    int32_t x = static_cast<int32_t>(input[i]) - static_cast<int32_t>(input_zp);
    int32_t h =
        static_cast<int32_t>(recurrent[i]) - static_cast<int32_t>(recurrent_zp);
    int32_t x_scaled = MultiplyByQuantizedMultiplier(x, input_effective_scale_a,
                                                     input_effective_scale_b);
    int32_t h_scaled = MultiplyByQuantizedMultiplier(
        h, recurrent_effective_scale_a, recurrent_effective_scale_b);
    int32_t y = h_scaled + x_scaled;
    if (y > int16_max) {
      y = int16_max;
    }
    if (y < int16_min) {
      y = int16_min;
    }
    output[i] = static_cast<int16_t>(y);
  }
}

}  // namespace tensor_utils
}  // namespace tflite
@@ -0,0 +1,235 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_

#include <algorithm>
#include <cstdint>

#if defined(_MSC_VER)
#define __restrict__ __restrict
#endif

namespace tflite {

// Not all backends support CpuBackendContext usage, so forward declare to avoid
// pulling in its implementation.
class CpuBackendContext;

namespace tensor_utils {

template <typename T>
bool PortableIsZeroVector(const T* vector, int v_size) {
  for (int i = 0; i < v_size; ++i) {
    if (vector[i] != 0) {
      return false;
    }
  }
  return true;
}

void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                     int8_t* quantized_values, float* min_value,
                                     float* max_value, float* scaling_factor);

void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                     int8_t* quantized_values, float min_value,
                                     float max_value, float* scaling_factor);

void PortableAsymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values,
                                      float* scaling_factor, int32_t* offset);

// Multiply a matrix by a batch vector, and store results in a batch-size
// vector.
void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
                                                 int m_rows, int m_cols,
                                                 const float* vector,
                                                 int n_batch, float* result);

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result);

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context);

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vector, const float* scaling_factors,
    int n_batch, int32_t* scratch, float* __restrict__ result,
    CpuBackendContext* context);

void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result);

void PortableSparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result);

void PortableSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result);

// Dot product of two vectors.
float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
                                     int v_size);

void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1,
                                              const int16_t* vector2,
                                              int v_size, int n_batch,
                                              int32_t* result);

void PortableVectorBatchVectorCwiseProductAccumulate(
    const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
    int32_t multiplier, int shift, int16_t* result);

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context);

void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context);

void PortableMatrixBatchVectorMultiply(const int8_t* input,
                                       int32_t input_zeropoint,
                                       const int8_t* input_to_gate_weights,
                                       int32_t input_to_gate_effective_scale_a,
                                       int32_t input_to_gate_effective_scale_b,
                                       int32_t n_batch, int32_t n_input,
                                       int32_t n_cell, int8_t* gate_output,
                                       int8_t gate_output_zp);

void PortableMatrixBatchVectorMultiply(
    const int16_t* hidden, const int8_t* hidden_to_output_weights,
    int32_t proj_effective_scale_a, int32_t proj_effective_scale_b,
    const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden,
    int32_t n_output, int32_t output_zp, int8_t* proj_output);

void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix,
                                            int32_t scalar, int32_t n_row,
                                            int32_t n_col, int32_t* output);

void PortableApplyLayerNorm(const int16_t* input,
                            const int16_t* layer_norm_weights,
                            const int32_t* bias, int32_t layer_norm_scale_a,
                            int32_t layer_norm_scale_b, int32_t variance_limit,
                            int n_batch, int n_input, int16_t* output);

void PortableApplyLayerNormFloat(const int16_t* input,
                                 const int16_t* layer_norm_weights,
                                 int32_t layer_norm_scale_a,
                                 int32_t layer_norm_scale_b,
                                 const int32_t* bias, int n_batch, int n_input,
                                 int16_t* output);

void PortableApplySigmoid(const int16_t* input, int32_t n_batch,
                          int32_t n_input, int16_t* output);

void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch,
                               int32_t n_input, int16_t* output);

void PortableApplyTanh(int32_t integer_bits, const int16_t* input,
                       int32_t n_batch, int32_t n_input, int16_t* output);

void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch,
                            int32_t n_input, int32_t integer_bits,
                            int16_t* output);

void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
                      int n_batch, int n_input, int shift, int16_t* output);

void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
                      int32_t multiplier, int32_t shift, int32_t n_batch,
                      int32_t n_input, int32_t output_zp, int8_t* output);

void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2,
                      int n_batch, int n_input, int16_t* output);

template <typename T>
void PortableCwiseClipping(T* vector, const int v_size,
                           const T& clipping_value) {
  for (int i = 0; i < v_size; i++) {
    vector[i] = std::max(std::min(clipping_value, vector[i]),
                         static_cast<T>(-clipping_value));
  }
}
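
Since `PortableCwiseClipping` is defined inline just above, here is a tiny self-contained sketch of its clamping behavior (the sample data is invented for illustration):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Clip int16 gate values to +/-3, exactly as the template above does.
  int16_t v[5] = {-7, -3, 0, 2, 9};
  const int16_t clip = 3;
  for (int i = 0; i < 5; i++) {
    v[i] = std::max(std::min(clip, v[i]), static_cast<int16_t>(-clip));
  }
  for (int16_t x : v) printf("%d ", x);  // prints: -3 -3 0 2 3
  return 0;
}
```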

// Batch vector initialization with another vector.
void PortableVectorBatchVectorAssign(const float* vector, int v_size,
                                     int n_batch, float* batch_vector);

// Compute "1.0f - elements of vector" (used in CIFG).
void PortableSub1Vector(const float* vector, int v_size, float* result);

void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result);

// Multiply all elements of vector with a scalar.
void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
                                  float* result);

// Reduce-sum on a vector:
// input_vector: pointer to input vector.
// output_vector: pointer to vector.
// output_size: output vector size.
// reduction_size: number of consecutive elements from input vector which are
// added to get one element of output.
template <typename INPUT, typename OUTPUT>
void PortableReductionSumVector(const INPUT* input_vector,
                                OUTPUT* output_vector, int output_size,
                                int reduction_size) {
  for (int o = 0; o < output_size; o++) {
    OUTPUT result = 0;
    for (int r = 0; r < reduction_size; r++) {
      result += input_vector[r];
    }
    output_vector[o] = result;
    input_vector += reduction_size;
  }
}
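
A quick standalone sketch of the reduce-sum semantics documented above (values invented): six inputs with `reduction_size` 3 collapse into two sums.

```cpp
#include <cstdint>
#include <cstdio>

// Mirrors PortableReductionSumVector: sums each run of reduction_size
// consecutive inputs into one output element.
int main() {
  const int32_t input[6] = {1, 2, 3, 10, 20, 30};
  int32_t output[2] = {0, 0};
  const int reduction_size = 3;
  for (int o = 0; o < 2; o++) {
    for (int r = 0; r < reduction_size; r++) {
      output[o] += input[o * reduction_size + r];
    }
  }
  printf("%d %d\n", output[0], output[1]);  // prints: 6 60
  return 0;
}
```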

// Layer norm for each batch.
void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
                                     float* __restrict__ output_vector,
                                     int v_size, int n_batch);

// Saturate Add.
void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
                                  const int8_t* recurrent, int8_t recurrent_zp,
                                  int32_t input_effective_scale_a,
                                  int32_t input_effective_scale_b,
                                  int32_t recurrent_effective_scale_a,
                                  int32_t recurrent_effective_scale_b,
                                  int32_t n_batch, int32_t n_cell,
                                  int16_t* output);

}  // namespace tensor_utils
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_
@@ -23,6 +23,25 @@ limitations under the License.
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/types.h"

// Check if the reduction at index is the first one along the dimensions given
// in axis.
inline bool IsFirstReduction(const int* index, const int num_axis,
                             const int* axis) {
  if (num_axis == 0) {
    return true;
  }

  TFLITE_DCHECK(index != nullptr);
  TFLITE_DCHECK(axis != nullptr);
  for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) {
    if (index[axis[axis_idx]] != 0) {
      return false;
    }
  }

  return true;
}

namespace tflite {

namespace reference_ops {
@@ -35,8 +54,7 @@ inline bool Reduce(const In* input_data, const int* input_dims,
                   const int* output_dims, const int input_num_dims,
                   const int output_num_dims, const int* axis,
                   const int num_axis, int* input_iter,
                   Out reducer(const Out current, const In in),
                   Out* output_data) {
                   Out reducer(Out current, const In in), Out* output_data) {
  // Reset input iterator.
  for (int idx = 0; idx < input_num_dims; ++idx) {
    input_iter[idx] = 0;
@@ -53,6 +71,37 @@ inline bool Reduce(const In* input_data, const int* input_dims,
  return true;
}

// Similar to above Reduce function but takes two reducer functions.
// The 'reducer_first' is called with the first value of the reduction,
// 'reducer_next' is then called for all the others.
template <typename In, typename Out>
inline bool Reduce(const In* input_data, const int* input_dims,
                   const int* output_dims, const int input_num_dims,
                   const int output_num_dims, const int* axis,
                   const int num_axis, int* input_iter,
                   const std::function<Out(In in)>& reducer_first,
                   const std::function<Out(Out current, In in)>& reducer_next,
                   Out* output_data) {
  // Reset input iterator.
  for (int idx = 0; idx < input_num_dims; ++idx) {
    input_iter[idx] = 0;
  }
  // Iterate through input_data.
  do {
    size_t input_offset =
        ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr);
    size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims,
                                               input_iter, num_axis, axis);
    if (IsFirstReduction(input_iter, num_axis, axis)) {
      output_data[output_offset] = reducer_first(input_data[input_offset]);
    } else {
      output_data[output_offset] =
          reducer_next(output_data[output_offset], input_data[input_offset]);
    }
  } while (NextIndex(input_num_dims, input_dims, input_iter));
  return true;
}
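
To illustrate the `reducer_first`/`reducer_next` split introduced above, here is a minimal standalone analogue over a flat array (no TFLite types; the split matters when the first element must be transformed rather than combined, as `QuantizedReduceProd` below does with its zero-point subtraction):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Reduce {4, 5, 6} to a product of zero-point-corrected values, zp = 3:
  // the first element is only transformed, the rest are combined.
  const int8_t data[3] = {4, 5, 6};
  const int32_t zp = 3;
  auto reducer_first = [&](int8_t in) -> int32_t { return in - zp; };
  auto reducer_next = [&](int32_t current, int8_t in) -> int32_t {
    return current * (in - zp);
  };
  int32_t acc = reducer_first(data[0]);
  for (int i = 1; i < 3; ++i) acc = reducer_next(acc, data[i]);
  printf("%d\n", acc);  // (4-3)*(5-3)*(6-3) = 6
  return 0;
}
```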

// This method parses the input 'axis' to remove duplicates and handle negative
// values, and returns a valid 'out_axis'
inline bool ResolveAxis(const int num_dims, const int* axis,
@@ -111,7 +160,8 @@ inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
  for (int idx = 0; idx < num_dims; ++idx) {
    size_t current = static_cast<size_t>(dims[idx]);
    // Overflow prevention.
    if (num_elements > std::numeric_limits<size_t>::max() / current) {
    if (current > 0 &&
        num_elements > std::numeric_limits<size_t>::max() / current) {
      return false;
    }
    num_elements *= current;
@@ -132,17 +182,20 @@ inline bool ReduceGeneric(const T* input_data, const int* input_dims,
                          bool keep_dims, int* temp_index, int* resolved_axis,
                          T init_value,
                          T reducer(const T current, const T in)) {
  // Return early when input shape has zero dim.
  for (int i = 0; i < input_num_dims; ++i) {
    if (input_dims[i] == 0) return true;
  }

  // Reset output data.
  if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
                               output_data)) {
    return false;
  }

  // Return early when input shape has zero dim. This is done after initializing
  // data for output tensor because there are cases that the input tensor is
  // empty but output tensor is not. In that case, output tensor should be
  // filled with init_value.
  for (int i = 0; i < input_num_dims; ++i) {
    if (input_dims[i] == 0) return true;
  }

  // Resolve axis.
  int num_resolved_axis = 0;
  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
@@ -290,9 +343,9 @@ inline void Mean(const tflite::MeanParams& op_params,
  constexpr int32_t kMinValue = std::numeric_limits<uint8_t>::min();
  constexpr int32_t kMaxValue = std::numeric_limits<uint8_t>::max();

  int32_t bias =
      output_zero_point -
      static_cast<int32_t>(input_zero_point * input_scale / output_scale);
  float temp = input_zero_point * input_scale / output_scale;
  temp = temp > 0 ? temp + 0.5f : temp - 0.5f;
  int32_t bias = output_zero_point - static_cast<int32_t>(temp);
  double real_scale =
      static_cast<double>(input_scale / (num_elements_in_axis * output_scale));

@@ -353,6 +406,14 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point,
    temp_sum[idx] = U();
  }

  // Return early when input shape has zero dim. This is done after initializing
  // data for output tensor because there are cases that the input tensor is
  // empty but output tensor is not. In that case, output tensor should be
  // filled with init_value.
  for (int i = 0; i < input_num_dims; ++i) {
    if (input_dims[i] == 0) return true;
  }

  // Resolve axis.
  int num_resolved_axis = 0;
  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
@@ -405,6 +466,57 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point,
  return true;
}

template <typename T>
inline bool QuantizedReduceProd(const T* input_data, int32_t input_zero_point,
                                const RuntimeShape& input_shape, T* output_data,
                                int32_t output_zero_point,
                                const RuntimeShape& output_shape,
                                const int* axis,
                                const int64_t num_axis_dimensions,
                                bool keep_dims, int* temp_index,
                                int* resolved_axis, int32_t* temp_prod,
                                int32_t scaling_multiplier, int scaling_shift) {
  const int32_t kMinValue = std::numeric_limits<T>::min();
  const int32_t kMaxValue = std::numeric_limits<T>::max();

  // Resolve axis.
  int num_resolved_axis = 0;
  if (!ResolveAxis(input_shape.DimensionsCount(), axis, num_axis_dimensions,
                   resolved_axis, &num_resolved_axis)) {
    return false;
  }

  // Calculate the reduced product by rescaling each multiplication step to
  // avoid an overflow.
  auto reducer_first = [&](T in) -> int32_t { return in - input_zero_point; };

  auto reducer_next = [&](int32_t current, T in) -> int32_t {
    const int64_t result =
        static_cast<int64_t>(current) * (in - input_zero_point);
    return MultiplyByQuantizedMultiplier(result, scaling_multiplier,
                                         scaling_shift);
  };

  if (!Reduce<T, int32_t>(
          input_data, input_shape.DimsData(), output_shape.DimsData(),
          input_shape.DimensionsCount(), output_shape.DimensionsCount(),
          resolved_axis, num_resolved_axis, temp_index, reducer_first,
          reducer_next, temp_prod)) {
    return false;
  }

  for (int i = 0; i < output_shape.FlatSize(); i++) {
    int32_t result =
        MultiplyByQuantizedMultiplier(static_cast<int64_t>(temp_prod[i]),
                                      scaling_multiplier, scaling_shift) +
        output_zero_point;
    result = std::min(std::max(result, kMinValue), kMaxValue);
    output_data[i] = static_cast<T>(result);
  }

  return true;
}
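
A rough worked example of the per-step rescaling idea in `QuantizedReduceProd` (simplified: the real code uses the fixed-point `MultiplyByQuantizedMultiplier`, approximated here in `double`; all numbers are invented):

```cpp
#include <cstdio>

int main() {
  // Suppose each zero-point-corrected factor can reach 255, so a raw product
  // of n factors can reach 255^n and overflow int32 after a few steps.
  // Rescaling by a per-step factor (here 1/16 as a stand-in for the
  // quantized multiplier) keeps the running value bounded.
  const double step_scale = 1.0 / 16.0;
  double acc = 200.0;  // reducer_first output
  for (int i = 0; i < 3; ++i) {
    acc = (acc * 200.0) * step_scale;  // reducer_next: multiply, then rescale
    printf("step %d: %.1f\n", i, acc);  // grows slowly instead of as 255^n
  }
  return 0;
}
```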

}  // namespace reference_ops

}  // namespace tflite

@@ -0,0 +1,228 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

inline void ComputeInterpolationValues(const float value, const float scale,
                                       const bool half_pixel_centers,
                                       int32_t input_size, float* scaled_value,
                                       int32_t* lower_bound,
                                       int32_t* upper_bound) {
  if (half_pixel_centers) {
    *scaled_value = (value + 0.5f) * scale - 0.5f;
  } else {
    *scaled_value = value * scale;
  }
  float scaled_value_floor = std::floor(*scaled_value);
  *lower_bound = std::max(static_cast<int32_t>(scaled_value_floor),
                          static_cast<int32_t>(0));
  *upper_bound =
      std::min(static_cast<int32_t>(std::ceil(*scaled_value)), input_size - 1);
}

template <typename T>
inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
                           const RuntimeShape& unextended_input_shape,
                           const T* input_data,
                           const RuntimeShape& unextended_output_size_shape,
                           const int32_t* output_size_data,
                           const RuntimeShape& unextended_output_shape,
                           T* output_data) {
  // If half_pixel_centers is True, align_corners must be False.
  TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_size_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_size_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
  int32_t input_height = input_shape.Dims(1);
  int32_t input_width = input_shape.Dims(2);
  int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);

  TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1);
  TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1);
  TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1);
  TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2);
  int32_t output_height =
      output_size_data[Offset(output_size_shape, 0, 0, 0, 0)];
  int32_t output_width =
      output_size_data[Offset(output_size_shape, 0, 0, 0, 1)];

  float height_scale = static_cast<float>(input_height) / output_height;
  float width_scale = static_cast<float>(input_width) / output_width;
  if (op_params.align_corners && output_height > 1) {
    height_scale = static_cast<float>(input_height - 1) / (output_height - 1);
  }
  if (op_params.align_corners && output_width > 1) {
    width_scale = static_cast<float>(input_width - 1) / (output_width - 1);
  }
  const float rounding_offset = std::numeric_limits<T>::is_integer ? .5f : .0f;

  for (int b = 0; b < batches; ++b) {
    for (int y = 0; y < output_height; ++y) {
      float input_y;
      int32_t y0, y1;
      ComputeInterpolationValues(y, height_scale, op_params.half_pixel_centers,
                                 input_height, &input_y, &y0, &y1);
      for (int x = 0; x < output_width; ++x) {
        float input_x;
        int32_t x0, x1;
        ComputeInterpolationValues(x, width_scale, op_params.half_pixel_centers,
                                   input_width, &input_x, &x0, &x1);
        for (int c = 0; c < depth; ++c) {
          T interpolation =
              static_cast<T>(input_data[Offset(input_shape, b, y0, x0, c)] *
                                 (1 - (input_y - y0)) * (1 - (input_x - x0)) +
                             input_data[Offset(input_shape, b, y1, x0, c)] *
                                 (input_y - y0) * (1 - (input_x - x0)) +
                             input_data[Offset(input_shape, b, y0, x1, c)] *
                                 (1 - (input_y - y0)) * (input_x - x0) +
                             input_data[Offset(input_shape, b, y1, x1, c)] *
                                 (input_y - y0) * (input_x - x0) +
                             rounding_offset);
          output_data[Offset(output_shape, b, y, x, c)] = interpolation;
        }
      }
    }
  }
}

inline void ComputeInterpolationValuesInteger(
    const int32_t value, const int32_t scale_10, const bool half_pixel_centers,
    int32_t input_size, int32_t* scaled_value, int32_t* lower_bound,
    int32_t* upper_bound) {
  if (half_pixel_centers) {
    *scaled_value = value * scale_10 + scale_10 / 2 - (1 << 9);
  } else {
    *scaled_value = value * scale_10;
  }
  constexpr int32_t zero = 0;
  *lower_bound = std::max(*scaled_value / (1 << 10), zero);
  *upper_bound =
      std::min((*scaled_value + (1 << 10) - 1) / (1 << 10), input_size - 1);
}
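
For intuition on the 10-bit fixed-point scheme above, a small standalone check (`scale_10` = 512 corresponds to a 2x upscale, i.e. 0.5 in Q10; the numbers are invented):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Output pixel 3 with scale 0.5 (Q10: 512), no half-pixel centers:
  // scaled = 3 * 512 = 1536, i.e. source coordinate 1.5 in Q10.
  const int32_t scale_10 = 512, input_size = 4;
  const int32_t scaled = 3 * scale_10;
  const int32_t lower = std::max(scaled / (1 << 10), 0);
  const int32_t upper =
      std::min((scaled + (1 << 10) - 1) / (1 << 10), input_size - 1);
  printf("scaled=%d lower=%d upper=%d\n", scaled, lower, upper);
  // prints: scaled=1536 lower=1 upper=2  (interpolate between rows 1 and 2)
  return 0;
}
```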

// Same as above but doesn't use any floating-point for the resize
template <typename T>
inline void ResizeBilinearInteger(
    const tflite::ResizeBilinearParams& op_params,
    const RuntimeShape& unextended_input_shape, const T* input_data,
    const RuntimeShape& unextended_output_size_shape,
    const int32_t* output_size_data,
    const RuntimeShape& unextended_output_shape, T* output_data) {
  // If half_pixel_centers is True, align_corners must be False.
  TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_size_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_size_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int32_t input_height = input_shape.Dims(1);
  const int32_t input_width = input_shape.Dims(2);
  const int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);

  TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1);
  TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1);
  TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1);
  TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2);
  const int32_t output_height =
      output_size_data[Offset(output_size_shape, 0, 0, 0, 0)];
  const int32_t output_width =
      output_size_data[Offset(output_size_shape, 0, 0, 0, 1)];

  int32_t height_scale_10 =
      ((1 << 10) * input_height + output_height / 2) / output_height;
  int32_t width_scale_10 =
      ((1 << 10) * input_width + output_width / 2) / output_width;
  if (op_params.align_corners && output_height > 1) {
    height_scale_10 =
        ((1 << 10) * (input_height - 1) + (output_height - 1) / 2) /
        (output_height - 1);
  }
  if (op_params.align_corners && output_width > 1) {
    width_scale_10 = ((1 << 10) * (input_width - 1) + (output_width - 1) / 2) /
                     (output_width - 1);
  }

  for (int b = 0; b < batches; ++b) {
    for (int y = 0; y < output_height; ++y) {
      int32_t input_y, y0, y1;
      ComputeInterpolationValuesInteger(y, height_scale_10,
                                        op_params.half_pixel_centers,
                                        input_height, &input_y, &y0, &y1);
      for (int x = 0; x < output_width; ++x) {
        int32_t input_x, x0, x1;
        ComputeInterpolationValuesInteger(x, width_scale_10,
                                          op_params.half_pixel_centers,
                                          input_width, &input_x, &x0, &x1);
        for (int c = 0; c < depth; ++c) {
          const int64_t output_20_ll =
              static_cast<int64_t>(
                  input_data[Offset(input_shape, b, y0, x0, c)]) *
              ((1 << 10) - (input_y - (1 << 10) * y0)) *
              ((1 << 10) - (input_x - (1 << 10) * x0));
          const int64_t output_20_lu =
              static_cast<int64_t>(
                  input_data[Offset(input_shape, b, y1, x0, c)]) *
              (input_y - (1 << 10) * y0) *
              ((1 << 10) - (input_x - (1 << 10) * x0));
          const int64_t output_20_rl =
              static_cast<int64_t>(
                  input_data[Offset(input_shape, b, y0, x1, c)]) *
              ((1 << 10) - (input_y - (1 << 10) * y0)) *
              (input_x - (1 << 10) * x0);
          const int64_t output_20_ru =
              static_cast<int64_t>(
                  input_data[Offset(input_shape, b, y1, x1, c)]) *
              (input_y - (1 << 10) * y0) * (input_x - (1 << 10) * x0);
          const int64_t output_20 =
              output_20_ll + output_20_lu + output_20_rl + output_20_ru;
          const int64_t round = (output_20 > 0) ? (1 << 19) : -(1 << 19);
          const T interpolation =
              static_cast<T>((output_20 + round) / (1 << 20));
          output_data[Offset(output_shape, b, y, x, c)] = interpolation;
        }
      }
    }
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_
@@ -159,7 +159,7 @@ inline int16_t SoftMaxCalculateExp(const SoftmaxParams& params,
      std::min(std::max(sym_scaled_diff, static_cast<int32_t>(-32768)),
               static_cast<int32_t>(32767));
  // apply the exp() LUT activation function
  return generic_int16_table_lookup(sat_sym_scaled_diff, params.exp_lut);
  return lut_lookup(sat_sym_scaled_diff, params.exp_lut);
}
// Quantized softmax with int16_t input and int16_t output.
inline void SoftmaxInt16(const SoftmaxParams& params,
@@ -207,8 +207,8 @@ inline void SoftmaxInt16(const SoftmaxParams& params,
        std::min(std::max(sym_shifted_sum, static_cast<int32_t>(-32768)),
                 static_cast<int32_t>(32767)));
    // apply 1/(1 + x) LUT activation function
    int16_t reciprocal_scale_Q015 = generic_int16_table_lookup(
        sat_sym_shifted_sum, params.one_over_one_plus_x_lut);
    int16_t reciprocal_scale_Q015 =
        lut_lookup(sat_sym_shifted_sum, params.one_over_one_plus_x_lut);

    // Rescale the exp_result with reciprocal
    // range of output is [0, 32767] correspond to [0.0, 1.0]

@@ -0,0 +1,80 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_

#include <cstdint>

#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {
namespace reference_ops {

template <typename T>
inline void SpaceToDepth(const tflite::SpaceToDepthParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const T* input_data,
                         const RuntimeShape& unextended_output_shape,
                         T* output_data) {
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  const int input_depth = input_shape.Dims(3);
  const int input_width = input_shape.Dims(2);
  const int input_height = input_shape.Dims(1);
  const int input_batch = input_shape.Dims(0);

  const int output_depth = output_shape.Dims(3);
  const int output_width = output_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_batch = output_shape.Dims(0);

  const int32_t block_size = op_params.block_size;

  TFLITE_DCHECK_EQ(input_width, output_width * block_size);
  TFLITE_DCHECK_EQ(input_height, output_height * block_size);
  TFLITE_DCHECK_EQ(input_depth * block_size * block_size, output_depth);
  TFLITE_DCHECK_EQ(input_batch, output_batch);

  for (int in_b = 0; in_b < input_batch; ++in_b) {
    for (int in_h = 0; in_h < input_height; ++in_h) {
      for (int in_w = 0; in_w < input_width; ++in_w) {
        for (int in_d = 0; in_d < input_depth; ++in_d) {
          const int out_d =
              in_d + ((in_h % block_size) * block_size + in_w % block_size) *
                         input_depth;
          const int out_w = in_w / block_size;
          const int out_h = in_h / block_size;
          const int out_b = in_b;

          const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d);
          const int output_index =
              Offset(output_shape, out_b, out_h, out_w, out_d);

          output_data[output_index] = input_data[input_index];
        }
      }
    }
  }
}
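
A compact standalone check of the index mapping above: with `block_size` 2, input pixel (h, w) = (1, 1) of a single-channel image lands in output channel 3 of output cell (0, 0). The sample shape values are invented:

```cpp
#include <cstdio>

int main() {
  const int block_size = 2, input_depth = 1;
  const int in_h = 1, in_w = 1, in_d = 0;
  // Same arithmetic as the SpaceToDepth loop body above.
  const int out_d =
      in_d + ((in_h % block_size) * block_size + in_w % block_size) *
                 input_depth;
  const int out_h = in_h / block_size;
  const int out_w = in_w / block_size;
  printf("out (h, w, d) = (%d, %d, %d)\n", out_h, out_w, out_d);  // (0, 0, 3)
  return 0;
}
```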

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_
@@ -0,0 +1,111 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

template <typename T, int N>
void TransposeImpl(const TransposeParams& params,
                   const RuntimeShape& unextended_input_shape,
                   const T* input_data,
                   const RuntimeShape& unextended_output_shape,
                   T* output_data) {
  const int unextended_input_size = unextended_input_shape.DimensionsCount();
  const int unextended_output_size = unextended_output_shape.DimensionsCount();
  TFLITE_DCHECK_LE(unextended_input_size, N);
  TFLITE_DCHECK_LE(unextended_output_size, N);
  TFLITE_DCHECK_EQ(unextended_output_size, params.perm_count);
  const int input_ext_size = N - unextended_input_size;
  const int output_ext_size = N - unextended_output_size;
  NdArrayDesc<N> input_desc;
  NdArrayDesc<N> output_desc;
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_input_shape),
                 &input_desc);
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                 &output_desc);

  // The perm data is extended to match the output, each index incremented by
  // the amount of front padding of the input shape.
  int extended_perm[N];
  for (int i = 0; i < N; ++i) {
    extended_perm[i] = i < output_ext_size
                           ? i
                           : params.perm[i - output_ext_size] + input_ext_size;
  }

  // Permutes the input shape so we don't need to permute the indexes inside
  // the loop. Check to make sure output_dims is matching input_dims.
  NdArrayDesc<N> perm_input_desc;
  for (int k = 0; k < N; ++k) {
    TFLITE_DCHECK_EQ(input_desc.extents[extended_perm[k]],
                     output_desc.extents[k]);
    perm_input_desc.extents[k] = input_desc.extents[extended_perm[k]];
    perm_input_desc.strides[k] = input_desc.strides[extended_perm[k]];
  }

  // Naive transpose loop (iterate on output index and compute input index).
  auto tranpose_func = [&](int indexes[N]) {
    output_data[SubscriptToIndex(output_desc, indexes)] =
        input_data[SubscriptToIndex(perm_input_desc, indexes)];
  };
  NDOpsHelper<N>(output_desc, tranpose_func);
}

template <typename T, int N = 5>
void Transpose(const TransposeParams& params,
               const RuntimeShape& unextended_input_shape, const T* input_data,
               const RuntimeShape& unextended_output_shape, T* output_data) {
  // Transpose kernel only does rearranging values not numeric evaluations on
  // each cell. It's safe to implement per size of scalar type and this trick
  // keeps the total code size in a reasonable range.
  switch (sizeof(T)) {
    case 1:
      TransposeImpl<int8_t, N>(params, unextended_input_shape,
                               reinterpret_cast<const int8_t*>(input_data),
                               unextended_output_shape,
                               reinterpret_cast<int8_t*>(output_data));
      break;
    case 2:
      TransposeImpl<int16_t, N>(params, unextended_input_shape,
                                reinterpret_cast<const int16_t*>(input_data),
                                unextended_output_shape,
                                reinterpret_cast<int16_t*>(output_data));
      break;

    case 4:
      TransposeImpl<int32_t, N>(params, unextended_input_shape,
                                reinterpret_cast<const int32_t*>(input_data),
                                unextended_output_shape,
                                reinterpret_cast<int32_t*>(output_data));
      break;
    case 8:
      TransposeImpl<int64_t, N>(params, unextended_input_shape,
                                reinterpret_cast<const int64_t*>(input_data),
                                unextended_output_shape,
                                reinterpret_cast<int64_t*>(output_data));
      break;
  }
}
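
The `sizeof`-based dispatch above relies on transposition being a pure byte move, so one integer instantiation serves every type of that width. A standalone sketch of the same trick (the function and names here are illustrative, not from the library; it mirrors the kernel's `reinterpret_cast` approach):

```cpp
#include <cstdint>
#include <cstdio>

// Reverses an array of any 4-byte element type through a single int32_t
// code path, just as float transposes above reuse the int32_t instantiation.
void Reverse4ByteElems(const void* in, void* out, int n) {
  const int32_t* src = static_cast<const int32_t*>(in);
  int32_t* dst = static_cast<int32_t*>(out);
  for (int i = 0; i < n; ++i) dst[i] = src[n - 1 - i];
}

int main() {
  const float in[3] = {1.5f, 2.5f, 3.5f};
  float out[3];
  static_assert(sizeof(float) == 4, "dispatch assumes 4-byte floats");
  Reverse4ByteElems(in, out, 3);
  printf("%g %g %g\n", out[0], out[1], out[2]);  // 3.5 2.5 1.5
  return 0;
}
```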

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_
@@ -400,13 +400,22 @@ inline size_t ReducedOutputOffset(const int num_dims, const int* dims,
  return offset;
}

// Since tensors with '0' in their shape are valid in TF, these offset functions
// allow that as long as the corresponding index is also 0. It is up to the
// calling ops to ensure that they perform verification checks on tensor shapes
// if they don't support a particular behavior.

inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) {
  TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4);
  const int* dims_data = reinterpret_cast<const int*>(shape.DimsDataUpTo5D());
  TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]);
  TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]);
  TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]);
  TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]);
  TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
                (i0 >= 0 && i0 < dims_data[0]));
  TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
                (i1 >= 0 && i1 < dims_data[1]));
  TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
                (i2 >= 0 && i2 < dims_data[2]));
  TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
                (i3 >= 0 && i3 < dims_data[3]));
  return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3;
}

@@ -414,21 +423,34 @@ inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3,
                  int i4) {
  TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5);
  const int* dims_data = reinterpret_cast<const int*>(shape.DimsDataUpTo5D());
  TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]);
  TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]);
  TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]);
  TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]);
  TFLITE_DCHECK(i4 >= 0 && i4 < dims_data[4]);
  TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) ||
                (i0 >= 0 && i0 < dims_data[0]));
  TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) ||
                (i1 >= 0 && i1 < dims_data[1]));
  TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) ||
                (i2 >= 0 && i2 < dims_data[2]));
  TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) ||
                (i3 >= 0 && i3 < dims_data[3]));
  TFLITE_DCHECK((dims_data[4] == 0 && i4 == 0) ||
                (i4 >= 0 && i4 < dims_data[4]));
  return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) *
             dims_data[4] +
         i4;
}

inline int Offset(const RuntimeShape& shape, int* index) {
  return Offset(shape, index[0], index[1], index[2], index[3]);
}

inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) {
  TFLITE_DCHECK(i0 >= 0 && i0 < dims.sizes[0]);
  TFLITE_DCHECK(i1 >= 0 && i1 < dims.sizes[1]);
  TFLITE_DCHECK(i2 >= 0 && i2 < dims.sizes[2]);
  TFLITE_DCHECK(i3 >= 0 && i3 < dims.sizes[3]);
  TFLITE_DCHECK((i0 == 0 && dims.sizes[0] == 0) ||
                (i0 >= 0 && i0 < dims.sizes[0]));
  TFLITE_DCHECK((i1 == 0 && dims.sizes[1] == 0) ||
                (i1 >= 0 && i1 < dims.sizes[1]));
  TFLITE_DCHECK((i2 == 0 && dims.sizes[2] == 0) ||
                (i2 >= 0 && i2 < dims.sizes[2]));
  TFLITE_DCHECK((i3 == 0 && dims.sizes[3] == 0) ||
                (i3 >= 0 && i3 < dims.sizes[3]));
  return i0 * dims.strides[0] + i1 * dims.strides[1] + i2 * dims.strides[2] +
         i3 * dims.strides[3];
}
@@ -437,10 +459,6 @@ inline int Offset(const Dims<4>& dims, int* index) {
  return Offset(dims, index[0], index[1], index[2], index[3]);
}

inline int Offset(const RuntimeShape& shape, int* index) {
  return Offset(shape, index[0], index[1], index[2], index[3]);
}

// Get array size, DCHECKing that the dim index is in range.
//
// Note that this will be phased out with Dims<4>, since RuntimeShape::Dims()
@@ -602,6 +620,58 @@ inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
  return MatchingFlatSize(dims, check_dims_1, check_dims_2, check_dims_3);
}

// Flat size calculation, checking if their extended shapes match.
inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0) {
  const int shape_dims = shape.DimensionsCount();
  const int check_shape_0_dims = check_shape_0.DimensionsCount();
  const int min_dims = std::min(shape_dims, check_shape_0_dims);

  for (int i = 0; i < min_dims; ++i) {
    TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i),
                     check_shape_0.Dims(check_shape_0_dims - 1 - i));
  }
  for (int i = min_dims; i < shape_dims; ++i) {
    TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i), 1);
  }
  for (int i = min_dims; i < check_shape_0_dims; ++i) {
    TFLITE_DCHECK_EQ(check_shape_0.Dims(check_shape_0_dims - 1 - i), 1);
  }
  return shape.FlatSize();
}

inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0,
                                         const RuntimeShape& check_shape_1) {
  const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0);
  TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1),
                   flat_size);
  return flat_size;
}

inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0,
                                         const RuntimeShape& check_shape_1,
                                         const RuntimeShape& check_shape_2) {
  const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0);
  TFLITE_DCHECK_EQ(
      MatchingExtendedShapeFlatSize(shape, check_shape_1, check_shape_2),
      flat_size);
  return flat_size;
}

inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0,
                                         const RuntimeShape& check_shape_1,
                                         const RuntimeShape& check_shape_2,
                                         const RuntimeShape& check_shape_3) {
  const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0);
  TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1,
                                                 check_shape_2, check_shape_3),
                   flat_size);
  return flat_size;
}

// Data is required to be contiguous, and so many operators can use either the
// full array flat size or the flat size with one dimension skipped (commonly
// the depth).
@@ -885,6 +955,8 @@ struct Conv3DParams {
  float float_activation_max;
};

typedef Conv3DParams Conv3DTransposeParams;

struct DepthToSpaceParams {
  int32_t block_size;
};
@@ -1019,9 +1091,9 @@ struct PackParams {

struct PadParams {
  int8_t left_padding_count;
  int32_t left_padding[4];
  int32_t left_padding[5];
  int8_t right_padding_count;
  int32_t right_padding[4];
  int32_t right_padding[5];
  ResizingCategory resizing_category;
};

@@ -1196,6 +1268,23 @@ inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) {
  *min = params.int64_activation_min;
  *max = params.int64_activation_max;
}

// Type trait to check if a given type has size smaller than 4 bytes.
template <typename T>
struct is_small_integer
    : public std::integral_constant<bool,
                                    std::is_same<T, int8_t>::value ||
                                        std::is_same<T, uint8_t>::value ||
                                        std::is_same<T, int16_t>::value ||
                                        std::is_same<T, uint16_t>::value> {};

// Type trait to check if a given type is int32 or int64.
template <typename T>
struct is_int32_or_int64
    : public std::integral_constant<bool, std::is_same<T, int32_t>::value ||
                                              std::is_same<T, int64_t>::value> {
};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_

@@ -119,6 +119,7 @@ TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}

@@ -197,7 +198,7 @@ TfLiteStatus PopulateConvolutionQuantizationParams(
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift) {
    int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return PopulateConvolutionQuantizationParams(
@@ -212,7 +213,8 @@ TfLiteStatus PopulateConvolutionQuantizationParams(
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) {
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels) {
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
@@ -333,30 +335,49 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
}

namespace {
void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation,
                                           int32_t qmin, int32_t qmax,
                                           TfLiteTensor* output,
                                           int32_t* act_min, int32_t* act_max) {

inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
                             int32_t zero_point, float f, int32_t& q) {
  const float tmp = TfLiteRound(f / scale);
  const bool no_integer_overflow_from_quantization =
      (tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
       tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
  TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
  q = zero_point + static_cast<int32_t>(tmp);
  return kTfLiteOk;
}
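
A quick numeric check of the `Quantize` formula introduced above, q = zero_point + round(f / scale), as a standalone sketch (the values are invented):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // With scale 0.5 and zero_point 10, the real value 6.0 quantizes to
  // 10 + round(6.0 / 0.5) = 10 + 12 = 22.
  const float scale = 0.5f, f = 6.0f;
  const int32_t zero_point = 10;
  const int32_t q = zero_point + static_cast<int32_t>(std::round(f / scale));
  printf("q = %d\n", q);  // q = 22
  return 0;
}
```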
|
||||
TfLiteStatus CalculateActivationRangeQuantizedImpl(
|
||||
TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
|
||||
int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
|
||||
const auto scale = output->params.scale;
|
||||
const auto zero_point = output->params.zero_point;
|
||||
|
||||
auto quantize = [scale, zero_point](float f) {
|
||||
return zero_point + static_cast<int32_t>(TfLiteRound(f / scale));
|
||||
};
|
||||
|
||||
int32_t tmp_q;
|
||||
if (activation == kTfLiteActRelu) {
|
||||
*act_min = std::max(qmin, quantize(0.0));
|
||||
TF_LITE_ENSURE_OK(context,
|
||||
Quantize(context, scale, zero_point, 0.0, tmp_q));
|
||||
*act_min = std::max(qmin, tmp_q);
|
||||
*act_max = qmax;
|
||||
} else if (activation == kTfLiteActRelu6) {
|
||||
*act_min = std::max(qmin, quantize(0.0));
|
||||
*act_max = std::min(qmax, quantize(6.0));
|
||||
TF_LITE_ENSURE_OK(context,
|
||||
Quantize(context, scale, zero_point, 0.0, tmp_q));
|
||||
*act_min = std::max(qmin, tmp_q);
|
||||
TF_LITE_ENSURE_OK(context,
|
||||
Quantize(context, scale, zero_point, 6.0, tmp_q));
|
||||
*act_max = std::min(qmax, tmp_q);
|
||||
} else if (activation == kTfLiteActReluN1To1) {
|
||||
*act_min = std::max(qmin, quantize(-1.0));
|
||||
*act_max = std::min(qmax, quantize(1.0));
|
||||
TF_LITE_ENSURE_OK(context,
|
||||
Quantize(context, scale, zero_point, -1.0, tmp_q));
|
||||
*act_min = std::max(qmin, tmp_q);
|
||||
TF_LITE_ENSURE_OK(context,
|
||||
Quantize(context, scale, zero_point, 1.0, tmp_q));
|
||||
*act_max = std::min(qmax, tmp_q);
|
||||
} else {
|
||||
*act_min = qmin;
|
||||
*act_max = qmax;
|
||||
}
|
||||
return kTfLiteOk;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
@@ -380,9 +401,8 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
|
||||
TF_LITE_ENSURE(context, false);
|
||||
}
|
||||
|
||||
CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min,
|
||||
act_max);
|
||||
return kTfLiteOk;
|
||||
return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
|
||||
output, act_min, act_max);
|
||||
}
|
||||
|
||||
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
|
||||
@@ -412,18 +432,15 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
|
||||
const TfLiteTensor* input1,
|
||||
const TfLiteTensor* input2,
|
||||
TfLiteIntArray** output_shape) {
|
||||
int dims1 = NumDimensions(input1);
|
||||
int dims2 = NumDimensions(input2);
|
||||
int out_dims = std::max(dims1, dims2);
|
||||
if (NumElements(input1) == 0) {
|
||||
*output_shape = TfLiteIntArrayCopy(input1->dims);
|
||||
return kTfLiteOk;
|
||||
}
|
||||
const int dims1 = NumDimensions(input1);
|
||||
const int dims2 = NumDimensions(input2);
|
||||
const int out_dims = std::max(dims1, dims2);
|
||||
|
||||
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
|
||||
TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
|
||||
for (int i = 0; i < out_dims; ++i) {
|
||||
int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
|
||||
int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
|
||||
const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
|
||||
const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
|
||||
if (!(d1 == d2 || d1 == 1 || d2 == 1)) {
|
||||
context->ReportError(context,
|
||||
"Given shapes, %s and %s, are not broadcastable.",
|
||||
@@ -431,7 +448,12 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
|
||||
GetShapeDebugString(input2->dims).c_str());
|
||||
return kTfLiteError;
|
||||
}
|
||||
shape->data[out_dims - i - 1] = std::max(d1, d2);
|
||||
|
||||
if (d1 == 0 || d2 == 0) {
|
||||
shape->data[out_dims - i - 1] = 0;
|
||||
} else {
|
||||
shape->data[out_dims - i - 1] = std::max(d1, d2);
|
||||
}
|
||||
}
|
||||
*output_shape = shape.release();
|
||||
return kTfLiteOk;
|
||||
@@ -442,17 +464,20 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
|
||||
const TfLiteTensor* input2,
|
||||
const TfLiteTensor* input3,
|
||||
TfLiteIntArray** output_shape) {
|
||||
int dims1 = NumDimensions(input1);
|
||||
int dims2 = NumDimensions(input2);
|
||||
int dims3 = NumDimensions(input3);
|
||||
int out_dims = std::max(std::max(dims1, dims2), dims3);
|
||||
const int dims1 = NumDimensions(input1);
|
||||
const int dims2 = NumDimensions(input2);
|
||||
const int dims3 = NumDimensions(input3);
|
||||
const int out_dims = std::max(std::max(dims1, dims2), dims3);
|
||||
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape(
|
||||
TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree);
|
||||
for (int i = 0; i < out_dims; ++i) {
|
||||
int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
|
||||
int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
|
||||
int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
|
||||
const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
|
||||
const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
|
||||
const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
|
||||
const int min_value = std::min(std::min(d1, d2), d3);
|
||||
int max_value = std::max(std::max(d1, d2), d3);
|
||||
// If one dimention is 0, others must be 0 or 1.
|
||||
if (min_value == 0) max_value = 0;
|
||||
if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) ||
|
||||
!(d3 == 1 || d3 == max_value)) {
|
||||
context->ReportError(
|
||||
@@ -473,42 +498,42 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
 int TfLiteTypeGetSize(TfLiteType type) {
   switch (type) {
     case kTfLiteUInt8:
-      TF_LITE_ASSERT_EQ(sizeof(uint8_t), 1);
+      static_assert(sizeof(uint8_t) == 1, "");
       return 1;
     case kTfLiteInt8:
-      TF_LITE_ASSERT_EQ(sizeof(int8_t), 1);
+      static_assert(sizeof(int8_t) == 1, "");
       return 1;
     case kTfLiteBool:
       return sizeof(bool);
     case kTfLiteInt16:
-      TF_LITE_ASSERT_EQ(sizeof(int16_t), 2);
+      static_assert(sizeof(int16_t) == 2, "");
       return 2;
     case kTfLiteFloat16:
-      TF_LITE_ASSERT_EQ(sizeof(int16_t), 2);
+      static_assert(sizeof(int16_t) == 2, "");
       return 2;
     case kTfLiteFloat32:
-      TF_LITE_ASSERT_EQ(sizeof(float), 4);
+      static_assert(sizeof(float) == 4, "");
       return 4;
     case kTfLiteInt32:
-      TF_LITE_ASSERT_EQ(sizeof(int32_t), 4);
+      static_assert(sizeof(int32_t) == 4, "");
       return 4;
     case kTfLiteUInt32:
-      TF_LITE_ASSERT_EQ(sizeof(uint32_t), 4);
+      static_assert(sizeof(uint32_t) == 4, "");
       return 4;
     case kTfLiteInt64:
-      TF_LITE_ASSERT_EQ(sizeof(int64_t), 8);
+      static_assert(sizeof(int64_t) == 8, "");
       return 8;
     case kTfLiteUInt64:
-      TF_LITE_ASSERT_EQ(sizeof(uint64_t), 8);
+      static_assert(sizeof(uint64_t) == 8, "");
       return 8;
     case kTfLiteFloat64:
-      TF_LITE_ASSERT_EQ(sizeof(double), 8);
+      static_assert(sizeof(double) == 8, "");
       return 8;
     case kTfLiteComplex64:
-      TF_LITE_ASSERT_EQ(sizeof(std::complex<float>), 8);
+      static_assert(sizeof(std::complex<float>) == 8, "");
       return 8;
     case kTfLiteComplex128:
-      TF_LITE_ASSERT_EQ(sizeof(std::complex<double>), 16);
+      static_assert(sizeof(std::complex<double>) == 16, "");
       return 16;
     default:
       return 0;
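Replacing the runtime `TF_LITE_ASSERT_EQ` checks with `static_assert` moves the size verification to compile time: no check code is emitted on the device, and a violated assumption fails the build instead of halting at runtime. A minimal illustration:

```cpp
#include <complex>
#include <cstdint>

// These checks compile to nothing; if a platform ever violated one,
// the build would fail with the given message instead of the device
// hitting an abort path at runtime.
static_assert(sizeof(int8_t) == 1, "int8_t must be 1 byte");
static_assert(sizeof(float) == 4, "float must be 4 bytes");
static_assert(sizeof(std::complex<double>) == 16, "complex<double> must be 16 bytes");
```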
@@ -214,14 +214,15 @@ TfLiteStatus PopulateConvolutionQuantizationParams(
     const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
     const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
     int32_t* output_activation_min, int32_t* output_activation_max,
-    int32_t* per_channel_multiplier, int* per_channel_shift);
+    int32_t* per_channel_multiplier, int32_t* per_channel_shift);
 
 TfLiteStatus PopulateConvolutionQuantizationParams(
     TfLiteContext* context, const TfLiteTensor* input,
     const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
     const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
-    int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels);
+    int32_t* output_activation_min, int32_t* output_activation_max,
+    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
+    int num_channels);
 
 // Calculates the multiplication factor for a quantized convolution (or
 // quantized depthwise convolution) involving the given tensors. Returns an
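The signature change makes `per_channel_shift` an `int32_t*` to match `per_channel_multiplier`: a per-channel-quantized convolution carries one (multiplier, shift) pair per output channel and requantizes each channel's accumulator with its own pair. A rough sketch of that rescaling, with rounding and saturation omitted for brevity (the real kernels use rounding, saturating fixed-point helpers):

```cpp
#include <cstdint>

// Illustrative only: scale an int64 accumulator for one channel by
// multiplier / 2^31, then apply that channel's shift.
int32_t RequantizeChannel(int64_t acc, int32_t multiplier, int32_t shift) {
  const int64_t scaled = (acc * static_cast<int64_t>(multiplier)) >> 31;
  return static_cast<int32_t>(shift >= 0 ? scaled << shift
                                         : scaled >> (-shift));
}
```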
@@ -15,69 +15,24 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
 #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
 
-// If we're on a platform without standard IO functions, fall back to a
-// non-portable function.
-#ifdef TF_LITE_MCU_DEBUG_LOG
-
 #include "tensorflow/lite/micro/debug_log.h"
 
-#define DEBUG_LOG(x) \
-  do {               \
-    DebugLog(x);     \
-  } while (0)
-
-inline void InfiniteLoop() {
-  DEBUG_LOG("HALTED\n");
+#if !defined(TF_LITE_MCU_DEBUG_LOG)
+#include <cstdlib>
+#define TFLITE_ABORT abort()
+#else
+inline void AbortImpl() {
+  DebugLog("HALTED\n");
   while (1) {
   }
 }
+#define TFLITE_ABORT AbortImpl();
+#endif
 
-#define TFLITE_ABORT InfiniteLoop();
-
-#else  // TF_LITE_MCU_DEBUG_LOG
-
-#include <cstdio>
-#include <cstdlib>
-
-#define DEBUG_LOG(x)            \
-  do {                          \
-    fprintf(stderr, "%s", (x)); \
-  } while (0)
-
-// Report Error for unsupported type by op 'op_name' and returns kTfLiteError.
-#define TF_LITE_UNSUPPORTED_TYPE(context, type, op_name)                    \
-  do {                                                                      \
-    TF_LITE_KERNEL_LOG((context), "%s:%d Type %s is unsupported by op %s.", \
-                       __FILE__, __LINE__, TfLiteTypeGetName(type),         \
-                       (op_name));                                          \
-    return kTfLiteError;                                                    \
-  } while (0)
-
-#define TFLITE_ABORT abort()
-
-#endif  // TF_LITE_MCU_DEBUG_LOG
-
-#if defined(NDEBUG) || defined(ARDUINO)
+#if defined(NDEBUG)
 #define TFLITE_ASSERT_FALSE (static_cast<void>(0))
 #else
 #define TFLITE_ASSERT_FALSE TFLITE_ABORT
 #endif
 
-#define TF_LITE_FATAL(msg)    \
-  do {                        \
-    DEBUG_LOG(msg);           \
-    DEBUG_LOG("\nFATAL\n");   \
-    TFLITE_ABORT;             \
-  } while (0)
-
-#define TF_LITE_ASSERT(x)        \
-  do {                           \
-    if (!(x)) TF_LITE_FATAL(#x); \
-  } while (0)
-
-#define TF_LITE_ASSERT_EQ(x, y)                            \
-  do {                                                     \
-    if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
-  } while (0)
-
 #endif  // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
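After this cleanup the header provides only `TFLITE_ABORT` (a plain `abort()` on hosted builds, `DebugLog` plus an infinite loop on MCU builds) and `TFLITE_ASSERT_FALSE`, which compiles away under `NDEBUG`. A hedged usage sketch with local stand-ins for the macros:

```cpp
#include <cstdlib>

// Local stand-ins mirroring the hosted-build branch of op_macros.h.
#define TFLITE_ABORT abort()
#if defined(NDEBUG)
#define TFLITE_ASSERT_FALSE (static_cast<void>(0))
#else
#define TFLITE_ASSERT_FALSE TFLITE_ABORT
#endif

int main() {
  bool unreachable_branch_taken = false;
  if (unreachable_branch_taken) {
    TFLITE_ASSERT_FALSE;  // halts a debug build, disappears under NDEBUG
  }
  return 0;
}
```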
@@ -20,7 +20,6 @@ limitations under the License.
 
 namespace tflite {
 
-// TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover.
 inline int ComputePadding(int stride, int dilation_rate, int in_size,
                           int filter_size, int out_size) {
   int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
@@ -45,6 +44,11 @@ inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size,
 inline int ComputeOutSize(TfLitePadding padding, int image_size,
                           int filter_size, int stride, int dilation_rate = 1) {
   int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
+
+  // TODO(b/186448822): This uses 0 since the function has no other way to
+  // report error case
+  if (stride == 0) return 0;
+
   switch (padding) {
     case kTfLitePaddingSame:
       return (image_size + stride - 1) / stride;
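For SAME padding the output size is ceil(image / stride); VALID counts only fully valid filter positions. The new `stride == 0` guard avoids a division by zero on a malformed model, at the cost of silently reporting a zero-sized output. A standalone restatement with worked numbers (padding reduced to a bool, and the VALID branch filled in with the usual TFLite formula for completeness):

```cpp
#include <cstdio>

int ComputeOutSize(bool same_padding, int image_size, int filter_size,
                   int stride, int dilation_rate = 1) {
  const int effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  // Mirror of the new guard: a malformed model reports 0 instead of
  // crashing with a division by zero.
  if (stride == 0) return 0;
  return same_padding
             ? (image_size + stride - 1) / stride
             : (image_size + stride - effective_filter_size) / stride;
}

int main() {
  printf("%d\n", ComputeOutSize(true, 224, 3, 2));   // SAME:  112
  printf("%d\n", ComputeOutSize(false, 224, 3, 2));  // VALID: 111
}
```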
@@ -32,14 +32,18 @@ AllOpsResolver::AllOpsResolver() {
   AddConcatenation();
   AddConv2D();
   AddCos();
+  AddCumSum();
+  AddDepthToSpace();
   AddDepthwiseConv2D();
   AddDequantize();
   AddDetectionPostprocess();
   AddDiv();
+  AddElu();
   AddEqual();
   AddEthosU();
+  AddExpandDims();
   AddFloor();
+  AddFloorDiv();
+  AddFloorMod();
   AddFullyConnected();
   AddGreater();
   AddGreaterEqual();
@@ -70,6 +74,7 @@ AllOpsResolver::AllOpsResolver() {
   AddRelu();
   AddRelu6();
   AddReshape();
+  AddResizeBilinear();
   AddResizeNearestNeighbor();
   AddRound();
   AddRsqrt();
@@ -77,6 +82,7 @@ AllOpsResolver::AllOpsResolver() {
   AddSin();
   AddSoftmax();
   AddSpaceToBatchNd();
+  AddSpaceToDepth();
   AddSplit();
   AddSplitV();
   AddSqrt();
@@ -87,6 +93,7 @@ AllOpsResolver::AllOpsResolver() {
   AddSvdf();
   AddTanh();
   AddTransposeConv();
+  AddTranspose();
   AddUnpack();
 }
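`AllOpsResolver` registers every kernel above, which is convenient but links them all into the binary. On a flash-constrained target such as this project's ESP32, the usual alternative is `MicroMutableOpResolver` with only the ops the model actually uses, e.g.:

```cpp
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Register just the four ops a hypothetical model needs; the template
// argument is the number of registration slots. The linker can then
// drop every kernel that is never referenced.
void RegisterSelectedOps(tflite::MicroMutableOpResolver<4>& resolver) {
  resolver.AddConv2D();
  resolver.AddFullyConnected();
  resolver.AddSoftmax();
  resolver.AddReshape();
}
```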
@@ -0,0 +1,64 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/lite/micro/flatbuffer_utils.h"
+
+namespace tflite {
+
+FlexbufferWrapper::FlexbufferWrapper(const uint8_t* buffer, size_t size)
+    : flexbuffers::Vector(flexbuffers::GetRoot(buffer, size).AsVector()) {}
+
+int64_t FlexbufferWrapper::ElementAsInt64(size_t i) const {
+  const uint8_t* elem = data_ + i * byte_width_;
+  return ::flexbuffers::ReadInt64(elem, byte_width_);
+}
+
+uint64_t FlexbufferWrapper::ElementAsUInt64(size_t i) const {
+  const uint8_t* elem = data_ + i * byte_width_;
+  return ::flexbuffers::ReadUInt64(elem, byte_width_);
+}
+
+int32_t FlexbufferWrapper::ElementAsInt32(size_t i) const {
+  return static_cast<int32_t>(ElementAsInt64(i));
+}
+
+bool FlexbufferWrapper::ElementAsBool(size_t i) const {
+  return static_cast<bool>(ElementAsUInt64(i));
+}
+
+double FlexbufferWrapper::ElementAsDouble(size_t i) const {
+  const uint8_t* elem = data_ + i * byte_width_;
+  return ::flexbuffers::ReadDouble(elem, byte_width_);
+}
+
+float FlexbufferWrapper::ElementAsFloat(size_t i) const {
+  return static_cast<float>(FlexbufferWrapper::ElementAsDouble(i));
+}
+
+// TODO(b/192589496): Ops must always be there. Remove this function when fixed
+uint32_t NumSubgraphOperators(const SubGraph* subgraph) {
+  if (subgraph->operators() != nullptr) {
+    return subgraph->operators()->size();
+  } else {
+    return 0;
+  }
+}
+// TODO(b/192589496): Ops must always be there. Remove this function when fixed
+uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx) {
+  const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
+  return NumSubgraphOperators(subgraph);
+}
+
+}  // namespace tflite
@@ -0,0 +1,56 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
+#define THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
+
+#include "flatbuffers/flatbuffers.h"
+#include "flatbuffers/flexbuffers.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+
+namespace tflite {
+// Kernels use flexbuffers::Map to pack their init parameters in a tflite file,
+// with the parameter names as map keys and the parameter values as the
+// corresponding map values.
+// Accessing the map values using the flexbuffers::Map class is inline heavy,
+// which can cause the code size to bloat beyond what's reasonable for a micro
+// application. Use this class instead, when possible.
+// FlexbufferWrapper takes advantage of the following properties of
+// flexbuffers::Map:
+// 1. It can be viewed as a flexbuffers::Vector of the values.
+// 2. The values in the vector are ordered alphabetically by their keys.
+// 3. All integer and Boolean values are stored as 64-bit numbers.
+// 4. All floating point values are stored as double precision numbers.
+// The properties are mentioned in the flexbuffers docs, but we rely on
+// a unit test to catch design changes.
+class FlexbufferWrapper : public flexbuffers::Vector {
+ public:
+  // Construct with a serialized flexbuffer 'buffer' of 'size' bytes
+  explicit FlexbufferWrapper(const uint8_t* buffer, size_t size);
+  int64_t ElementAsInt64(size_t i) const;
+  uint64_t ElementAsUInt64(size_t i) const;
+  int32_t ElementAsInt32(size_t i) const;
+  bool ElementAsBool(size_t i) const;
+  double ElementAsDouble(size_t i) const;
+  float ElementAsFloat(size_t i) const;
+};
+
+// Return the number of operators in a subgraph tflite
+uint32_t NumSubgraphOperators(const SubGraph* subgraph);
+uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx);
+
+}  // namespace tflite
+
+#endif  // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_
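A hedged usage sketch for `FlexbufferWrapper` in a custom op's `Init`: because property 2 above orders values alphabetically by key, a map with the hypothetical keys `"alpha"` and `"use_bias"` exposes `alpha` at index 0 and `use_bias` at index 1:

```cpp
#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/micro/flatbuffer_utils.h"

// 'buffer'/'length' are the raw flexbuffer blob TFLite hands to Init();
// the keys here are invented for illustration.
void ParseHypotheticalParams(const uint8_t* buffer, size_t length,
                             float* alpha, bool* use_bias) {
  const tflite::FlexbufferWrapper wrapper(buffer, length);
  *alpha = wrapper.ElementAsFloat(0);    // key "alpha" (alphabetically first)
  *use_bias = wrapper.ElementAsBool(1);  // key "use_bias"
}
```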
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
+#include "tensorflow/lite/micro/kernels/activations.h"
+
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/common.h"
@@ -25,141 +27,21 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_utils.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace activations {
 namespace {
 
-struct ReluOpData {
-  ReluParams params;
-};
-
-struct Relu6OpData {
-  int8_t six_int8;
-  int8_t zero_int8;
-  uint8_t six_uint8;
-  uint8_t zero_uint8;
-};
-
-}  // namespace
-
-constexpr int kInputTensor = 0;
-constexpr int kOutputTensor = 0;
-
-template <typename T>
-inline void ReluQuantized(const ReluOpData& data,
-                          const RuntimeShape& input_shape,
-                          const RuntimeShape& output_shape, const T* input_data,
-                          T* output_data) {
-  const int flat_size = MatchingFlatSize(input_shape, output_shape);
-  for (int i = 0; i < flat_size; ++i) {
-    const int32_t val = static_cast<int32_t>(input_data[i]);
-    int32_t clamped =
-        data.params.output_offset +
-        MultiplyByQuantizedMultiplier(val - data.params.input_offset,
-                                      data.params.output_multiplier,
-                                      data.params.output_shift);
-    clamped = std::max(data.params.quantized_activation_min, clamped);
-    clamped = std::min(data.params.quantized_activation_max, clamped);
-    output_data[i] = static_cast<T>(clamped);
-  }
-}
-
-template <typename T>
-inline void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
-                                ReluOpData* data) {
-  float act_min = 0.0;
-  float act_max = std::numeric_limits<float>::infinity();
-  double real_multiplier =
-      static_cast<double>(input->params.scale / output->params.scale);
-
-  const RuntimeShape input_shape = GetTensorShape(input);
-  const RuntimeShape output_shape = GetTensorShape(output);
-
-  QuantizeMultiplier(real_multiplier, &data->params.output_multiplier,
-                     &data->params.output_shift);
-
-  data->params.quantized_activation_min = std::max(
-      static_cast<int32_t>(std::numeric_limits<T>::min()),
-      output->params.zero_point +
-          static_cast<int32_t>(roundf(act_min / output->params.scale)));
-  data->params.quantized_activation_max =
-      act_max == std::numeric_limits<float>::infinity()
-          ? static_cast<int32_t>(std::numeric_limits<T>::max())
-          : std::min(static_cast<int32_t>(std::numeric_limits<T>::max()),
-                     output->params.zero_point +
-                         static_cast<int32_t>(
-                             roundf(act_max / output->params.scale)));
-  data->params.input_offset = input->params.zero_point;
-  data->params.output_offset = output->params.zero_point;
-}
-
-inline void ReluFloat(const RuntimeShape& input_shape, const float* input_data,
-                      const RuntimeShape& output_shape, float* output_data) {
-  const int flat_size = MatchingFlatSize(input_shape, output_shape);
-  for (int i = 0; i < flat_size; ++i) {
-    const float val = input_data[i];
-    const float lower = 0.0f;
-    const float clamped = val < lower ? lower : val;
-    output_data[i] = clamped;
-  }
-}
-
-inline void Relu6Float(const RuntimeShape& input_shape, const float* input_data,
-                       const RuntimeShape& output_shape, float* output_data) {
-  const int flat_size = MatchingFlatSize(input_shape, output_shape);
-  for (int i = 0; i < flat_size; ++i) {
-    const float val = input_data[i];
-    const float upper = 6.0f;
-    const float lower = 0.0f;
-    const float clamped = val > upper ? upper : val < lower ? lower : val;
-    output_data[i] = clamped;
-  }
-}
-
-template <typename Q>
-inline void Relu6Quantized(Q lower, Q upper, const RuntimeShape& input_shape,
-                           const Q* input_data,
-                           const RuntimeShape& output_shape, Q* output_data) {
-  const int flat_size = MatchingFlatSize(input_shape, output_shape);
-  for (int i = 0; i < flat_size; ++i) {
-    const Q val = input_data[i];
-    const Q clamped = val > upper ? upper : val < lower ? lower : val;
-    output_data[i] = clamped;
-  }
-}
-
 void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(ReluOpData));
 }
 
-TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  ReluOpData* data = static_cast<ReluOpData*>(node->user_data);
-
-  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-  TF_LITE_ENSURE(context, output != nullptr);
-
-  if (input->type == kTfLiteInt8) {
-    CalculateReluOpData<int8_t>(input, output, data);
-  } else if (input->type == kTfLiteUInt8) {
-    CalculateReluOpData<uint8_t>(input, output, data);
-  }
-
-  return kTfLiteOk;
-}
-
 TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const ReluOpData& data = *(static_cast<const ReluOpData*>(node->user_data));
 
   const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
+      tflite::micro::GetEvalInput(context, node, kActivationsInputTensor);
   TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+      tflite::micro::GetEvalOutput(context, node, kActivationsOutputTensor);
 
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -171,19 +53,12 @@ TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     }
     case kTfLiteInt8: {
-      ReluQuantized<int8_t>(data, tflite::micro::GetTensorShape(input),
+      tflite::ReluQuantized(data, tflite::micro::GetTensorShape(input),
                             tflite::micro::GetTensorShape(output),
                             tflite::micro::GetTensorData<int8_t>(input),
                             tflite::micro::GetTensorData<int8_t>(output));
       return kTfLiteOk;
     }
-    case kTfLiteUInt8: {
-      ReluQuantized<uint8_t>(data, tflite::micro::GetTensorShape(input),
-                             tflite::micro::GetTensorShape(output),
-                             tflite::micro::GetTensorData<uint8_t>(input),
-                             tflite::micro::GetTensorData<uint8_t>(output));
-      return kTfLiteOk;
-    }
     default: {
       TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s",
                          TfLiteTypeGetName(input->type));
@@ -197,34 +72,14 @@ void* Relu6Init(TfLiteContext* context, const char* buffer, size_t length) {
   return context->AllocatePersistentBuffer(context, sizeof(Relu6OpData));
 }
 
-TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TFLITE_DCHECK(node->user_data != nullptr);
-  Relu6OpData* data = static_cast<Relu6OpData*>(node->user_data);
-
-  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TF_LITE_ENSURE(context, input != nullptr);
-
-  if (input->type == kTfLiteInt8) {
-    data->six_int8 = FloatToQuantizedType<int8_t>(6.0f, input->params.scale,
-                                                  input->params.zero_point);
-    data->zero_int8 = input->params.zero_point;
-  } else if (input->type == kTfLiteUInt8) {
-    data->six_uint8 = FloatToQuantizedType<uint8_t>(6.0f, input->params.scale,
-                                                    input->params.zero_point);
-    data->zero_uint8 = input->params.zero_point;
-  }
-
-  return kTfLiteOk;
-}
-
 TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   const Relu6OpData& data = *(static_cast<const Relu6OpData*>(node->user_data));
 
   const TfLiteEvalTensor* input =
-      tflite::micro::GetEvalInput(context, node, kInputTensor);
+      tflite::micro::GetEvalInput(context, node, kActivationsInputTensor);
   TfLiteEvalTensor* output =
-      tflite::micro::GetEvalOutput(context, node, kOutputTensor);
+      tflite::micro::GetEvalOutput(context, node, kActivationsOutputTensor);
 
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -236,19 +91,11 @@ TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     }
     case kTfLiteInt8: {
-      Relu6Quantized<int8_t>(data.zero_int8, data.six_int8,
-                             tflite::micro::GetTensorShape(input),
-                             tflite::micro::GetTensorData<int8_t>(input),
-                             tflite::micro::GetTensorShape(output),
-                             tflite::micro::GetTensorData<int8_t>(output));
-      return kTfLiteOk;
-    }
-    case kTfLiteUInt8: {
-      Relu6Quantized<uint8_t>(data.zero_uint8, data.six_uint8,
-                              tflite::micro::GetTensorShape(input),
-                              tflite::micro::GetTensorData<uint8_t>(input),
-                              tflite::micro::GetTensorShape(output),
-                              tflite::micro::GetTensorData<uint8_t>(output));
+      Relu6Quantized(data.zero_int8, data.six_int8,
+                     tflite::micro::GetTensorShape(input),
+                     tflite::micro::GetTensorData<int8_t>(input),
+                     tflite::micro::GetTensorShape(output),
+                     tflite::micro::GetTensorData<int8_t>(output));
       return kTfLiteOk;
     }
     default: {
@@ -259,13 +106,13 @@ TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
   }
 }
 
-}  // namespace activations
+}  // namespace
 
 TfLiteRegistration Register_RELU() {
-  return {/*init=*/activations::ReluInit,
+  return {/*init=*/ReluInit,
           /*free=*/nullptr,
-          /*prepare=*/activations::ReluPrepare,
-          /*invoke=*/activations::ReluEval,
+          /*prepare=*/ReluPrepare,
+          /*invoke=*/ReluEval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
@@ -273,16 +120,14 @@ TfLiteRegistration Register_RELU() {
 }
 
 TfLiteRegistration Register_RELU6() {
-  return {/*init=*/activations::Relu6Init,
+  return {/*init=*/Relu6Init,
           /*free=*/nullptr,
-          /*prepare=*/activations::Relu6Prepare,
-          /*invoke=*/activations::Relu6Eval,
+          /*prepare=*/Relu6Prepare,
+          /*invoke=*/Relu6Eval,
           /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
 }
 
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
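The int8 `ReluQuantized` path rescales the input to the output quantization and clamps at the quantized value of 0.0f. In the common case where input and output share scale and zero point, the whole op reduces to clamping at the zero point; a worked restatement:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Same-scale special case of the int8 ReLU above: the multiplier is 1,
// so only the clamp at the zero point (the code for 0.0f) remains.
int8_t ReluInt8SameScale(int8_t q, int8_t zero_point) {
  return std::max(q, zero_point);
}

int main() {
  // scale 0.5, zero point -10: real 2.0 quantizes to 2.0/0.5 - 10 = -6.
  printf("%d\n", ReluInt8SameScale(-6, -10));   // -6:  2.0 passes through
  printf("%d\n", ReluInt8SameScale(-14, -10));  // -10: -2.0 clamps to 0.0
}
```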
@@ -0,0 +1,63 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_
+
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+
+namespace tflite {
+
+extern const int kActivationsInputTensor;
+extern const int kActivationsOutputTensor;
+
+struct ReluOpData {
+  ReluParams params;
+};
+
+struct Relu6OpData {
+  int8_t six_int8;
+  int8_t zero_int8;
+};
+
+void ReluQuantized(const ReluOpData& data, const RuntimeShape& input_shape,
+                   const RuntimeShape& output_shape, const int8_t* input_data,
+                   int8_t* output_data);
+
+template <typename T>
+void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
+                         ReluOpData* data);
+
+void ReluFloat(const RuntimeShape& input_shape, const float* input_data,
+               const RuntimeShape& output_shape, float* output_data);
+
+void Relu6Float(const RuntimeShape& input_shape, const float* input_data,
+                const RuntimeShape& output_shape, float* output_data);
+
+void Relu6Quantized(int8_t lower, int8_t upper, const RuntimeShape& input_shape,
+                    const int8_t* input_data, const RuntimeShape& output_shape,
+                    int8_t* output_data);
+
+TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node);
+
+TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node);
+
+}  // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_
@@ -0,0 +1,148 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <algorithm>
+#include <cstdint>
+
+#include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/common.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/internal/types.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+#include "tensorflow/lite/kernels/op_macros.h"
+#include "tensorflow/lite/micro/kernels/activations.h"
+#include "tensorflow/lite/micro/kernels/kernel_util.h"
+#include "tensorflow/lite/micro/micro_utils.h"
+
+namespace tflite {
+
+const int kActivationsInputTensor = 0;
+const int kActivationsOutputTensor = 0;
+
+void ReluQuantized(const ReluOpData& data, const RuntimeShape& input_shape,
+                   const RuntimeShape& output_shape, const int8_t* input_data,
+                   int8_t* output_data) {
+  const int flat_size = MatchingFlatSize(input_shape, output_shape);
+  for (int i = 0; i < flat_size; ++i) {
+    const int32_t val = static_cast<int32_t>(input_data[i]);
+    int32_t clamped =
+        data.params.output_offset +
+        MultiplyByQuantizedMultiplier(val - data.params.input_offset,
+                                      data.params.output_multiplier,
+                                      data.params.output_shift);
+    clamped = std::max(data.params.quantized_activation_min, clamped);
+    clamped = std::min(data.params.quantized_activation_max, clamped);
+    output_data[i] = static_cast<int8_t>(clamped);
+  }
+}
+
+template <typename T>
+void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
+                         ReluOpData* data) {
+  float act_min = 0.0;
+  float act_max = std::numeric_limits<float>::infinity();
+  double real_multiplier =
+      static_cast<double>(input->params.scale / output->params.scale);
+
+  const RuntimeShape input_shape = GetTensorShape(input);
+  const RuntimeShape output_shape = GetTensorShape(output);
+
+  QuantizeMultiplier(real_multiplier, &data->params.output_multiplier,
+                     &data->params.output_shift);
+
+  data->params.quantized_activation_min = std::max(
+      static_cast<int32_t>(std::numeric_limits<T>::min()),
+      output->params.zero_point +
+          static_cast<int32_t>(roundf(act_min / output->params.scale)));
+  data->params.quantized_activation_max =
+      act_max == std::numeric_limits<float>::infinity()
+          ? static_cast<int32_t>(std::numeric_limits<T>::max())
+          : std::min(static_cast<int32_t>(std::numeric_limits<T>::max()),
+                     output->params.zero_point +
+                         static_cast<int32_t>(
+                             roundf(act_max / output->params.scale)));
+  data->params.input_offset = input->params.zero_point;
+  data->params.output_offset = output->params.zero_point;
+}
+
+void ReluFloat(const RuntimeShape& input_shape, const float* input_data,
+               const RuntimeShape& output_shape, float* output_data) {
+  const int flat_size = MatchingFlatSize(input_shape, output_shape);
+  for (int i = 0; i < flat_size; ++i) {
+    const float val = input_data[i];
+    const float lower = 0.0f;
+    const float clamped = val < lower ? lower : val;
+    output_data[i] = clamped;
+  }
+}
+
+void Relu6Float(const RuntimeShape& input_shape, const float* input_data,
+                const RuntimeShape& output_shape, float* output_data) {
+  const int flat_size = MatchingFlatSize(input_shape, output_shape);
+  for (int i = 0; i < flat_size; ++i) {
+    const float val = input_data[i];
+    const float upper = 6.0f;
+    const float lower = 0.0f;
+    const float clamped = val > upper ? upper : val < lower ? lower : val;
+    output_data[i] = clamped;
+  }
+}
+
+void Relu6Quantized(int8_t lower, int8_t upper, const RuntimeShape& input_shape,
+                    const int8_t* input_data, const RuntimeShape& output_shape,
+                    int8_t* output_data) {
+  const int flat_size = MatchingFlatSize(input_shape, output_shape);
+  for (int i = 0; i < flat_size; ++i) {
+    const int8_t val = input_data[i];
+    const int8_t clamped = val > upper ? upper : val < lower ? lower : val;
+    output_data[i] = clamped;
+  }
+}
+
+TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  ReluOpData* data = static_cast<ReluOpData*>(node->user_data);
+
+  const TfLiteTensor* input = GetInput(context, node, kActivationsInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TfLiteTensor* output = GetOutput(context, node, kActivationsOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  if (input->type == kTfLiteInt8) {
+    CalculateReluOpData<int8_t>(input, output, data);
+  }
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  Relu6OpData* data = static_cast<Relu6OpData*>(node->user_data);
+
+  const TfLiteTensor* input = GetInput(context, node, kActivationsInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+
+  if (input->type == kTfLiteInt8) {
+    data->six_int8 = FloatToQuantizedType<int8_t>(6.0f, input->params.scale,
+                                                  input->params.zero_point);
+    data->zero_int8 = input->params.zero_point;
+  }
+
+  return kTfLiteOk;
+}
+
+}  // namespace tflite
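`Relu6Prepare` precomputes the int8 codes for 0.0f and 6.0f once, so the per-element loop in `Relu6Quantized` is just two comparisons. The quantization follows the usual code = round(value / scale) + zero_point rule; a worked example:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Stand-in for FloatToQuantizedType<int8_t>: map a real value to its
// int8 code under the input tensor's quantization parameters.
int8_t QuantizeToInt8(float v, float scale, int zero_point) {
  return static_cast<int8_t>(std::lround(v / scale) + zero_point);
}

int main() {
  const float scale = 0.05f;
  const int zero_point = -128;
  printf("zero: %d\n", QuantizeToInt8(0.0f, scale, zero_point));  // -128
  printf("six:  %d\n", QuantizeToInt8(6.0f, scale, zero_point));  // -8
}
```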
@@ -66,12 +66,12 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,
                              OpData* data) {
   data->requires_broadcast = !HaveSameShapes(input1, input2);
 
-  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
+  if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
     // 8bit -> 8bit general quantized path, with general rescalings
     data->input1_offset = -input1->params.zero_point;
     data->input2_offset = -input2->params.zero_point;
     data->output_offset = output->params.zero_point;
-    data->left_shift = 20;
+    data->left_shift = (output->type == kTfLiteInt16) ? 15 : 20;
     const double twice_max_input_scale =
         2 * static_cast<double>(
                 std::max(input1->params.scale, input2->params.scale));
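The `left_shift` pick is about int32 headroom: quantized ADD shifts both inputs left before the fixed-point rescaling, and the shifted value must still fit in 32 bits. int8 operands (after offsetting) span less than 2^9, so a shift of 20 is safe; int16 operands span about 2^17, so the shift drops to 15. A back-of-the-envelope check:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Largest |value - offset| for each input type, shifted as the kernel does.
  const int64_t max_int8_term = 127 + 128;       // < 2^9
  const int64_t max_int16_term = 32767 + 32768;  // < 2^17
  printf("%lld\n", static_cast<long long>(max_int8_term << 20));   // 267386880  < 2^31
  printf("%lld\n", static_cast<long long>(max_int16_term << 15));  // 2147450880 < 2^31
}
```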
@@ -133,24 +133,25 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
                               const TfLiteEvalTensor* input1,
                               const TfLiteEvalTensor* input2,
                               TfLiteEvalTensor* output) {
-  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
-    tflite::ArithmeticParams op_params;
-    op_params.left_shift = data->left_shift;
-    op_params.input1_offset = data->input1_offset;
-    op_params.input1_multiplier = data->input1_multiplier;
-    op_params.input1_shift = data->input1_shift;
-    op_params.input2_offset = data->input2_offset;
-    op_params.input2_multiplier = data->input2_multiplier;
-    op_params.input2_shift = data->input2_shift;
-    op_params.output_offset = data->output_offset;
-    op_params.output_multiplier = data->output_multiplier;
-    op_params.output_shift = data->output_shift;
-    SetActivationParams(data->output_activation_min,
-                        data->output_activation_max, &op_params);
-    bool need_broadcast = reference_ops::ProcessBroadcastShapes(
-        tflite::micro::GetTensorShape(input1),
-        tflite::micro::GetTensorShape(input2), &op_params);
-    if (output->type == kTfLiteInt8) {
+  tflite::ArithmeticParams op_params;
+  op_params.left_shift = data->left_shift;
+  op_params.input1_offset = data->input1_offset;
+  op_params.input1_multiplier = data->input1_multiplier;
+  op_params.input1_shift = data->input1_shift;
+  op_params.input2_offset = data->input2_offset;
+  op_params.input2_multiplier = data->input2_multiplier;
+  op_params.input2_shift = data->input2_shift;
+  op_params.output_offset = data->output_offset;
+  op_params.output_multiplier = data->output_multiplier;
+  op_params.output_shift = data->output_shift;
+  SetActivationParams(data->output_activation_min, data->output_activation_max,
+                      &op_params);
+  bool need_broadcast = reference_ops::ProcessBroadcastShapes(
+      tflite::micro::GetTensorShape(input1),
+      tflite::micro::GetTensorShape(input2), &op_params);
+
+  switch (output->type) {
+    case kTfLiteInt8: {
       if (need_broadcast) {
         reference_integer_ops::BroadcastAdd4DSlow(
             op_params, tflite::micro::GetTensorShape(input1),
@@ -168,24 +169,32 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
             tflite::micro::GetTensorShape(output),
             tflite::micro::GetTensorData<int8_t>(output));
       }
-    } else {
+      break;
+    }
+    case kTfLiteInt16: {
       if (need_broadcast) {
         reference_ops::BroadcastAdd4DSlow(
             op_params, tflite::micro::GetTensorShape(input1),
-            tflite::micro::GetTensorData<uint8_t>(input1),
+            tflite::micro::GetTensorData<int16_t>(input1),
             tflite::micro::GetTensorShape(input2),
-            tflite::micro::GetTensorData<uint8_t>(input2),
+            tflite::micro::GetTensorData<int16_t>(input2),
             tflite::micro::GetTensorShape(output),
-            tflite::micro::GetTensorData<uint8_t>(output));
+            tflite::micro::GetTensorData<int16_t>(output));
       } else {
         reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1),
-                           tflite::micro::GetTensorData<uint8_t>(input1),
+                           tflite::micro::GetTensorData<int16_t>(input1),
                            tflite::micro::GetTensorShape(input2),
-                           tflite::micro::GetTensorData<uint8_t>(input2),
+                           tflite::micro::GetTensorData<int16_t>(input2),
                            tflite::micro::GetTensorShape(output),
-                           tflite::micro::GetTensorData<uint8_t>(output));
+                           tflite::micro::GetTensorData<int16_t>(output),
+                           false);
       }
-    }
+      break;
+    }
+    default:
+      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
+                         TfLiteTypeGetName(output->type), output->type);
+      return kTfLiteError;
   }
 
   return kTfLiteOk;
@@ -231,7 +240,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   if (output->type == kTfLiteFloat32) {
     EvalAdd(context, node, params, data, input1, input2, output);
-  } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
+  } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) {
    TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data,
                                                input1, input2, output));
  } else {
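End to end, the quantized ADD recipe is: offset both inputs, shift them into a shared fixed-point domain, rescale each by its own multiplier, sum, then rescale to the output quantization and clamp. The same arithmetic in plain floating point (a reference sketch, not the kernel's fixed-point code):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

int8_t AddInt8Reference(int8_t a, int8_t b, float scale_a, int zp_a,
                        float scale_b, int zp_b, float scale_out, int zp_out) {
  // Dequantize, add, requantize, clamp to the int8 range.
  const float sum = (a - zp_a) * scale_a + (b - zp_b) * scale_b;
  long q = std::lroundf(sum / scale_out) + zp_out;
  q = q < -128 ? -128 : (q > 127 ? 127 : q);
  return static_cast<int8_t>(q);
}

int main() {
  // 1.0 + 2.0 at scale 0.1 and zero point 0 everywhere -> 3.0 -> code 30.
  printf("%d\n", AddInt8Reference(10, 20, 0.1f, 0, 0.1f, 0, 0.1f, 0));
}
```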
@@ -18,6 +18,7 @@ limitations under the License.
 #include <cstdint>
 
 #include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
 #include "tensorflow/lite/kernels/kernel_util.h"
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
@@ -28,6 +29,22 @@ namespace {
 constexpr int kInputTensor0 = 0;
 constexpr int kOutputTensor = 0;
 
+constexpr int kAddNIntegerShift = 20;
+
+// only used with INT8 tensors
+struct OpData {
+  int32_t output_activation_min;
+  int32_t output_activation_max;
+  int32_t input_offset;
+  int32_t output_offset;
+  int32_t input_multiplier;
+  int32_t output_multiplier;
+  int input_shift;
+  int output_shift;
+  int left_shift;
+  int scratch_index;
+};
+
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
   int num_inputs = NumInputs(node);
   TF_LITE_ENSURE(context, num_inputs >= 2);
@@ -47,19 +64,61 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
     TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input));
     TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type);
+
+    // Check that all INT8 input tensors have the same zero-point and scale.
+    if (input_tensor_first->type == kTfLiteInt8) {
+      TF_LITE_ENSURE(context, input_tensor_first->params.zero_point ==
+                                  input->params.zero_point);
+      TF_LITE_ENSURE(context,
+                     input_tensor_first->params.scale == input->params.scale);
+    }
   }
 
-  // Allocate scratch buffer space for pointer to each tensor's data
-  // and store the scratch buffer index in the node's user_data
   if (output->type == kTfLiteFloat32) {
+    // Allocate scratch buffer space for pointer to each tensor's data
+    // and store the scratch buffer index in the node's user_data
     int scratch_index;
     size_t scratch_size = sizeof(float*) * num_inputs;
     TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena(
                                    context, scratch_size, &scratch_index));
     node->user_data =
         reinterpret_cast<decltype(node->user_data)>(scratch_index);
+  } else if (output->type == kTfLiteInt8) {
+    node->user_data =
+        context->AllocatePersistentBuffer(context, sizeof(OpData));
+    OpData* data = static_cast<OpData*>(node->user_data);
+
+    // Allocate scratch buffer space for pointer to each tensor's data
+    // and store the scratch buffer index in OpData
+    size_t scratch_size = sizeof(int8_t*) * num_inputs;
+    TF_LITE_ENSURE_OK(
+        context, context->RequestScratchBufferInArena(context, scratch_size,
+                                                      &data->scratch_index));
+
+    // 8bit -> 8bit general quantized path, with general rescalings
+    data->input_offset = -input_tensor_first->params.zero_point;
+    data->output_offset = output->params.zero_point;
+    data->left_shift = kAddNIntegerShift;
+    const double twice_max_input_scale =
+        2 * static_cast<double>(input_tensor_first->params.scale);
+    const double real_input_multiplier =
+        static_cast<double>(input_tensor_first->params.scale) /
+        twice_max_input_scale;
+    const double real_output_multiplier =
+        twice_max_input_scale /
+        ((1 << data->left_shift) * static_cast<double>(output->params.scale));
+
+    QuantizeMultiplierSmallerThanOneExp(
+        real_input_multiplier, &data->input_multiplier, &data->input_shift);
+
+    QuantizeMultiplierSmallerThanOneExp(
+        real_output_multiplier, &data->output_multiplier, &data->output_shift);
+
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, kTfLiteActNone, output, &data->output_activation_min,
+        &data->output_activation_max));
   } else {
-    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.",
+    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32 and INT8, got %s.",
                        TfLiteTypeGetName(output->type));
     return kTfLiteError;
   }
@@ -72,12 +131,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 template <typename T>
-void EvalAddN(TfLiteContext* context, TfLiteNode* node,
-              TfLiteEvalTensor* output) {
+inline const T** CopyInputsToScratchBuffer(TfLiteContext* context,
+                                           TfLiteNode* node,
+                                           const int scratch_index) {
   int num_inputs = NumInputs(node);
-
-  int scratch_index =
-      static_cast<int>(reinterpret_cast<intptr_t>(node->user_data));
   void* scratch_buffer = context->GetScratchBuffer(context, scratch_index);
   const T** all_inputs = static_cast<decltype(all_inputs)>(scratch_buffer);
   for (int i = 0; i < num_inputs; i++) {
@@ -86,17 +143,56 @@ void EvalAddN(TfLiteContext* context, TfLiteNode* node,
     all_inputs[i] = tflite::micro::GetTensorData<T>(next_input);
   }
 
+  return all_inputs;
+}
+
+template <typename T>
+void EvalAddN(TfLiteContext* context, TfLiteNode* node,
+              TfLiteEvalTensor* output) {
+  int num_inputs = NumInputs(node);
+
+  int scratch_index =
+      static_cast<int>(reinterpret_cast<intptr_t>(node->user_data));
+  const T** all_inputs =
+      CopyInputsToScratchBuffer<T>(context, node, scratch_index);
+
   reference_ops::AddN<T>(tflite::micro::GetTensorShape(output), num_inputs,
                          all_inputs, tflite::micro::GetTensorData<T>(output));
 }
 
+template <typename T>
+void EvalAddNQuantized(TfLiteContext* context, TfLiteNode* node,
+                       TfLiteEvalTensor* output) {
+  int num_inputs = NumInputs(node);
+
+  OpData* data = static_cast<OpData*>(node->user_data);
+  const T** all_inputs =
+      CopyInputsToScratchBuffer<T>(context, node, data->scratch_index);
+
+  ArithmeticParams params;
+  params.left_shift = data->left_shift;
+  params.input1_offset = data->input_offset;
+  params.input1_multiplier = data->input_multiplier;
+  params.input1_shift = data->input_shift;
+  params.output_offset = data->output_offset;
+  params.output_multiplier = data->output_multiplier;
+  params.output_shift = data->output_shift;
+  SetActivationParams(data->output_activation_min, data->output_activation_max,
+                      &params);
+
+  reference_ops::AddN(params, tflite::micro::GetTensorShape(output), num_inputs,
+                      all_inputs, tflite::micro::GetTensorData<T>(output));
+}
+
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   TfLiteEvalTensor* output =
       tflite::micro::GetEvalOutput(context, node, kOutputTensor);
   if (output->type == kTfLiteFloat32) {
     EvalAddN<float>(context, node, output);
+  } else if (output->type == kTfLiteInt8) {
+    EvalAddNQuantized<int8_t>(context, node, output);
   } else {
-    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.",
+    TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32 and INT8, got %s.",
                        TfLiteTypeGetName(output->type));
     return kTfLiteError;
   }
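Because `CalculateOpData` above enforces one shared scale and zero point across all ADD_N inputs, a single (multiplier, shift) pair covers every operand. In float terms the op is simply a sum of identically quantized values; a reference sketch:

```cpp
#include <cstdint>
#include <cstdio>

// Dequantized view of int8 ADD_N with the shared quantization the
// Prepare step guarantees.
float AddNDequantized(const int8_t* inputs, int n, float scale,
                      int zero_point) {
  float sum = 0.0f;
  for (int i = 0; i < n; ++i) sum += (inputs[i] - zero_point) * scale;
  return sum;
}

int main() {
  const int8_t q[3] = {10, 20, 30};  // 1.0, 2.0, 3.0 at scale 0.1, zp 0
  printf("%.1f\n", AddNDequantized(q, 3, 0.1f, 0));  // 6.0
}
```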
Some files were not shown because too many files have changed in this diff.